@@ -72,6 +72,10 @@ class SingleOpModelWithNNAPI : public SingleOpModel {
     interpreter_->SetBufferHandle(index, handle, stateful_delegate_.get());
   }
 
75+ void MarkInputTensorDataStale (int index) {
76+ interpreter_->tensor (index)->data_is_stale = true ;
77+ }
+
   TfLiteStatus AllocateTensors() { return interpreter_->AllocateTensors(); }
 
  protected:
@@ -391,12 +395,10 @@ TEST(NNAPIDelegate, StatefulDelegateWithBufferHandles) {
       !NnApiImplementation()->ANeuralNetworksMemory_createFromFd) {
     GTEST_SKIP();
   }
-  // TODO(b/176241505): Fix incorrect outputs on API 29.
-  if (NnApiImplementation()->android_sdk_version == 29) {
-    GTEST_SKIP();
-  }
 
   StatefulNnApiDelegate::Options options;
+  // Allow NNAPI CPU fallback path.
+  options.disallow_nnapi_cpu = false;
   FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
                     {TensorType_FLOAT32, {1, 2, 2, 1}},
                     {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
@@ -443,6 +445,7 @@ TEST(NNAPIDelegate, StatefulDelegateWithBufferHandles) {
   auto input1_handle = delegate->RegisterNnapiMemory(
       input1_memory, memory_callback, &memory_context);
   m.SetBufferHandle(m.input1(), input1_handle);
+  m.MarkInputTensorDataStale(m.input1());
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   m.Invoke();
   EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
@@ -454,6 +457,7 @@ TEST(NNAPIDelegate, StatefulDelegateWithBufferHandles) {
     auto input1_handle = delegate->RegisterNnapiMemory(
         input1_memory, memory_callback, &memory_context);
     m.SetBufferHandle(m.input1(), input1_handle);
+    m.MarkInputTensorDataStale(m.input1());
     m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
     m.Invoke();
     EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
0 commit comments