Skip to content

Commit 952a3c2

Browse files
jdduke and tensorflower-gardener
authored and committed
[tf.lite] Remove Interpreter::UseNNAPI
This method was deprecated and is no longer supported. Clients should use the more stable method of applying the delegate manually: `Interpreter::ModifyGraphWithDelegate(NnApiDelegate())` PiperOrigin-RevId: 339319268 Change-Id: I687baa9e2b380eac58c1e882cd1692a978375ec2
1 parent dde0fe7 commit 952a3c2

File tree

9 files changed

+12
-70
lines changed

9 files changed

+12
-70
lines changed

RELEASE.md

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,13 @@
2828
* Exposing `tf.data.experimental.ExternalStatePolicy`, which can be used
2929
to control how external state should be handled during dataset
3030
serialization or iterator checkpointing.
31-
31+
32+
* `tf.lite`:
33+
* NNAPI
34+
* Removed deprecated `Interpreter::UseNNAPI(bool)` C++ API.
35+
* Use `NnApiDelegate()` and related delegate configuration methods
36+
directly.
37+
3238
## Thanks to our Contributors
3339

3440
This release contains contributions from many people at Google, as well as:

tensorflow/lite/BUILD

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -261,10 +261,8 @@ cc_library(
261261
"//tensorflow/lite/core/api",
262262
"//tensorflow/lite/core/api:verifier",
263263
"//tensorflow/lite/delegates:status",
264-
"//tensorflow/lite/delegates/nnapi:nnapi_delegate",
265264
"//tensorflow/lite/experimental/resource",
266265
"//tensorflow/lite/kernels/internal:compatibility",
267-
"//tensorflow/lite/nnapi:nnapi_implementation",
268266
"//tensorflow/lite/profiling:platform_profiler",
269267
"//tensorflow/lite/schema:schema_fbs",
270268
"//tensorflow/lite/schema:schema_utils",
@@ -296,9 +294,7 @@ cc_library(
296294
"//tensorflow/lite/c:common",
297295
"//tensorflow/lite/core/api",
298296
"//tensorflow/lite/core/api:verifier",
299-
"//tensorflow/lite/delegates/nnapi:nnapi_delegate",
300297
"//tensorflow/lite/experimental/resource",
301-
"//tensorflow/lite/nnapi:nnapi_implementation",
302298
"//tensorflow/lite/schema:schema_fbs",
303299
],
304300
)

tensorflow/lite/core/subgraph.cc

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@ limitations under the License.
2323
#include "tensorflow/lite/c/common.h"
2424
#include "tensorflow/lite/context_util.h"
2525
#include "tensorflow/lite/core/api/tensor_utils.h"
26-
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
2726
#include "tensorflow/lite/graph_info.h"
2827
#include "tensorflow/lite/minimal_logging.h"
2928
#include "tensorflow/lite/schema/schema_generated.h"
@@ -991,13 +990,6 @@ TfLiteStatus Subgraph::Invoke() {
991990
return kTfLiteError;
992991
}
993992

994-
// This is only needed for UseNNAPI(true);
995-
if (should_apply_nnapi_delegate_ && !applied_nnapi_delegate_) {
996-
TF_LITE_ENSURE_OK(&context_, ModifyGraphWithDelegate(NnApiDelegate()));
997-
// only need to modify the graph once upon the first invocation.
998-
applied_nnapi_delegate_ = true;
999-
}
1000-
1001993
// Invocations are always done in node order.
1002994
// Note that calling Invoke repeatedly will cause the original memory plan to
1003995
// be reused, unless either ResizeInputTensor() or AllocateTensors() has been
@@ -1335,16 +1327,6 @@ TfLiteStatus Subgraph::ResizeTensorImpl(TfLiteTensor* tensor,
13351327
return kTfLiteOk;
13361328
}
13371329

1338-
void Subgraph::UseNNAPI(bool enable) {
1339-
// Note that there is no way to disable the delegate once it modified the
1340-
// graph.
1341-
if (applied_nnapi_delegate_ && !enable) {
1342-
ReportError("Attempting to disable NNAPI delegate after it's applied.");
1343-
} else {
1344-
should_apply_nnapi_delegate_ = enable;
1345-
}
1346-
}
1347-
13481330
void Subgraph::SwitchToDelegateContext() {
13491331
context_.GetNodeAndRegistration = GetNodeAndRegistration;
13501332
context_.ReplaceNodeSubsetsWithDelegateKernels =

tensorflow/lite/core/subgraph.h

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -25,16 +25,12 @@ limitations under the License.
2525
#include "tensorflow/lite/c/common.h"
2626
#include "tensorflow/lite/core/api/profiler.h"
2727
#include "tensorflow/lite/core/macros.h"
28-
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
2928
#include "tensorflow/lite/experimental/resource/resource_base.h"
3029
#include "tensorflow/lite/memory_planner.h"
3130
#include "tensorflow/lite/util.h"
3231

3332
namespace tflite {
3433

35-
// Forward declare since NNAPIDelegate uses Interpreter.
36-
class NNAPIDelegate;
37-
3834
class Subgraph {
3935
public:
4036
friend class Interpreter;
@@ -247,8 +243,6 @@ class Subgraph {
247243
// Entry point for C node plugin API to report an error.
248244
void ReportError(const char* format, ...);
249245

250-
void UseNNAPI(bool enable);
251-
252246
// Return the subgraph specific context.
253247
TfLiteContext* context() { return &context_; }
254248

@@ -704,10 +698,6 @@ class Subgraph {
704698
// Used by PreviewDelegateParitioning.
705699
std::vector<TfLiteDelegateParams> partitioning_preview_cache_;
706700

707-
// Whether to use delegate to modify the graph.
708-
bool should_apply_nnapi_delegate_ = false;
709-
bool applied_nnapi_delegate_ = false;
710-
711701
std::unique_ptr<MemoryPlanner> memory_planner_;
712702

713703
// Contains <tensor idx, custom allocation> pairs for all applicable tensors.

tensorflow/lite/g3doc/guide/faq.md

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -135,11 +135,9 @@ like this:
135135
to do this. However, increasing threads results in performance variability
136136
depending on the environment.
137137
* *Use Hardware Accelerators.* TensorFlow Lite supports model acceleration for
138-
specific hardware using delegates. For example, to use Android’s Neural
139-
Networks API, call
140-
[`UseNNAPI`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/interpreter.h#L343)
141-
on the interpreter. Or take a look at our
142-
[GPU delegate tutorial](../performance/gpu.md).
138+
specific hardware using delegates. See our
139+
[Delegates](../performance/delegates.md) guide for information on what
140+
accelerators are supported and how to use them with your model on-device.
143141
* *(Advanced) Profile Model.* The Tensorflow Lite
144142
[benchmarking tool](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/tools/benchmark)
145143
has a built-in profiler that can show per-operator statistics. If you know

tensorflow/lite/g3doc/performance/best_practices.md

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -107,9 +107,8 @@ interpreter execution. TensorFlow Lite can use delegates by:
107107
* Using Android's
108108
[Neural Networks API](https://developer.android.com/ndk/guides/neuralnetworks/).
109109
You can utilize these hardware accelerator backends to improve the speed and
110-
efficiency of your model. To enable the Neural Networks API, call
111-
[UseNNAPI](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/interpreter.h#L343)
112-
on the interpreter instance.
110+
efficiency of your model. To enable the Neural Networks API, check out
111+
the [NNAPI delegate](nnapi.md) guide.
113112
* GPU delegate is available on Android and iOS, using OpenGL/OpenCL and Metal,
114113
respectively. To try them out, see the [GPU delegate tutorial](gpu.md) and
115114
[documentation](gpu_advanced.md).

tensorflow/lite/interpreter.cc

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -109,8 +109,6 @@ Interpreter::Interpreter(ErrorReporter* error_reporter)
109109
own_external_cpu_backend_context_.reset(new ExternalCpuBackendContext());
110110
external_contexts_[kTfLiteCpuBackendContext] =
111111
own_external_cpu_backend_context_.get();
112-
113-
primary_subgraph().UseNNAPI(false);
114112
}
115113

116114
Interpreter::~Interpreter() {
@@ -344,13 +342,6 @@ TfLiteStatus Interpreter::SetExecutionPlan(const std::vector<int>& new_plan) {
344342
return primary_subgraph().SetExecutionPlan(new_plan);
345343
}
346344

347-
void Interpreter::UseNNAPI(bool enable) {
348-
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
349-
"Interpreter::UseNNAPI() is deprecated. Use "
350-
"tflite::NnApiDelegate() directly instead.");
351-
primary_subgraph().UseNNAPI(enable);
352-
}
353-
354345
TfLiteStatus Interpreter::SetNumThreads(int num_threads) {
355346
if (num_threads < -1) {
356347
context_->ReportError(context_,

tensorflow/lite/interpreter.h

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -429,15 +429,6 @@ class Interpreter {
429429
/// Returns status of success or failure.
430430
TfLiteStatus Invoke();
431431

432-
/// Enable or disable NNAPI (true to enable). Disabled by default.
433-
///
434-
/// WARNING: NNAPI cannot be disabled after the graph has been prepared
435-
/// (via `AllocateTensors`) with NNAPI enabled.
436-
///
437-
/// WARNING: This API is deprecated, prefer using the NNAPI delegate directly.
438-
/// This method will be removed in a future release.
439-
void UseNNAPI(bool enable);
440-
441432
/// Set the number of threads available to the interpreter.
442433
///
443434
/// NOTE: num_threads should be >= -1.

tensorflow/lite/interpreter_test.cc

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -985,17 +985,6 @@ TEST(BasicInterpreter, TestOverflow) {
985985
}
986986
}
987987

988-
TEST(BasicInterpreter, TestUseNNAPI) {
989-
TestErrorReporter reporter;
990-
Interpreter interpreter(&reporter);
991-
interpreter.UseNNAPI(true);
992-
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
993-
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
994-
interpreter.UseNNAPI(false);
995-
ASSERT_EQ(reporter.error_messages(),
996-
"Attempting to disable NNAPI delegate after it's applied.");
997-
}
998-
999988
TEST(BasicInterpreter, TestUnsupportedDelegateFunctions) {
1000989
Interpreter interpreter;
1001990
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);

0 commit comments

Comments
 (0)