From a88c46347c20f6e4875f4c1c75ffc5b5bf38edb8 Mon Sep 17 00:00:00 2001
From: Koan-Sin Tan <koansin.tan@gmail.com>
Date: Wed, 13 May 2020 15:35:18 +0800
Subject: [PATCH] Clean up NNAPI allow-fp16 support per review

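Address review comments on the NNAPI allow-fp16 series:

- Pass the already-resolved StatefulNnApiDelegate::Options into
  NNAPIDelegateKernel::BuildGraph() instead of a raw TfLiteDelegate*,
  so the caller fetches the options once rather than BuildGraph()
  looking them up again via StatefulNnApiDelegate::GetOptions(). The
  definition now reads:

    TfLiteStatus NNAPIDelegateKernel::BuildGraph(
        TfLiteContext* context,
        const StatefulNnApiDelegate::Options& delegate_options,
        const TfLiteIntArray* input_tensors,
        const TfLiteIntArray* output_tensors,
        int* nnapi_errno);

- Drop the duplicated per-tool `nnapi_allow_fp16` flags from the ilsvrc
  accuracy tool, the benchmark performance options, and the evaluation
  tasks; the flag remains available through the NNAPI delegate provider.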
---
 tensorflow/lite/delegates/nnapi/nnapi_delegate.cc        | 9 ++++-----
 tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h  | 4 ++--
 .../tools/accuracy/ilsvrc/imagenet_model_evaluator.cc    | 2 --
 .../tools/accuracy/ilsvrc/imagenet_model_evaluator.h     | 3 ---
 .../tools/benchmark/benchmark_performance_options.cc     | 2 --
 .../lite/tools/delegates/default_execution_provider.cc   | 4 ----
 .../lite/tools/delegates/nnapi_delegate_provider.cc      | 2 +-
 .../tools/evaluation/evaluation_delegate_provider.cc     | 4 ----
 .../evaluation/tasks/coco_object_detection/run_eval.cc   | 5 -----
 .../tasks/imagenet_image_classification/run_eval.cc      | 5 -----
 .../tools/evaluation/tasks/inference_diff/run_eval.cc    | 4 ----
 11 files changed, 7 insertions(+), 37 deletions(-)

diff --git a/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc b/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
index 867d03f5227..ff6ad0dc0d9 100644
--- a/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
+++ b/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
@@ -3151,7 +3151,7 @@ TfLiteStatus NNAPIDelegateKernel::Init(TfLiteContext* context,
                                     "creating NNAPI model", nnapi_errno);
     nn_model_.reset(model);
 
-    TF_LITE_ENSURE_STATUS(BuildGraph(context, params->delegate,
+    TF_LITE_ENSURE_STATUS(BuildGraph(context, delegate_options,
                                      params->input_tensors,
                                      params->output_tensors, nnapi_errno));
   }
@@ -3203,7 +3203,6 @@ TfLiteStatus NNAPIDelegateKernel::Prepare(TfLiteContext* context,
 
   const auto delegate_options =
       StatefulNnApiDelegate::GetOptions(node->delegate);
-
   ANeuralNetworksCompilation* compilation = nullptr;
   if (!nnapi_devices_.empty()) {
     // Compile for the selected accelerator.
@@ -3877,7 +3876,8 @@ TfLiteStatus NNAPIDelegateKernel::AddOpsAndTensors(TfLiteContext* context,
 }
 
 TfLiteStatus NNAPIDelegateKernel::BuildGraph(
-    TfLiteContext* context, TfLiteDelegate* delegate,
+    TfLiteContext* context,
+    const StatefulNnApiDelegate::Options& delegate_options,
     const TfLiteIntArray* input_tensors, const TfLiteIntArray* output_tensors,
     int* nnapi_errno) {
   // Build the ops and tensors.
@@ -3888,7 +3888,6 @@ TfLiteStatus NNAPIDelegateKernel::BuildGraph(
   std::vector<uint32_t> outputs;
   outputs.reserve(output_tensors->size);
 
-  const auto delegate_options = StatefulNnApiDelegate::GetOptions(delegate);
   size_t total_input_byte_size = 0;
   // Make the TensorFlow Lite inputs and outputs to ann_indices.
   for (int i : TfLiteIntArrayView(input_tensors)) {
@@ -4025,9 +4024,9 @@ StatefulNnApiDelegate::StatefulNnApiDelegate(const NnApi* nnapi,
   delegate_data_.disallow_nnapi_cpu = options.disallow_nnapi_cpu;
   delegate_data_.max_number_delegated_partitions =
       options.max_number_delegated_partitions;
+  delegate_data_.allow_fp16 = options.allow_fp16;
   TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
                        "Created TensorFlow Lite delegate for NNAPI.");
-  delegate_data_.allow_fp16 = options.allow_fp16;
   Prepare = DoPrepare;
   CopyFromBufferHandle = DoCopyFromBufferHandle;
   CopyToBufferHandle = DoCopyToBufferHandle;
diff --git a/tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h b/tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h
index 60151196372..5d0ea63ab4c 100644
--- a/tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h
+++ b/tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h
@@ -349,8 +349,8 @@ class NNAPIDelegateKernel {
   TfLiteStatus AddOpsAndTensors(TfLiteContext* context, int* nnapi_errno);
 
   TfLiteStatus BuildGraph(TfLiteContext* context,
-                          TfLiteDelegate* delegate,
-                          const TfLiteIntArray* input_tensors,
+                          const StatefulNnApiDelegate::Options& options,
+                          const TfLiteIntArray* input_tensors,
                           const TfLiteIntArray* output_tensors,
                           int* nnapi_errno);
 };
diff --git a/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.cc b/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.cc
index 64ce87ae8aa..f318dc68d09 100644
--- a/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.cc
+++ b/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.cc
@@ -141,8 +141,6 @@ class CompositeObserver : public ImagenetModelEvaluator::Observer {
       tflite::Flag::CreateFlag(kNumRanksFlag, &params.num_ranks,
                                "Generates the top-1 to top-k accuracy values"
                                "where k = num_ranks. Default: 10"),
-      tflite::Flag::CreateFlag("nnapi_allow_fp16", &params.nnapi_allow_fp16,
-                               "allow fp16 in nnapi"),
   };
   tflite::Flags::Parse(argc, const_cast<const char**>(argv), flag_list);
 
diff --git a/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.h b/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.h
index 3ba22cbc2af..65d4a2c49f8 100644
--- a/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.h
+++ b/tensorflow/lite/tools/accuracy/ilsvrc/imagenet_model_evaluator.h
@@ -78,9 +78,6 @@ class ImagenetModelEvaluator {
 
     // Number of interpreter threads.
     int num_interpreter_threads = 1;
-
-    // allow fp16
-    bool nnapi_allow_fp16 = false;
   };
 
   // An evaluation observer.
diff --git a/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc b/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc
index c2d9374506e..cfce23c4595 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc
+++ b/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc
@@ -303,8 +303,6 @@ void BenchmarkPerformanceOptions::CreatePerformanceOptions() {
                         BenchmarkParam::Create<bool>(false));
         params.AddParam("max_delegated_partitions",
                         BenchmarkParam::Create<int>(0));
-        params.AddParam("nnapi_allow_fp16",
-                        BenchmarkParam::Create<bool>(false));
         all_run_params_.emplace_back(std::move(params));
       }
     }
diff --git a/tensorflow/lite/tools/delegates/default_execution_provider.cc b/tensorflow/lite/tools/delegates/default_execution_provider.cc
index 67c38308206..f75fd791072 100644
--- a/tensorflow/lite/tools/delegates/default_execution_provider.cc
+++ b/tensorflow/lite/tools/delegates/default_execution_provider.cc
@@ -30,7 +30,6 @@ class DefaultExecutionProvider : public DelegateProvider {
                              ToolParam::Create<int32_t>(0));
     default_params_.AddParam("min_nodes_per_partition",
                              ToolParam::Create<int32_t>(0));
-    default_params_.AddParam("allow_fp16", ToolParam::Create<bool>(false));
   }
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
@@ -45,7 +44,6 @@ std::vector<Flag> DefaultExecutionProvider::CreateFlags(
   std::vector<Flag> flags = {
       CreateFlag<int32_t>("num_threads", params,
                           "number of threads used for inference on CPU."),
-      CreateFlag<bool>("allow_fp16", params, "allow_fp16"),
       CreateFlag<int32_t>("max_delegated_partitions", params,
                           "Max number of partitions to be delegated."),
       CreateFlag<int32_t>(
@@ -63,8 +61,6 @@ void DefaultExecutionProvider::LogParams(const ToolParams& params) const {
                    << params.Get<int32_t>("max_delegated_partitions") << "]";
   TFLITE_LOG(INFO) << "Min nodes per partition : ["
                    << params.Get<int32_t>("min_nodes_per_partition") << "]";
-  TFLITE_LOG(INFO) << "allow_fp16: ["
-                   << params.Get<bool>("allow_fp16") << "]";
 }
 
 TfLiteDelegatePtr DefaultExecutionProvider::CreateTfLiteDelegate(
diff --git a/tensorflow/lite/tools/delegates/nnapi_delegate_provider.cc b/tensorflow/lite/tools/delegates/nnapi_delegate_provider.cc
index 6492ba82849..2fbfb791e8c 100644
--- a/tensorflow/lite/tools/delegates/nnapi_delegate_provider.cc
+++ b/tensorflow/lite/tools/delegates/nnapi_delegate_provider.cc
@@ -88,7 +88,7 @@ void NnapiDelegateProvider::LogParams(const ToolParams& params) const {
                        << params.Get<bool>("disable_nnapi_cpu") << "]";
     }
     if (params.Get<bool>("nnapi_allow_fp16")) {
-      TFLITE_LOG(INFO) << "nnapi_allow_fp16: ["
+      TFLITE_LOG(INFO) << "Allow fp16 in NNAPI: ["
                        << params.Get<bool>("nnapi_allow_fp16") << "]";
     }
   }
diff --git a/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc b/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc
index ea07378a8fa..42f2666ba9b 100644
--- a/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc
+++ b/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc
@@ -132,10 +132,6 @@ tools::ToolParams DelegateProviders::GetAllParams(
     tool_params.Set<int32_t>("num_threads", params.num_threads());
   }
 
-  if (params.has_nnapi_allow_fp16()) {
-    tool_params.Set<bool>("nnapi_allow_fp16", params.nnapi_allow_fp16());
-  }
-
   const auto type = params.delegate();
   switch (type) {
     case TfliteInferenceParams::NNAPI:
diff --git a/tensorflow/lite/tools/evaluation/tasks/coco_object_detection/run_eval.cc b/tensorflow/lite/tools/evaluation/tasks/coco_object_detection/run_eval.cc
index de1ae6e2e94..765e8fc6465 100644
--- a/tensorflow/lite/tools/evaluation/tasks/coco_object_detection/run_eval.cc
+++ b/tensorflow/lite/tools/evaluation/tasks/coco_object_detection/run_eval.cc
@@ -65,7 +65,6 @@ class CocoObjectDetection : public TaskExecutor {
   bool debug_mode_;
   std::string delegate_;
   int num_interpreter_threads_;
-  bool allow_fp16_;
   DelegateProviders delegate_providers_;
 };
 
@@ -105,9 +104,6 @@ CocoObjectDetection::CocoObjectDetection(int* argc, char* argv[])
           kDelegateFlag, &delegate_,
           "Delegate to use for inference, if available. "
           "Must be one of {'nnapi', 'gpu', 'xnnpack', 'hexagon'}"),
-      tflite::Flag::CreateFlag(
-          "nnapi_allow_fp16", &allow_fp16_,
-          "nnapi allow fp16"),
   };
   tflite::Flags::Parse(argc, const_cast<const char**>(argv), flag_list);
   DelegateProviders delegate_providers;
@@ -136,7 +132,6 @@ absl::optional<EvaluationStageMetrics> CocoObjectDetection::Run() {
   inference_params->set_model_file_path(model_file_path_);
   inference_params->set_num_threads(num_interpreter_threads_);
   inference_params->set_delegate(ParseStringToDelegateType(delegate_));
-  inference_params->set_nnapi_allow_fp16(allow_fp16_);
 
   // Get ground truth data.
   absl::flat_hash_map<std::string, ObjectDetectionResult> ground_truth_map;
diff --git a/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/run_eval.cc b/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/run_eval.cc
index 8a7fd864c6e..13eeb313ad4 100644
--- a/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/run_eval.cc
+++ b/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/run_eval.cc
@@ -67,7 +67,6 @@ class ImagenetClassification : public TaskExecutor {
   std::string delegate_;
   int num_images_;
   int num_interpreter_threads_;
-  bool allow_fp16_;
   DelegateProviders delegate_providers_;
 };
 
@@ -107,9 +106,6 @@ ImagenetClassification::ImagenetClassification(int* argc, char* argv[])
           kDelegateFlag, &delegate_,
           "Delegate to use for inference, if available. "
           "Must be one of {'nnapi', 'gpu', 'hexagon', 'xnnpack'}"),
-      tflite::Flag::CreateFlag(
-          "nnapi_allow_fp16", &allow_fp16_,
-          "nnapi allow fp16"),
   };
   tflite::Flags::Parse(argc, const_cast<const char**>(argv), flag_list);
   delegate_providers_.InitFromCmdlineArgs(argc, const_cast<const char**>(argv));
@@ -159,7 +155,6 @@ absl::optional<EvaluationStageMetrics> ImagenetClassification::Run() {
   inference_params->set_model_file_path(model_file_path_);
   inference_params->set_num_threads(num_interpreter_threads_);
   inference_params->set_delegate(ParseStringToDelegateType(delegate_));
-  inference_params->set_nnapi_allow_fp16(allow_fp16_);
   classification_params->mutable_topk_accuracy_eval_params()->set_k(10);
 
   ImageClassificationStage eval(eval_config);
diff --git a/tensorflow/lite/tools/evaluation/tasks/inference_diff/run_eval.cc b/tensorflow/lite/tools/evaluation/tasks/inference_diff/run_eval.cc
index c85d997974b..814ebe3b3bf 100644
--- a/tensorflow/lite/tools/evaluation/tasks/inference_diff/run_eval.cc
+++ b/tensorflow/lite/tools/evaluation/tasks/inference_diff/run_eval.cc
@@ -50,7 +50,6 @@ class InferenceDiff : public TaskExecutor {
   std::string delegate_;
   int num_runs_;
   int num_interpreter_threads_;
-  bool allow_fp16_;
   DelegateProviders delegate_providers_;
 };
 
@@ -72,8 +71,6 @@ InferenceDiff::InferenceDiff(int* argc, char* argv[])
           kDelegateFlag, &delegate_,
           "Delegate to use for test inference, if available. "
           "Must be one of {'nnapi', 'gpu', 'hexagon', 'xnnpack'}"),
-      tflite::Flag::CreateFlag("nnapi_allow_fp16", &allow_fp16_,
-                               "nnapi allow fp16")
   };
   tflite::Flags::Parse(argc, const_cast<const char**>(argv), flag_list);
   delegate_providers_.InitFromCmdlineArgs(argc, const_cast<const char**>(argv));
@@ -91,7 +88,6 @@ absl::optional<EvaluationStageMetrics> InferenceDiff::Run() {
   // generating random data.
   inference_params->set_invocations_per_run(3);
   inference_params->set_delegate(ParseStringToDelegateType(delegate_));
-  inference_params->set_nnapi_allow_fp16(allow_fp16_);
   if (!delegate_.empty() &&
       inference_params->delegate() == TfliteInferenceParams::NONE) {
     TFLITE_LOG(WARN) << "Unsupported TFLite delegate: " << delegate_;