From bec38f2829a81dc512d5062070d50516039bea0a Mon Sep 17 00:00:00 2001
From: Robert David <lrdx@google.com>
Date: Tue, 8 Sep 2020 17:37:10 -0700
Subject: [PATCH] Separate AddVariableInput and AddInput functions in
 test_util.h/.cc

Some of the existing call sites were wrong before: the bool is_variable
argument was being passed as {}.
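
For example (snippet drawn from the call sites changed below), usages move
from:

    hidden_state_ = AddInput(TensorType_FLOAT32, true);
    input_max_output_size_ = AddInput(TensorType_INT32, {});  // {} bound to the bool

to:

    hidden_state_ = AddVariableInput(TensorType_FLOAT32);
    input_max_output_size_ = AddInput(TensorType_INT32);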

PiperOrigin-RevId: 330623020
Change-Id: I9505c199297d6a1e7465c0610184652168f61430
---
 .../delegates/nnapi/nnapi_delegate_test.cc    | 11 +++----
 .../unidirectional_sequence_gru_test.cc       |  2 +-
 tensorflow/lite/kernels/basic_rnn_test.cc     |  2 +-
 .../bidirectional_sequence_lstm_test.cc       | 20 +++++-------
 .../bidirectional_sequence_rnn_test.cc        |  4 +--
 tensorflow/lite/kernels/lstm_test.cc          | 32 ++++++++-----------
 .../lite/kernels/non_max_suppression_test.cc  |  4 +--
 .../lite/kernels/optional_tensor_test.cc      |  6 ++--
 .../lite/kernels/quant_basic_lstm_test.cc     | 10 +++---
 tensorflow/lite/kernels/svdf_test.cc          | 10 +++---
 tensorflow/lite/kernels/test_util.cc          | 15 +++++++--
 tensorflow/lite/kernels/test_util.h           |  7 ++--
 .../unidirectional_sequence_lstm_test.cc      |  9 +++---
 .../unidirectional_sequence_rnn_test.cc       |  2 +-
 14 files changed, 64 insertions(+), 70 deletions(-)

diff --git a/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc b/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc
index fe022199dd6..205a44991dc 100644
--- a/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc
+++ b/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc
@@ -2682,7 +2682,7 @@ class RNNOpModel : public SingleOpModelWithNNAPI {
     weights_ = AddInput(weights);
     recurrent_weights_ = AddInput(recurrent_weights);
     bias_ = AddInput(TensorType_FLOAT32);
-    hidden_state_ = AddInput(TensorType_FLOAT32, true);
+    hidden_state_ = AddVariableInput(TensorType_FLOAT32);
     output_ = AddOutput(TensorType_FLOAT32);
     SetBuiltinOp(
         BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
@@ -2872,9 +2872,8 @@ class BaseSVDFOpModel : public SingleOpModelWithNNAPI {
     // when using NNAPI delegate.
     bias_ = AddInput(TensorType_FLOAT32);
     const int num_filters = units * rank;
-    activation_state_ = AddInput(
-        TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}},
-        /*is_variable=*/true);
+    activation_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
     output_ = AddOutput(TensorType_FLOAT32);
     SetBuiltinOp(
         BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
@@ -3098,8 +3097,8 @@ class LSTMOpModel : public SingleOpModelWithNNAPI {
     }
 
     // Adding the 2 input state tensors.
-    input_activation_state_ = AddInput(TensorType_FLOAT32, true);
-    input_cell_state_ = AddInput(TensorType_FLOAT32, true);
+    input_activation_state_ = AddVariableInput(TensorType_FLOAT32);
+    input_cell_state_ = AddVariableInput(TensorType_FLOAT32);
 
     const bool use_layer_norm = input_shapes.size() > 20;
     // Layer norm weights.
diff --git a/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru_test.cc b/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru_test.cc
index 593d714e557..0436636a0af 100644
--- a/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru_test.cc
+++ b/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru_test.cc
@@ -37,7 +37,7 @@ class GRUOpModel : public SingleOpModel {
       : n_batch_(n_batch), n_input_(n_input), n_output_(n_output) {
     input_ = AddInput(TensorType_FLOAT32);
     input_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_batch, n_output}}, true);
+        AddVariableInput(TensorData{TensorType_FLOAT32, {n_batch, n_output}});
     gate_weight_ = AddInput(TensorType_FLOAT32);
     gate_bias_ = AddInput(TensorType_FLOAT32);
     candidate_weight_ = AddInput(TensorType_FLOAT32);
diff --git a/tensorflow/lite/kernels/basic_rnn_test.cc b/tensorflow/lite/kernels/basic_rnn_test.cc
index 2146d086c9a..675c7dcbf5b 100644
--- a/tensorflow/lite/kernels/basic_rnn_test.cc
+++ b/tensorflow/lite/kernels/basic_rnn_test.cc
@@ -179,7 +179,7 @@ class RNNOpModel : public SingleOpModel {
     weights_ = AddInput(weights);
     recurrent_weights_ = AddInput(recurrent_weights);
     bias_ = AddInput(TensorType_FLOAT32);
-    hidden_state_ = AddInput(TensorType_FLOAT32, true);
+    hidden_state_ = AddVariableInput(TensorType_FLOAT32);
     output_ = AddOutput(TensorType_FLOAT32);
     SetBuiltinOp(BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
                  CreateRNNOptions(builder_, ActivationFunctionType_RELU,
diff --git a/tensorflow/lite/kernels/bidirectional_sequence_lstm_test.cc b/tensorflow/lite/kernels/bidirectional_sequence_lstm_test.cc
index 778751aa04b..6f47fd0d315 100644
--- a/tensorflow/lite/kernels/bidirectional_sequence_lstm_test.cc
+++ b/tensorflow/lite/kernels/bidirectional_sequence_lstm_test.cc
@@ -160,20 +160,16 @@ class BidirectionalLSTMOpModel : public SingleOpModel {
     }
 
     // Adding the 2 input state tensors.
-    fw_input_activation_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_fw_output_ * n_batch_}},
-                 /*is_variable=*/true);
-    fw_input_cell_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_fw_cell_ * n_batch_}},
-                 /*is_variable=*/true);
+    fw_input_activation_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {n_fw_output_ * n_batch_}});
+    fw_input_cell_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {n_fw_cell_ * n_batch_}});
 
     // Adding the 2 input state tensors.
-    bw_input_activation_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_bw_output_ * n_batch_}},
-                 /*is_variable=*/true);
-    bw_input_cell_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_bw_cell_ * n_batch_}},
-                 /*is_variable=*/true);
+    bw_input_activation_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {n_bw_output_ * n_batch_}});
+    bw_input_cell_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {n_bw_cell_ * n_batch_}});
 
     fw_output_ = AddOutput(TensorType_FLOAT32);
 
diff --git a/tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc b/tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc
index 870b99d7437..e683a2a2271 100644
--- a/tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc
+++ b/tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc
@@ -680,11 +680,11 @@ class BidirectionalRNNOpModel : public SingleOpModel {
     fw_weights_ = AddInput(tensor_type);
     fw_recurrent_weights_ = AddInput(tensor_type);
     fw_bias_ = AddInput(TensorType_FLOAT32);
-    fw_hidden_state_ = AddInput(TensorType_FLOAT32, true);
+    fw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
     bw_weights_ = AddInput(tensor_type);
     bw_recurrent_weights_ = AddInput(tensor_type);
     bw_bias_ = AddInput(TensorType_FLOAT32);
-    bw_hidden_state_ = AddInput(TensorType_FLOAT32, true);
+    bw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
 
     const auto input_shape =
         (time_major) ? std::vector<int>({sequence_len_, batches_, input_size_})
diff --git a/tensorflow/lite/kernels/lstm_test.cc b/tensorflow/lite/kernels/lstm_test.cc
index edd6002cfcb..ccdc8193f09 100644
--- a/tensorflow/lite/kernels/lstm_test.cc
+++ b/tensorflow/lite/kernels/lstm_test.cc
@@ -101,8 +101,8 @@ class LSTMOpModel : public SingleOpModel {
     }
 
     // Adding the 2 state tensors.
-    AddInput({TensorType_FLOAT32, {n_batch, n_output}}, true);
-    AddInput({TensorType_FLOAT32, {n_batch, n_cell}}, true);
+    AddVariableInput({TensorType_FLOAT32, {n_batch, n_output}});
+    AddVariableInput({TensorType_FLOAT32, {n_batch, n_cell}});
 
     // Layer norm weights.
     if (!model_has_legacy_20_inputs) {
@@ -1412,16 +1412,14 @@ class LSTMIntegerOpModel : public SingleOpModel {
     }
 
     // Adding the 2 state tensors.
-    AddInput({TensorType_INT16,
-              {n_batch, n_output},
-              ranges[18].first,
-              ranges[18].second},
-             true);
-    AddInput({TensorType_INT16,
-              {n_batch, n_cell},
-              ranges[19].first,
-              ranges[19].second},
-             true);
+    AddVariableInput({TensorType_INT16,
+                      {n_batch, n_output},
+                      ranges[18].first,
+                      ranges[18].second});
+    AddVariableInput({TensorType_INT16,
+                      {n_batch, n_cell},
+                      ranges[19].first,
+                      ranges[19].second});
 
     // Layer norm weights.
     if (use_layer_norm) {
@@ -2204,12 +2202,10 @@ class HybridSparseLSTMOpModel : public ::tflite::SingleOpModel {
     }
 
     // Adding the 2 state tensors.
-    output_state_ = AddInput(::tflite::TensorData{::tflite::TensorType_FLOAT32,
-                                                  {n_output_ * n_batch_}},
-                             true);
-    cell_state_ = AddInput(::tflite::TensorData{::tflite::TensorType_FLOAT32,
-                                                {n_cell_ * n_batch_}},
-                           true);
+    output_state_ = AddVariableInput(::tflite::TensorData{
+        ::tflite::TensorType_FLOAT32, {n_output_ * n_batch_}});
+    cell_state_ = AddVariableInput(::tflite::TensorData{
+        ::tflite::TensorType_FLOAT32, {n_cell_ * n_batch_}});
 
     if (use_cifg) {
       input_layer_norm_weights_ = AddNullInput();
diff --git a/tensorflow/lite/kernels/non_max_suppression_test.cc b/tensorflow/lite/kernels/non_max_suppression_test.cc
index 9b7baa147e5..3ca54010fcd 100644
--- a/tensorflow/lite/kernels/non_max_suppression_test.cc
+++ b/tensorflow/lite/kernels/non_max_suppression_test.cc
@@ -77,7 +77,7 @@ class NonMaxSuppressionV4OpModel : public BaseNMSOp {
       input_max_output_size_ =
           AddConstInput(TensorType_INT32, {max_output_size});
     } else {
-      input_max_output_size_ = AddInput(TensorType_INT32, {});
+      input_max_output_size_ = AddInput(TensorType_INT32);
     }
     input_iou_threshold_ = AddConstInput(TensorType_FLOAT32, {iou_threshold});
     input_score_threshold_ = AddInput({TensorType_FLOAT32, {}});
@@ -168,7 +168,7 @@ class NonMaxSuppressionV5OpModel : public BaseNMSOp {
       input_max_output_size_ =
           AddConstInput(TensorType_INT32, {max_output_size});
     } else {
-      input_max_output_size_ = AddInput(TensorType_INT32, {});
+      input_max_output_size_ = AddInput(TensorType_INT32);
     }
     input_iou_threshold_ = AddConstInput(TensorType_FLOAT32, {iou_threshold});
     input_score_threshold_ = AddInput({TensorType_FLOAT32, {}});
diff --git a/tensorflow/lite/kernels/optional_tensor_test.cc b/tensorflow/lite/kernels/optional_tensor_test.cc
index 9e83c74da8d..1468c89e375 100644
--- a/tensorflow/lite/kernels/optional_tensor_test.cc
+++ b/tensorflow/lite/kernels/optional_tensor_test.cc
@@ -94,10 +94,10 @@ class LSTMOpModel : public SingleOpModel {
     }
 
     // Adding the 2 input state tensors.
-    input_activation_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}}, true);
+    input_activation_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}});
     input_cell_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}}, true);
+        AddVariableInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}});
 
     output_ = AddOutput(TensorType_FLOAT32);
 
diff --git a/tensorflow/lite/kernels/quant_basic_lstm_test.cc b/tensorflow/lite/kernels/quant_basic_lstm_test.cc
index 3e081c221c5..787cb6d2e97 100644
--- a/tensorflow/lite/kernels/quant_basic_lstm_test.cc
+++ b/tensorflow/lite/kernels/quant_basic_lstm_test.cc
@@ -61,9 +61,8 @@ class QuantizedLSTMOpModel : public MultiOpModel {
 
     input_ = AddInput(input_tensor_data);
 
-    prev_output_ =
-        AddInput({TensorType_UINT8, output_shape, 0.0f, 0.0f, 1. / 128., 128},
-                 /*is_variable=*/true);
+    prev_output_ = AddVariableInput(
+        {TensorType_UINT8, output_shape, 0.0f, 0.0f, 1. / 128., 128});
     // Biases and Weights have to be constant in order to allow NNAPI
     // delegation
     weights_ = AddConstInput<uint8_t>({TensorType_UINT8, weight_shape, 0.0f,
@@ -72,9 +71,8 @@ class QuantizedLSTMOpModel : public MultiOpModel {
     biases_ = AddConstInput<int32_t>(
         {TensorType_INT32, bias_shape, 0.0f, 0.0f, weightsScale / 128, 0},
         biases);
-    prev_cell_state_ =
-        AddInput({TensorType_INT16, state_shape, 0.0f, 0.0f, 1. / 2048., 0},
-                 /*is_variable=*/true);
+    prev_cell_state_ = AddVariableInput(
+        {TensorType_INT16, state_shape, 0.0f, 0.0f, 1. / 2048., 0});
 
     sum_out_ = AddOutput(input_tensor_data);
 
diff --git a/tensorflow/lite/kernels/svdf_test.cc b/tensorflow/lite/kernels/svdf_test.cc
index b0ac2011948..c13810d3b75 100644
--- a/tensorflow/lite/kernels/svdf_test.cc
+++ b/tensorflow/lite/kernels/svdf_test.cc
@@ -143,9 +143,8 @@ class BaseSVDFOpModel : public SingleOpModel {
     weights_time_ = AddInput(weights_time_type);
     bias_ = AddNullInput();
     const int num_filters = units * rank;
-    activation_state_ = AddInput(
-        TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}},
-        /*is_variable=*/true);
+    activation_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
     output_ = AddOutput(TensorType_FLOAT32);
     SetBuiltinOp(BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
                  CreateSVDFOptions(builder_, rank, ActivationFunctionType_NONE,
@@ -482,9 +481,8 @@ class IntegerSVDFOpModel : public SingleOpModel {
     weights_time_ =
         AddInput({TensorType_INT16, {num_filters, memory_size}, -1, 1});
     bias_ = AddInput({TensorType_INT32, {units}, -512, 512});
-    activation_state_ = AddInput(
-        {TensorType_INT16, {batches, memory_size * num_filters}, -16, 16},
-        /*is_variable=*/true);
+    activation_state_ = AddVariableInput(
+        {TensorType_INT16, {batches, memory_size * num_filters}, -16, 16});
     output_ = AddOutput({TensorType_INT8, {batches, units}, -0.5, 0.5});
     SetBuiltinOp(
         BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
diff --git a/tensorflow/lite/kernels/test_util.cc b/tensorflow/lite/kernels/test_util.cc
index cfa55575604..d8e1e84f938 100644
--- a/tensorflow/lite/kernels/test_util.cc
+++ b/tensorflow/lite/kernels/test_util.cc
@@ -79,12 +79,23 @@ std::vector<Matcher<std::complex<float>>> ArrayComplex64Near(
   return matchers;
 }
 
-int SingleOpModel::AddInput(const TensorData& t, bool is_variable) {
+int SingleOpModel::AddInput(const TensorData& t) {
   int id = 0;
   if (t.per_channel_quantization) {
     id = AddTensorPerChannelQuant(t);
   } else {
-    id = AddTensor<float>(t, {}, is_variable);
+    id = AddTensor<float>(t, {});
+  }
+  inputs_.push_back(id);
+  return id;
+}
+
+int SingleOpModel::AddVariableInput(const TensorData& t) {
+  int id = 0;
+  if (t.per_channel_quantization) {
+    id = AddTensorPerChannelQuant(t);
+  } else {
+    id = AddTensor<float>(t, {}, true);
   }
   inputs_.push_back(id);
   return id;
diff --git a/tensorflow/lite/kernels/test_util.h b/tensorflow/lite/kernels/test_util.h
index 63193b021aa..4f4195cf4de 100644
--- a/tensorflow/lite/kernels/test_util.h
+++ b/tensorflow/lite/kernels/test_util.h
@@ -188,10 +188,8 @@ class SingleOpModel {
   SingleOpModel& operator=(const SingleOpModel&) = delete;
 
   // Add a TensorType input tensor and return its index.
-  int AddInput(TensorType type, bool is_variable = false) {
-    return AddInput(TensorData{type}, is_variable);
-  }
-  int AddInput(const TensorData& t, bool is_variable = false);
+  int AddInput(const TensorData& t);
+  int AddVariableInput(const TensorData& t);
 
   int AddIntermediate(TensorType type, const std::vector<float>& scale,
                       const std::vector<int64_t>& zero_point);
@@ -378,7 +376,6 @@ class SingleOpModel {
   int AddNullInput();
 
   // Add a TensorType output tensor and return its index.
-  int AddOutput(TensorType type) { return AddOutput(TensorData{type}); }
   int AddOutput(const TensorData& t);
 
   template <typename T>
diff --git a/tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc b/tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc
index 74584ec9e85..90a96ca98fe 100644
--- a/tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc
+++ b/tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc
@@ -101,11 +101,10 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
     }
 
     // Adding the 2 state tensors.
-    output_state_ =
-        AddInput(TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}},
-                 /*is_variable=*/true);
-    cell_state_ = AddInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}},
-                           /*is_variable=*/true);
+    output_state_ = AddVariableInput(
+        TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}});
+    cell_state_ =
+        AddVariableInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}});
 
     // Layer norm weights.
     if (is_layer_norm) {
diff --git a/tensorflow/lite/kernels/unidirectional_sequence_rnn_test.cc b/tensorflow/lite/kernels/unidirectional_sequence_rnn_test.cc
index f1486267c17..8ae562ea0b0 100644
--- a/tensorflow/lite/kernels/unidirectional_sequence_rnn_test.cc
+++ b/tensorflow/lite/kernels/unidirectional_sequence_rnn_test.cc
@@ -183,7 +183,7 @@ class UnidirectionalRNNOpModel : public SingleOpModel {
     weights_ = AddInput(weights);
     recurrent_weights_ = AddInput(recurrent_weights);
     bias_ = AddInput(TensorType_FLOAT32);
-    hidden_state_ = AddInput(TensorType_FLOAT32, true);
+    hidden_state_ = AddVariableInput(TensorType_FLOAT32);
     output_ = AddOutput(TensorType_FLOAT32);
     SetBuiltinOp(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
                  BuiltinOptions_SequenceRNNOptions,