From b5633e6b5f78a897bb04fec5f20c2fc6c51f8752 Mon Sep 17 00:00:00 2001
From: ddavis-2015 <ddavis@bdti.com>
Date: Fri, 12 Feb 2021 02:29:06 -0800
Subject: [PATCH] micro: prepare to port operator ADD_N kernel from lite with
 test

Implement skeleton (non-working) code for the operator and its test:
the lite header includes are replaced with their TFLM equivalents,
the namespaces change from ops::builtin to ops::micro, and the
original lite code that cannot carry over (output resizing in
Prepare, the SingleOpModel-based tests) is deleted or stubbed out.
Prepare currently returns kTfLiteError and Register_ADD_N() returns
nullptr, so the kernel is intentionally inert until a later step.

PR step 4 of the work to port the ADD_N operator, as tracked in
Issue #46162
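
Not part of this change, but for orientation: TFLM allocates tensors
from a static arena, so the stubbed Prepare is expected to validate
shapes rather than resize the output via context->ResizeTensor the
way the lite kernel does. A minimal sketch of that direction,
assuming the usual helpers from lite/kernels/kernel_util.h and
lite/c/common.h (this is not the final implementation):

    TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
      int num_inputs = NumInputs(node);
      TF_LITE_ENSURE(context, num_inputs >= 2);
      TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

      // With static tensor allocation, every input and the output must
      // already share the first input's type and shape.
      const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
      TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
      TF_LITE_ENSURE_TYPES_EQ(context, output->type, input1->type);
      TF_LITE_ENSURE(context,
                     TfLiteIntArrayEqual(output->dims, input1->dims));

      for (int i = kInputTensor1 + 1; i < num_inputs; ++i) {
        const TfLiteTensor* input = GetInput(context, node, i);
        TF_LITE_ENSURE_TYPES_EQ(context, input->type, input1->type);
        TF_LITE_ENSURE(context,
                       TfLiteIntArrayEqual(input->dims, input1->dims));
      }
      return kTfLiteOk;
    }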
---
 tensorflow/lite/micro/kernels/add_n.cc      | 18 ++----
 tensorflow/lite/micro/kernels/add_n_test.cc | 70 ++++++---------------
 2 files changed, 24 insertions(+), 64 deletions(-)

diff --git a/tensorflow/lite/micro/kernels/add_n.cc b/tensorflow/lite/micro/kernels/add_n.cc
index 390d285f3bb..be0fd47fdd2 100644
--- a/tensorflow/lite/micro/kernels/add_n.cc
+++ b/tensorflow/lite/micro/kernels/add_n.cc
@@ -22,8 +22,9 @@ limitations under the License.
 
 namespace tflite {
 namespace ops {
-namespace builtin {
+namespace micro {
 namespace add_n {
+namespace {
 
 constexpr int kInputTensor1 = 0;
 constexpr int kOutputTensor = 0;
@@ -49,11 +50,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type);
   }
 
-  // Use the first input node's dimension to be the dimension of the output
-  // node.
-  TfLiteIntArray* input1_dims = input1->dims;
-  TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims);
-  return context->ResizeTensor(context, output, output_dims);
+  return kTfLiteError;
 }
 
 template <typename T>
@@ -88,14 +85,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   return kTfLiteOk;
 }
 
+}  // namespace
 }  // namespace add_n
 
-TfLiteRegistration* Register_ADD_N() {
-  static TfLiteRegistration r = {/*init*/ nullptr, /*free*/ nullptr,
-                                 add_n::Prepare, add_n::Eval};
-  return &r;
-}
+TfLiteRegistration* Register_ADD_N() { return nullptr; }
 
-}  // namespace builtin
+}  // namespace micro
 }  // namespace ops
 }  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/add_n_test.cc b/tensorflow/lite/micro/kernels/add_n_test.cc
index 4b229b6563b..d09c32ceb79 100644
--- a/tensorflow/lite/micro/kernels/add_n_test.cc
+++ b/tensorflow/lite/micro/kernels/add_n_test.cc
@@ -12,59 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include <stdint.h>
 
-#include <vector>
+#include <type_traits>
 
-#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
-#include "tensorflow/lite/kernels/test_util.h"
-#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/kernels/kernel_runner.h"
+#include "tensorflow/lite/micro/test_helpers.h"
+#include "tensorflow/lite/micro/testing/micro_test.h"
 
 namespace tflite {
-namespace {
+namespace testing {
+namespace {}  // namespace
+}  // namespace testing
+}  // namespace tflite
 
-using ::testing::ElementsAreArray;
+TF_LITE_MICRO_TESTS_BEGIN
 
-class BaseAddNOpModel : public SingleOpModel {
- public:
-  BaseAddNOpModel(const std::vector<TensorData>& inputs,
-                  const TensorData& output) {
-    int num_inputs = inputs.size();
-    std::vector<std::vector<int>> input_shapes;
-
-    for (int i = 0; i < num_inputs; ++i) {
-      inputs_.push_back(AddInput(inputs[i]));
-      input_shapes.push_back(GetShape(inputs_[i]));
-    }
-
-    output_ = AddOutput(output);
-    SetBuiltinOp(BuiltinOperator_ADD_N, BuiltinOptions_AddNOptions,
-                 CreateAddNOptions(builder_).Union());
-    BuildInterpreter(input_shapes);
-  }
-
-  int input(int i) { return inputs_[i]; }
-
- protected:
-  std::vector<int> inputs_;
-  int output_;
-};
-
-class FloatAddNOpModel : public BaseAddNOpModel {
- public:
-  using BaseAddNOpModel::BaseAddNOpModel;
-
-  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
-};
-
-class IntegerAddNOpModel : public BaseAddNOpModel {
- public:
-  using BaseAddNOpModel::BaseAddNOpModel;
-
-  std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
-};
-
-TEST(FloatAddNOpModel, AddMultipleTensors) {
+TF_LITE_MICRO_TEST(FloatAddNOpAddMultipleTensors) {
+#ifdef notdef
   FloatAddNOpModel m({{TensorType_FLOAT32, {1, 2, 2, 1}},
                       {TensorType_FLOAT32, {1, 2, 2, 1}},
                       {TensorType_FLOAT32, {1, 2, 2, 1}}},
@@ -72,11 +38,12 @@ TEST(FloatAddNOpModel, AddMultipleTensors) {
   m.PopulateTensor<float>(m.input(0), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input(1), {0.1, 0.2, 0.3, 0.5});
   m.PopulateTensor<float>(m.input(2), {0.5, 0.1, 0.1, 0.2});
-  m.Invoke();
   EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.4, 0.5, 1.1, 1.5}));
+#endif  // notdef
 }
 
-TEST(IntegerAddNOpModel, AddMultipleTensors) {
+TF_LITE_MICRO_TEST(IntegerAddNOpAddMultipleTensors) {
+#ifdef notdef
   IntegerAddNOpModel m({{TensorType_INT32, {1, 2, 2, 1}},
                         {TensorType_INT32, {1, 2, 2, 1}},
                         {TensorType_INT32, {1, 2, 2, 1}}},
@@ -84,9 +51,8 @@ TEST(IntegerAddNOpModel, AddMultipleTensors) {
   m.PopulateTensor<int32_t>(m.input(0), {-20, 2, 7, 8});
   m.PopulateTensor<int32_t>(m.input(1), {1, 2, 3, 5});
   m.PopulateTensor<int32_t>(m.input(2), {10, -5, 1, -2});
-  m.Invoke();
   EXPECT_THAT(m.GetOutput(), ElementsAreArray({-9, -1, 11, 11}));
+#endif  // notdef
 }
 
-}  // namespace
-}  // namespace tflite
+TF_LITE_MICRO_TESTS_END
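
Once a later PR step fills in the kernel, the disabled tests above are
expected to be rebuilt on KernelRunner, roughly as sketched below for
the float case. The sketch leans on assumptions: CreateFloatTensor and
IntArrayFromInts as declared in micro/test_helpers.h, and the
KernelRunner constructor (including the trailing ErrorReporter
argument) as declared in micro/kernels/kernel_runner.h at this commit.

    TF_LITE_MICRO_TEST(FloatAddNOpAddMultipleTensors) {
      // One shape, {1, 2, 2, 1}, shared by all three inputs and the output.
      int dims_data[] = {4, 1, 2, 2, 1};
      TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);

      float input1_data[] = {-2.0f, 0.2f, 0.7f, 0.8f};
      float input2_data[] = {0.1f, 0.2f, 0.3f, 0.5f};
      float input3_data[] = {0.5f, 0.1f, 0.1f, 0.2f};
      float output_data[4];

      constexpr int tensors_size = 4;
      TfLiteTensor tensors[tensors_size] = {
          tflite::testing::CreateFloatTensor(input1_data, dims),
          tflite::testing::CreateFloatTensor(input2_data, dims),
          tflite::testing::CreateFloatTensor(input3_data, dims),
          tflite::testing::CreateFloatTensor(output_data, dims),
      };

      int inputs_data[] = {3, 0, 1, 2};  // Three inputs: tensors 0, 1, 2.
      int outputs_data[] = {1, 3};       // One output: tensor 3.
      TfLiteIntArray* inputs = tflite::testing::IntArrayFromInts(inputs_data);
      TfLiteIntArray* outputs = tflite::testing::IntArrayFromInts(outputs_data);

      const TfLiteRegistration registration =
          *tflite::ops::micro::Register_ADD_N();
      tflite::micro::KernelRunner runner(registration, tensors, tensors_size,
                                         inputs, outputs,
                                         /*builtin_data=*/nullptr,
                                         micro_test::reporter);
      TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
      TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

      const float golden[] = {-1.4f, 0.5f, 1.1f, 1.5f};
      for (int i = 0; i < 4; ++i) {
        TF_LITE_MICRO_EXPECT_NEAR(golden[i], output_data[i], 1e-5f);
      }
    }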