From b6d13bb0a850347f9f885d38b7fbe47c46c494b1 Mon Sep 17 00:00:00 2001
From: Nick Kreeger <kreeger@google.com>
Date: Thu, 11 Jun 2020 16:24:01 -0700
Subject: [PATCH] Remove use of the "name" field in TfLiteTensor from TF Micro
 tests.

The TFLM team is preparing an "optimized" memory build option. This build option will eliminate non-essential fields from core TFLite structs. The first big change is to reduce the number of pointers in TfLiteTensor: many models have dozens of tensors (e.g. the keyword benchmark has 54), so every per-tensor pointer adds up for TFLM. This cleanup pass removes all remaining uses of the soon-to-be-removed 'name' field of TfLiteTensor from the micro tests, benchmarks, and test helpers.
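
As an illustration, a minimal before/after sketch of a typical call site
(based on the updated helpers in tensorflow/lite/micro/testing/test_utils.h;
exact parameter lists vary by overload):

    // Before this change, each test helper took a name string that was
    // stored in TfLiteTensor::name, costing one pointer per tensor:
    //   TfLiteTensor input = tflite::testing::CreateQuantizedTensor(
    //       input_values, input_quantized, input_dims, input_scale,
    //       input_zero_point, "input_tensor");
    // After this change, the name parameter is dropped:
    TfLiteTensor input = tflite::testing::CreateQuantizedTensor(
        input_values, input_quantized, input_dims, input_scale,
        input_zero_point);

Kernel code that used to print the tensor name in error messages (e.g.
concatenation.cc) now logs only the dimension count.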

PiperOrigin-RevId: 316000388
Change-Id: I230865014d5a59b78c1c1c9f5eda784f6d611e77
---
 .../lite/micro/benchmarks/conv_benchmark.cc   |  12 +-
 .../benchmarks/depthwise_conv_benchmark.cc    |  13 +-
 .../micro_speech/recognize_commands_test.cc   |  18 +-
 .../lite/micro/kernels/activations_test.cc    |  24 +--
 tensorflow/lite/micro/kernels/add_test.cc     |  21 ++-
 .../lite/micro/kernels/arg_min_max_test.cc    |  12 +-
 tensorflow/lite/micro/kernels/ceil_test.cc    |   4 +-
 .../micro/kernels/circular_buffer_test.cc     |   8 +-
 .../lite/micro/kernels/comparisons_test.cc    |  30 ++--
 .../lite/micro/kernels/concatenation.cc       |   4 +-
 .../lite/micro/kernels/concatenation_test.cc  |  15 +-
 tensorflow/lite/micro/kernels/conv_test.cc    |  68 ++++----
 .../lite/micro/kernels/depthwise_conv_test.cc |  78 ++++-----
 .../lite/micro/kernels/dequantize_test.cc     |   8 +-
 .../lite/micro/kernels/elementwise_test.cc    |   8 +-
 tensorflow/lite/micro/kernels/floor_test.cc   |   4 +-
 .../micro/kernels/fully_connected_test.cc     |  20 +--
 tensorflow/lite/micro/kernels/l2norm_test.cc  | 158 ++++++++----------
 tensorflow/lite/micro/kernels/logical_test.cc |   6 +-
 .../lite/micro/kernels/logistic_test.cc       |  10 +-
 .../micro/kernels/maximum_minimum_test.cc     |  24 +--
 tensorflow/lite/micro/kernels/mul_test.cc     |  15 +-
 tensorflow/lite/micro/kernels/neg_test.cc     |   4 +-
 tensorflow/lite/micro/kernels/pack_test.cc    |  26 +--
 tensorflow/lite/micro/kernels/pad_test.cc     |  29 ++--
 tensorflow/lite/micro/kernels/pooling_test.cc |  20 +--
 tensorflow/lite/micro/kernels/prelu_test.cc   |  15 +-
 .../lite/micro/kernels/quantize_test.cc       |  13 +-
 tensorflow/lite/micro/kernels/reduce_test.cc  |   6 +-
 tensorflow/lite/micro/kernels/reshape_test.cc |  26 ++-
 .../kernels/resize_nearest_neighbor_test.cc   |  22 +--
 tensorflow/lite/micro/kernels/round_test.cc   |   4 +-
 tensorflow/lite/micro/kernels/softmax_test.cc |  16 +-
 tensorflow/lite/micro/kernels/split_test.cc   |  40 ++---
 .../lite/micro/kernels/strided_slice_test.cc  |  22 +--
 tensorflow/lite/micro/kernels/sub_test.cc     |  21 ++-
 tensorflow/lite/micro/kernels/svdf_test.cc    |  27 ++-
 tensorflow/lite/micro/kernels/tanh_test.cc    |  10 +-
 tensorflow/lite/micro/kernels/unpack_test.cc  |  34 ++--
 tensorflow/lite/micro/micro_allocator.cc      |   3 -
 .../lite/micro/micro_optional_debug_tools.cc  |   7 +-
 tensorflow/lite/micro/test_helpers.cc         |  61 +++----
 tensorflow/lite/micro/test_helpers.h          |  24 ++-
 tensorflow/lite/micro/testing/test_utils.cc   |  58 +++----
 tensorflow/lite/micro/testing/test_utils.h    |  41 ++---
 tensorflow/lite/micro/testing_helpers_test.cc |  13 +-
 46 files changed, 472 insertions(+), 630 deletions(-)

diff --git a/tensorflow/lite/micro/benchmarks/conv_benchmark.cc b/tensorflow/lite/micro/benchmarks/conv_benchmark.cc
index fa1724e5c99..aef5813f6d1 100644
--- a/tensorflow/lite/micro/benchmarks/conv_benchmark.cc
+++ b/tensorflow/lite/micro/benchmarks/conv_benchmark.cc
@@ -160,8 +160,7 @@ int main() {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[32];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
   float input_scales[] = {1, input_scale};
@@ -174,7 +173,7 @@ int main() {
   int8_t filter_quantized[32 * 32];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale,
-      filter_zero_point, "filter_tensor");
+      filter_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, filter_zero_point};
   float filter_scales[] = {1, filter_scale};
@@ -187,8 +186,8 @@ int main() {
   int32_t bias_quantized[32];
   tflite::SymmetricQuantize(bias_values, bias_quantized, 32,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // There is a single zero point of 0, and a single scale of
   // input_scale * filter_scale.
@@ -202,8 +201,7 @@ int main() {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[32];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};
   float output_scales[] = {1, output_scale};
diff --git a/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc b/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc
index 4259424a42e..2098531dbfb 100644
--- a/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc
+++ b/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc
@@ -166,8 +166,7 @@ int main() {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[input_elements];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
@@ -180,8 +179,7 @@ int main() {
   // Create per-tensor quantized int8 filter tensor.
   int8_t filter_quantized[filter_elements];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
-      filter_values, filter_quantized, filter_dims, filter_scale, 0,
-      "filter_tensor");
+      filter_values, filter_quantized, filter_dims, filter_scale, 0);
 
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, 0};
@@ -197,8 +195,8 @@ int main() {
   // detailed explanation of why bias scale is input_scale * filter_scale.
   tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // Set zero point and scale arrays with a single element for each.
   int bias_zero_points[] = {1, 0};
@@ -211,8 +209,7 @@ int main() {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[output_elements];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};
diff --git a/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc b/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc
index dcff73cf7ee..9ad20b68c8c 100644
--- a/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc
+++ b/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc
@@ -82,7 +82,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBasic) {
   auto result_dims = {2, 1, 4};
   TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
       result_data, tflite::testing::IntArrayFromInitializer(result_dims),
-      "input_tensor", -128.0f, 127.0f);
+      -128.0f, 127.0f);
 
   const char* found_command;
   uint8_t score;
@@ -101,8 +101,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) {
   std::initializer_list<int8_t> yes_data = {-128, -128, 127, -128};
   auto yes_dims = {2, 1, 4};
   TfLiteTensor yes_results = tflite::testing::CreateQuantizedTensor(
-      yes_data, tflite::testing::IntArrayFromInitializer(yes_dims),
-      "input_tensor", -128.0f, 127.0f);
+      yes_data, tflite::testing::IntArrayFromInitializer(yes_dims), -128.0f,
+      127.0f);
 
   bool has_found_new_command = false;
   const char* new_command;
@@ -129,8 +129,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) {
   std::initializer_list<int8_t> no_data = {-128, -128, -128, 127};
   auto no_dims = {2, 1, 4};
   TfLiteTensor no_results = tflite::testing::CreateQuantizedTensor(
-      no_data, tflite::testing::IntArrayFromInitializer(no_dims),
-      "input_tensor", -128.0f, 127.0f);
+      no_data, tflite::testing::IntArrayFromInitializer(no_dims), -128.0f,
+      127.0f);
   has_found_new_command = false;
   new_command = "";
   uint8_t score;
@@ -164,8 +164,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputLength) {
   std::initializer_list<int8_t> bad_data = {-128, -128, 127};
   auto bad_dims = {2, 1, 3};
   TfLiteTensor bad_results = tflite::testing::CreateQuantizedTensor(
-      bad_data, tflite::testing::IntArrayFromInitializer(bad_dims),
-      "input_tensor", -128.0f, 127.0f);
+      bad_data, tflite::testing::IntArrayFromInitializer(bad_dims), -128.0f,
+      127.0f);
 
   const char* found_command;
   uint8_t score;
@@ -185,7 +185,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputTimes) {
   auto result_dims = {2, 1, 4};
   TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
       result_data, tflite::testing::IntArrayFromInitializer(result_dims),
-      "input_tensor", -128.0f, 127.0f);
+      -128.0f, 127.0f);
 
   const char* found_command;
   uint8_t score;
@@ -208,7 +208,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestTooFewInputs) {
   auto result_dims = {2, 1, 4};
   TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
       result_data, tflite::testing::IntArrayFromInitializer(result_dims),
-      "input_tensor", -128.0f, 127.0f);
+      -128.0f, 127.0f);
 
   const char* found_command;
   uint8_t score;
diff --git a/tensorflow/lite/micro/kernels/activations_test.cc b/tensorflow/lite/micro/kernels/activations_test.cc
index 686139e51a5..221f8f66d58 100644
--- a/tensorflow/lite/micro/kernels/activations_test.cc
+++ b/tensorflow/lite/micro/kernels/activations_test.cc
@@ -34,8 +34,8 @@ void TestReluFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -90,8 +90,8 @@ void TestRelu6Float(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -150,9 +150,9 @@ void TestReluUint8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;
@@ -215,9 +215,9 @@ void TestRelu6Uint8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;
@@ -279,9 +279,9 @@ void TestReluInt8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;
@@ -345,9 +345,9 @@ void TestRelu6Int8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/add_test.cc b/tensorflow/lite/micro/kernels/add_test.cc
index d97739a345b..60164ab4746 100644
--- a/tensorflow/lite/micro/kernels/add_test.cc
+++ b/tensorflow/lite/micro/kernels/add_test.cc
@@ -129,9 +129,9 @@ void TestAddFloat(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateAddGoldens(tensors, tensors_size, expected_output, output_data,
@@ -156,15 +156,14 @@ void TestAddQuantized(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      tflite::testing::CreateQuantizedTensor(
-          input1_data, input1_quantized, input1_dims, input1_scale,
-          input1_zero_point, "input1_tensor"),
-      tflite::testing::CreateQuantizedTensor(
-          input2_data, input2_quantized, input2_dims, input2_scale,
-          input2_zero_point, "input2_tensor"),
+      tflite::testing::CreateQuantizedTensor(input1_data, input1_quantized,
+                                             input1_dims, input1_scale,
+                                             input1_zero_point),
+      tflite::testing::CreateQuantizedTensor(input2_data, input2_quantized,
+                                             input2_dims, input2_scale,
+                                             input2_zero_point),
       tflite::testing::CreateQuantizedTensor(output_data, output_dims,
-                                             output_scale, output_zero_point,
-                                             "output_tensor"),
+                                             output_scale, output_zero_point),
   };
   tflite::AsymmetricQuantize(golden, golden_quantized,
                              ElementCount(*output_dims), output_scale,
diff --git a/tensorflow/lite/micro/kernels/arg_min_max_test.cc b/tensorflow/lite/micro/kernels/arg_min_max_test.cc
index c5bbd537fc8..57e761f816d 100644
--- a/tensorflow/lite/micro/kernels/arg_min_max_test.cc
+++ b/tensorflow/lite/micro/kernels/arg_min_max_test.cc
@@ -83,9 +83,9 @@ void TestArgMinMaxFloat(const int* input_dims_data, const float* input_values,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_values, input_dims, "input_tensor"),
-      CreateInt32Tensor(axis_values, axis_dims, "axis_tensor"),
-      CreateInt32Tensor(output, output_dims, "output_tensor"),
+      CreateFloatTensor(input_values, input_dims),
+      CreateInt32Tensor(axis_values, axis_dims),
+      CreateInt32Tensor(output, output_dims),
   };
 
   ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,
@@ -110,9 +110,9 @@ void TestArgMinMaxQuantized(const int* input_dims_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_values, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(axis_values, axis_dims, "axis_tensor"),
-      CreateInt32Tensor(output, output_dims, "output_tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(axis_values, axis_dims),
+      CreateInt32Tensor(output, output_dims),
   };
 
   ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,
diff --git a/tensorflow/lite/micro/kernels/ceil_test.cc b/tensorflow/lite/micro/kernels/ceil_test.cc
index db876a37fcb..67161e01556 100644
--- a/tensorflow/lite/micro/kernels/ceil_test.cc
+++ b/tensorflow/lite/micro/kernels/ceil_test.cc
@@ -32,8 +32,8 @@ void TestCeil(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
diff --git a/tensorflow/lite/micro/kernels/circular_buffer_test.cc b/tensorflow/lite/micro/kernels/circular_buffer_test.cc
index cb123c50b19..c622f12ead2 100644
--- a/tensorflow/lite/micro/kernels/circular_buffer_test.cc
+++ b/tensorflow/lite/micro/kernels/circular_buffer_test.cc
@@ -44,8 +44,8 @@ TfLiteNode PrepareCircularBufferInt8(const int* input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, 1, 0, "input_tensor"),
-      CreateQuantizedTensor(output_data, output_dims, 1, 0, "output_tensor"),
+      CreateQuantizedTensor(input_data, input_dims, 1, 0),
+      CreateQuantizedTensor(output_data, output_dims, 1, 0),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@@ -92,8 +92,8 @@ TfLiteStatus InvokeCircularBufferInt8(const int* input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, 1, 0, "input_tensor"),
-      CreateQuantizedTensor(output_data, output_dims, 1, 0, "output_tensor"),
+      CreateQuantizedTensor(input_data, input_dims, 1, 0),
+      CreateQuantizedTensor(output_data, output_dims, 1, 0),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
diff --git a/tensorflow/lite/micro/kernels/comparisons_test.cc b/tensorflow/lite/micro/kernels/comparisons_test.cc
index 198bb5c9d19..c57f60f3ddd 100644
--- a/tensorflow/lite/micro/kernels/comparisons_test.cc
+++ b/tensorflow/lite/micro/kernels/comparisons_test.cc
@@ -78,9 +78,9 @@ void TestComparisonFloat(tflite::BuiltinOperator op, int* input1_dims_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
 
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -95,9 +95,9 @@ void TestComparisonBool(tflite::BuiltinOperator op, int* input1_dims_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
 
   TfLiteTensor tensors[tensors_size] = {
-      CreateBoolTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateBoolTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateBoolTensor(input1_data, input1_dims),
+      CreateBoolTensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -112,9 +112,9 @@ void TestComparisonInt(tflite::BuiltinOperator op, int* input1_dims_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
 
   TfLiteTensor tensors[tensors_size] = {
-      CreateInt32Tensor(input1_data, input1_dims, "input1_tensor"),
-      CreateInt32Tensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateInt32Tensor(input1_data, input1_dims),
+      CreateInt32Tensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -135,10 +135,10 @@ void TestComparisonQuantizedUInt8(tflite::BuiltinOperator op,
 
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input1_data, input1_quantized, input1_dims,
-                            input1_scale, input1_zero_point, "input1_tensor"),
+                            input1_scale, input1_zero_point),
       CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
-                            input2_scale, input2_zero_point, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+                            input2_scale, input2_zero_point),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -159,10 +159,10 @@ void TestComparisonQuantizedInt8(tflite::BuiltinOperator op,
 
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input1_data, input1_quantized, input1_dims,
-                            input1_scale, input1_zero_point, "input1_tensor"),
+                            input1_scale, input1_zero_point),
       CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
-                            input2_scale, input2_zero_point, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+                            input2_scale, input2_zero_point),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
diff --git a/tensorflow/lite/micro/kernels/concatenation.cc b/tensorflow/lite/micro/kernels/concatenation.cc
index add4b12619e..0d4ef35f26e 100644
--- a/tensorflow/lite/micro/kernels/concatenation.cc
+++ b/tensorflow/lite/micro/kernels/concatenation.cc
@@ -63,8 +63,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
       TF_LITE_KERNEL_LOG(
           context,
           "Op Concatenation does not currently support num dimensions >4 "
-          "Tensor '%s' has %d dimensions.",
-          input->name, num_dimensions);
+          "Tensor has %d dimensions.",
+          num_dimensions);
       return kTfLiteError;
     }
   }
diff --git a/tensorflow/lite/micro/kernels/concatenation_test.cc b/tensorflow/lite/micro/kernels/concatenation_test.cc
index feb79804981..5dc6a4ad669 100644
--- a/tensorflow/lite/micro/kernels/concatenation_test.cc
+++ b/tensorflow/lite/micro/kernels/concatenation_test.cc
@@ -40,9 +40,9 @@ void TestConcatenateTwoInputs(std::initializer_list<int> input1_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@@ -99,12 +99,9 @@ void TestConcatenateQuantizedTwoInputs(
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max)};
+      CreateQuantizedTensor(input1_data, input1_dims, input_min, input_max),
+      CreateQuantizedTensor(input2_data, input2_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max)};
 
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
diff --git a/tensorflow/lite/micro/kernels/conv_test.cc b/tensorflow/lite/micro/kernels/conv_test.cc
index a8e7b12ac99..7be3a1e6f70 100644
--- a/tensorflow/lite/micro/kernels/conv_test.cc
+++ b/tensorflow/lite/micro/kernels/conv_test.cc
@@ -123,10 +123,10 @@ void TestConvFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(filter_data, filter_dims, "filter_tensor"),
-      CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(filter_data, filter_dims),
+      CreateFloatTensor(bias_data, bias_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TF_LITE_MICRO_EXPECT_EQ(
@@ -157,13 +157,12 @@ void TestConvQuantizedPerLayer(
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, 128, "input_tensor"),
+                            input_scale, 128),
       CreateQuantizedTensor(filter_data, filter_quantized, filter_dims,
-                            filter_scale, 128, "filter_tensor"),
+                            filter_scale, 128),
       CreateQuantizedBiasTensor(bias_data, bias_quantized, bias_dims,
-                                input_scale, filter_scale, "bias_tensor"),
-      CreateQuantizedTensor(output_data, output_dims, output_scale, 128,
-                            "output_tensor")};
+                                input_scale, filter_scale),
+      CreateQuantizedTensor(output_data, output_dims, output_scale, 128)};
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
   float filter_scales[] = {1, filter_scale};
@@ -199,20 +198,16 @@ void TestConvQuantizedPerChannel(
   float filter_scales[5];
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
-  TfLiteTensor input_tensor =
-      CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor");
+  TfLiteTensor input_tensor = CreateQuantizedTensor(
+      input_data, input_quantized, input_dims, input_scale, input_zero_point);
   TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
       filter_data, filter_data_quantized, filter_dims, filter_scales,
-      filter_zero_points, &filter_quant, 0 /* quantized dimension */,
-      "filter_tensor");
+      filter_zero_points, &filter_quant, 0 /* quantized dimension */);
   TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
       bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
-      bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */,
-      "bias_tensor");
-  TfLiteTensor output_tensor =
-      CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor");
+      bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */);
+  TfLiteTensor output_tensor = CreateQuantizedTensor(
+      output_data, output_dims, output_scale, output_zero_point);
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
   float input_scales[] = {1, input_scale};
@@ -479,21 +474,18 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0,
-      "input_tensor");
+      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
   TfLiteTensor filter_tensor =
       tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
           tflite::testing::kFilterData, filter_quantized, filter_dims,
           filter_scales, filter_zero_points, &filter_quant,
-          0 /* quantized dimension */, "filter_tensor");
+          0 /* quantized dimension */);
   TfLiteTensor bias_tensor =
       tflite::testing::CreatePerChannelQuantizedBiasTensor(
           tflite::testing::kBiasData, bias_quantized, bias_dims, input_scale,
-          &filter_scales[1], scales, zero_points, &bias_quant, 0,
-          "bias_tensor");
+          &filter_scales[1], scales, zero_points, &bias_quant, 0);
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, 0 /* quantized dimension */,
-      "output_tensor");
+      output_data, output_dims, output_scale, 0 /* quantized dimension */);
 
   float input_scales[] = {1, input_scale};
   int input_zero_points[] = {1, 128};
@@ -562,8 +554,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
 
   // Create per-layer quantized int8 input tensor.
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0,
-      "input_tensor");
+      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
   int input_zero_points[2] = {1, 0};
   float input_scales[2] = {1, input_scale};
   TfLiteAffineQuantization input_quant = {
@@ -574,7 +565,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
   // Create per-layer quantized int8 filter tensor.
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       tflite::testing::kFilterData, filter_quantized, filter_dims, filter_scale,
-      0, "filter_tensor");
+      0);
   int filter_zero_points[2] = {1, 0};
   float filter_scales[2] = {1, filter_scale};
   TfLiteAffineQuantization filter_quant = {
@@ -586,8 +577,8 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
   tflite::SymmetricQuantize(tflite::testing::kBiasData, bias_quantized,
                             tflite::testing::kBiasElements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   int bias_zero_points[2] = {1, 0};
   float bias_scales[2] = {1, input_scale * filter_scale};
@@ -598,8 +589,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
 
   // Create per-layer quantized int8 output tensor.
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, 0 /* quantized dimension */,
-      "output_tensor");
+      output_data, output_dims, output_scale, 0 /* quantized dimension */);
   int output_zero_points[2] = {1, 0};
   float output_scales[2] = {1, output_scale};
   TfLiteAffineQuantization output_quant = {
@@ -687,8 +677,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[kSampleSize];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
   float input_scales[] = {1, input_scale};
@@ -701,7 +690,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   int8_t filter_quantized[kNumFilters * kSampleSize];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale,
-      filter_zero_point, "filter_tensor");
+      filter_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, filter_zero_point};
   float filter_scales[] = {1, filter_scale};
@@ -714,8 +703,8 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   int32_t bias_quantized[kSampleSize];
   tflite::SymmetricQuantize(bias_values, bias_quantized, kSampleSize,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // There is a single zero point of 0, and a single scale of
   // input_scale * filter_scale.
@@ -729,8 +718,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[kSampleSize];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};
   float output_scales[] = {1, output_scale};
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc
index b696cea22b0..e2c6c71dfae 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc
+++ b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc
@@ -121,10 +121,10 @@ void TestDepthwiseConvFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(filter_data, filter_dims, "filter_tensor"),
-      CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(filter_data, filter_dims),
+      CreateFloatTensor(bias_data, bias_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateDepthwiseConvGoldens(expected_output_data, output_dims_count,
@@ -152,16 +152,14 @@ void TestDepthwiseConvQuantizedPerLayer(
   TfLiteTensor tensors[tensors_size] = {
       tflite::testing::CreateQuantizedTensor(input_data, input_quantized,
                                              input_dims, input_scale,
-                                             input_zero_point, "input_tensor"),
-      tflite::testing::CreateQuantizedTensor(
-          filter_data, filter_quantized, filter_dims, filter_scale,
-          filter_zero_point, "filter_tensor"),
-      tflite::testing::CreateQuantizedBiasTensor(bias_data, bias_quantized,
-                                                 bias_dims, input_scale,
-                                                 filter_scale, "bias_tensor"),
+                                             input_zero_point),
+      tflite::testing::CreateQuantizedTensor(filter_data, filter_quantized,
+                                             filter_dims, filter_scale,
+                                             filter_zero_point),
+      tflite::testing::CreateQuantizedBiasTensor(
+          bias_data, bias_quantized, bias_dims, input_scale, filter_scale),
       tflite::testing::CreateQuantizedTensor(output_data, output_dims,
-                                             output_scale, output_zero_point,
-                                             "output_tensor"),
+                                             output_scale, output_zero_point),
   };
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
@@ -206,20 +204,18 @@ void TestDepthwiseConvQuantizedPerChannel(
   float bias_scales[kMaxBiasChannels];
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
-  TfLiteTensor input_tensor =
-      CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor");
+  TfLiteTensor input_tensor = CreateQuantizedTensor(
+      input_data, input_quantized, input_dims, input_scale, input_zero_point);
   TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
       filter_data, filter_data_quantized, filter_dims, filter_scales,
-      filter_zero_points, &filter_quant, 3 /* quantized dimension */,
-      "filter_tensor");
+      filter_zero_points, &filter_quant, 3 /* quantized dimension */
+  );
   TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
       bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
-      bias_scales, bias_zero_points, &bias_quant, 3 /* quantized dimension */,
-      "bias_tensor");
-  TfLiteTensor output_tensor =
-      CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            input_zero_point, "output_tensor");
+      bias_scales, bias_zero_points, &bias_quant, 3 /* quantized dimension */
+  );
+  TfLiteTensor output_tensor = CreateQuantizedTensor(
+      output_data, output_dims, output_scale, input_zero_point);
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
   float input_scales[] = {1, input_scale};
@@ -615,20 +611,17 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_data, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_data, input_quantized, input_dims, input_scale, input_zero_point);
   TfLiteTensor filter_tensor =
       tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
           filter_data, filter_quantized, filter_dims, filter_scales,
-          filter_zero_points, &filter_quant, 0 /* quantized dimension */,
-          "filter_tensor");
+          filter_zero_points, &filter_quant, 0 /* quantized dimension */);
   TfLiteTensor bias_tensor =
       tflite::testing::CreatePerChannelQuantizedBiasTensor(
           bias_data, bias_quantized, bias_dims, input_scale, &filter_scales[1],
-          scales, zero_points, &bias_quant, 0, "bias_tensor");
+          scales, zero_points, &bias_quant, 0);
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_data, output_dims, output_scale, output_zero_point);
 
   float input_scales[] = {1, input_scale};
   int input_zero_points[] = {1, input_zero_point};
@@ -700,8 +693,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
 
   // Create per-layer quantized int8 input tensor.
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, 0,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, 0);
   int input_zero_points[2] = {1, 0};
   float input_scales[2] = {1, input_scale};
   TfLiteAffineQuantization input_quant = {
@@ -711,8 +703,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
 
   // Create per-layer quantized int8 filter tensor.
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
-      filter_values, filter_quantized, filter_dims, filter_scale, 0,
-      "filter_tensor");
+      filter_values, filter_quantized, filter_dims, filter_scale, 0);
   int filter_zero_points[2] = {1, 0};
   float filter_scales[2] = {1, filter_scale};
   TfLiteAffineQuantization filter_quant = {
@@ -723,8 +714,8 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
   // Create per-layer quantized int32 bias tensor.
   tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   int bias_zero_points[2] = {1, 0};
   float bias_scales[2] = {1, input_scale * filter_scale};
@@ -735,7 +726,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
 
   // Create per-layer quantized int8 output tensor.
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, 0, "output_tensor");
+      output_data, output_dims, output_scale, 0);
   int output_zero_points[2] = {1, 0};
   float output_scales[2] = {1, output_scale};
   TfLiteAffineQuantization output_quant = {
@@ -833,8 +824,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[input_elements];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
@@ -847,8 +837,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // Create per-tensor quantized int8 filter tensor.
   int8_t filter_quantized[filter_elements];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
-      filter_values, filter_quantized, filter_dims, filter_scale, 0,
-      "filter_tensor");
+      filter_values, filter_quantized, filter_dims, filter_scale, 0);
 
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, 0};
@@ -864,8 +853,8 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // detailed explanation of why bias scale is input_scale * filter_scale.
   tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // Set zero point and scale arrays with a single element for each.
   int bias_zero_points[] = {1, 0};
@@ -878,8 +867,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[output_elements];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};
diff --git a/tensorflow/lite/micro/kernels/dequantize_test.cc b/tensorflow/lite/micro/kernels/dequantize_test.cc
index c51c48cc23f..7faf21d00f2 100644
--- a/tensorflow/lite/micro/kernels/dequantize_test.cc
+++ b/tensorflow/lite/micro/kernels/dequantize_test.cc
@@ -90,8 +90,8 @@ void TestDequantizeToFloat(const int* input_dims_data, const float* input_data,
   const int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims, scale,
-                            zero_point, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+                            zero_point),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateDequantizeGoldens(tensors, tensors_size, expected_output_data,
@@ -113,8 +113,8 @@ void TestDequantizeToInt32(const int* input_dims_data, const float* input_data,
   const int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(output_data, output_dims, "output_tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(output_data, output_dims),
   };
 
   TfLiteQuantizationParams output_quant;
diff --git a/tensorflow/lite/micro/kernels/elementwise_test.cc b/tensorflow/lite/micro/kernels/elementwise_test.cc
index 4086b91b0a9..a201d2cae04 100644
--- a/tensorflow/lite/micro/kernels/elementwise_test.cc
+++ b/tensorflow/lite/micro/kernels/elementwise_test.cc
@@ -36,8 +36,8 @@ void TestElementwiseFloat(tflite::BuiltinOperator op,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -101,8 +101,8 @@ void TestElementwiseBool(tflite::BuiltinOperator op,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateBoolTensor(input_data, input_dims, "input_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor")};
+      CreateBoolTensor(input_data, input_dims),
+      CreateBoolTensor(output_data, output_dims)};
 
   // Place false in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
diff --git a/tensorflow/lite/micro/kernels/floor_test.cc b/tensorflow/lite/micro/kernels/floor_test.cc
index e6f477bf44d..d841c7c39f3 100644
--- a/tensorflow/lite/micro/kernels/floor_test.cc
+++ b/tensorflow/lite/micro/kernels/floor_test.cc
@@ -33,8 +33,8 @@ void TestFloor(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
diff --git a/tensorflow/lite/micro/kernels/fully_connected_test.cc b/tensorflow/lite/micro/kernels/fully_connected_test.cc
index b3066be5eb0..121c58c9150 100644
--- a/tensorflow/lite/micro/kernels/fully_connected_test.cc
+++ b/tensorflow/lite/micro/kernels/fully_connected_test.cc
@@ -44,10 +44,10 @@ TfLiteStatus TestFullyConnectedFloat(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(weights_data, weights_dims, "weights_tensor"),
-      CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(weights_data, weights_dims),
+      CreateFloatTensor(bias_data, bias_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -121,13 +121,11 @@ TfLiteStatus TestFullyConnectedQuantized(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(weights_data, weights_dims, "weights_tensor",
-                            weights_min, weights_max),
-      CreateQuantized32Tensor(bias_data, bias_dims, "bias_tensor", bias_scale),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(weights_data, weights_dims, weights_min,
+                            weights_max),
+      CreateQuantized32Tensor(bias_data, bias_dims, bias_scale),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/l2norm_test.cc b/tensorflow/lite/micro/kernels/l2norm_test.cc
index 9ad3481c582..e4c679a6c2f 100644
--- a/tensorflow/lite/micro/kernels/l2norm_test.cc
+++ b/tensorflow/lite/micro/kernels/l2norm_test.cc
@@ -19,7 +19,6 @@ limitations under the License.
 #include "tensorflow/lite/micro/testing/micro_test.h"
 #include "tensorflow/lite/micro/testing/test_utils.h"
 
-
 namespace tflite {
 namespace testing {
 namespace {
@@ -30,38 +29,35 @@ constexpr float kInputMax = 2.0;
 constexpr float kOutputMin = -1.0;
 constexpr float kOutputMax = 127.0 / 128.0;
 
-
 void QuantizeInputData(const float input_data[], int length,
                        uint8_t* quantized_data) {
-  for (int i=0; i < 6; i++) {
-    quantized_data[i] = tflite::testing::F2Q(input_data[i],
-                                             tflite::testing::kInputMin,
-                                             tflite::testing::kInputMax);
+  for (int i = 0; i < 6; i++) {
+    quantized_data[i] = tflite::testing::F2Q(
+        input_data[i], tflite::testing::kInputMin, tflite::testing::kInputMax);
   }
 }
 
 void QuantizeInputData(const float input_data[], int length,
                        int8_t* quantized_data) {
-  for (int i=0; i < 6; i++) {
-    quantized_data[i] = tflite::testing::F2QS(input_data[i],
-                                             tflite::testing::kInputMin,
-                                             tflite::testing::kInputMax);
+  for (int i = 0; i < 6; i++) {
+    quantized_data[i] = tflite::testing::F2QS(
+        input_data[i], tflite::testing::kInputMin, tflite::testing::kInputMax);
   }
 }
 
 TfLiteTensor CreateL2NormTensor(const float* data, TfLiteIntArray* dims,
-                              const char* name, bool is_input) {
-  return CreateFloatTensor(data, dims, name);
+                                bool is_input) {
+  return CreateFloatTensor(data, dims);
 }
 
 TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
-                              const char* name, bool is_input) {
+                                bool is_input) {
   TfLiteTensor tensor;
 
   if (is_input) {
-    tensor = CreateQuantizedTensor(data, dims, name, kInputMin, kInputMax);
+    tensor = CreateQuantizedTensor(data, dims, kInputMin, kInputMax);
   } else {
-    tensor = CreateQuantizedTensor(data, dims, name, kOutputMin, kOutputMax);
+    tensor = CreateQuantizedTensor(data, dims, kOutputMin, kOutputMax);
   }
 
   tensor.quantization.type = kTfLiteAffineQuantization;
@@ -69,13 +65,13 @@ TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
 }
 
 TfLiteTensor CreateL2NormTensor(const int8* data, TfLiteIntArray* dims,
-                              const char* name, bool is_input) {
+                                bool is_input) {
   TfLiteTensor tensor;
 
   if (is_input) {
-    tensor = CreateQuantizedTensor(data, dims, name, kInputMin, kInputMax);
+    tensor = CreateQuantizedTensor(data, dims, kInputMin, kInputMax);
   } else {
-    tensor = CreateQuantizedTensor(data, dims, name, kOutputMin, kOutputMax);
+    tensor = CreateQuantizedTensor(data, dims, kOutputMin, kOutputMax);
   }
 
   tensor.quantization.type = kTfLiteAffineQuantization;
@@ -87,19 +83,18 @@ inline float Dequantize(const T data, float scale, int32_t zero_point) {
   return scale * (data - zero_point);
 }
 
-template<typename T>
-void TestL2Normalization(const int* input_dims_data,
-                               const T* input_data,
-                               const float* expected_output_data,
-                               T* output_data, float variance) {
+template <typename T>
+void TestL2Normalization(const int* input_dims_data, const T* input_data,
+                         const float* expected_output_data, T* output_data,
+                         float variance) {
   TfLiteIntArray* dims = IntArrayFromInts(input_dims_data);
 
   const int output_dims_count = ElementCount(*dims);
 
   constexpr int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
-      CreateL2NormTensor(input_data, dims, "input_tensor", true),
-      CreateL2NormTensor(output_data, dims, "output_tensor", false),
+      CreateL2NormTensor(input_data, dims, true),
+      CreateL2NormTensor(output_data, dims, false),
   };
 
   TfLiteContext context;
@@ -110,7 +105,7 @@ void TestL2Normalization(const int* input_dims_data,
   TF_LITE_MICRO_EXPECT_NE(nullptr, registration);
 
   TfLiteL2NormParams builtin_data = {
-    .activation = kTfLiteActNone,
+      .activation = kTfLiteActNone,
   };
 
   int inputs_array_data[] = {1, 0};
@@ -158,22 +153,18 @@ void TestL2Normalization(const int* input_dims_data,
 }  // namespace testing
 }  // namespace tflite
 
-
 TF_LITE_MICRO_TESTS_BEGIN
 
 TF_LITE_MICRO_TEST(SimpleFloatTest) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  const float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  const float expected_output_data[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  const float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  const float expected_output_data[data_length] = {-0.55, 0.3,   0.35,
+                                                   0.6,   -0.35, 0.05};
   float output_data[data_length];
 
-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-    expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }
 
 TF_LITE_MICRO_TEST(ZerosVectorFloatTest) {
@@ -183,42 +174,39 @@ TF_LITE_MICRO_TEST(ZerosVectorFloatTest) {
   const float expected_output_data[data_length] = {0, 0, 0, 0, 0, 0};
   float output_data[data_length];
 
-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-    expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }
 
 TF_LITE_MICRO_TEST(SimpleFloatWithRankLessThanFourTest) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  const float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  const float expected_output_data[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  const float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  const float expected_output_data[data_length] = {-0.55, 0.3,   0.35,
+                                                   0.6,   -0.35, 0.05};
   float output_data[data_length];
 
-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-    expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }
 
 TF_LITE_MICRO_TEST(MultipleBatchFloatTest) {
   const int input_dims[] = {4, 3, 1, 1, 6};
   constexpr int data_length = 18;
   const float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
   };
   const float expected_output_data[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
   };
   float output_data[data_length];
 
-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-    expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }
 
 TF_LITE_MICRO_TEST(ZerosVectorUint8Test) {
@@ -231,44 +219,36 @@ TF_LITE_MICRO_TEST(ZerosVectorUint8Test) {
 
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
 
-  tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
-    expected_output_data, output_data, .1);
+  tflite::testing::TestL2Normalization<uint8_t>(
+      input_dims, quantized_input, expected_output_data, output_data, .1);
 }
 
 TF_LITE_MICRO_TEST(SimpleUint8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  float expected_output[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  float expected_output[data_length] = {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05};
   uint8_t quantized_input[data_length];
   uint8_t output_data[data_length];
 
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
 
-  tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
-    expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<uint8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }
 
 TF_LITE_MICRO_TEST(SimpleInt8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  float expected_output[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  float expected_output[data_length] = {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05};
   int8_t quantized_input[data_length];
   int8_t output_data[data_length];
 
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
 
-  tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
-    expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<int8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }
 
 TF_LITE_MICRO_TEST(ZerosVectorInt8Test) {
@@ -281,52 +261,52 @@ TF_LITE_MICRO_TEST(ZerosVectorInt8Test) {
 
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
 
-  tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
-    expected_output_data, output_data, .1);
+  tflite::testing::TestL2Normalization<int8_t>(
+      input_dims, quantized_input, expected_output_data, output_data, .1);
 }
 
 TF_LITE_MICRO_TEST(MultipleBatchUint8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 18;
   float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
   };
   float expected_output[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
   };
   uint8_t quantized_input[data_length];
   uint8_t output_data[data_length];
 
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
 
-  tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
-    expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<uint8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }
 
 TF_LITE_MICRO_TEST(MultipleBatchInt8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 18;
   float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
+      -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
   };
   float expected_output[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
+      -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
   };
   int8_t quantized_input[data_length];
   int8_t output_data[data_length];
 
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
 
-  tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
-    expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<int8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }
 
 TF_LITE_MICRO_TESTS_END
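Note on the quantization helpers used above: F2QS affine-quantizes a float into the target integer range for a given [min, max], and the Dequantize helper in this file is its inverse. A minimal self-contained sketch of the assumed mapping (illustrative only; the _sketch names are hypothetical, and rounding/clamping details may differ from the real test_utils implementation):

  #include <algorithm>
  #include <cmath>
  #include <cstdint>
  #include <limits>

  // Affine-quantize a float into int8 for a given [min, max] range.
  inline int8_t F2QS_sketch(float value, float min, float max) {
    const float scale = (max - min) / 255.0f;  // 256 representable values
    const int zero_point = std::numeric_limits<int8_t>::min() +
                           static_cast<int>(std::round(-min / scale));
    const int q = static_cast<int>(std::round(value / scale)) + zero_point;
    return static_cast<int8_t>(std::min(127, std::max(-128, q)));
  }

  // Inverse mapping; matches the Dequantize helper in the test above.
  inline float Dequantize_sketch(int8_t q, float scale, int32_t zero_point) {
    return scale * (q - zero_point);
  }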
diff --git a/tensorflow/lite/micro/kernels/logical_test.cc b/tensorflow/lite/micro/kernels/logical_test.cc
index 262de56ee72..5cf116b2eb4 100644
--- a/tensorflow/lite/micro/kernels/logical_test.cc
+++ b/tensorflow/lite/micro/kernels/logical_test.cc
@@ -39,9 +39,9 @@ void TestLogicalOp(tflite::BuiltinOperator op,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateBoolTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateBoolTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateBoolTensor(input1_data, input1_dims),
+      CreateBoolTensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/logistic_test.cc b/tensorflow/lite/micro/kernels/logistic_test.cc
index 7e6484b5154..0403b744227 100644
--- a/tensorflow/lite/micro/kernels/logistic_test.cc
+++ b/tensorflow/lite/micro/kernels/logistic_test.cc
@@ -36,8 +36,8 @@ void TestLogisticFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -96,10 +96,8 @@ void TestLogisticInt8(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/maximum_minimum_test.cc b/tensorflow/lite/micro/kernels/maximum_minimum_test.cc
index 2c50552ad79..4e59b69623e 100644
--- a/tensorflow/lite/micro/kernels/maximum_minimum_test.cc
+++ b/tensorflow/lite/micro/kernels/maximum_minimum_test.cc
@@ -40,9 +40,9 @@ void TestMaxMinFloat(tflite::BuiltinOperator op,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -96,12 +96,9 @@ void TestMaxMinQuantized(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
-                            input1_min, input1_max),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
-                            input2_min, input2_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input1_data, input1_dims, input1_min, input1_max),
+      CreateQuantizedTensor(input2_data, input2_dims, input2_min, input2_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
@@ -153,12 +150,9 @@ void TestMaxMinQuantizedInt32(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(input1_data, input1_dims, "input1_tensor",
-                              input1_scale),
-      CreateQuantized32Tensor(input2_data, input2_dims, "input2_tensor",
-                              input2_scale),
-      CreateQuantized32Tensor(output_data, output_dims, "output_tensor",
-                              output_scale),
+      CreateQuantized32Tensor(input1_data, input1_dims, input1_scale),
+      CreateQuantized32Tensor(input2_data, input2_dims, input2_scale),
+      CreateQuantized32Tensor(output_data, output_dims, output_scale),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/mul_test.cc b/tensorflow/lite/micro/kernels/mul_test.cc
index d58d5eaf715..6b4d4f07b64 100644
--- a/tensorflow/lite/micro/kernels/mul_test.cc
+++ b/tensorflow/lite/micro/kernels/mul_test.cc
@@ -41,9 +41,9 @@ void TestMulFloat(std::initializer_list<int> input1_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -113,12 +113,9 @@ void TestMulQuantized(std::initializer_list<int> input1_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input1_data, input1_dims, input_min, input_max),
+      CreateQuantizedTensor(input2_data, input2_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/neg_test.cc b/tensorflow/lite/micro/kernels/neg_test.cc
index 9608e31d73c..12a47f09900 100644
--- a/tensorflow/lite/micro/kernels/neg_test.cc
+++ b/tensorflow/lite/micro/kernels/neg_test.cc
@@ -35,8 +35,8 @@ void TestNegFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/pack_test.cc b/tensorflow/lite/micro/kernels/pack_test.cc
index 187fc2a0ddd..f9ac20a28ab 100644
--- a/tensorflow/lite/micro/kernels/pack_test.cc
+++ b/tensorflow/lite/micro/kernels/pack_test.cc
@@ -39,9 +39,9 @@ void TestPackTwoInputsFloat(std::initializer_list<int> input1_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -114,10 +114,10 @@ void TestPackThreeInputsFloat(std::initializer_list<int> input1_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(input3_data, input3_dims, "input3_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(input3_data, input3_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -189,9 +189,9 @@ void TestPackTwoInputsQuantized(
   TfLiteTensor tensors[tensors_size] = {
      // CreateQuantizedTensor needs min/max values as input, but these values
      // don't matter for the functionality of PACK, so they are just set to 0 and 10.
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor", 0, 10),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor", 0, 10),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor", 0, 10)};
+      CreateQuantizedTensor(input1_data, input1_dims, 0, 10),
+      CreateQuantizedTensor(input2_data, input2_dims, 0, 10),
+      CreateQuantizedTensor(output_data, output_dims, 0, 10)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -259,9 +259,9 @@ void TestPackTwoInputsQuantized32(
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(input1_data, input1_dims, "input1_tensor", 1.0),
-      CreateQuantized32Tensor(input2_data, input2_dims, "input2_tensor", 1.0),
-      CreateQuantized32Tensor(output_data, output_dims, "output_tensor", 1.0)};
+      CreateQuantized32Tensor(input1_data, input1_dims, 1.0),
+      CreateQuantized32Tensor(input2_data, input2_dims, 1.0),
+      CreateQuantized32Tensor(output_data, output_dims, 1.0)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
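The hunk context above ends inside the sentinel loop; its assumed body is a one-liner that seeds every output element with a value no test kernel produces (23 here is an illustrative sentinel), so the expectations that follow can detect elements the kernel never wrote:

  for (int i = 0; i < output_dims_count; ++i) {
    output_data[i] = 23;  // arbitrary sentinel, expected to be overwritten
  }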
diff --git a/tensorflow/lite/micro/kernels/pad_test.cc b/tensorflow/lite/micro/kernels/pad_test.cc
index 28b2069bc18..bef7ce0fa9f 100644
--- a/tensorflow/lite/micro/kernels/pad_test.cc
+++ b/tensorflow/lite/micro/kernels/pad_test.cc
@@ -121,9 +121,9 @@ void TestPadFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateInt32Tensor(pad_data, pad_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
@@ -149,10 +149,10 @@ void TestPadV2Float(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
-      CreateFloatTensor(&pad_value, pad_value_dims, "pad value tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateInt32Tensor(pad_data, pad_dims),
+      CreateFloatTensor(&pad_value, pad_value_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
@@ -179,10 +179,10 @@ void TestPadQuantized(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(pad_data, pad_dims),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor")};
+                            output_zero_point)};
 
   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
@@ -218,13 +218,12 @@ void TestPadV2Quantized(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(pad_data, pad_dims),
       CreateQuantizedTensor(&pad_value, &pad_value_quantized, pad_value_dims,
-                            pad_value_scale, pad_value_zero_point,
-                            "pad value tensor"),
+                            pad_value_scale, pad_value_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor")};
+                            output_zero_point)};
 
   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
diff --git a/tensorflow/lite/micro/kernels/pooling_test.cc b/tensorflow/lite/micro/kernels/pooling_test.cc
index e656fb802ed..9e11e9a4d57 100644
--- a/tensorflow/lite/micro/kernels/pooling_test.cc
+++ b/tensorflow/lite/micro/kernels/pooling_test.cc
@@ -42,8 +42,8 @@ void TestAveragePoolingFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -114,10 +114,8 @@ void TestAveragePoolingQuantized(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
@@ -183,8 +181,8 @@ void TestMaxPoolFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -257,10 +255,8 @@ void TestMaxPoolQuantized(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/prelu_test.cc b/tensorflow/lite/micro/kernels/prelu_test.cc
index c60e33313bb..37bb51660e2 100644
--- a/tensorflow/lite/micro/kernels/prelu_test.cc
+++ b/tensorflow/lite/micro/kernels/prelu_test.cc
@@ -38,9 +38,9 @@ void TestPreluFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(alpha_data, alpha_dims, "alpha_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(alpha_data, alpha_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@@ -102,12 +102,9 @@ void TestPreluQuantized(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(alpha_data, alpha_dims, "alpha_tensor", alpha_min,
-                            alpha_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(alpha_data, alpha_dims, alpha_min, alpha_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
diff --git a/tensorflow/lite/micro/kernels/quantize_test.cc b/tensorflow/lite/micro/kernels/quantize_test.cc
index 33d356f9191..37f06a29c58 100644
--- a/tensorflow/lite/micro/kernels/quantize_test.cc
+++ b/tensorflow/lite/micro/kernels/quantize_test.cc
@@ -89,8 +89,8 @@ void TestQuantizeFloat(const int* input_dims_data, const float* input_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
   const int output_dims_count = ElementCount(*output_dims);
 
-  TfLiteTensor output_tensor = CreateQuantizedTensor(
-      output_data, output_dims, scale, zero_point, "output_tensor");
+  TfLiteTensor output_tensor =
+      CreateQuantizedTensor(output_data, output_dims, scale, zero_point);
 
   TfLiteAffineQuantization quant;
   float scales[] = {1, scale};
@@ -102,7 +102,7 @@ void TestQuantizeFloat(const int* input_dims_data, const float* input_data,
   // 1 input, 1 output.
   constexpr int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
+      CreateFloatTensor(input_data, input_dims),
       output_tensor,
   };
 
@@ -121,9 +121,8 @@ void TestRequantize(const int* input_dims_data, const float* input_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
   const int output_dims_count = ElementCount(*output_dims);
 
-  TfLiteTensor output_tensor =
-      CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor");
+  TfLiteTensor output_tensor = CreateQuantizedTensor(
+      output_data, output_dims, output_scale, output_zero_point);
 
   TfLiteAffineQuantization quant;
   float scales[] = {1, output_scale};
@@ -136,7 +135,7 @@ void TestRequantize(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       output_tensor,
   };
 
diff --git a/tensorflow/lite/micro/kernels/reduce_test.cc b/tensorflow/lite/micro/kernels/reduce_test.cc
index a65ba8cd376..b25b4f76766 100644
--- a/tensorflow/lite/micro/kernels/reduce_test.cc
+++ b/tensorflow/lite/micro/kernels/reduce_test.cc
@@ -117,9 +117,9 @@ void TestMeanFloatInput4D(const int* input_dims_data, const float* input_data,
 
   constexpr int tensors_size = num_of_inputs + num_of_outputs;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(axis_data, axis_dims, "axis_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateInt32Tensor(axis_data, axis_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TF_LITE_MICRO_EXPECT_EQ(
diff --git a/tensorflow/lite/micro/kernels/reshape_test.cc b/tensorflow/lite/micro/kernels/reshape_test.cc
index 1a88997cf65..07c64969ba1 100644
--- a/tensorflow/lite/micro/kernels/reshape_test.cc
+++ b/tensorflow/lite/micro/kernels/reshape_test.cc
@@ -116,18 +116,18 @@ void TestReshape(std::initializer_list<int> input_dims_data,
                  bool expect_failure = false) {
   TfLiteIntArray* input_dims = IntArrayFromInitializer(input_dims_data);
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
-  TfLiteTensor input_tensor = CreateTensor<T, tensor_input_type>(
-      input_data, input_dims, "input_tensor");
+  TfLiteTensor input_tensor =
+      CreateTensor<T, tensor_input_type>(input_data, input_dims);
   T* output_data = reinterpret_cast<T*>(output_data_raw);
-  TfLiteTensor output_tensor = CreateTensor<T, tensor_input_type>(
-      output_data, output_dims, "input_tensor");
+  TfLiteTensor output_tensor =
+      CreateTensor<T, tensor_input_type>(output_data, output_dims);
   // Reshape param is passed as op's param.
   TestReshapeImpl<T>(&input_tensor, nullptr, &output_tensor, expected_output,
                      expected_dims, expect_failure);
   // Reshape param is passed as a tensor.
   TfLiteIntArray* shape_dims = IntArrayFromInitializer(shape_dims_data);
-  auto shape_tensor = CreateTensor<int32_t, kTfLiteInt32>(
-      shape_data, shape_dims, "shape_tensor");
+  auto shape_tensor =
+      CreateTensor<int32_t, kTfLiteInt32>(shape_data, shape_dims);
   TestReshapeImpl<T>(&input_tensor, &shape_tensor, &output_tensor,
                      expected_output, expected_dims, expect_failure);
 }
@@ -194,12 +194,11 @@ TF_LITE_MICRO_TEST(InvalidShape) {
   using tflite::testing::IntArrayFromInts;
   TfLiteIntArray* input_dims = IntArrayFromInitializer({3, 1, 2, 2});
   auto input_data = {3.0f};
-  auto input_tensor = CreateFloatTensor(input_data, input_dims, "input_tensor");
+  auto input_tensor = CreateFloatTensor(input_data, input_dims);
   float output_data[4];
   int output_dims_data[6] = {2, 2, 1, 2, 2, 1};
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
-  auto output_tensor =
-      CreateFloatTensor(output_data, output_dims, "input_tensor");
+  auto output_tensor = CreateFloatTensor(output_data, output_dims);
   tflite::testing::TestReshapeImpl<float>(&input_tensor,   // input_tensor
                                           nullptr,         // shape_tensor
                                           &output_tensor,  // output_tensor
@@ -258,15 +257,14 @@ TF_LITE_MICRO_TEST(LegacyScalarOutput) {
   using tflite::testing::IntArrayFromInts;
   TfLiteIntArray* input_dims = IntArrayFromInitializer({1, 1});
   auto input_data = {3.0f};
-  auto input_tensor = CreateFloatTensor(input_data, input_dims, "input_tensor");
+  auto input_tensor = CreateFloatTensor(input_data, input_dims);
   float output_data[1];
   int output_dims_data[2] = {1, 0};
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
-  auto output_tensor =
-      CreateFloatTensor(output_data, output_dims, "input_tensor");
+  auto output_tensor = CreateFloatTensor(output_data, output_dims);
   TfLiteIntArray* shape_dims = tflite::testing::IntArrayFromInitializer({1, 0});
-  auto shape_tensor = tflite::testing::CreateTensor<int32_t, kTfLiteInt32>(
-      {0}, shape_dims, "shape_tensor");
+  auto shape_tensor =
+      tflite::testing::CreateTensor<int32_t, kTfLiteInt32>({0}, shape_dims);
   tflite::testing::TestReshapeImpl<float>(&input_tensor,   // input_tensor
                                           &shape_tensor,   // shape_tensor
                                           &output_tensor,  // output_tensor
diff --git a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
index fda4a1f5173..e8dad09a635 100644
--- a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
+++ b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
@@ -26,22 +26,18 @@ namespace {
 using uint8 = std::uint8_t;
 using int32 = std::int32_t;
 
-TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims,
-                              const char* name) {
-  return CreateFloatTensor(data, dims, name);
+TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims) {
+  return CreateFloatTensor(data, dims);
 }
 
-TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims,
-                              const char* name) {
-  return CreateQuantizedTensor(data, dims, name, 0, 255);
+TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims) {
+  return CreateQuantizedTensor(data, dims, 0, 255);
 }
 
-TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims,
-                              const char* name) {
-  return CreateQuantizedTensor(data, dims, name, -128, 127);
+TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims) {
+  return CreateQuantizedTensor(data, dims, -128, 127);
 }
 
-
 // Input data is expected to be a 4-D tensor of [batch, height, width, channels]
 // Output data should match the input data's batch and channels
 // The expected-size input should be a 1-D tensor with 2 elements: new_height & new_width
@@ -62,9 +58,9 @@ void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data,
 
   constexpr int tensors_size = 3;
   TfLiteTensor tensors[tensors_size] = {
-      TestCreateTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(expected_size_data, expected_size_dims, "size_tensor"),
-      TestCreateTensor(output_data, output_dims, "output_tensor"),
+      TestCreateTensor(input_data, input_dims),
+      CreateInt32Tensor(expected_size_data, expected_size_dims),
+      TestCreateTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
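A note on the dims arrays used throughout these tests: plain int buffers are handed to IntArrayFromInts in a length-prefixed layout, where element 0 is the rank and the remaining entries are the dimensions. A sketch of the assumed convention (the reinterpret-in-place behavior mirrors FloatArrayFromFloats, visible in the test_helpers.cc hunks below):

  // {rank, d0, d1, d2, d3}: a 1x2x2x1 input, plus a 2-element size tensor.
  const int input_dims_data[] = {4, 1, 2, 2, 1};
  const int size_dims_data[] = {1, 2};  // new_height, new_width
  TfLiteIntArray* input_dims =
      tflite::testing::IntArrayFromInts(input_dims_data);
  TfLiteIntArray* size_dims =
      tflite::testing::IntArrayFromInts(size_dims_data);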
diff --git a/tensorflow/lite/micro/kernels/round_test.cc b/tensorflow/lite/micro/kernels/round_test.cc
index c1421e099ea..e239faf4c00 100644
--- a/tensorflow/lite/micro/kernels/round_test.cc
+++ b/tensorflow/lite/micro/kernels/round_test.cc
@@ -32,8 +32,8 @@ void TestRound(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
diff --git a/tensorflow/lite/micro/kernels/softmax_test.cc b/tensorflow/lite/micro/kernels/softmax_test.cc
index b8d89673837..5ed994fe9f0 100644
--- a/tensorflow/lite/micro/kernels/softmax_test.cc
+++ b/tensorflow/lite/micro/kernels/softmax_test.cc
@@ -36,8 +36,8 @@ void TestSoftmaxFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -100,10 +100,8 @@ void TestSoftmaxQuantized(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
@@ -166,10 +164,8 @@ void TestSoftmaxQuantizedSigned(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/split_test.cc b/tensorflow/lite/micro/kernels/split_test.cc
index fb46dfbfb49..b4423c01dca 100644
--- a/tensorflow/lite/micro/kernels/split_test.cc
+++ b/tensorflow/lite/micro/kernels/split_test.cc
@@ -45,10 +45,10 @@ void TestSplitTwoOutputsFloat(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
-      CreateFloatTensor(output2_data, output2_dims, "output2_tensor")};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output1_data, output1_dims),
+      CreateFloatTensor(output2_data, output2_dims)};
 
   // Currently only a constant axis tensor is supported.
   tensors[0].allocation_type = kTfLiteMmapRo;
@@ -141,12 +141,12 @@ void TestSplitFourOutputsFloat(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
-      CreateFloatTensor(output2_data, output2_dims, "output2_tensor"),
-      CreateFloatTensor(output3_data, output1_dims, "output3_tensor"),
-      CreateFloatTensor(output4_data, output1_dims, "output4_tensor")};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output1_data, output1_dims),
+      CreateFloatTensor(output2_data, output2_dims),
+      CreateFloatTensor(output3_data, output1_dims),
+      CreateFloatTensor(output4_data, output1_dims)};
 
   // Currently only a constant axis tensor is supported.
   tensors[0].allocation_type = kTfLiteMmapRo;
@@ -243,12 +243,10 @@ void TestSplitTwoOutputsQuantized(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", 0, 10),
-      CreateQuantizedTensor(output1_data, output1_dims, "output1_tensor", 0,
-                            10),
-      CreateQuantizedTensor(output2_data, output2_dims, "output2_tensor", 0,
-                            10)};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateQuantizedTensor(input_data, input_dims, 0, 10),
+      CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
+      CreateQuantizedTensor(output2_data, output2_dims, 0, 10)};
 
   // Currently only a constant axis tensor is supported.
   tensors[0].allocation_type = kTfLiteMmapRo;
@@ -332,12 +330,10 @@ void TestSplitTwoOutputsQuantized32(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateQuantized32Tensor(input_data, input_dims, "input_tensor", 1.0),
-      CreateQuantized32Tensor(output1_data, output1_dims, "output1_tensor",
-                              1.0),
-      CreateQuantized32Tensor(output2_data, output2_dims, "output2_tensor",
-                              1.0)};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateQuantized32Tensor(input_data, input_dims, 1.0),
+      CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
+      CreateQuantized32Tensor(output2_data, output2_dims, 1.0)};
 
   // Currently only a constant axis tensor is supported.
   tensors[0].allocation_type = kTfLiteMmapRo;
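kTfLiteMmapRo is the allocation type TFLite uses for constant, read-only data, which is why these tests pin the axis tensor to it. A kernel that only supports a constant axis is assumed to gate on that roughly like the IsConstantTensor helper in TFLite's kernel_util.h; a sketch:

  // A tensor whose data is read-only and already available at Prepare time.
  inline bool IsConstantTensorSketch(const TfLiteTensor* tensor) {
    return tensor->allocation_type == kTfLiteMmapRo;
  }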
diff --git a/tensorflow/lite/micro/kernels/strided_slice_test.cc b/tensorflow/lite/micro/kernels/strided_slice_test.cc
index 308c20ec0d8..0ff01f7a71b 100644
--- a/tensorflow/lite/micro/kernels/strided_slice_test.cc
+++ b/tensorflow/lite/micro/kernels/strided_slice_test.cc
@@ -25,15 +25,13 @@ namespace {
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
-                                 const char* name, bool is_variable = false) {
+                                 bool is_variable = false) {
   TfLiteTensor result;
   result.type = tensor_input_type;
   result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
   result.dims = dims;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(input_type);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
@@ -41,9 +39,9 @@ inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
-                                 TfLiteIntArray* dims, const char* name,
+                                 TfLiteIntArray* dims,
                                  bool is_variable = false) {
-  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
+  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims,
                                                      is_variable);
 }
 
@@ -73,15 +71,11 @@ void TestStrideSlide(std::initializer_list<int> input_shape,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateTensor<input_type, tensor_input_type>(input_data, input_dims,
-                                                  "input_tensor"),
-      CreateTensor<int32_t, kTfLiteInt32>(begin_data, begin_dims,
-                                          "begin_tensor"),
-      CreateTensor<int32_t, kTfLiteInt32>(end_data, end_dims, "end_tensor"),
-      CreateTensor<int32_t, kTfLiteInt32>(strides_data, strides_dims,
-                                          "stride_tensor"),
-      CreateTensor<input_type, tensor_input_type>(output_data, output_dims,
-                                                  "output_tensor"),
+      CreateTensor<input_type, tensor_input_type>(input_data, input_dims),
+      CreateTensor<int32_t, kTfLiteInt32>(begin_data, begin_dims),
+      CreateTensor<int32_t, kTfLiteInt32>(end_data, end_dims),
+      CreateTensor<int32_t, kTfLiteInt32>(strides_data, strides_dims),
+      CreateTensor<input_type, tensor_input_type>(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
diff --git a/tensorflow/lite/micro/kernels/sub_test.cc b/tensorflow/lite/micro/kernels/sub_test.cc
index fd298e55276..d6ab48ead36 100644
--- a/tensorflow/lite/micro/kernels/sub_test.cc
+++ b/tensorflow/lite/micro/kernels/sub_test.cc
@@ -129,9 +129,9 @@ void TestSubFloat(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateSubGoldens(tensors, tensors_size, expected_output, output_data,
@@ -156,15 +156,14 @@ void TestSubQuantized(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      tflite::testing::CreateQuantizedTensor(
-          input1_data, input1_quantized, input1_dims, input1_scale,
-          input1_zero_point, "input1_tensor"),
-      tflite::testing::CreateQuantizedTensor(
-          input2_data, input2_quantized, input2_dims, input2_scale,
-          input2_zero_point, "input2_tensor"),
+      tflite::testing::CreateQuantizedTensor(input1_data, input1_quantized,
+                                             input1_dims, input1_scale,
+                                             input1_zero_point),
+      tflite::testing::CreateQuantizedTensor(input2_data, input2_quantized,
+                                             input2_dims, input2_scale,
+                                             input2_zero_point),
       tflite::testing::CreateQuantizedTensor(output_data, output_dims,
-                                             output_scale, output_zero_point,
-                                             "output_tensor"),
+                                             output_scale, output_zero_point),
   };
   tflite::AsymmetricQuantize(golden, golden_quantized,
                              ElementCount(*output_dims), output_scale,
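tflite::AsymmetricQuantize above maps the float golden values into the output tensor's integer domain so expected and actual results can be compared as quantized values. A sketch of the assumed element-wise mapping (hypothetical name and signature; clamping omitted):

  #include <cmath>
  #include <cstdint>

  inline void AsymmetricQuantizeSketch(const float* values, int8_t* quantized,
                                       int count, float scale, int zero_point) {
    // q = round(x / scale) + zero_point, per element of the golden array.
    for (int i = 0; i < count; ++i) {
      quantized[i] =
          static_cast<int8_t>(std::round(values[i] / scale) + zero_point);
    }
  }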
diff --git a/tensorflow/lite/micro/kernels/svdf_test.cc b/tensorflow/lite/micro/kernels/svdf_test.cc
index 31c610a72c7..560f6986a51 100644
--- a/tensorflow/lite/micro/kernels/svdf_test.cc
+++ b/tensorflow/lite/micro/kernels/svdf_test.cc
@@ -341,13 +341,12 @@ void TestSVDF(const int batch_size, const int num_units, const int input_size,
 
   const int tensor_count = 5;  // 4 inputs, 1 output
   TfLiteTensor tensors[] = {
-      CreateFloatTensor(input_data, input_dims, "input"),
-      CreateFloatTensor(weights_feature_data, weights_feature_dims,
-                        "weights_feature"),
-      CreateFloatTensor(weights_time_data, weights_time_dims, "weights_time"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(weights_feature_data, weights_feature_dims),
+      CreateFloatTensor(weights_time_data, weights_time_dims),
       CreateFloatTensor(activation_state_data, activation_state_dims,
-                        "activation_state", true /* is_variable */),
-      CreateFloatTensor(output_data, output_dims, "output"),
+                        /*is_variable=*/true),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateSVDFGoldens(batch_size, num_units, input_size, rank, tensors,
@@ -393,19 +392,17 @@ inline void TestIntegerSVDF(
 
   TfLiteTensor tensors[] = {
       CreateQuantizedTensor(input_data, input_dims, input_scale,
-                            0 /* zero-point */, "input"),
+                            /*zero_point=*/0),
       CreateQuantizedTensor(weights_feature_data, weights_feature_dims,
-                            weights_feature_scale, 0 /* zero-point */,
-                            "weights_feature"),
+                            weights_feature_scale, /*zero_point=*/0),
       CreateQuantizedTensor(weights_time_data, weights_time_dims,
-                            weights_time_scale, 0 /* zero-point */,
-                            "weights_time"),
-      CreateQuantized32Tensor(bias_data, bias_dims, "bias", bias_scale),
+                            weights_time_scale, /*zero_point=*/0),
+      CreateQuantized32Tensor(bias_data, bias_dims, bias_scale),
       CreateQuantizedTensor(activation_state_data, activation_state_dims,
-                            activation_scale, 0 /* zero-point */,
-                            "activation_state", true /* is_variable */),
+                            activation_scale, /*zero_point=*/0,
+                            /*is_variable=*/true),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            0 /* zero-point */, "output")};
+                            /*zero_point=*/0)};
 
   // TODO(b/147839421): Affine Quantization Params should be set on tensor
   // creation.
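Until that TODO lands, the pattern in these tests is to attach the affine quantization params to a tensor after creation. A sketch of the assumed wiring, reusing the length-prefixed array convention from the test helpers (single-scale, per-tensor case; quant must outlive the tensor):

  TfLiteAffineQuantization quant;       // must outlive the tensor
  float scales[] = {1, scale};          // length-prefixed: {count, value}
  int zero_points[] = {1, zero_point};  // length-prefixed: {count, value}
  quant.scale = tflite::testing::FloatArrayFromFloats(scales);
  quant.zero_point = tflite::testing::IntArrayFromInts(zero_points);
  quant.quantized_dimension = 0;        // per-tensor quantization
  tensor.quantization = {kTfLiteAffineQuantization, &quant};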
diff --git a/tensorflow/lite/micro/kernels/tanh_test.cc b/tensorflow/lite/micro/kernels/tanh_test.cc
index cfdef61b271..cd61ef32157 100644
--- a/tensorflow/lite/micro/kernels/tanh_test.cc
+++ b/tensorflow/lite/micro/kernels/tanh_test.cc
@@ -36,8 +36,8 @@ void TestTanhFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -96,10 +96,8 @@ void TestTanhInt8(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;
diff --git a/tensorflow/lite/micro/kernels/unpack_test.cc b/tensorflow/lite/micro/kernels/unpack_test.cc
index fc524e90cd8..3fd6b1bf242 100644
--- a/tensorflow/lite/micro/kernels/unpack_test.cc
+++ b/tensorflow/lite/micro/kernels/unpack_test.cc
@@ -45,10 +45,10 @@ void TestUnpackThreeOutputsFloat(
   constexpr int output_size = 3;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
-      CreateFloatTensor(output2_data, output2_dims, "output2_tensor"),
-      CreateFloatTensor(output3_data, output3_dims, "output3_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output1_data, output1_dims),
+      CreateFloatTensor(output2_data, output2_dims),
+      CreateFloatTensor(output3_data, output3_dims)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output1_dims_count; ++i) {
@@ -132,8 +132,8 @@ void TestUnpackOneOutputFloat(std::initializer_list<int> input_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -211,13 +211,10 @@ void TestUnpackThreeOutputsQuantized(
       // CreateQuantizedTensor needs min/max values as input, but these values
       // don't matter for the functionality of UNPACK, so they are just set
       // to 0 and 10.
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", 0, 10),
-      CreateQuantizedTensor(output1_data, output1_dims, "output1_tensor", 0,
-                            10),
-      CreateQuantizedTensor(output2_data, output2_dims, "output2_tensor", 0,
-                            10),
-      CreateQuantizedTensor(output3_data, output3_dims, "output3_tensor", 0,
-                            10)};
+      CreateQuantizedTensor(input_data, input_dims, 0, 10),
+      CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
+      CreateQuantizedTensor(output2_data, output2_dims, 0, 10),
+      CreateQuantizedTensor(output3_data, output3_dims, 0, 10)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output1_dims_count; ++i) {
@@ -307,13 +304,10 @@ void TestUnpackThreeOutputsQuantized32(
   constexpr int output_size = 3;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(input_data, input_dims, "input_tensor", 1.0),
-      CreateQuantized32Tensor(output1_data, output1_dims, "output1_tensor",
-                              1.0),
-      CreateQuantized32Tensor(output2_data, output2_dims, "output2_tensor",
-                              1.0),
-      CreateQuantized32Tensor(output3_data, output3_dims, "output3_tensor",
-                              1.0)};
+      CreateQuantized32Tensor(input_data, input_dims, 1.0),
+      CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
+      CreateQuantized32Tensor(output2_data, output2_dims, 1.0),
+      CreateQuantized32Tensor(output3_data, output3_dims, 1.0)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output1_dims_count; ++i) {
diff --git a/tensorflow/lite/micro/micro_allocator.cc b/tensorflow/lite/micro/micro_allocator.cc
index be0cd9c436b..b5127d51f72 100644
--- a/tensorflow/lite/micro/micro_allocator.cc
+++ b/tensorflow/lite/micro/micro_allocator.cc
@@ -426,9 +426,6 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
 
     result->quantization = {kTfLiteAffineQuantization, quantization};
   }
-  if (flatbuffer_tensor.name() != nullptr) {
-    result->name = flatbuffer_tensor.name()->c_str();
-  }
   return kTfLiteOk;
 }
 
diff --git a/tensorflow/lite/micro/micro_optional_debug_tools.cc b/tensorflow/lite/micro/micro_optional_debug_tools.cc
index daa5d007cdf..96ea8f1a287 100644
--- a/tensorflow/lite/micro/micro_optional_debug_tools.cc
+++ b/tensorflow/lite/micro/micro_optional_debug_tools.cc
@@ -124,10 +124,9 @@ void PrintInterpreterState(MicroInterpreter* interpreter) {
   for (size_t tensor_index = 0; tensor_index < interpreter->tensors_size();
        tensor_index++) {
     TfLiteTensor* tensor = interpreter->tensor(static_cast<int>(tensor_index));
-    printf("Tensor %3zu %-20s %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
-           tensor->name, TensorTypeName(tensor->type),
-           AllocTypeName(tensor->allocation_type), tensor->bytes,
-           static_cast<double>(tensor->bytes / (1 << 20)));
+    printf("Tensor %3zu %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
+           TensorTypeName(tensor->type), AllocTypeName(tensor->allocation_type),
+           tensor->bytes, static_cast<double>(tensor->bytes / (1 << 20)));
     PrintTfLiteIntVector(tensor->dims);
   }
   printf("\n");
diff --git a/tensorflow/lite/micro/test_helpers.cc b/tensorflow/lite/micro/test_helpers.cc
index 7d1b4d895d9..832033c6828 100644
--- a/tensorflow/lite/micro/test_helpers.cc
+++ b/tensorflow/lite/micro/test_helpers.cc
@@ -758,22 +758,19 @@ TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
   return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
 }
 
-TfLiteTensor CreateTensor(TfLiteIntArray* dims, const char* name,
-                          bool is_variable) {
+TfLiteTensor CreateTensor(TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   result.dims = dims;
-  result.name = name;
   result.params = {};
   result.quantization = {kTfLiteNoQuantization, nullptr};
   result.is_variable = is_variable;
   result.allocation_type = kTfLiteMemNone;
-  result.allocation = nullptr;
   return result;
 }
 
 TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
-                               const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                               bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteFloat32;
   result.data.f = const_cast<float*>(data);
   result.bytes = ElementCount(*dims) * sizeof(float);
@@ -789,8 +786,8 @@ void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end) {
 }
 
 TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
-                              const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                              bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteBool;
   result.data.b = const_cast<bool*>(data);
   result.bytes = ElementCount(*dims) * sizeof(bool);
@@ -798,8 +795,8 @@ TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
 }
 
 TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                               const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                               bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(data);
   result.bytes = ElementCount(*dims) * sizeof(int32_t);
@@ -808,8 +805,8 @@ TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
 
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                                   bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteUInt8;
   result.data.uint8 = const_cast<uint8_t*>(data);
   result.params = {scale, zero_point};
@@ -820,8 +817,8 @@ TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
 
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                                   bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt8;
   result.data.int8 = const_cast<int8_t*>(data);
   result.params = {scale, zero_point};
@@ -832,8 +829,8 @@ TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
 
 TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                                   bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt16;
   result.data.i16 = const_cast<int16_t*>(data);
   result.params = {scale, zero_point};
@@ -842,38 +839,30 @@ TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
   return result;
 }
 
-TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                                     float scale, const char* name,
-                                     bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
+                                       TfLiteIntArray* dims, float input_scale,
+                                       float weights_scale, bool is_variable) {
+  float bias_scale = input_scale * weights_scale;
+  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt32;
-  result.data.i32 = const_cast<int32_t*>(data);
+  result.data.i32 = const_cast<int32_t*>(quantized);
   // Quantized int32 tensors always have a zero point of 0, since the range of
   // int32 values is large, and because zero point costs extra cycles during
   // processing.
-  result.params = {scale, 0};
+  result.params = {bias_scale, 0};
   result.quantization = {kTfLiteAffineQuantization, nullptr};
   result.bytes = ElementCount(*dims) * sizeof(int32_t);
   return result;
 }
 
-TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
-                                       TfLiteIntArray* dims, float input_scale,
-                                       float weights_scale, const char* name,
-                                       bool is_variable) {
-  float bias_scale = input_scale * weights_scale;
-  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
-  return CreateQuantized32Tensor(quantized, dims, bias_scale, name,
-                                 is_variable);
-}
-
 // Quantizes an int32 bias tensor with per-channel scales: each channel's
 // scale is the input scale multiplied by that channel's weight scale.
 TfLiteTensor CreatePerChannelQuantizedBiasTensor(
     const float* input, int32_t* quantized, TfLiteIntArray* dims,
     float input_scale, float* weight_scales, float* scales, int* zero_points,
     TfLiteAffineQuantization* affine_quant, int quantized_dimension,
-    const char* name, bool is_variable) {
+    bool is_variable) {
   int input_size = ElementCount(*dims);
   int num_channels = dims->data[quantized_dimension];
   // First element is reserved for array length
@@ -892,7 +881,7 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
   affine_quant->zero_point = IntArrayFromInts(zero_points);
   affine_quant->quantized_dimension = quantized_dimension;
 
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(quantized);
   result.quantization = {kTfLiteAffineQuantization, affine_quant};
@@ -903,7 +892,7 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
 TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
     const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
     int* zero_points, TfLiteAffineQuantization* affine_quant,
-    int quantized_dimension, const char* name, bool is_variable) {
+    int quantized_dimension, bool is_variable) {
   int channel_count = dims->data[quantized_dimension];
   scales[0] = static_cast<float>(channel_count);
   zero_points[0] = channel_count;
@@ -919,7 +908,7 @@ TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
   affine_quant->zero_point = IntArrayFromInts(zero_points);
   affine_quant->quantized_dimension = quantized_dimension;
 
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt8;
   result.data.int8 = const_cast<int8_t*>(quantized);
   result.quantization = {kTfLiteAffineQuantization, affine_quant};
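For call sites, CreateQuantizedBiasTensor keeps its shape minus the name: pass the float data plus a caller-owned int32 buffer, and the scale is derived internally as input_scale * weights_scale. A minimal usage sketch (dims and values are made up for illustration):

    int dims_arr[] = {1, 4};
    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);
    const float pre_quantized[] = {-1.0f, 0.0f, 0.5f, 1.0f};
    int32_t quantized[4];
    // Resulting params.scale is 0.5f * 0.5f = 0.25f; zero point is always 0.
    TfLiteTensor bias = tflite::testing::CreateQuantizedBiasTensor(
        pre_quantized, quantized, dims, /*input_scale=*/0.5f,
        /*weights_scale=*/0.5f);
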
diff --git a/tensorflow/lite/micro/test_helpers.h b/tensorflow/lite/micro/test_helpers.h
index 4353f69fdbd..634b856cd76 100644
--- a/tensorflow/lite/micro/test_helpers.h
+++ b/tensorflow/lite/micro/test_helpers.h
@@ -117,42 +117,40 @@ TfLiteIntArray* IntArrayFromInts(const int* int_array);
 TfLiteFloatArray* FloatArrayFromFloats(const float* floats);
 
 TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
-                               const char* name, bool is_variable = false);
+                               bool is_variable = false);
 
 void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end);
 
 TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
-                              const char* name, bool is_variable = false);
+                              bool is_variable = false);
 
 TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                               const char* name, bool is_variable = false);
+                               bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable = false);
+                                   bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable = false);
+                                   bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable = false);
+                                   bool is_variable = false);
 
 template <typename T>
 TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized,
                                    TfLiteIntArray* dims, float scale,
-                                   int zero_point, const char* name,
-                                   bool is_variable = false) {
+                                   int zero_point, bool is_variable = false) {
   int input_size = ElementCount(*dims);
   tflite::AsymmetricQuantize(input, quantized, input_size, scale, zero_point);
-  return CreateQuantizedTensor(quantized, dims, scale, zero_point, name,
-                               is_variable);
+  return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable);
 }
 
 TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                        TfLiteIntArray* dims, float input_scale,
-                                       float weights_scale, const char* name,
+                                       float weights_scale,
                                        bool is_variable = false);
 
 // Quantizes int32 bias tensor with per-channel weights determined by input
@@ -161,12 +159,12 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
     const float* input, int32_t* quantized, TfLiteIntArray* dims,
     float input_scale, float* weight_scales, float* scales, int* zero_points,
     TfLiteAffineQuantization* affine_quant, int quantized_dimension,
-    const char* name, bool is_variable = false);
+    bool is_variable = false);
 
 TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
     const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
     int* zero_points, TfLiteAffineQuantization* affine_quant,
-    int quantized_dimension, const char* name, bool is_variable = false);
+    int quantized_dimension, bool is_variable = false);
 
 }  // namespace testing
 }  // namespace tflite
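The templated CreateQuantizedTensor overload follows the same pattern: quantize a float buffer into caller-provided storage, with the name argument simply dropped at each call site. A hedged before/after sketch (scale, zero point, and shape are arbitrary):

    int dims_arr[] = {2, 1, 3};
    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);
    const float input[] = {-1.0f, 0.0f, 1.0f};
    int8_t quantized[3];
    // Before: CreateQuantizedTensor(input, quantized, dims, 0.5f, 0, "in");
    TfLiteTensor t = tflite::testing::CreateQuantizedTensor(
        input, quantized, dims, /*scale=*/0.5f, /*zero_point=*/0);
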
diff --git a/tensorflow/lite/micro/testing/test_utils.cc b/tensorflow/lite/micro/testing/test_utils.cc
index 7d7a5554b10..4471b2e2929 100644
--- a/tensorflow/lite/micro/testing/test_utils.cc
+++ b/tensorflow/lite/micro/testing/test_utils.cc
@@ -149,20 +149,17 @@ void PopulateContext(TfLiteTensor* tensors, int tensors_size,
 }
 
 TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
-                               TfLiteIntArray* dims, const char* name,
-                               bool is_variable) {
-  return CreateFloatTensor(data.begin(), dims, name, is_variable);
+                               TfLiteIntArray* dims, bool is_variable) {
+  return CreateFloatTensor(data.begin(), dims, is_variable);
 }
 
 TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
-                              TfLiteIntArray* dims, const char* name,
-                              bool is_variable) {
-  return CreateBoolTensor(data.begin(), dims, name, is_variable);
+                              TfLiteIntArray* dims, bool is_variable) {
+  return CreateBoolTensor(data.begin(), dims, is_variable);
 }
 
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
-                                   bool is_variable) {
+                                   float min, float max, bool is_variable) {
   TfLiteTensor result;
   result.type = kTfLiteUInt8;
   result.data.uint8 = const_cast<uint8_t*>(data);
@@ -171,21 +168,18 @@ TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                    ZeroPointFromMinMax<uint8_t>(min, max)};
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(uint8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 
 TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   float min, float max, bool is_variable) {
-  return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
+                                   TfLiteIntArray* dims, float min, float max,
+                                   bool is_variable) {
+  return CreateQuantizedTensor(data.begin(), dims, min, max, is_variable);
 }
 
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
-                                   bool is_variable) {
+                                   float min, float max, bool is_variable) {
   TfLiteTensor result;
   result.type = kTfLiteInt8;
   result.data.int8 = const_cast<int8_t*>(data);
@@ -194,21 +188,18 @@ TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                    ZeroPointFromMinMax<int8_t>(min, max)};
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 
 TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   float min, float max, bool is_variable) {
-  return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
+                                   TfLiteIntArray* dims, float min, float max,
+                                   bool is_variable) {
+  return CreateQuantizedTensor(data.begin(), dims, min, max, is_variable);
 }
 
 TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   bool is_variable) {
+                                   TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   SymmetricQuantize(data, dims, quantized_data, &result.params.scale);
   result.data.uint8 = quantized_data;
@@ -217,15 +208,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
   result.params.zero_point = 128;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(uint8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 
 TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   bool is_variable) {
+                                   TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
   result.data.int8 = quantized_data;
@@ -234,15 +222,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
   result.params.zero_point = 0;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 
 TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   bool is_variable) {
+                                   TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
   result.data.i16 = quantized_data;
@@ -251,15 +236,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
   result.params.zero_point = 0;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int16_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 
 TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                                     const char* name, float scale,
-                                     bool is_variable) {
+                                     float scale, bool is_variable) {
   TfLiteTensor result;
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(data);
@@ -270,16 +252,14 @@ TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
   result.params = {scale, 0};
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int32_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 
 TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
-                                     TfLiteIntArray* dims, const char* name,
-                                     float scale, bool is_variable) {
-  return CreateQuantized32Tensor(data.begin(), dims, name, scale, is_variable);
+                                     TfLiteIntArray* dims, float scale,
+                                     bool is_variable) {
+  return CreateQuantized32Tensor(data.begin(), dims, scale, is_variable);
 }
 
 }  // namespace testing
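The min/max overloads here derive scale and zero point internally (via ScaleFromMinMax and ZeroPointFromMinMax), so updated call sites only lose the name string. A sketch with an arbitrary [0, 10] range:

    int dims_arr[] = {1, 8};
    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);
    const uint8_t data[8] = {};
    // Before: CreateQuantizedTensor(data, dims, "input_tensor", 0.0f, 10.0f);
    TfLiteTensor t = tflite::testing::CreateQuantizedTensor(
        data, dims, /*min=*/0.0f, /*max=*/10.0f);
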
diff --git a/tensorflow/lite/micro/testing/test_utils.h b/tensorflow/lite/micro/testing/test_utils.h
index b0ebe159b67..0165cbb707a 100644
--- a/tensorflow/lite/micro/testing/test_utils.h
+++ b/tensorflow/lite/micro/testing/test_utils.h
@@ -80,63 +80,56 @@ void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                      ErrorReporter* error_reporter, TfLiteContext* context);
 
 TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
-                               TfLiteIntArray* dims, const char* name,
-                               bool is_variable = false);
+                               TfLiteIntArray* dims, bool is_variable = false);
 
 TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
-                              TfLiteIntArray* dims, const char* name,
-                              bool is_variable = false);
+                              TfLiteIntArray* dims, bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
+                                   float min, float max,
                                    bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   float min, float max,
+                                   TfLiteIntArray* dims, float min, float max,
                                    bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
-                                   bool is_variable = false);
-
-TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
                                    float min, float max,
                                    bool is_variable = false);
 
+TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
+                                   TfLiteIntArray* dims, float min, float max,
+                                   bool is_variable = false);
+
 TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
+                                   TfLiteIntArray* dims,
                                    bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
+                                   TfLiteIntArray* dims,
                                    bool is_variable = false);
 
 TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
+                                   TfLiteIntArray* dims,
                                    bool is_variable = false);
 
 TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                                     const char* name, float scale,
-                                     bool is_variable = false);
+                                     float scale, bool is_variable = false);
 
 TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
-                                     TfLiteIntArray* dims, const char* name,
-                                     float scale, bool is_variable = false);
+                                     TfLiteIntArray* dims, float scale,
+                                     bool is_variable = false);
 
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
-                                 const char* name, bool is_variable = false) {
+                                 bool is_variable = false) {
   TfLiteTensor result;
   result.type = tensor_input_type;
   result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
   result.dims = dims;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(input_type);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
@@ -144,9 +137,9 @@ inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
-                                 TfLiteIntArray* dims, const char* name,
+                                 TfLiteIntArray* dims,
                                  bool is_variable = false) {
-  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
+  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims,
                                                      is_variable);
 }
 
diff --git a/tensorflow/lite/micro/testing_helpers_test.cc b/tensorflow/lite/micro/testing_helpers_test.cc
index 478f5ae6336..710ca2a4a9e 100644
--- a/tensorflow/lite/micro/testing_helpers_test.cc
+++ b/tensorflow/lite/micro/testing_helpers_test.cc
@@ -23,7 +23,6 @@ TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) {
   float weight_scale = 0.5;
   constexpr int tensor_size = 12;
   int dims_arr[] = {4, 2, 3, 2, 1};
-  const char* tensor_name = "test_tensor";
   int32_t quantized[tensor_size];
   float pre_quantized[] = {-10, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 10};
   int32_t expected_quantized_values[] = {-40, -20, -16, -12, -8, -4,
@@ -31,11 +30,10 @@ TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) {
   TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);
 
   TfLiteTensor result = tflite::testing::CreateQuantizedBiasTensor(
-      pre_quantized, quantized, dims, input_scale, weight_scale, tensor_name);
+      pre_quantized, quantized, dims, input_scale, weight_scale);
 
   TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
   TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
-  TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
   TF_LITE_MICRO_EXPECT_EQ(result.params.scale, input_scale * weight_scale);
   for (int i = 0; i < tensor_size; i++) {
     TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
@@ -48,7 +46,6 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
   constexpr int tensor_size = 12;
   const int channels = 4;
   int dims_arr[] = {4, 4, 3, 1, 1};
-  const char* tensor_name = "test_tensor";
   int32_t quantized[tensor_size];
   float scales[channels + 1];
   int zero_points[] = {4, 0, 0, 0, 0};
@@ -60,7 +57,7 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
   TfLiteAffineQuantization quant;
   TfLiteTensor result = tflite::testing::CreatePerChannelQuantizedBiasTensor(
       pre_quantized, quantized, dims, input_scale, weight_scales, scales,
-      zero_points, &quant, 0, tensor_name);
+      zero_points, &quant, 0);
 
   // Values in the scales array start at index 1 since index 0 stores the
   // array length (here, the channel count), not a scale.
@@ -70,7 +67,6 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
 
   TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
   TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
-  TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
   for (int i = 0; i < tensor_size; i++) {
     TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
   }
@@ -80,7 +76,6 @@ TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) {
   const int tensor_size = 12;
   constexpr int channels = 2;
   const int dims_arr[] = {4, channels, 3, 2, 1};
-  const char* tensor_name = "test_tensor";
   int8_t quantized[12];
   const float pre_quantized[] = {-127, -55, -4, -3, -2, -1,
                                  0,    1,   2,  3,  4,  63.5};
@@ -94,12 +89,10 @@ TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) {
   TfLiteAffineQuantization quant;
   TfLiteTensor result =
       tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
-          pre_quantized, quantized, dims, scales, zero_points, &quant, 0,
-          "test_tensor");
+          pre_quantized, quantized, dims, scales, zero_points, &quant, 0);
 
   TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int8_t));
   TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
-  TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
   TfLiteFloatArray* result_scales =
       static_cast<TfLiteAffineQuantization*>(result.quantization.params)->scale;
   for (int i = 0; i < channels; i++) {