Remove uses of the "name" field in TfLiteTensor from TF Micro tests.

The TFLM team is preparing an "optimized" memory build option. This build option will eliminate non-essential fields from core TFLite structs. The first big change is to reduce the number of pointers in TfLiteTensor: many models have dozens of tensors (e.g. the keyword benchmark has 54), so each per-tensor pointer adds up on TFLM's memory-constrained targets. This cleanup pass removes all test usage of the soon-to-be-removed 'name' field of TfLiteTensor.
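To make the mechanical change below easier to read, here is a minimal self-contained C++ sketch of what is happening. The struct and helper names are illustrative stand-ins, not the real TFLM types (TfLiteTensor lives in tensorflow/lite/c/common.h and the test helpers in tensorflow/lite/micro/testing/test_utils.h):

#include <cstddef>
#include <cstdio>

// Stand-in for TfLiteIntArray.
struct IntArrayLite {
  int size;
  int data[4];
};

// Before this commit: every tensor carried a debug-only name pointer.
struct TensorWithName {
  const float* data;
  IntArrayLite* dims;
  const char* name;  // 4 or 8 bytes per tensor, used only in messages
};

// After: the field is gone, and so is the name parameter of the helpers.
struct TensorWithoutName {
  const float* data;
  IntArrayLite* dims;
};

// Old-style helper, as the removed call sites used it.
TensorWithName CreateFloatTensorOld(const float* data, IntArrayLite* dims,
                                    const char* name) {
  return TensorWithName{data, dims, name};
}

// New-style helper, as the updated call sites use it.
TensorWithoutName CreateFloatTensorNew(const float* data, IntArrayLite* dims) {
  return TensorWithoutName{data, dims};
}

int main() {
  IntArrayLite dims{4, {1, 1, 1, 6}};
  float data[6] = {};
  // The call-site change repeated throughout this commit:
  TensorWithName t_old = CreateFloatTensorOld(data, &dims, "input_tensor");
  TensorWithoutName t_new = CreateFloatTensorNew(data, &dims);
  (void)t_old;
  (void)t_new;
  // With 54 tensors (the keyword benchmark), one 4-byte pointer per tensor
  // is 216 bytes of RAM recovered on a 32-bit microcontroller.
  std::printf("saved per 54 tensors: %zu bytes\n",
              54 * (sizeof(TensorWithName) - sizeof(TensorWithoutName)));
  return 0;
}

Every hunk below is an instance of this same change: the trailing name argument is dropped from the tensor-creation helpers, or the name is removed from a log message.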

PiperOrigin-RevId: 316000388
Change-Id: I230865014d5a59b78c1c1c9f5eda784f6d611e77
Authored by Nick Kreeger on 2020-06-11 16:24:01 -07:00; committed by TensorFlower Gardener
parent 8b997d655d
commit b6d13bb0a8
46 changed files with 472 additions and 630 deletions

View File

@ -160,8 +160,7 @@ int main() {
// Create per-tensor quantized int8 input tensor.
int8_t input_quantized[32];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point,
"input_tensor");
input_values, input_quantized, input_dims, input_scale, input_zero_point);
// Set zero point and scale arrays with a single element for each.
int input_zero_points[] = {1, input_zero_point};
float input_scales[] = {1, input_scale};
@ -174,7 +173,7 @@ int main() {
int8_t filter_quantized[32 * 32];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale,
filter_zero_point, "filter_tensor");
filter_zero_point);
// Set zero point and scale arrays with a single element for each.
int filter_zero_points[] = {1, filter_zero_point};
float filter_scales[] = {1, filter_scale};
@ -187,8 +186,8 @@ int main() {
int32_t bias_quantized[32];
tflite::SymmetricQuantize(bias_values, bias_quantized, 32,
input_scale * output_scale);
TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
bias_quantized, bias_dims, "bias_tensor");
TfLiteTensor bias_tensor =
tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
// There is a single zero point of 0, and a single scale of
// input_scale * filter_scale.
@ -202,8 +201,7 @@ int main() {
// Create per-tensor quantized int8 output tensor.
int8_t output_quantized[32];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point,
"output_tensor");
output_quantized, output_dims, output_scale, output_zero_point);
// Set zero point and scale arrays with a single element for each.
int output_zero_points[] = {1, output_zero_point};
float output_scales[] = {1, output_scale};

View File

@ -166,8 +166,7 @@ int main() {
// Create per-tensor quantized int8 input tensor.
int8_t input_quantized[input_elements];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point,
"input_tensor");
input_values, input_quantized, input_dims, input_scale, input_zero_point);
// Set zero point and scale arrays with a single element for each.
int input_zero_points[] = {1, input_zero_point};
@ -180,8 +179,7 @@ int main() {
// Create per-tensor quantized int8 filter tensor.
int8_t filter_quantized[filter_elements];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale, 0,
"filter_tensor");
filter_values, filter_quantized, filter_dims, filter_scale, 0);
// Set zero point and scale arrays with a single element for each.
int filter_zero_points[] = {1, 0};
@ -197,8 +195,8 @@ int main() {
// detailed explanation of why bias scale is input_scale * filter_scale.
tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
input_scale * output_scale);
TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
bias_quantized, bias_dims, "bias_tensor");
TfLiteTensor bias_tensor =
tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
// Set zero point and scale arrays with a single element for each.
int bias_zero_points[] = {1, 0};
@ -211,8 +209,7 @@ int main() {
// Create per-tensor quantized int8 output tensor.
int8_t output_quantized[output_elements];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point,
"output_tensor");
output_quantized, output_dims, output_scale, output_zero_point);
// Set zero point and scale arrays with a single element for each.
int output_zero_points[] = {1, output_zero_point};

View File

@ -82,7 +82,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBasic) {
auto result_dims = {2, 1, 4};
TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
result_data, tflite::testing::IntArrayFromInitializer(result_dims),
"input_tensor", -128.0f, 127.0f);
-128.0f, 127.0f);
const char* found_command;
uint8_t score;
@ -101,8 +101,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) {
std::initializer_list<int8_t> yes_data = {-128, -128, 127, -128};
auto yes_dims = {2, 1, 4};
TfLiteTensor yes_results = tflite::testing::CreateQuantizedTensor(
yes_data, tflite::testing::IntArrayFromInitializer(yes_dims),
"input_tensor", -128.0f, 127.0f);
yes_data, tflite::testing::IntArrayFromInitializer(yes_dims), -128.0f,
127.0f);
bool has_found_new_command = false;
const char* new_command;
@ -129,8 +129,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) {
std::initializer_list<int8_t> no_data = {-128, -128, -128, 127};
auto no_dims = {2, 1, 4};
TfLiteTensor no_results = tflite::testing::CreateQuantizedTensor(
no_data, tflite::testing::IntArrayFromInitializer(no_dims),
"input_tensor", -128.0f, 127.0f);
no_data, tflite::testing::IntArrayFromInitializer(no_dims), -128.0f,
127.0f);
has_found_new_command = false;
new_command = "";
uint8_t score;
@ -164,8 +164,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputLength) {
std::initializer_list<int8_t> bad_data = {-128, -128, 127};
auto bad_dims = {2, 1, 3};
TfLiteTensor bad_results = tflite::testing::CreateQuantizedTensor(
bad_data, tflite::testing::IntArrayFromInitializer(bad_dims),
"input_tensor", -128.0f, 127.0f);
bad_data, tflite::testing::IntArrayFromInitializer(bad_dims), -128.0f,
127.0f);
const char* found_command;
uint8_t score;
@ -185,7 +185,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputTimes) {
auto result_dims = {2, 1, 4};
TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
result_data, tflite::testing::IntArrayFromInitializer(result_dims),
"input_tensor", -128.0f, 127.0f);
-128.0f, 127.0f);
const char* found_command;
uint8_t score;
@ -208,7 +208,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestTooFewInputs) {
auto result_dims = {2, 1, 4};
TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
result_data, tflite::testing::IntArrayFromInitializer(result_dims),
"input_tensor", -128.0f, 127.0f);
-128.0f, 127.0f);
const char* found_command;
uint8_t score;

View File

@ -34,8 +34,8 @@ void TestReluFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -90,8 +90,8 @@ void TestRelu6Float(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -150,9 +150,9 @@ void TestReluUint8(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor"),
output_zero_point),
};
TfLiteContext context;
@ -215,9 +215,9 @@ void TestRelu6Uint8(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor"),
output_zero_point),
};
TfLiteContext context;
@ -279,9 +279,9 @@ void TestReluInt8(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor"),
output_zero_point),
};
TfLiteContext context;
@ -345,9 +345,9 @@ void TestRelu6Int8(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
input_scale, input_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor"),
output_zero_point),
};
TfLiteContext context;

View File

@ -129,9 +129,9 @@ void TestAddFloat(const int* input1_dims_data, const float* input1_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateFloatTensor(output_data, output_dims),
};
ValidateAddGoldens(tensors, tensors_size, expected_output, output_data,
@ -156,15 +156,14 @@ void TestAddQuantized(const int* input1_dims_data, const float* input1_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
tflite::testing::CreateQuantizedTensor(
input1_data, input1_quantized, input1_dims, input1_scale,
input1_zero_point, "input1_tensor"),
tflite::testing::CreateQuantizedTensor(
input2_data, input2_quantized, input2_dims, input2_scale,
input2_zero_point, "input2_tensor"),
tflite::testing::CreateQuantizedTensor(input1_data, input1_quantized,
input1_dims, input1_scale,
input1_zero_point),
tflite::testing::CreateQuantizedTensor(input2_data, input2_quantized,
input2_dims, input2_scale,
input2_zero_point),
tflite::testing::CreateQuantizedTensor(output_data, output_dims,
output_scale, output_zero_point,
"output_tensor"),
output_scale, output_zero_point),
};
tflite::AsymmetricQuantize(golden, golden_quantized,
ElementCount(*output_dims), output_scale,

View File

@ -83,9 +83,9 @@ void TestArgMinMaxFloat(const int* input_dims_data, const float* input_values,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_values, input_dims, "input_tensor"),
CreateInt32Tensor(axis_values, axis_dims, "axis_tensor"),
CreateInt32Tensor(output, output_dims, "output_tensor"),
CreateFloatTensor(input_values, input_dims),
CreateInt32Tensor(axis_values, axis_dims),
CreateInt32Tensor(output, output_dims),
};
ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,
@ -110,9 +110,9 @@ void TestArgMinMaxQuantized(const int* input_dims_data,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_values, input_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
CreateInt32Tensor(axis_values, axis_dims, "axis_tensor"),
CreateInt32Tensor(output, output_dims, "output_tensor"),
input_scale, input_zero_point),
CreateInt32Tensor(axis_values, axis_dims),
CreateInt32Tensor(output, output_dims),
};
ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,

View File

@ -32,8 +32,8 @@ void TestCeil(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@ -44,8 +44,8 @@ TfLiteNode PrepareCircularBufferInt8(const int* input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, 1, 0, "input_tensor"),
CreateQuantizedTensor(output_data, output_dims, 1, 0, "output_tensor"),
CreateQuantizedTensor(input_data, input_dims, 1, 0),
CreateQuantizedTensor(output_data, output_dims, 1, 0),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@ -92,8 +92,8 @@ TfLiteStatus InvokeCircularBufferInt8(const int* input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, 1, 0, "input_tensor"),
CreateQuantizedTensor(output_data, output_dims, 1, 0, "output_tensor"),
CreateQuantizedTensor(input_data, input_dims, 1, 0),
CreateQuantizedTensor(output_data, output_dims, 1, 0),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@ -78,9 +78,9 @@ void TestComparisonFloat(tflite::BuiltinOperator op, int* input1_dims_data,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateBoolTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateBoolTensor(output_data, output_dims),
};
TestComparison(op, tensors, expected_output_data, output_data);
@ -95,9 +95,9 @@ void TestComparisonBool(tflite::BuiltinOperator op, int* input1_dims_data,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
TfLiteTensor tensors[tensors_size] = {
CreateBoolTensor(input1_data, input1_dims, "input1_tensor"),
CreateBoolTensor(input2_data, input2_dims, "input2_tensor"),
CreateBoolTensor(output_data, output_dims, "output_tensor"),
CreateBoolTensor(input1_data, input1_dims),
CreateBoolTensor(input2_data, input2_dims),
CreateBoolTensor(output_data, output_dims),
};
TestComparison(op, tensors, expected_output_data, output_data);
@ -112,9 +112,9 @@ void TestComparisonInt(tflite::BuiltinOperator op, int* input1_dims_data,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
TfLiteTensor tensors[tensors_size] = {
CreateInt32Tensor(input1_data, input1_dims, "input1_tensor"),
CreateInt32Tensor(input2_data, input2_dims, "input2_tensor"),
CreateBoolTensor(output_data, output_dims, "output_tensor"),
CreateInt32Tensor(input1_data, input1_dims),
CreateInt32Tensor(input2_data, input2_dims),
CreateBoolTensor(output_data, output_dims),
};
TestComparison(op, tensors, expected_output_data, output_data);
@ -135,10 +135,10 @@ void TestComparisonQuantizedUInt8(tflite::BuiltinOperator op,
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input1_data, input1_quantized, input1_dims,
input1_scale, input1_zero_point, "input1_tensor"),
input1_scale, input1_zero_point),
CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
input2_scale, input2_zero_point, "input2_tensor"),
CreateBoolTensor(output_data, output_dims, "output_tensor"),
input2_scale, input2_zero_point),
CreateBoolTensor(output_data, output_dims),
};
TestComparison(op, tensors, expected_output_data, output_data);
@ -159,10 +159,10 @@ void TestComparisonQuantizedInt8(tflite::BuiltinOperator op,
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input1_data, input1_quantized, input1_dims,
input1_scale, input1_zero_point, "input1_tensor"),
input1_scale, input1_zero_point),
CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
input2_scale, input2_zero_point, "input2_tensor"),
CreateBoolTensor(output_data, output_dims, "output_tensor"),
input2_scale, input2_zero_point),
CreateBoolTensor(output_data, output_dims),
};
TestComparison(op, tensors, expected_output_data, output_data);

View File

@ -63,8 +63,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_KERNEL_LOG(
context,
"Op Concatenation does not currently support num dimensions >4 "
"Tensor '%s' has %d dimensions.",
input->name, num_dimensions);
"Tensor has %d dimensions.",
num_dimensions);
return kTfLiteError;
}
}

View File

@ -40,9 +40,9 @@ void TestConcatenateTwoInputs(std::initializer_list<int> input1_dims_data,
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor")};
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateFloatTensor(output_data, output_dims)};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@ -99,12 +99,9 @@ void TestConcatenateQuantizedTwoInputs(
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
input_min, input_max),
CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max)};
CreateQuantizedTensor(input1_data, input1_dims, input_min, input_max),
CreateQuantizedTensor(input2_data, input2_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max)};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@ -123,10 +123,10 @@ void TestConvFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(filter_data, filter_dims, "filter_tensor"),
CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(filter_data, filter_dims),
CreateFloatTensor(bias_data, bias_dims),
CreateFloatTensor(output_data, output_dims),
};
TF_LITE_MICRO_EXPECT_EQ(
@ -157,13 +157,12 @@ void TestConvQuantizedPerLayer(
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_quantized, input_dims,
input_scale, 128, "input_tensor"),
input_scale, 128),
CreateQuantizedTensor(filter_data, filter_quantized, filter_dims,
filter_scale, 128, "filter_tensor"),
filter_scale, 128),
CreateQuantizedBiasTensor(bias_data, bias_quantized, bias_dims,
input_scale, filter_scale, "bias_tensor"),
CreateQuantizedTensor(output_data, output_dims, output_scale, 128,
"output_tensor")};
input_scale, filter_scale),
CreateQuantizedTensor(output_data, output_dims, output_scale, 128)};
// TODO(njeff): Affine Quantization Params should be set on tensor creation.
float filter_scales[] = {1, filter_scale};
@ -199,20 +198,16 @@ void TestConvQuantizedPerChannel(
float filter_scales[5];
TfLiteAffineQuantization filter_quant;
TfLiteAffineQuantization bias_quant;
TfLiteTensor input_tensor =
CreateQuantizedTensor(input_data, input_quantized, input_dims,
input_scale, input_zero_point, "input_tensor");
TfLiteTensor input_tensor = CreateQuantizedTensor(
input_data, input_quantized, input_dims, input_scale, input_zero_point);
TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
filter_data, filter_data_quantized, filter_dims, filter_scales,
filter_zero_points, &filter_quant, 0 /* quantized dimension */,
"filter_tensor");
filter_zero_points, &filter_quant, 0 /* quantized dimension */);
TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */,
"bias_tensor");
TfLiteTensor output_tensor =
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor");
bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */);
TfLiteTensor output_tensor = CreateQuantizedTensor(
output_data, output_dims, output_scale, output_zero_point);
// TODO(njeff): Affine Quantization Params should be set on tensor creation.
float input_scales[] = {1, input_scale};
@ -479,21 +474,18 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
TfLiteAffineQuantization filter_quant;
TfLiteAffineQuantization bias_quant;
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0,
"input_tensor");
tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
TfLiteTensor filter_tensor =
tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
tflite::testing::kFilterData, filter_quantized, filter_dims,
filter_scales, filter_zero_points, &filter_quant,
0 /* quantized dimension */, "filter_tensor");
0 /* quantized dimension */);
TfLiteTensor bias_tensor =
tflite::testing::CreatePerChannelQuantizedBiasTensor(
tflite::testing::kBiasData, bias_quantized, bias_dims, input_scale,
&filter_scales[1], scales, zero_points, &bias_quant, 0,
"bias_tensor");
&filter_scales[1], scales, zero_points, &bias_quant, 0);
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_data, output_dims, output_scale, 0 /* quantized dimension */,
"output_tensor");
output_data, output_dims, output_scale, 0 /* quantized dimension */);
float input_scales[] = {1, input_scale};
int input_zero_points[] = {1, 128};
@ -562,8 +554,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
// Create per-layer quantized int8 input tensor.
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0,
"input_tensor");
tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
int input_zero_points[2] = {1, 0};
float input_scales[2] = {1, input_scale};
TfLiteAffineQuantization input_quant = {
@ -574,7 +565,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
// Create per-layer quantized int8 filter tensor.
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
tflite::testing::kFilterData, filter_quantized, filter_dims, filter_scale,
0, "filter_tensor");
0);
int filter_zero_points[2] = {1, 0};
float filter_scales[2] = {1, filter_scale};
TfLiteAffineQuantization filter_quant = {
@ -586,8 +577,8 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
tflite::SymmetricQuantize(tflite::testing::kBiasData, bias_quantized,
tflite::testing::kBiasElements,
input_scale * output_scale);
TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
bias_quantized, bias_dims, "bias_tensor");
TfLiteTensor bias_tensor =
tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
int bias_zero_points[2] = {1, 0};
float bias_scales[2] = {1, input_scale * filter_scale};
@ -598,8 +589,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
// Create per-layer quantized int8 output tensor.
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_data, output_dims, output_scale, 0 /* quantized dimension */,
"output_tensor");
output_data, output_dims, output_scale, 0 /* quantized dimension */);
int output_zero_points[2] = {1, 0};
float output_scales[2] = {1, output_scale};
TfLiteAffineQuantization output_quant = {
@ -687,8 +677,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
// Create per-tensor quantized int8 input tensor.
int8_t input_quantized[kSampleSize];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point,
"input_tensor");
input_values, input_quantized, input_dims, input_scale, input_zero_point);
// Set zero point and scale arrays with a single element for each.
int input_zero_points[] = {1, input_zero_point};
float input_scales[] = {1, input_scale};
@ -701,7 +690,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
int8_t filter_quantized[kNumFilters * kSampleSize];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale,
filter_zero_point, "filter_tensor");
filter_zero_point);
// Set zero point and scale arrays with a single element for each.
int filter_zero_points[] = {1, filter_zero_point};
float filter_scales[] = {1, filter_scale};
@ -714,8 +703,8 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
int32_t bias_quantized[kSampleSize];
tflite::SymmetricQuantize(bias_values, bias_quantized, kSampleSize,
input_scale * output_scale);
TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
bias_quantized, bias_dims, "bias_tensor");
TfLiteTensor bias_tensor =
tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
// There is a single zero point of 0, and a single scale of
// input_scale * filter_scale.
@ -729,8 +718,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
// Create per-tensor quantized int8 output tensor.
int8_t output_quantized[kSampleSize];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point,
"output_tensor");
output_quantized, output_dims, output_scale, output_zero_point);
// Set zero point and scale arrays with a single element for each.
int output_zero_points[] = {1, output_zero_point};
float output_scales[] = {1, output_scale};

View File

@ -121,10 +121,10 @@ void TestDepthwiseConvFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(filter_data, filter_dims, "filter_tensor"),
CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(filter_data, filter_dims),
CreateFloatTensor(bias_data, bias_dims),
CreateFloatTensor(output_data, output_dims),
};
ValidateDepthwiseConvGoldens(expected_output_data, output_dims_count,
@ -152,16 +152,14 @@ void TestDepthwiseConvQuantizedPerLayer(
TfLiteTensor tensors[tensors_size] = {
tflite::testing::CreateQuantizedTensor(input_data, input_quantized,
input_dims, input_scale,
input_zero_point, "input_tensor"),
tflite::testing::CreateQuantizedTensor(
filter_data, filter_quantized, filter_dims, filter_scale,
filter_zero_point, "filter_tensor"),
tflite::testing::CreateQuantizedBiasTensor(bias_data, bias_quantized,
bias_dims, input_scale,
filter_scale, "bias_tensor"),
input_zero_point),
tflite::testing::CreateQuantizedTensor(filter_data, filter_quantized,
filter_dims, filter_scale,
filter_zero_point),
tflite::testing::CreateQuantizedBiasTensor(
bias_data, bias_quantized, bias_dims, input_scale, filter_scale),
tflite::testing::CreateQuantizedTensor(output_data, output_dims,
output_scale, output_zero_point,
"output_tensor"),
output_scale, output_zero_point),
};
// TODO(njeff): Affine Quantization Params should be set on tensor creation.
@ -206,20 +204,18 @@ void TestDepthwiseConvQuantizedPerChannel(
float bias_scales[kMaxBiasChannels];
TfLiteAffineQuantization filter_quant;
TfLiteAffineQuantization bias_quant;
TfLiteTensor input_tensor =
CreateQuantizedTensor(input_data, input_quantized, input_dims,
input_scale, input_zero_point, "input_tensor");
TfLiteTensor input_tensor = CreateQuantizedTensor(
input_data, input_quantized, input_dims, input_scale, input_zero_point);
TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
filter_data, filter_data_quantized, filter_dims, filter_scales,
filter_zero_points, &filter_quant, 3 /* quantized dimension */,
"filter_tensor");
filter_zero_points, &filter_quant, 3 /* quantized dimension */
);
TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
bias_scales, bias_zero_points, &bias_quant, 3 /* quantized dimension */,
"bias_tensor");
TfLiteTensor output_tensor =
CreateQuantizedTensor(output_data, output_dims, output_scale,
input_zero_point, "output_tensor");
bias_scales, bias_zero_points, &bias_quant, 3 /* quantized dimension */
);
TfLiteTensor output_tensor = CreateQuantizedTensor(
output_data, output_dims, output_scale, input_zero_point);
// TODO(njeff): Affine Quantization Params should be set on tensor creation.
float input_scales[] = {1, input_scale};
@ -615,20 +611,17 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
TfLiteAffineQuantization filter_quant;
TfLiteAffineQuantization bias_quant;
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_data, input_quantized, input_dims, input_scale, input_zero_point,
"input_tensor");
input_data, input_quantized, input_dims, input_scale, input_zero_point);
TfLiteTensor filter_tensor =
tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
filter_data, filter_quantized, filter_dims, filter_scales,
filter_zero_points, &filter_quant, 0 /* quantized dimension */,
"filter_tensor");
filter_zero_points, &filter_quant, 0 /* quantized dimension */);
TfLiteTensor bias_tensor =
tflite::testing::CreatePerChannelQuantizedBiasTensor(
bias_data, bias_quantized, bias_dims, input_scale, &filter_scales[1],
scales, zero_points, &bias_quant, 0, "bias_tensor");
scales, zero_points, &bias_quant, 0);
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_data, output_dims, output_scale, output_zero_point,
"output_tensor");
output_data, output_dims, output_scale, output_zero_point);
float input_scales[] = {1, input_scale};
int input_zero_points[] = {1, input_zero_point};
@ -700,8 +693,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
// Create per-layer quantized int8 input tensor.
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, 0,
"input_tensor");
input_values, input_quantized, input_dims, input_scale, 0);
int input_zero_points[2] = {1, 0};
float input_scales[2] = {1, input_scale};
TfLiteAffineQuantization input_quant = {
@ -711,8 +703,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
// Create per-layer quantized int8 filter tensor.
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale, 0,
"filter_tensor");
filter_values, filter_quantized, filter_dims, filter_scale, 0);
int filter_zero_points[2] = {1, 0};
float filter_scales[2] = {1, filter_scale};
TfLiteAffineQuantization filter_quant = {
@ -723,8 +714,8 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
// Create per-layer quantized int32 bias tensor.
tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
input_scale * output_scale);
TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
bias_quantized, bias_dims, "bias_tensor");
TfLiteTensor bias_tensor =
tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
int bias_zero_points[2] = {1, 0};
float bias_scales[2] = {1, input_scale * filter_scale};
@ -735,7 +726,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
// Create per-layer quantized int8 output tensor.
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_data, output_dims, output_scale, 0, "output_tensor");
output_data, output_dims, output_scale, 0);
int output_zero_points[2] = {1, 0};
float output_scales[2] = {1, output_scale};
TfLiteAffineQuantization output_quant = {
@ -833,8 +824,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
// Create per-tensor quantized int8 input tensor.
int8_t input_quantized[input_elements];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point,
"input_tensor");
input_values, input_quantized, input_dims, input_scale, input_zero_point);
// Set zero point and scale arrays with a single element for each.
int input_zero_points[] = {1, input_zero_point};
@ -847,8 +837,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
// Create per-tensor quantized int8 filter tensor.
int8_t filter_quantized[filter_elements];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale, 0,
"filter_tensor");
filter_values, filter_quantized, filter_dims, filter_scale, 0);
// Set zero point and scale arrays with a single element for each.
int filter_zero_points[] = {1, 0};
@ -864,8 +853,8 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
// detailed explanation of why bias scale is input_scale * filter_scale.
tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
input_scale * output_scale);
TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
bias_quantized, bias_dims, "bias_tensor");
TfLiteTensor bias_tensor =
tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
// Set zero point and scale arrays with a single element for each.
int bias_zero_points[] = {1, 0};
@ -878,8 +867,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
// Create per-tensor quantized int8 output tensor.
int8_t output_quantized[output_elements];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point,
"output_tensor");
output_quantized, output_dims, output_scale, output_zero_point);
// Set zero point and scale arrays with a single element for each.
int output_zero_points[] = {1, output_zero_point};

View File

@ -90,8 +90,8 @@ void TestDequantizeToFloat(const int* input_dims_data, const float* input_data,
const int tensors_size = 2;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims, scale,
zero_point, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
zero_point),
CreateFloatTensor(output_data, output_dims),
};
ValidateDequantizeGoldens(tensors, tensors_size, expected_output_data,
@ -113,8 +113,8 @@ void TestDequantizeToInt32(const int* input_dims_data, const float* input_data,
const int tensors_size = 2;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
CreateInt32Tensor(output_data, output_dims, "output_tensor"),
input_scale, input_zero_point),
CreateInt32Tensor(output_data, output_dims),
};
TfLiteQuantizationParams output_quant;

View File

@ -36,8 +36,8 @@ void TestElementwiseFloat(tflite::BuiltinOperator op,
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor")};
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {
@ -101,8 +101,8 @@ void TestElementwiseBool(tflite::BuiltinOperator op,
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateBoolTensor(input_data, input_dims, "input_tensor"),
CreateBoolTensor(output_data, output_dims, "output_tensor")};
CreateBoolTensor(input_data, input_dims),
CreateBoolTensor(output_data, output_dims)};
// Place false in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {

View File

@ -33,8 +33,8 @@ void TestFloor(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@ -44,10 +44,10 @@ TfLiteStatus TestFullyConnectedFloat(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(weights_data, weights_dims, "weights_tensor"),
CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(weights_data, weights_dims),
CreateFloatTensor(bias_data, bias_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -121,13 +121,11 @@ TfLiteStatus TestFullyConnectedQuantized(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(weights_data, weights_dims, "weights_tensor",
weights_min, weights_max),
CreateQuantized32Tensor(bias_data, bias_dims, "bias_tensor", bias_scale),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(weights_data, weights_dims, weights_min,
weights_max),
CreateQuantized32Tensor(bias_data, bias_dims, bias_scale),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;

View File

@ -19,7 +19,6 @@ limitations under the License.
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
namespace {
@ -30,38 +29,35 @@ constexpr float kInputMax = 2.0;
constexpr float kOutputMin = -1.0;
constexpr float kOutputMax = 127.0 / 128.0;
void QuantizeInputData(const float input_data[], int length,
uint8_t* quantized_data) {
for (int i = 0; i < 6; i++) {
quantized_data[i] = tflite::testing::F2Q(input_data[i],
tflite::testing::kInputMin,
tflite::testing::kInputMax);
quantized_data[i] = tflite::testing::F2Q(
input_data[i], tflite::testing::kInputMin, tflite::testing::kInputMax);
}
}
void QuantizeInputData(const float input_data[], int length,
int8_t* quantized_data) {
for (int i = 0; i < 6; i++) {
quantized_data[i] = tflite::testing::F2QS(input_data[i],
tflite::testing::kInputMin,
tflite::testing::kInputMax);
quantized_data[i] = tflite::testing::F2QS(
input_data[i], tflite::testing::kInputMin, tflite::testing::kInputMax);
}
}
TfLiteTensor CreateL2NormTensor(const float* data, TfLiteIntArray* dims,
const char* name, bool is_input) {
return CreateFloatTensor(data, dims, name);
bool is_input) {
return CreateFloatTensor(data, dims);
}
TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
const char* name, bool is_input) {
bool is_input) {
TfLiteTensor tensor;
if (is_input) {
tensor = CreateQuantizedTensor(data, dims, name, kInputMin, kInputMax);
tensor = CreateQuantizedTensor(data, dims, kInputMin, kInputMax);
} else {
tensor = CreateQuantizedTensor(data, dims, name, kOutputMin, kOutputMax);
tensor = CreateQuantizedTensor(data, dims, kOutputMin, kOutputMax);
}
tensor.quantization.type = kTfLiteAffineQuantization;
@ -69,13 +65,13 @@ TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
}
TfLiteTensor CreateL2NormTensor(const int8* data, TfLiteIntArray* dims,
const char* name, bool is_input) {
bool is_input) {
TfLiteTensor tensor;
if (is_input) {
tensor = CreateQuantizedTensor(data, dims, name, kInputMin, kInputMax);
tensor = CreateQuantizedTensor(data, dims, kInputMin, kInputMax);
} else {
tensor = CreateQuantizedTensor(data, dims, name, kOutputMin, kOutputMax);
tensor = CreateQuantizedTensor(data, dims, kOutputMin, kOutputMax);
}
tensor.quantization.type = kTfLiteAffineQuantization;
@ -88,18 +84,17 @@ inline float Dequantize(const T data, float scale, int32_t zero_point) {
}
template <typename T>
void TestL2Normalization(const int* input_dims_data,
const T* input_data,
const float* expected_output_data,
T* output_data, float variance) {
void TestL2Normalization(const int* input_dims_data, const T* input_data,
const float* expected_output_data, T* output_data,
float variance) {
TfLiteIntArray* dims = IntArrayFromInts(input_dims_data);
const int output_dims_count = ElementCount(*dims);
constexpr int tensors_size = 2;
TfLiteTensor tensors[tensors_size] = {
CreateL2NormTensor(input_data, dims, "input_tensor", true),
CreateL2NormTensor(output_data, dims, "output_tensor", false),
CreateL2NormTensor(input_data, dims, true),
CreateL2NormTensor(output_data, dims, false),
};
TfLiteContext context;
@ -158,22 +153,18 @@ void TestL2Normalization(const int* input_dims_data,
} // namespace testing
} // namespace tflite
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(SimpleFloatTest) {
const int input_dims[] = {4, 1, 1, 1, 6};
constexpr int data_length = 6;
const float input_data[data_length] = {
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1
};
const float expected_output_data[data_length] = {
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05
};
const float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
const float expected_output_data[data_length] = {-0.55, 0.3, 0.35,
0.6, -0.35, 0.05};
float output_data[data_length];
tflite::testing::TestL2Normalization<float>(input_dims, input_data,
expected_output_data, output_data, 0);
tflite::testing::TestL2Normalization<float>(
input_dims, input_data, expected_output_data, output_data, 0);
}
TF_LITE_MICRO_TEST(ZerosVectorFloatTest) {
@ -183,23 +174,20 @@ TF_LITE_MICRO_TEST(ZerosVectorFloatTest) {
const float expected_output_data[data_length] = {0, 0, 0, 0, 0, 0};
float output_data[data_length];
tflite::testing::TestL2Normalization<float>(input_dims, input_data,
expected_output_data, output_data, 0);
tflite::testing::TestL2Normalization<float>(
input_dims, input_data, expected_output_data, output_data, 0);
}
TF_LITE_MICRO_TEST(SimpleFloatWithRankLessThanFourTest) {
const int input_dims[] = {4, 1, 1, 1, 6};
constexpr int data_length = 6;
const float input_data[data_length] = {
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1
};
const float expected_output_data[data_length] = {
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05
};
const float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
const float expected_output_data[data_length] = {-0.55, 0.3, 0.35,
0.6, -0.35, 0.05};
float output_data[data_length];
tflite::testing::TestL2Normalization<float>(input_dims, input_data,
expected_output_data, output_data, 0);
tflite::testing::TestL2Normalization<float>(
input_dims, input_data, expected_output_data, output_data, 0);
}
TF_LITE_MICRO_TEST(MultipleBatchFloatTest) {
@ -217,8 +205,8 @@ TF_LITE_MICRO_TEST(MultipleBatchFloatTest) {
};
float output_data[data_length];
tflite::testing::TestL2Normalization<float>(input_dims, input_data,
expected_output_data, output_data, 0);
tflite::testing::TestL2Normalization<float>(
input_dims, input_data, expected_output_data, output_data, 0);
}
TF_LITE_MICRO_TEST(ZerosVectorUint8Test) {
@ -231,44 +219,36 @@ TF_LITE_MICRO_TEST(ZerosVectorUint8Test) {
tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
expected_output_data, output_data, .1);
tflite::testing::TestL2Normalization<uint8_t>(
input_dims, quantized_input, expected_output_data, output_data, .1);
}
TF_LITE_MICRO_TEST(SimpleUint8Test) {
const int input_dims[] = {4, 1, 1, 1, 6};
constexpr int data_length = 6;
float input_data[data_length] = {
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1
};
float expected_output[data_length] = {
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05
};
float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
float expected_output[data_length] = {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05};
uint8_t quantized_input[data_length];
uint8_t output_data[data_length];
tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
expected_output, output_data, .1);
tflite::testing::TestL2Normalization<uint8_t>(
input_dims, quantized_input, expected_output, output_data, .1);
}
TF_LITE_MICRO_TEST(SimpleInt8Test) {
const int input_dims[] = {4, 1, 1, 1, 6};
constexpr int data_length = 6;
float input_data[data_length] = {
-1.1, 0.6, 0.7, 1.2, -0.7, 0.1
};
float expected_output[data_length] = {
-0.55, 0.3, 0.35, 0.6, -0.35, 0.05
};
float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
float expected_output[data_length] = {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05};
int8_t quantized_input[data_length];
int8_t output_data[data_length];
tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
expected_output, output_data, .1);
tflite::testing::TestL2Normalization<int8_t>(
input_dims, quantized_input, expected_output, output_data, .1);
}
TF_LITE_MICRO_TEST(ZerosVectorInt8Test) {
@ -281,8 +261,8 @@ TF_LITE_MICRO_TEST(ZerosVectorInt8Test) {
tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
expected_output_data, output_data, .1);
tflite::testing::TestL2Normalization<int8_t>(
input_dims, quantized_input, expected_output_data, output_data, .1);
}
TF_LITE_MICRO_TEST(MultipleBatchUint8Test) {
@ -303,8 +283,8 @@ TF_LITE_MICRO_TEST(MultipleBatchUint8Test) {
tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
expected_output, output_data, .1);
tflite::testing::TestL2Normalization<uint8_t>(
input_dims, quantized_input, expected_output, output_data, .1);
}
TF_LITE_MICRO_TEST(MultipleBatchInt8Test) {
@ -325,8 +305,8 @@ TF_LITE_MICRO_TEST(MultipleBatchInt8Test) {
tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
expected_output, output_data, .1);
tflite::testing::TestL2Normalization<int8_t>(
input_dims, quantized_input, expected_output, output_data, .1);
}
TF_LITE_MICRO_TESTS_END

View File

@ -39,9 +39,9 @@ void TestLogicalOp(tflite::BuiltinOperator op,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateBoolTensor(input1_data, input1_dims, "input1_tensor"),
CreateBoolTensor(input2_data, input2_dims, "input2_tensor"),
CreateBoolTensor(output_data, output_dims, "output_tensor"),
CreateBoolTensor(input1_data, input1_dims),
CreateBoolTensor(input2_data, input2_dims),
CreateBoolTensor(output_data, output_dims),
};
TfLiteContext context;

View File

@ -36,8 +36,8 @@ void TestLogisticFloat(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -96,10 +96,8 @@ void TestLogisticInt8(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;

View File

@ -40,9 +40,9 @@ void TestMaxMinFloat(tflite::BuiltinOperator op,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -96,12 +96,9 @@ void TestMaxMinQuantized(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
input1_min, input1_max),
CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
input2_min, input2_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input1_data, input1_dims, input1_min, input1_max),
CreateQuantizedTensor(input2_data, input2_dims, input2_min, input2_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;
@ -153,12 +150,9 @@ void TestMaxMinQuantizedInt32(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(input1_data, input1_dims, "input1_tensor",
input1_scale),
CreateQuantized32Tensor(input2_data, input2_dims, "input2_tensor",
input2_scale),
CreateQuantized32Tensor(output_data, output_dims, "output_tensor",
output_scale),
CreateQuantized32Tensor(input1_data, input1_dims, input1_scale),
CreateQuantized32Tensor(input2_data, input2_dims, input2_scale),
CreateQuantized32Tensor(output_data, output_dims, output_scale),
};
TfLiteContext context;

View File

@ -41,9 +41,9 @@ void TestMulFloat(std::initializer_list<int> input1_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -113,12 +113,9 @@ void TestMulQuantized(std::initializer_list<int> input1_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
input_min, input_max),
CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input1_data, input1_dims, input_min, input_max),
CreateQuantizedTensor(input2_data, input2_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;

View File

@ -35,8 +35,8 @@ void TestNegFloat(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;

View File

@ -39,9 +39,9 @@ void TestPackTwoInputsFloat(std::initializer_list<int> input1_dims_data,
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor")};
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateFloatTensor(output_data, output_dims)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {
@ -114,10 +114,10 @@ void TestPackThreeInputsFloat(std::initializer_list<int> input1_dims_data,
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateFloatTensor(input3_data, input3_dims, "input3_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor")};
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateFloatTensor(input3_data, input3_dims),
CreateFloatTensor(output_data, output_dims)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {
@ -189,9 +189,9 @@ void TestPackTwoInputsQuantized(
TfLiteTensor tensors[tensors_size] = {
// CreateQuantizedTensor needs min/max values as input, but these values
// don't matter as to the functionality of PACK, so just set as 0 and 10.
CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor", 0, 10),
CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor", 0, 10),
CreateQuantizedTensor(output_data, output_dims, "output_tensor", 0, 10)};
CreateQuantizedTensor(input1_data, input1_dims, 0, 10),
CreateQuantizedTensor(input2_data, input2_dims, 0, 10),
CreateQuantizedTensor(output_data, output_dims, 0, 10)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {
@ -259,9 +259,9 @@ void TestPackTwoInputsQuantized32(
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(input1_data, input1_dims, "input1_tensor", 1.0),
CreateQuantized32Tensor(input2_data, input2_dims, "input2_tensor", 1.0),
CreateQuantized32Tensor(output_data, output_dims, "output_tensor", 1.0)};
CreateQuantized32Tensor(input1_data, input1_dims, 1.0),
CreateQuantized32Tensor(input2_data, input2_dims, 1.0),
CreateQuantized32Tensor(output_data, output_dims, 1.0)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {

View File

@ -121,9 +121,9 @@ void TestPadFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor")};
CreateFloatTensor(input_data, input_dims),
CreateInt32Tensor(pad_data, pad_dims),
CreateFloatTensor(output_data, output_dims)};
// Pad tensor must be constant.
tensors[1].allocation_type = kTfLiteMmapRo;
@ -149,10 +149,10 @@ void TestPadV2Float(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
CreateFloatTensor(&pad_value, pad_value_dims, "pad value tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor")};
CreateFloatTensor(input_data, input_dims),
CreateInt32Tensor(pad_data, pad_dims),
CreateFloatTensor(&pad_value, pad_value_dims),
CreateFloatTensor(output_data, output_dims)};
// Pad tensor must be constant.
tensors[1].allocation_type = kTfLiteMmapRo;
@ -179,10 +179,10 @@ void TestPadQuantized(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
input_scale, input_zero_point),
CreateInt32Tensor(pad_data, pad_dims),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor")};
output_zero_point)};
// Pad tensor must be constant.
tensors[1].allocation_type = kTfLiteMmapRo;
@ -218,13 +218,12 @@ void TestPadV2Quantized(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
input_scale, input_zero_point),
CreateInt32Tensor(pad_data, pad_dims),
CreateQuantizedTensor(&pad_value, &pad_value_quantized, pad_value_dims,
pad_value_scale, pad_value_zero_point,
"pad value tensor"),
pad_value_scale, pad_value_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor")};
output_zero_point)};
// Pad tensor must be constant.
tensors[1].allocation_type = kTfLiteMmapRo;


@ -42,8 +42,8 @@ void TestAveragePoolingFloat(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -114,10 +114,8 @@ void TestAveragePoolingQuantized(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;
@ -183,8 +181,8 @@ void TestMaxPoolFloat(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -257,10 +255,8 @@ void TestMaxPoolQuantized(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;


@ -38,9 +38,9 @@ void TestPreluFloat(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(alpha_data, alpha_dims, "alpha_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(alpha_data, alpha_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@ -102,12 +102,9 @@ void TestPreluQuantized(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(alpha_data, alpha_dims, "alpha_tensor", alpha_min,
alpha_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(alpha_data, alpha_dims, alpha_min, alpha_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);


@ -89,8 +89,8 @@ void TestQuantizeFloat(const int* input_dims_data, const float* input_data,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_dims_count = ElementCount(*output_dims);
TfLiteTensor output_tensor = CreateQuantizedTensor(
output_data, output_dims, scale, zero_point, "output_tensor");
TfLiteTensor output_tensor =
CreateQuantizedTensor(output_data, output_dims, scale, zero_point);
TfLiteAffineQuantization quant;
float scales[] = {1, scale};
@ -102,7 +102,7 @@ void TestQuantizeFloat(const int* input_dims_data, const float* input_data,
// 1 input, 1 output.
constexpr int tensors_size = 2;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(input_data, input_dims),
output_tensor,
};
@ -121,9 +121,8 @@ void TestRequantize(const int* input_dims_data, const float* input_data,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_dims_count = ElementCount(*output_dims);
TfLiteTensor output_tensor =
CreateQuantizedTensor(output_data, output_dims, output_scale,
output_zero_point, "output_tensor");
TfLiteTensor output_tensor = CreateQuantizedTensor(
output_data, output_dims, output_scale, output_zero_point);
TfLiteAffineQuantization quant;
float scales[] = {1, output_scale};
@ -136,7 +135,7 @@ void TestRequantize(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = 2;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_quantized, input_dims,
input_scale, input_zero_point, "input_tensor"),
input_scale, input_zero_point),
output_tensor,
};


@ -117,9 +117,9 @@ void TestMeanFloatInput4D(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = num_of_inputs + num_of_outputs;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateInt32Tensor(axis_data, axis_dims, "axis_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateInt32Tensor(axis_data, axis_dims),
CreateFloatTensor(output_data, output_dims),
};
TF_LITE_MICRO_EXPECT_EQ(


@ -116,18 +116,18 @@ void TestReshape(std::initializer_list<int> input_dims_data,
bool expect_failure = false) {
TfLiteIntArray* input_dims = IntArrayFromInitializer(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
TfLiteTensor input_tensor = CreateTensor<T, tensor_input_type>(
input_data, input_dims, "input_tensor");
TfLiteTensor input_tensor =
CreateTensor<T, tensor_input_type>(input_data, input_dims);
T* output_data = reinterpret_cast<T*>(output_data_raw);
TfLiteTensor output_tensor = CreateTensor<T, tensor_input_type>(
output_data, output_dims, "input_tensor");
TfLiteTensor output_tensor =
CreateTensor<T, tensor_input_type>(output_data, output_dims);
// Reshape param is passed as the op's param.
TestReshapeImpl<T>(&input_tensor, nullptr, &output_tensor, expected_output,
expected_dims, expect_failure);
// Reshape param is passed as a tensor.
TfLiteIntArray* shape_dims = IntArrayFromInitializer(shape_dims_data);
auto shape_tensor = CreateTensor<int32_t, kTfLiteInt32>(
shape_data, shape_dims, "shape_tensor");
auto shape_tensor =
CreateTensor<int32_t, kTfLiteInt32>(shape_data, shape_dims);
TestReshapeImpl<T>(&input_tensor, &shape_tensor, &output_tensor,
expected_output, expected_dims, expect_failure);
}
@ -194,12 +194,11 @@ TF_LITE_MICRO_TEST(InvalidShape) {
using tflite::testing::IntArrayFromInts;
TfLiteIntArray* input_dims = IntArrayFromInitializer({3, 1, 2, 2});
auto input_data = {3.0f};
auto input_tensor = CreateFloatTensor(input_data, input_dims, "input_tensor");
auto input_tensor = CreateFloatTensor(input_data, input_dims);
float output_data[4];
int output_dims_data[6] = {2, 2, 1, 2, 2, 1};
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
auto output_tensor =
CreateFloatTensor(output_data, output_dims, "input_tensor");
auto output_tensor = CreateFloatTensor(output_data, output_dims);
tflite::testing::TestReshapeImpl<float>(&input_tensor, // input_tensor
nullptr, // shape_tensor
&output_tensor, // output_tensor
@ -258,15 +257,14 @@ TF_LITE_MICRO_TEST(LegacyScalarOutput) {
using tflite::testing::IntArrayFromInts;
TfLiteIntArray* input_dims = IntArrayFromInitializer({1, 1});
auto input_data = {3.0f};
auto input_tensor = CreateFloatTensor(input_data, input_dims, "input_tensor");
auto input_tensor = CreateFloatTensor(input_data, input_dims);
float output_data[1];
int output_dims_data[2] = {1, 0};
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
auto output_tensor =
CreateFloatTensor(output_data, output_dims, "input_tensor");
auto output_tensor = CreateFloatTensor(output_data, output_dims);
TfLiteIntArray* shape_dims = tflite::testing::IntArrayFromInitializer({1, 0});
auto shape_tensor = tflite::testing::CreateTensor<int32_t, kTfLiteInt32>(
{0}, shape_dims, "shape_tensor");
auto shape_tensor =
tflite::testing::CreateTensor<int32_t, kTfLiteInt32>({0}, shape_dims);
tflite::testing::TestReshapeImpl<float>(&input_tensor, // input_tensor
&shape_tensor, // shape_tensor
&output_tensor, // output_tensor


@ -26,22 +26,18 @@ namespace {
using uint8 = std::uint8_t;
using int32 = std::int32_t;
TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims,
const char* name) {
return CreateFloatTensor(data, dims, name);
TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims) {
return CreateFloatTensor(data, dims);
}
TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims,
const char* name) {
return CreateQuantizedTensor(data, dims, name, 0, 255);
TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims) {
return CreateQuantizedTensor(data, dims, 0, 255);
}
TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims,
const char* name) {
return CreateQuantizedTensor(data, dims, name, -128, 127);
TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims) {
return CreateQuantizedTensor(data, dims, -128, 127);
}
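// Note (a hedged aside): these full-range min/max pairs should make the
// quantization an identity mapping -- scale = (255 - 0) / 255 = 1 for uint8
// and (127 - (-128)) / 255 = 1 for int8, each with a zero point of 0 -- so
// the stored byte values pass through unchanged.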
// Input data expects a 4-D tensor of [batch, height, width, channels].
// Output data should match the input data's batch and channels.
// Expected sizes should be a 1-D tensor with 2 elements: new_height & new_width.
@ -62,9 +58,9 @@ void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data,
constexpr int tensors_size = 3;
TfLiteTensor tensors[tensors_size] = {
TestCreateTensor(input_data, input_dims, "input_tensor"),
CreateInt32Tensor(expected_size_data, expected_size_dims, "size_tensor"),
TestCreateTensor(output_data, output_dims, "output_tensor"),
TestCreateTensor(input_data, input_dims),
CreateInt32Tensor(expected_size_data, expected_size_dims),
TestCreateTensor(output_data, output_dims),
};
TfLiteContext context;


@ -32,8 +32,8 @@ void TestRound(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);


@ -36,8 +36,8 @@ void TestSoftmaxFloat(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -100,10 +100,8 @@ void TestSoftmaxQuantized(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;
@ -166,10 +164,8 @@ void TestSoftmaxQuantizedSigned(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;


@ -45,10 +45,10 @@ void TestSplitTwoOutputsFloat(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
CreateFloatTensor(output2_data, output2_dims, "output2_tensor")};
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output1_data, output1_dims),
CreateFloatTensor(output2_data, output2_dims)};
// Currently only a constant axis tensor is supported.
tensors[0].allocation_type = kTfLiteMmapRo;
@ -141,12 +141,12 @@ void TestSplitFourOutputsFloat(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
CreateFloatTensor(output2_data, output2_dims, "output2_tensor"),
CreateFloatTensor(output3_data, output1_dims, "output3_tensor"),
CreateFloatTensor(output4_data, output1_dims, "output4_tensor")};
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output1_data, output1_dims),
CreateFloatTensor(output2_data, output2_dims),
CreateFloatTensor(output3_data, output1_dims),
CreateFloatTensor(output4_data, output1_dims)};
// Currently only a constant axis tensor is supported.
tensors[0].allocation_type = kTfLiteMmapRo;
@ -243,12 +243,10 @@ void TestSplitTwoOutputsQuantized(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
CreateQuantizedTensor(input_data, input_dims, "input_tensor", 0, 10),
CreateQuantizedTensor(output1_data, output1_dims, "output1_tensor", 0,
10),
CreateQuantizedTensor(output2_data, output2_dims, "output2_tensor", 0,
10)};
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateQuantizedTensor(input_data, input_dims, 0, 10),
CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
CreateQuantizedTensor(output2_data, output2_dims, 0, 10)};
// Currently only a constant axis tensor is supported.
tensors[0].allocation_type = kTfLiteMmapRo;
@ -332,12 +330,10 @@ void TestSplitTwoOutputsQuantized32(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
CreateQuantized32Tensor(input_data, input_dims, "input_tensor", 1.0),
CreateQuantized32Tensor(output1_data, output1_dims, "output1_tensor",
1.0),
CreateQuantized32Tensor(output2_data, output2_dims, "output2_tensor",
1.0)};
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateQuantized32Tensor(input_data, input_dims, 1.0),
CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
CreateQuantized32Tensor(output2_data, output2_dims, 1.0)};
// Currently only a constant axis tensor is supported.
tensors[0].allocation_type = kTfLiteMmapRo;


@ -25,15 +25,13 @@ namespace {
template <typename input_type = int32_t,
TfLiteType tensor_input_type = kTfLiteInt32>
inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
const char* name, bool is_variable = false) {
bool is_variable = false) {
TfLiteTensor result;
result.type = tensor_input_type;
result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
result.dims = dims;
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(input_type);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
@ -41,9 +39,9 @@ inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
template <typename input_type = int32_t,
TfLiteType tensor_input_type = kTfLiteInt32>
inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
TfLiteIntArray* dims, const char* name,
TfLiteIntArray* dims,
bool is_variable = false) {
return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
return CreateTensor<input_type, tensor_input_type>(data.begin(), dims,
is_variable);
}
@ -73,15 +71,11 @@ void TestStrideSlide(std::initializer_list<int> input_shape,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateTensor<input_type, tensor_input_type>(input_data, input_dims,
"input_tensor"),
CreateTensor<int32_t, kTfLiteInt32>(begin_data, begin_dims,
"begin_tensor"),
CreateTensor<int32_t, kTfLiteInt32>(end_data, end_dims, "end_tensor"),
CreateTensor<int32_t, kTfLiteInt32>(strides_data, strides_dims,
"stride_tensor"),
CreateTensor<input_type, tensor_input_type>(output_data, output_dims,
"output_tensor"),
CreateTensor<input_type, tensor_input_type>(input_data, input_dims),
CreateTensor<int32_t, kTfLiteInt32>(begin_data, begin_dims),
CreateTensor<int32_t, kTfLiteInt32>(end_data, end_dims),
CreateTensor<int32_t, kTfLiteInt32>(strides_data, strides_dims),
CreateTensor<input_type, tensor_input_type>(output_data, output_dims),
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);


@ -129,9 +129,9 @@ void TestSubFloat(const int* input1_dims_data, const float* input1_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input1_data, input1_dims),
CreateFloatTensor(input2_data, input2_dims),
CreateFloatTensor(output_data, output_dims),
};
ValidateSubGoldens(tensors, tensors_size, expected_output, output_data,
@ -156,15 +156,14 @@ void TestSubQuantized(const int* input1_dims_data, const float* input1_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
tflite::testing::CreateQuantizedTensor(
input1_data, input1_quantized, input1_dims, input1_scale,
input1_zero_point, "input1_tensor"),
tflite::testing::CreateQuantizedTensor(
input2_data, input2_quantized, input2_dims, input2_scale,
input2_zero_point, "input2_tensor"),
tflite::testing::CreateQuantizedTensor(input1_data, input1_quantized,
input1_dims, input1_scale,
input1_zero_point),
tflite::testing::CreateQuantizedTensor(input2_data, input2_quantized,
input2_dims, input2_scale,
input2_zero_point),
tflite::testing::CreateQuantizedTensor(output_data, output_dims,
output_scale, output_zero_point,
"output_tensor"),
output_scale, output_zero_point),
};
tflite::AsymmetricQuantize(golden, golden_quantized,
ElementCount(*output_dims), output_scale,


@ -341,13 +341,12 @@ void TestSVDF(const int batch_size, const int num_units, const int input_size,
const int tensor_count = 5; // 4 inputs, 1 output
TfLiteTensor tensors[] = {
CreateFloatTensor(input_data, input_dims, "input"),
CreateFloatTensor(weights_feature_data, weights_feature_dims,
"weights_feature"),
CreateFloatTensor(weights_time_data, weights_time_dims, "weights_time"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(weights_feature_data, weights_feature_dims),
CreateFloatTensor(weights_time_data, weights_time_dims),
CreateFloatTensor(activation_state_data, activation_state_dims,
"activation_state", true /* is_variable */),
CreateFloatTensor(output_data, output_dims, "output"),
/*is_variable=*/true),
CreateFloatTensor(output_data, output_dims),
};
ValidateSVDFGoldens(batch_size, num_units, input_size, rank, tensors,
@ -393,19 +392,17 @@ inline void TestIntegerSVDF(
TfLiteTensor tensors[] = {
CreateQuantizedTensor(input_data, input_dims, input_scale,
0 /* zero-point */, "input"),
/*zero_point=*/0),
CreateQuantizedTensor(weights_feature_data, weights_feature_dims,
weights_feature_scale, 0 /* zero-point */,
"weights_feature"),
weights_feature_scale, /*zero_point=*/0),
CreateQuantizedTensor(weights_time_data, weights_time_dims,
weights_time_scale, 0 /* zero-point */,
"weights_time"),
CreateQuantized32Tensor(bias_data, bias_dims, "bias", bias_scale),
weights_time_scale, /*zero_point=*/0),
CreateQuantized32Tensor(bias_data, bias_dims, bias_scale),
CreateQuantizedTensor(activation_state_data, activation_state_dims,
activation_scale, 0 /* zero-point */,
"activation_state", true /* is_variable */),
activation_scale, /*zero_point=*/0,
/*is_variable=*/true),
CreateQuantizedTensor(output_data, output_dims, output_scale,
0 /* zero-point */, "output")};
/*zero_point=*/0)};
// TODO(b/147839421): Affine Quantization Params should be set on tensor
// creation.
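Until that TODO lands, tests can attach the affine quantization parameters
right after creation. A minimal sketch of the pattern, assuming the
surrounding test's weights_feature_scale and tensors array (the leading
element of each array is its length, per these helpers' convention):

  float wf_scales[] = {1, weights_feature_scale};
  int wf_zero_points[] = {1, 0};
  TfLiteAffineQuantization wf_quant = {
      tflite::testing::FloatArrayFromFloats(wf_scales),
      tflite::testing::IntArrayFromInts(wf_zero_points),
      /*quantized_dimension=*/0};
  tensors[1].quantization = {kTfLiteAffineQuantization, &wf_quant};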


@ -36,8 +36,8 @@ void TestTanhFloat(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor"),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims),
};
TfLiteContext context;
@ -96,10 +96,8 @@ void TestTanhInt8(std::initializer_list<int> input_dims_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
input_max),
CreateQuantizedTensor(output_data, output_dims, "output_tensor",
output_min, output_max),
CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
};
TfLiteContext context;


@ -45,10 +45,10 @@ void TestUnpackThreeOutputsFloat(
constexpr int output_size = 3;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
CreateFloatTensor(output2_data, output2_dims, "output2_tensor"),
CreateFloatTensor(output3_data, output3_dims, "output3_tensor")};
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output1_data, output1_dims),
CreateFloatTensor(output2_data, output2_dims),
CreateFloatTensor(output3_data, output3_dims)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output1_dims_count; ++i) {
@ -132,8 +132,8 @@ void TestUnpackOneOutputFloat(std::initializer_list<int> input_dims_data,
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims, "input_tensor"),
CreateFloatTensor(output_data, output_dims, "output_tensor")};
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output_data, output_dims)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {
@ -211,13 +211,10 @@ void TestUnpackThreeOutputsQuantized(
// CreateQuantizedTensor needs min/max values as input, but these values
// don't matter for the functionality of UNPACK, so they are just set to 0
// and 10.
CreateQuantizedTensor(input_data, input_dims, "input_tensor", 0, 10),
CreateQuantizedTensor(output1_data, output1_dims, "output1_tensor", 0,
10),
CreateQuantizedTensor(output2_data, output2_dims, "output2_tensor", 0,
10),
CreateQuantizedTensor(output3_data, output3_dims, "output3_tensor", 0,
10)};
CreateQuantizedTensor(input_data, input_dims, 0, 10),
CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
CreateQuantizedTensor(output2_data, output2_dims, 0, 10),
CreateQuantizedTensor(output3_data, output3_dims, 0, 10)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output1_dims_count; ++i) {
@ -307,13 +304,10 @@ void TestUnpackThreeOutputsQuantized32(
constexpr int output_size = 3;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(input_data, input_dims, "input_tensor", 1.0),
CreateQuantized32Tensor(output1_data, output1_dims, "output1_tensor",
1.0),
CreateQuantized32Tensor(output2_data, output2_dims, "output2_tensor",
1.0),
CreateQuantized32Tensor(output3_data, output3_dims, "output3_tensor",
1.0)};
CreateQuantized32Tensor(input_data, input_dims, 1.0),
CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
CreateQuantized32Tensor(output2_data, output2_dims, 1.0),
CreateQuantized32Tensor(output3_data, output3_dims, 1.0)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output1_dims_count; ++i) {


@ -426,9 +426,6 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
result->quantization = {kTfLiteAffineQuantization, quantization};
}
if (flatbuffer_tensor.name() != nullptr) {
result->name = flatbuffer_tensor.name()->c_str();
}
return kTfLiteOk;
}
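With the name field gone, code that still needs a human-readable tensor name
(e.g. for debug logging) can read it from the flatbuffer directly instead of
from TfLiteTensor. A hedged sketch; the helper name is illustrative and
assumes the schema-generated tflite::Tensor type is available:

  // Returns the flatbuffer tensor's name, or a placeholder when absent.
  const char* TensorNameOrDefault(const tflite::Tensor& flatbuffer_tensor) {
    return flatbuffer_tensor.name() != nullptr
               ? flatbuffer_tensor.name()->c_str()
               : "<unnamed>";
  }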


@ -124,10 +124,9 @@ void PrintInterpreterState(MicroInterpreter* interpreter) {
for (size_t tensor_index = 0; tensor_index < interpreter->tensors_size();
tensor_index++) {
TfLiteTensor* tensor = interpreter->tensor(static_cast<int>(tensor_index));
printf("Tensor %3zu %-20s %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
tensor->name, TensorTypeName(tensor->type),
AllocTypeName(tensor->allocation_type), tensor->bytes,
static_cast<double>(tensor->bytes / (1 << 20)));
printf("Tensor %3zu %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
TensorTypeName(tensor->type), AllocTypeName(tensor->allocation_type),
tensor->bytes, static_cast<double>(tensor->bytes / (1 << 20)));
PrintTfLiteIntVector(tensor->dims);
}
printf("\n");


@ -758,22 +758,19 @@ TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
}
TfLiteTensor CreateTensor(TfLiteIntArray* dims, const char* name,
bool is_variable) {
TfLiteTensor CreateTensor(TfLiteIntArray* dims, bool is_variable) {
TfLiteTensor result;
result.dims = dims;
result.name = name;
result.params = {};
result.quantization = {kTfLiteNoQuantization, nullptr};
result.is_variable = is_variable;
result.allocation_type = kTfLiteMemNone;
result.allocation = nullptr;
return result;
}
TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
const char* name, bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
bool is_variable) {
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteFloat32;
result.data.f = const_cast<float*>(data);
result.bytes = ElementCount(*dims) * sizeof(float);
@ -789,8 +786,8 @@ void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end) {
}
TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
const char* name, bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
bool is_variable) {
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteBool;
result.data.b = const_cast<bool*>(data);
result.bytes = ElementCount(*dims) * sizeof(bool);
@ -798,8 +795,8 @@ TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
}
TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
const char* name, bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
bool is_variable) {
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteInt32;
result.data.i32 = const_cast<int32_t*>(data);
result.bytes = ElementCount(*dims) * sizeof(int32_t);
@ -808,8 +805,8 @@ TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
float scale, int zero_point,
const char* name, bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
bool is_variable) {
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteUInt8;
result.data.uint8 = const_cast<uint8_t*>(data);
result.params = {scale, zero_point};
@ -820,8 +817,8 @@ TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
float scale, int zero_point,
const char* name, bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
bool is_variable) {
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteInt8;
result.data.int8 = const_cast<int8_t*>(data);
result.params = {scale, zero_point};
@ -832,8 +829,8 @@ TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
float scale, int zero_point,
const char* name, bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
bool is_variable) {
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteInt16;
result.data.i16 = const_cast<int16_t*>(data);
result.params = {scale, zero_point};
@ -842,38 +839,30 @@ TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
return result;
}
TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
float scale, const char* name,
bool is_variable) {
TfLiteTensor result = CreateTensor(dims, name, is_variable);
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
TfLiteIntArray* dims, float input_scale,
float weights_scale, bool is_variable) {
float bias_scale = input_scale * weights_scale;
tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteInt32;
result.data.i32 = const_cast<int32_t*>(data);
result.data.i32 = const_cast<int32_t*>(quantized);
// Quantized int32 tensors always have a zero point of 0: the range of int32
// values is large, and a nonzero zero point would cost extra cycles during
// processing.
result.params = {scale, 0};
result.params = {bias_scale, 0};
result.quantization = {kTfLiteAffineQuantization, nullptr};
result.bytes = ElementCount(*dims) * sizeof(int32_t);
return result;
}
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
TfLiteIntArray* dims, float input_scale,
float weights_scale, const char* name,
bool is_variable) {
float bias_scale = input_scale * weights_scale;
tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
return CreateQuantized32Tensor(quantized, dims, bias_scale, name,
is_variable);
}
// Quantizes int32 bias tensor with per-channel weights determined by input
// scale multiplied by weight scale for each channel.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
const float* input, int32_t* quantized, TfLiteIntArray* dims,
float input_scale, float* weight_scales, float* scales, int* zero_points,
TfLiteAffineQuantization* affine_quant, int quantized_dimension,
const char* name, bool is_variable) {
bool is_variable) {
int input_size = ElementCount(*dims);
int num_channels = dims->data[quantized_dimension];
// First element is reserved for array length
@ -892,7 +881,7 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
affine_quant->zero_point = IntArrayFromInts(zero_points);
affine_quant->quantized_dimension = quantized_dimension;
TfLiteTensor result = CreateTensor(dims, name, is_variable);
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteInt32;
result.data.i32 = const_cast<int32_t*>(quantized);
result.quantization = {kTfLiteAffineQuantization, affine_quant};
@ -903,7 +892,7 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
int* zero_points, TfLiteAffineQuantization* affine_quant,
int quantized_dimension, const char* name, bool is_variable) {
int quantized_dimension, bool is_variable) {
int channel_count = dims->data[quantized_dimension];
scales[0] = static_cast<float>(channel_count);
zero_points[0] = channel_count;
@ -919,7 +908,7 @@ TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
affine_quant->zero_point = IntArrayFromInts(zero_points);
affine_quant->quantized_dimension = quantized_dimension;
TfLiteTensor result = CreateTensor(dims, name, is_variable);
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteInt8;
result.data.int8 = const_cast<int8_t*>(quantized);
result.quantization = {kTfLiteAffineQuantization, affine_quant};
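To illustrate the updated helper signatures above, a hedged usage sketch of
CreateQuantizedBiasTensor (values and local names are illustrative):

  int32_t bias_quantized[4];
  int bias_dims_data[] = {1, 4};
  TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_dims_data);
  const float bias_values[] = {0.5f, -1.0f, 2.0f, 0.0f};
  // bias_scale = input_scale * weights_scale = 0.1f * 0.05f = 0.005f with a
  // zero point of 0, so the quantized values should come out {100, -200, 400, 0}.
  TfLiteTensor bias = tflite::testing::CreateQuantizedBiasTensor(
      bias_values, bias_quantized, bias_dims, /*input_scale=*/0.1f,
      /*weights_scale=*/0.05f);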


@ -117,42 +117,40 @@ TfLiteIntArray* IntArrayFromInts(const int* int_array);
TfLiteFloatArray* FloatArrayFromFloats(const float* floats);
TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
const char* name, bool is_variable = false);
bool is_variable = false);
void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end);
TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
const char* name, bool is_variable = false);
bool is_variable = false);
TfLiteTensor CreateInt32Tensor(const int32_t*, TfLiteIntArray* dims,
const char* name, bool is_variable = false);
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
float scale, int zero_point,
const char* name, bool is_variable = false);
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
float scale, int zero_point,
const char* name, bool is_variable = false);
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
float scale, int zero_point,
const char* name, bool is_variable = false);
bool is_variable = false);
template <typename T>
TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized,
TfLiteIntArray* dims, float scale,
int zero_point, const char* name,
bool is_variable = false) {
int zero_point, bool is_variable = false) {
int input_size = ElementCount(*dims);
tflite::AsymmetricQuantize(input, quantized, input_size, scale, zero_point);
return CreateQuantizedTensor(quantized, dims, scale, zero_point, name,
is_variable);
return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable);
}
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
TfLiteIntArray* dims, float input_scale,
float weights_scale, const char* name,
float weights_scale,
bool is_variable = false);
// Quantizes int32 bias tensor with per-channel weights determined by input
@ -161,12 +159,12 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
const float* input, int32_t* quantized, TfLiteIntArray* dims,
float input_scale, float* weight_scales, float* scales, int* zero_points,
TfLiteAffineQuantization* affine_quant, int quantized_dimension,
const char* name, bool is_variable = false);
bool is_variable = false);
TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
int* zero_points, TfLiteAffineQuantization* affine_quant,
int quantized_dimension, const char* name, bool is_variable = false);
int quantized_dimension, bool is_variable = false);
} // namespace testing
} // namespace tflite
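A hedged usage sketch of the templated CreateQuantizedTensor overload above
(values are illustrative): quantize four floats to int8 with scale 0.5 and
zero point 0, then wrap them in a TfLiteTensor.

  int dims_data[] = {2, 2, 2};
  TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);
  const float values[] = {1.0f, -0.5f, 0.0f, 2.0f};
  int8_t quantized[4];
  TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
      values, quantized, dims, /*scale=*/0.5f, /*zero_point=*/0);
  // quantized should now hold {2, -1, 0, 4} and tensor.params = {0.5f, 0}.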


@ -149,20 +149,17 @@ void PopulateContext(TfLiteTensor* tensors, int tensors_size,
}
TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
TfLiteIntArray* dims, const char* name,
bool is_variable) {
return CreateFloatTensor(data.begin(), dims, name, is_variable);
TfLiteIntArray* dims, bool is_variable) {
return CreateFloatTensor(data.begin(), dims, is_variable);
}
TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
TfLiteIntArray* dims, const char* name,
bool is_variable) {
return CreateBoolTensor(data.begin(), dims, name, is_variable);
TfLiteIntArray* dims, bool is_variable) {
return CreateBoolTensor(data.begin(), dims, is_variable);
}
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
const char* name, float min, float max,
bool is_variable) {
float min, float max, bool is_variable) {
TfLiteTensor result;
result.type = kTfLiteUInt8;
result.data.uint8 = const_cast<uint8_t*>(data);
@ -171,21 +168,18 @@ TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
ZeroPointFromMinMax<uint8_t>(min, max)};
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(uint8_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = false;
return result;
}
TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
TfLiteIntArray* dims, const char* name,
float min, float max, bool is_variable) {
return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
TfLiteIntArray* dims, float min, float max,
bool is_variable) {
return CreateQuantizedTensor(data.begin(), dims, min, max, is_variable);
}
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
const char* name, float min, float max,
bool is_variable) {
float min, float max, bool is_variable) {
TfLiteTensor result;
result.type = kTfLiteInt8;
result.data.int8 = const_cast<int8_t*>(data);
@ -194,21 +188,18 @@ TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
ZeroPointFromMinMax<int8_t>(min, max)};
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(int8_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
TfLiteIntArray* dims, const char* name,
float min, float max, bool is_variable) {
return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
TfLiteIntArray* dims, float min, float max,
bool is_variable) {
return CreateQuantizedTensor(data.begin(), dims, min, max, is_variable);
}
TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
TfLiteIntArray* dims, const char* name,
bool is_variable) {
TfLiteIntArray* dims, bool is_variable) {
TfLiteTensor result;
SymmetricQuantize(data, dims, quantized_data, &result.params.scale);
result.data.uint8 = quantized_data;
@ -217,15 +208,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
result.params.zero_point = 128;
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(uint8_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
TfLiteIntArray* dims, const char* name,
bool is_variable) {
TfLiteIntArray* dims, bool is_variable) {
TfLiteTensor result;
SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
result.data.int8 = quantized_data;
@ -234,15 +222,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
result.params.zero_point = 0;
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(int8_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
TfLiteIntArray* dims, const char* name,
bool is_variable) {
TfLiteIntArray* dims, bool is_variable) {
TfLiteTensor result;
SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
result.data.i16 = quantized_data;
@ -251,15 +236,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
result.params.zero_point = 0;
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(int16_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
const char* name, float scale,
bool is_variable) {
float scale, bool is_variable) {
TfLiteTensor result;
result.type = kTfLiteInt32;
result.data.i32 = const_cast<int32_t*>(data);
@ -270,16 +252,14 @@ TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
result.params = {scale, 0};
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(int32_t);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
TfLiteIntArray* dims, const char* name,
float scale, bool is_variable) {
return CreateQuantized32Tensor(data.begin(), dims, name, scale, is_variable);
TfLiteIntArray* dims, float scale,
bool is_variable) {
return CreateQuantized32Tensor(data.begin(), dims, scale, is_variable);
}
} // namespace testing


@ -80,63 +80,56 @@ void PopulateContext(TfLiteTensor* tensors, int tensors_size,
ErrorReporter* error_reporter, TfLiteContext* context);
TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
TfLiteIntArray* dims, const char* name,
bool is_variable = false);
TfLiteIntArray* dims, bool is_variable = false);
TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
TfLiteIntArray* dims, const char* name,
bool is_variable = false);
TfLiteIntArray* dims, bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
const char* name, float min, float max,
float min, float max,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
TfLiteIntArray* dims, const char* name,
float min, float max,
TfLiteIntArray* dims, float min, float max,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
const char* name, float min, float max,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
TfLiteIntArray* dims, const char* name,
float min, float max,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
TfLiteIntArray* dims, float min, float max,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
TfLiteIntArray* dims, const char* name,
TfLiteIntArray* dims,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
TfLiteIntArray* dims, const char* name,
TfLiteIntArray* dims,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
TfLiteIntArray* dims, const char* name,
TfLiteIntArray* dims,
bool is_variable = false);
TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
const char* name, float scale,
bool is_variable = false);
float scale, bool is_variable = false);
TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
TfLiteIntArray* dims, const char* name,
float scale, bool is_variable = false);
TfLiteIntArray* dims, float scale,
bool is_variable = false);
template <typename input_type = int32_t,
TfLiteType tensor_input_type = kTfLiteInt32>
inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
const char* name, bool is_variable = false) {
bool is_variable = false) {
TfLiteTensor result;
result.type = tensor_input_type;
result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
result.dims = dims;
result.allocation_type = kTfLiteMemNone;
result.bytes = ElementCount(*dims) * sizeof(input_type);
result.allocation = nullptr;
result.name = name;
result.is_variable = is_variable;
return result;
}
@ -144,9 +137,9 @@ inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
template <typename input_type = int32_t,
TfLiteType tensor_input_type = kTfLiteInt32>
inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
TfLiteIntArray* dims, const char* name,
TfLiteIntArray* dims,
bool is_variable = false) {
return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
return CreateTensor<input_type, tensor_input_type>(data.begin(), dims,
is_variable);
}
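For the legacy min/max-based helpers above, a hedged usage sketch (values are
illustrative): with min = 0 and max = 10, the scale works out to 10 / 255
(about 0.039) with a zero point of 0, so the byte value 255 represents roughly
10.0f.

  int dims_data[] = {1, 4};
  TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);
  const uint8_t data[] = {0, 128, 200, 255};
  TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
      data, dims, /*min=*/0.0f, /*max=*/10.0f);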


@ -23,7 +23,6 @@ TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) {
float weight_scale = 0.5;
constexpr int tensor_size = 12;
int dims_arr[] = {4, 2, 3, 2, 1};
const char* tensor_name = "test_tensor";
int32_t quantized[tensor_size];
float pre_quantized[] = {-10, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 10};
int32_t expected_quantized_values[] = {-40, -20, -16, -12, -8, -4,
@ -31,11 +30,10 @@ TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) {
TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);
TfLiteTensor result = tflite::testing::CreateQuantizedBiasTensor(
pre_quantized, quantized, dims, input_scale, weight_scale, tensor_name);
pre_quantized, quantized, dims, input_scale, weight_scale);
TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
TF_LITE_MICRO_EXPECT_EQ(result.params.scale, input_scale * weight_scale);
for (int i = 0; i < tensor_size; i++) {
TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
@ -48,7 +46,6 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
constexpr int tensor_size = 12;
const int channels = 4;
int dims_arr[] = {4, 4, 3, 1, 1};
const char* tensor_name = "test_tensor";
int32_t quantized[tensor_size];
float scales[channels + 1];
int zero_points[] = {4, 0, 0, 0, 0};
@ -60,7 +57,7 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
TfLiteAffineQuantization quant;
TfLiteTensor result = tflite::testing::CreatePerChannelQuantizedBiasTensor(
pre_quantized, quantized, dims, input_scale, weight_scales, scales,
zero_points, &quant, 0, tensor_name);
zero_points, &quant, 0);
// Values in the scales array start at index 1, since index 0 is reserved for
// the array length (the channel count).
@ -70,7 +67,6 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
for (int i = 0; i < tensor_size; i++) {
TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
}
@ -80,7 +76,6 @@ TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) {
const int tensor_size = 12;
constexpr int channels = 2;
const int dims_arr[] = {4, channels, 3, 2, 1};
const char* tensor_name = "test_tensor";
int8_t quantized[12];
const float pre_quantized[] = {-127, -55, -4, -3, -2, -1,
0, 1, 2, 3, 4, 63.5};
@ -94,12 +89,10 @@ TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) {
TfLiteAffineQuantization quant;
TfLiteTensor result =
tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
pre_quantized, quantized, dims, scales, zero_points, &quant, 0,
"test_tensor");
pre_quantized, quantized, dims, scales, zero_points, &quant, 0);
TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int8_t));
TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
TfLiteFloatArray* result_scales =
static_cast<TfLiteAffineQuantization*>(result.quantization.params)->scale;
for (int i = 0; i < channels; i++) {