Remove TF Micro tests' use of the "name" field in TfLiteTensor.

The TFLM team is preparing to provide an "optimized" memory build option. This build option will eliminate non-essential fields from core TFLite structs. The first big change is to reduce the number of pointers in TfLiteTensor: many models have dozens of tensors (e.g. the keyword benchmark model has 54), and each pointer adds up for TFLM. This cleanup pass removes the soon-to-be-unused 'name' field from TfLiteTensor.
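To put the savings in perspective: on a typical 32-bit microcontroller a pointer is 4 bytes, so dropping one pointer per tensor saves 54 * 4 = 216 bytes of TfLiteTensor storage for the keyword benchmark alone. The change at every call site is mechanical (one fewer argument); a minimal sketch of the signature change, using CreateFloatTensor as the example and with the parameter lists reconstructed from the call sites in the diff below:

// Before: each test helper took a name string that was stored on the
// TfLiteTensor (sketch reconstructed from the old call sites).
TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
                               const char* name);

// After: the name parameter is dropped, along with the pointer field
// it populated on TfLiteTensor.
TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims);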

PiperOrigin-RevId: 316000388
Change-Id: I230865014d5a59b78c1c1c9f5eda784f6d611e77
Nick Kreeger 2020-06-11 16:24:01 -07:00 committed by TensorFlower Gardener
parent 8b997d655d
commit b6d13bb0a8
46 changed files with 472 additions and 630 deletions

View File

@@ -160,8 +160,7 @@ int main() {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[32];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
   float input_scales[] = {1, input_scale};
@@ -174,7 +173,7 @@ int main() {
   int8_t filter_quantized[32 * 32];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale,
-      filter_zero_point, "filter_tensor");
+      filter_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, filter_zero_point};
   float filter_scales[] = {1, filter_scale};
@@ -187,8 +186,8 @@ int main() {
   int32_t bias_quantized[32];
   tflite::SymmetricQuantize(bias_values, bias_quantized, 32,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // There is a single zero point of 0, and a single scale of
   // input_scale * filter_scale.
@@ -202,8 +201,7 @@ int main() {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[32];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};
   float output_scales[] = {1, output_scale};

View File

@@ -166,8 +166,7 @@ int main() {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[input_elements];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
@@ -180,8 +179,7 @@ int main() {
   // Create per-tensor quantized int8 filter tensor.
   int8_t filter_quantized[filter_elements];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
-      filter_values, filter_quantized, filter_dims, filter_scale, 0,
-      "filter_tensor");
+      filter_values, filter_quantized, filter_dims, filter_scale, 0);
 
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, 0};
@@ -197,8 +195,8 @@ int main() {
   // detailed explanation of why bias scale is input_scale * filter_scale.
   tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // Set zero point and scale arrays with a single element for each.
   int bias_zero_points[] = {1, 0};
@@ -211,8 +209,7 @@ int main() {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[output_elements];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};

View File

@@ -82,7 +82,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBasic) {
   auto result_dims = {2, 1, 4};
   TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
       result_data, tflite::testing::IntArrayFromInitializer(result_dims),
-      "input_tensor", -128.0f, 127.0f);
+      -128.0f, 127.0f);
 
   const char* found_command;
   uint8_t score;
@@ -101,8 +101,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) {
   std::initializer_list<int8_t> yes_data = {-128, -128, 127, -128};
   auto yes_dims = {2, 1, 4};
   TfLiteTensor yes_results = tflite::testing::CreateQuantizedTensor(
-      yes_data, tflite::testing::IntArrayFromInitializer(yes_dims),
-      "input_tensor", -128.0f, 127.0f);
+      yes_data, tflite::testing::IntArrayFromInitializer(yes_dims), -128.0f,
+      127.0f);
 
   bool has_found_new_command = false;
   const char* new_command;
@@ -129,8 +129,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) {
   std::initializer_list<int8_t> no_data = {-128, -128, -128, 127};
   auto no_dims = {2, 1, 4};
   TfLiteTensor no_results = tflite::testing::CreateQuantizedTensor(
-      no_data, tflite::testing::IntArrayFromInitializer(no_dims),
-      "input_tensor", -128.0f, 127.0f);
+      no_data, tflite::testing::IntArrayFromInitializer(no_dims), -128.0f,
+      127.0f);
   has_found_new_command = false;
   new_command = "";
   uint8_t score;
@@ -164,8 +164,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputLength) {
   std::initializer_list<int8_t> bad_data = {-128, -128, 127};
   auto bad_dims = {2, 1, 3};
   TfLiteTensor bad_results = tflite::testing::CreateQuantizedTensor(
-      bad_data, tflite::testing::IntArrayFromInitializer(bad_dims),
-      "input_tensor", -128.0f, 127.0f);
+      bad_data, tflite::testing::IntArrayFromInitializer(bad_dims), -128.0f,
+      127.0f);
 
   const char* found_command;
   uint8_t score;
@@ -185,7 +185,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputTimes) {
   auto result_dims = {2, 1, 4};
   TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
       result_data, tflite::testing::IntArrayFromInitializer(result_dims),
-      "input_tensor", -128.0f, 127.0f);
+      -128.0f, 127.0f);
 
   const char* found_command;
   uint8_t score;
@@ -208,7 +208,7 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestTooFewInputs) {
   auto result_dims = {2, 1, 4};
   TfLiteTensor results = tflite::testing::CreateQuantizedTensor(
       result_data, tflite::testing::IntArrayFromInitializer(result_dims),
-      "input_tensor", -128.0f, 127.0f);
+      -128.0f, 127.0f);
 
   const char* found_command;
   uint8_t score;

View File

@@ -34,8 +34,8 @@ void TestReluFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -90,8 +90,8 @@ void TestRelu6Float(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -150,9 +150,9 @@ void TestReluUint8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;
@@ -215,9 +215,9 @@ void TestRelu6Uint8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;
@@ -279,9 +279,9 @@ void TestReluInt8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;
@@ -345,9 +345,9 @@ void TestRelu6Int8(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor"),
+                            output_zero_point),
   };
 
   TfLiteContext context;

View File

@@ -129,9 +129,9 @@ void TestAddFloat(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateAddGoldens(tensors, tensors_size, expected_output, output_data,
@@ -156,15 +156,14 @@ void TestAddQuantized(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      tflite::testing::CreateQuantizedTensor(
-          input1_data, input1_quantized, input1_dims, input1_scale,
-          input1_zero_point, "input1_tensor"),
-      tflite::testing::CreateQuantizedTensor(
-          input2_data, input2_quantized, input2_dims, input2_scale,
-          input2_zero_point, "input2_tensor"),
+      tflite::testing::CreateQuantizedTensor(input1_data, input1_quantized,
+                                             input1_dims, input1_scale,
+                                             input1_zero_point),
+      tflite::testing::CreateQuantizedTensor(input2_data, input2_quantized,
+                                             input2_dims, input2_scale,
+                                             input2_zero_point),
       tflite::testing::CreateQuantizedTensor(output_data, output_dims,
-                                             output_scale, output_zero_point,
-                                             "output_tensor"),
+                                             output_scale, output_zero_point),
   };
 
   tflite::AsymmetricQuantize(golden, golden_quantized,
                              ElementCount(*output_dims), output_scale,

View File

@@ -83,9 +83,9 @@ void TestArgMinMaxFloat(const int* input_dims_data, const float* input_values,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_values, input_dims, "input_tensor"),
-      CreateInt32Tensor(axis_values, axis_dims, "axis_tensor"),
-      CreateInt32Tensor(output, output_dims, "output_tensor"),
+      CreateFloatTensor(input_values, input_dims),
+      CreateInt32Tensor(axis_values, axis_dims),
+      CreateInt32Tensor(output, output_dims),
   };
 
   ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,
@@ -110,9 +110,9 @@ void TestArgMinMaxQuantized(const int* input_dims_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_values, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(axis_values, axis_dims, "axis_tensor"),
-      CreateInt32Tensor(output, output_dims, "output_tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(axis_values, axis_dims),
+      CreateInt32Tensor(output, output_dims),
   };
 
   ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,

View File

@@ -32,8 +32,8 @@ void TestCeil(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@@ -44,8 +44,8 @@ TfLiteNode PrepareCircularBufferInt8(const int* input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, 1, 0, "input_tensor"),
-      CreateQuantizedTensor(output_data, output_dims, 1, 0, "output_tensor"),
+      CreateQuantizedTensor(input_data, input_dims, 1, 0),
+      CreateQuantizedTensor(output_data, output_dims, 1, 0),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@@ -92,8 +92,8 @@ TfLiteStatus InvokeCircularBufferInt8(const int* input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, 1, 0, "input_tensor"),
-      CreateQuantizedTensor(output_data, output_dims, 1, 0, "output_tensor"),
+      CreateQuantizedTensor(input_data, input_dims, 1, 0),
+      CreateQuantizedTensor(output_data, output_dims, 1, 0),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@@ -78,9 +78,9 @@ void TestComparisonFloat(tflite::BuiltinOperator op, int* input1_dims_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
 
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -95,9 +95,9 @@ void TestComparisonBool(tflite::BuiltinOperator op, int* input1_dims_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
 
   TfLiteTensor tensors[tensors_size] = {
-      CreateBoolTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateBoolTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateBoolTensor(input1_data, input1_dims),
+      CreateBoolTensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -112,9 +112,9 @@ void TestComparisonInt(tflite::BuiltinOperator op, int* input1_dims_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
 
   TfLiteTensor tensors[tensors_size] = {
-      CreateInt32Tensor(input1_data, input1_dims, "input1_tensor"),
-      CreateInt32Tensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateInt32Tensor(input1_data, input1_dims),
+      CreateInt32Tensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -135,10 +135,10 @@ void TestComparisonQuantizedUInt8(tflite::BuiltinOperator op,
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input1_data, input1_quantized, input1_dims,
-                            input1_scale, input1_zero_point, "input1_tensor"),
+                            input1_scale, input1_zero_point),
       CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
-                            input2_scale, input2_zero_point, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+                            input2_scale, input2_zero_point),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);
@@ -159,10 +159,10 @@ void TestComparisonQuantizedInt8(tflite::BuiltinOperator op,
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input1_data, input1_quantized, input1_dims,
-                            input1_scale, input1_zero_point, "input1_tensor"),
+                            input1_scale, input1_zero_point),
       CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
-                            input2_scale, input2_zero_point, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+                            input2_scale, input2_zero_point),
+      CreateBoolTensor(output_data, output_dims),
   };
 
   TestComparison(op, tensors, expected_output_data, output_data);

View File

@@ -63,8 +63,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
       TF_LITE_KERNEL_LOG(
           context,
           "Op Concatenation does not currently support num dimensions >4 "
-          "Tensor '%s' has %d dimensions.",
-          input->name, num_dimensions);
+          "Tensor has %d dimensions.",
+          num_dimensions);
       return kTfLiteError;
     }
   }

View File

@@ -40,9 +40,9 @@ void TestConcatenateTwoInputs(std::initializer_list<int> input1_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@@ -99,12 +99,9 @@ void TestConcatenateQuantizedTwoInputs(
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max)};
+      CreateQuantizedTensor(input1_data, input1_dims, input_min, input_max),
+      CreateQuantizedTensor(input2_data, input2_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max)};
 
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@@ -123,10 +123,10 @@ void TestConvFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(filter_data, filter_dims, "filter_tensor"),
-      CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(filter_data, filter_dims),
+      CreateFloatTensor(bias_data, bias_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TF_LITE_MICRO_EXPECT_EQ(
@@ -157,13 +157,12 @@ void TestConvQuantizedPerLayer(
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, 128, "input_tensor"),
+                            input_scale, 128),
       CreateQuantizedTensor(filter_data, filter_quantized, filter_dims,
-                            filter_scale, 128, "filter_tensor"),
+                            filter_scale, 128),
       CreateQuantizedBiasTensor(bias_data, bias_quantized, bias_dims,
-                                input_scale, filter_scale, "bias_tensor"),
-      CreateQuantizedTensor(output_data, output_dims, output_scale, 128,
-                            "output_tensor")};
+                                input_scale, filter_scale),
+      CreateQuantizedTensor(output_data, output_dims, output_scale, 128)};
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
   float filter_scales[] = {1, filter_scale};
@@ -199,20 +198,16 @@ void TestConvQuantizedPerChannel(
   float filter_scales[5];
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
-  TfLiteTensor input_tensor =
-      CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor");
+  TfLiteTensor input_tensor = CreateQuantizedTensor(
+      input_data, input_quantized, input_dims, input_scale, input_zero_point);
   TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
       filter_data, filter_data_quantized, filter_dims, filter_scales,
-      filter_zero_points, &filter_quant, 0 /* quantized dimension */,
-      "filter_tensor");
+      filter_zero_points, &filter_quant, 0 /* quantized dimension */);
   TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
       bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
-      bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */,
-      "bias_tensor");
-  TfLiteTensor output_tensor =
-      CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor");
+      bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */);
+  TfLiteTensor output_tensor = CreateQuantizedTensor(
+      output_data, output_dims, output_scale, output_zero_point);
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
   float input_scales[] = {1, input_scale};
@@ -479,21 +474,18 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0,
-      "input_tensor");
+      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
   TfLiteTensor filter_tensor =
       tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
           tflite::testing::kFilterData, filter_quantized, filter_dims,
           filter_scales, filter_zero_points, &filter_quant,
-          0 /* quantized dimension */, "filter_tensor");
+          0 /* quantized dimension */);
   TfLiteTensor bias_tensor =
       tflite::testing::CreatePerChannelQuantizedBiasTensor(
          tflite::testing::kBiasData, bias_quantized, bias_dims, input_scale,
-          &filter_scales[1], scales, zero_points, &bias_quant, 0,
-          "bias_tensor");
+          &filter_scales[1], scales, zero_points, &bias_quant, 0);
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, 0 /* quantized dimension */,
-      "output_tensor");
+      output_data, output_dims, output_scale, 0 /* quantized dimension */);
 
   float input_scales[] = {1, input_scale};
   int input_zero_points[] = {1, 128};
@@ -562,8 +554,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
   // Create per-layer quantized int8 input tensor.
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0,
-      "input_tensor");
+      tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
   int input_zero_points[2] = {1, 0};
   float input_scales[2] = {1, input_scale};
   TfLiteAffineQuantization input_quant = {
@@ -574,7 +565,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
   // Create per-layer quantized int8 filter tensor.
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       tflite::testing::kFilterData, filter_quantized, filter_dims, filter_scale,
-      0, "filter_tensor");
+      0);
   int filter_zero_points[2] = {1, 0};
   float filter_scales[2] = {1, filter_scale};
   TfLiteAffineQuantization filter_quant = {
@@ -586,8 +577,8 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
   tflite::SymmetricQuantize(tflite::testing::kBiasData, bias_quantized,
                             tflite::testing::kBiasElements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
   int bias_zero_points[2] = {1, 0};
   float bias_scales[2] = {1, input_scale * filter_scale};
@@ -598,8 +589,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
   // Create per-layer quantized int8 output tensor.
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, 0 /* quantized dimension */,
-      "output_tensor");
+      output_data, output_dims, output_scale, 0 /* quantized dimension */);
   int output_zero_points[2] = {1, 0};
   float output_scales[2] = {1, output_scale};
   TfLiteAffineQuantization output_quant = {
@@ -687,8 +677,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[kSampleSize];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
   float input_scales[] = {1, input_scale};
@@ -701,7 +690,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   int8_t filter_quantized[kNumFilters * kSampleSize];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale,
-      filter_zero_point, "filter_tensor");
+      filter_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, filter_zero_point};
   float filter_scales[] = {1, filter_scale};
@@ -714,8 +703,8 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   int32_t bias_quantized[kSampleSize];
   tflite::SymmetricQuantize(bias_values, bias_quantized, kSampleSize,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // There is a single zero point of 0, and a single scale of
   // input_scale * filter_scale.
@@ -729,8 +718,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[kSampleSize];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};
   float output_scales[] = {1, output_scale};

View File

@@ -121,10 +121,10 @@ void TestDepthwiseConvFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(filter_data, filter_dims, "filter_tensor"),
-      CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(filter_data, filter_dims),
+      CreateFloatTensor(bias_data, bias_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateDepthwiseConvGoldens(expected_output_data, output_dims_count,
@@ -152,16 +152,14 @@ void TestDepthwiseConvQuantizedPerLayer(
   TfLiteTensor tensors[tensors_size] = {
       tflite::testing::CreateQuantizedTensor(input_data, input_quantized,
                                              input_dims, input_scale,
-                                             input_zero_point, "input_tensor"),
-      tflite::testing::CreateQuantizedTensor(
-          filter_data, filter_quantized, filter_dims, filter_scale,
-          filter_zero_point, "filter_tensor"),
-      tflite::testing::CreateQuantizedBiasTensor(bias_data, bias_quantized,
-                                                 bias_dims, input_scale,
-                                                 filter_scale, "bias_tensor"),
+                                             input_zero_point),
+      tflite::testing::CreateQuantizedTensor(filter_data, filter_quantized,
+                                             filter_dims, filter_scale,
+                                             filter_zero_point),
+      tflite::testing::CreateQuantizedBiasTensor(
+          bias_data, bias_quantized, bias_dims, input_scale, filter_scale),
       tflite::testing::CreateQuantizedTensor(output_data, output_dims,
-                                             output_scale, output_zero_point,
-                                             "output_tensor"),
+                                             output_scale, output_zero_point),
   };
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
@@ -206,20 +204,18 @@ void TestDepthwiseConvQuantizedPerChannel(
   float bias_scales[kMaxBiasChannels];
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
-  TfLiteTensor input_tensor =
-      CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor");
+  TfLiteTensor input_tensor = CreateQuantizedTensor(
+      input_data, input_quantized, input_dims, input_scale, input_zero_point);
   TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
       filter_data, filter_data_quantized, filter_dims, filter_scales,
-      filter_zero_points, &filter_quant, 3 /* quantized dimension */,
-      "filter_tensor");
+      filter_zero_points, &filter_quant, 3 /* quantized dimension */
+      );
   TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
       bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
-      bias_scales, bias_zero_points, &bias_quant, 3 /* quantized dimension */,
-      "bias_tensor");
-  TfLiteTensor output_tensor =
-      CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            input_zero_point, "output_tensor");
+      bias_scales, bias_zero_points, &bias_quant, 3 /* quantized dimension */
+      );
+  TfLiteTensor output_tensor = CreateQuantizedTensor(
+      output_data, output_dims, output_scale, input_zero_point);
 
   // TODO(njeff): Affine Quantization Params should be set on tensor creation.
   float input_scales[] = {1, input_scale};
@@ -615,20 +611,17 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
   TfLiteAffineQuantization filter_quant;
   TfLiteAffineQuantization bias_quant;
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_data, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_data, input_quantized, input_dims, input_scale, input_zero_point);
   TfLiteTensor filter_tensor =
       tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
          filter_data, filter_quantized, filter_dims, filter_scales,
-          filter_zero_points, &filter_quant, 0 /* quantized dimension */,
-          "filter_tensor");
+          filter_zero_points, &filter_quant, 0 /* quantized dimension */);
   TfLiteTensor bias_tensor =
       tflite::testing::CreatePerChannelQuantizedBiasTensor(
          bias_data, bias_quantized, bias_dims, input_scale, &filter_scales[1],
-          scales, zero_points, &bias_quant, 0, "bias_tensor");
+          scales, zero_points, &bias_quant, 0);
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_data, output_dims, output_scale, output_zero_point);
 
   float input_scales[] = {1, input_scale};
   int input_zero_points[] = {1, input_zero_point};
@@ -700,8 +693,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
   // Create per-layer quantized int8 input tensor.
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, 0,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, 0);
   int input_zero_points[2] = {1, 0};
   float input_scales[2] = {1, input_scale};
   TfLiteAffineQuantization input_quant = {
@@ -711,8 +703,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
   // Create per-layer quantized int8 filter tensor.
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
-      filter_values, filter_quantized, filter_dims, filter_scale, 0,
-      "filter_tensor");
+      filter_values, filter_quantized, filter_dims, filter_scale, 0);
   int filter_zero_points[2] = {1, 0};
   float filter_scales[2] = {1, filter_scale};
   TfLiteAffineQuantization filter_quant = {
@@ -723,8 +714,8 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
   // Create per-layer quantized int32 bias tensor.
   tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
   int bias_zero_points[2] = {1, 0};
   float bias_scales[2] = {1, input_scale * filter_scale};
@@ -735,7 +726,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
   // Create per-layer quantized int8 output tensor.
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_data, output_dims, output_scale, 0, "output_tensor");
+      output_data, output_dims, output_scale, 0);
   int output_zero_points[2] = {1, 0};
   float output_scales[2] = {1, output_scale};
   TfLiteAffineQuantization output_quant = {
@@ -833,8 +824,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // Create per-tensor quantized int8 input tensor.
   int8_t input_quantized[input_elements];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
-      input_values, input_quantized, input_dims, input_scale, input_zero_point,
-      "input_tensor");
+      input_values, input_quantized, input_dims, input_scale, input_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int input_zero_points[] = {1, input_zero_point};
@@ -847,8 +837,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // Create per-tensor quantized int8 filter tensor.
   int8_t filter_quantized[filter_elements];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
-      filter_values, filter_quantized, filter_dims, filter_scale, 0,
-      "filter_tensor");
+      filter_values, filter_quantized, filter_dims, filter_scale, 0);
 
   // Set zero point and scale arrays with a single element for each.
   int filter_zero_points[] = {1, 0};
@@ -864,8 +853,8 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // detailed explanation of why bias scale is input_scale * filter_scale.
   tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
                             input_scale * output_scale);
-  TfLiteTensor bias_tensor = tflite::testing::CreateInt32Tensor(
-      bias_quantized, bias_dims, "bias_tensor");
+  TfLiteTensor bias_tensor =
+      tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
 
   // Set zero point and scale arrays with a single element for each.
   int bias_zero_points[] = {1, 0};
@@ -878,8 +867,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   // Create per-tensor quantized int8 output tensor.
   int8_t output_quantized[output_elements];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
-      output_quantized, output_dims, output_scale, output_zero_point,
-      "output_tensor");
+      output_quantized, output_dims, output_scale, output_zero_point);
 
   // Set zero point and scale arrays with a single element for each.
   int output_zero_points[] = {1, output_zero_point};

View File

@@ -90,8 +90,8 @@ void TestDequantizeToFloat(const int* input_dims_data, const float* input_data,
   const int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims, scale,
-                            zero_point, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+                            zero_point),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   ValidateDequantizeGoldens(tensors, tensors_size, expected_output_data,
@@ -113,8 +113,8 @@ void TestDequantizeToInt32(const int* input_dims_data, const float* input_data,
   const int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(output_data, output_dims, "output_tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(output_data, output_dims),
   };
 
   TfLiteQuantizationParams output_quant;

View File

@@ -36,8 +36,8 @@ void TestElementwiseFloat(tflite::BuiltinOperator op,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims)};
 
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -101,8 +101,8 @@ void TestElementwiseBool(tflite::BuiltinOperator op,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateBoolTensor(input_data, input_dims, "input_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor")};
+      CreateBoolTensor(input_data, input_dims),
+      CreateBoolTensor(output_data, output_dims)};
 
   // Place false in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {

View File

@@ -33,8 +33,8 @@ void TestFloor(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@@ -44,10 +44,10 @@ TfLiteStatus TestFullyConnectedFloat(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(weights_data, weights_dims, "weights_tensor"),
-      CreateFloatTensor(bias_data, bias_dims, "bias_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(weights_data, weights_dims),
+      CreateFloatTensor(bias_data, bias_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
 
   TfLiteContext context;
@@ -121,13 +121,11 @@ TfLiteStatus TestFullyConnectedQuantized(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(weights_data, weights_dims, "weights_tensor",
-                            weights_min, weights_max),
-      CreateQuantized32Tensor(bias_data, bias_dims, "bias_tensor", bias_scale),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(weights_data, weights_dims, weights_min,
+                            weights_max),
+      CreateQuantized32Tensor(bias_data, bias_dims, bias_scale),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
 
   TfLiteContext context;

View File

@@ -19,7 +19,6 @@ limitations under the License.
 #include "tensorflow/lite/micro/testing/micro_test.h"
 #include "tensorflow/lite/micro/testing/test_utils.h"

 namespace tflite {
 namespace testing {
 namespace {
@@ -30,38 +29,35 @@ constexpr float kInputMax = 2.0;
 constexpr float kOutputMin = -1.0;
 constexpr float kOutputMax = 127.0 / 128.0;

 void QuantizeInputData(const float input_data[], int length,
                        uint8_t* quantized_data) {
-  for (int i=0; i < 6; i++) {
-    quantized_data[i] = tflite::testing::F2Q(input_data[i],
-                                             tflite::testing::kInputMin,
-                                             tflite::testing::kInputMax);
+  for (int i = 0; i < 6; i++) {
+    quantized_data[i] = tflite::testing::F2Q(
+        input_data[i], tflite::testing::kInputMin, tflite::testing::kInputMax);
   }
 }

 void QuantizeInputData(const float input_data[], int length,
                        int8_t* quantized_data) {
-  for (int i=0; i < 6; i++) {
-    quantized_data[i] = tflite::testing::F2QS(input_data[i],
-                                              tflite::testing::kInputMin,
-                                              tflite::testing::kInputMax);
+  for (int i = 0; i < 6; i++) {
+    quantized_data[i] = tflite::testing::F2QS(
+        input_data[i], tflite::testing::kInputMin, tflite::testing::kInputMax);
   }
 }

 TfLiteTensor CreateL2NormTensor(const float* data, TfLiteIntArray* dims,
-                                const char* name, bool is_input) {
-  return CreateFloatTensor(data, dims, name);
+                                bool is_input) {
+  return CreateFloatTensor(data, dims);
 }

 TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
-                                const char* name, bool is_input) {
+                                bool is_input) {
   TfLiteTensor tensor;
   if (is_input) {
-    tensor = CreateQuantizedTensor(data, dims, name, kInputMin, kInputMax);
+    tensor = CreateQuantizedTensor(data, dims, kInputMin, kInputMax);
   } else {
-    tensor = CreateQuantizedTensor(data, dims, name, kOutputMin, kOutputMax);
+    tensor = CreateQuantizedTensor(data, dims, kOutputMin, kOutputMax);
   }
   tensor.quantization.type = kTfLiteAffineQuantization;
@@ -69,13 +65,13 @@ TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
 }

 TfLiteTensor CreateL2NormTensor(const int8* data, TfLiteIntArray* dims,
-                                const char* name, bool is_input) {
+                                bool is_input) {
   TfLiteTensor tensor;
   if (is_input) {
-    tensor = CreateQuantizedTensor(data, dims, name, kInputMin, kInputMax);
+    tensor = CreateQuantizedTensor(data, dims, kInputMin, kInputMax);
   } else {
-    tensor = CreateQuantizedTensor(data, dims, name, kOutputMin, kOutputMax);
+    tensor = CreateQuantizedTensor(data, dims, kOutputMin, kOutputMax);
   }
   tensor.quantization.type = kTfLiteAffineQuantization;
@@ -87,19 +83,18 @@ inline float Dequantize(const T data, float scale, int32_t zero_point) {
   return scale * (data - zero_point);
 }

-template<typename T>
-void TestL2Normalization(const int* input_dims_data,
-                         const T* input_data,
-                         const float* expected_output_data,
-                         T* output_data, float variance) {
+template <typename T>
+void TestL2Normalization(const int* input_dims_data, const T* input_data,
+                         const float* expected_output_data, T* output_data,
+                         float variance) {
   TfLiteIntArray* dims = IntArrayFromInts(input_dims_data);
   const int output_dims_count = ElementCount(*dims);

   constexpr int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
-      CreateL2NormTensor(input_data, dims, "input_tensor", true),
-      CreateL2NormTensor(output_data, dims, "output_tensor", false),
+      CreateL2NormTensor(input_data, dims, true),
+      CreateL2NormTensor(output_data, dims, false),
   };
   TfLiteContext context;
@@ -110,7 +105,7 @@ void TestL2Normalization(const int* input_dims_data,
   TF_LITE_MICRO_EXPECT_NE(nullptr, registration);

   TfLiteL2NormParams builtin_data = {
       .activation = kTfLiteActNone,
   };
   int inputs_array_data[] = {1, 0};
@@ -158,22 +153,18 @@ void TestL2Normalization(const int* input_dims_data,
 }  // namespace testing
 }  // namespace tflite

 TF_LITE_MICRO_TESTS_BEGIN

 TF_LITE_MICRO_TEST(SimpleFloatTest) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  const float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  const float expected_output_data[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  const float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  const float expected_output_data[data_length] = {-0.55, 0.3,  0.35,
+                                                   0.6,  -0.35, 0.05};
   float output_data[data_length];

-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-                                              expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }

 TF_LITE_MICRO_TEST(ZerosVectorFloatTest) {
@@ -183,42 +174,39 @@ TF_LITE_MICRO_TEST(ZerosVectorFloatTest) {
   const float expected_output_data[data_length] = {0, 0, 0, 0, 0, 0};
   float output_data[data_length];

-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-                                              expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }

 TF_LITE_MICRO_TEST(SimpleFloatWithRankLessThanFourTest) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  const float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  const float expected_output_data[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  const float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  const float expected_output_data[data_length] = {-0.55, 0.3,  0.35,
+                                                   0.6,  -0.35, 0.05};
   float output_data[data_length];

-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-                                              expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }

 TF_LITE_MICRO_TEST(MultipleBatchFloatTest) {
   const int input_dims[] = {4, 3, 1, 1, 6};
   constexpr int data_length = 18;
   const float input_data[data_length] = {
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
   };
   const float expected_output_data[data_length] = {
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
   };
   float output_data[data_length];

-  tflite::testing::TestL2Normalization<float>(input_dims, input_data,
-                                              expected_output_data, output_data, 0);
+  tflite::testing::TestL2Normalization<float>(
+      input_dims, input_data, expected_output_data, output_data, 0);
 }

 TF_LITE_MICRO_TEST(ZerosVectorUint8Test) {
@@ -231,44 +219,36 @@ TF_LITE_MICRO_TEST(ZerosVectorUint8Test) {
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
-  tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
-                                                expected_output_data, output_data, .1);
+  tflite::testing::TestL2Normalization<uint8_t>(
+      input_dims, quantized_input, expected_output_data, output_data, .1);
 }

 TF_LITE_MICRO_TEST(SimpleUint8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  float expected_output[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  float expected_output[data_length] = {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05};
   uint8_t quantized_input[data_length];
   uint8_t output_data[data_length];

   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
-  tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
-                                                expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<uint8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }

 TF_LITE_MICRO_TEST(SimpleInt8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 6;
-  float input_data[data_length] = {
-    -1.1, 0.6, 0.7, 1.2, -0.7, 0.1
-  };
-  float expected_output[data_length] = {
-    -0.55, 0.3, 0.35, 0.6, -0.35, 0.05
-  };
+  float input_data[data_length] = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+  float expected_output[data_length] = {-0.55, 0.3, 0.35, 0.6, -0.35, 0.05};
   int8_t quantized_input[data_length];
   int8_t output_data[data_length];

   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
-  tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
-                                               expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<int8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }

 TF_LITE_MICRO_TEST(ZerosVectorInt8Test) {
@@ -281,52 +261,52 @@ TF_LITE_MICRO_TEST(ZerosVectorInt8Test) {
   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
-  tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
-                                               expected_output_data, output_data, .1);
+  tflite::testing::TestL2Normalization<int8_t>(
+      input_dims, quantized_input, expected_output_data, output_data, .1);
 }

 TF_LITE_MICRO_TEST(MultipleBatchUint8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 18;
   float input_data[data_length] = {
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
   };
   float expected_output[data_length] = {
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
   };
   uint8_t quantized_input[data_length];
   uint8_t output_data[data_length];

   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
-  tflite::testing::TestL2Normalization<uint8_t>(input_dims, quantized_input,
-                                                expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<uint8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }

 TF_LITE_MICRO_TEST(MultipleBatchInt8Test) {
   const int input_dims[] = {4, 1, 1, 1, 6};
   constexpr int data_length = 18;
   float input_data[data_length] = {
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 1
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 2
       -1.1, 0.6, 0.7, 1.2, -0.7, 0.1,  // batch 3
   };
   float expected_output[data_length] = {
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 1
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 2
       -0.55, 0.3, 0.35, 0.6, -0.35, 0.05,  // batch 3
   };
   int8_t quantized_input[data_length];
   int8_t output_data[data_length];

   tflite::testing::QuantizeInputData(input_data, data_length, quantized_input);
-  tflite::testing::TestL2Normalization<int8_t>(input_dims, quantized_input,
-                                               expected_output, output_data, .1);
+  tflite::testing::TestL2Normalization<int8_t>(
+      input_dims, quantized_input, expected_output, output_data, .1);
 }

 TF_LITE_MICRO_TESTS_END
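The CreateL2NormTensor overloads above keep TestL2Normalization type-generic: the element type selects the overload, and is_input selects the quantization range. A short usage sketch (the data pointers and dims are illustrative):

  // float overload: a plain float tensor; is_input is accepted but unused.
  TfLiteTensor in = CreateL2NormTensor(float_data, dims, /*is_input=*/true);
  // uint8/int8 overloads: inputs get (kInputMin, kInputMax), outputs get
  // (kOutputMin, kOutputMax), and the tensor is marked affine-quantized.
  TfLiteTensor out = CreateL2NormTensor(int8_data, dims, /*is_input=*/false);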

View File

@@ -39,9 +39,9 @@ void TestLogicalOp(tflite::BuiltinOperator op,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateBoolTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateBoolTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateBoolTensor(output_data, output_dims, "output_tensor"),
+      CreateBoolTensor(input1_data, input1_dims),
+      CreateBoolTensor(input2_data, input2_dims),
+      CreateBoolTensor(output_data, output_dims),
   };
   TfLiteContext context;

View File

@@ -36,8 +36,8 @@ void TestLogisticFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
@@ -96,10 +96,8 @@ void TestLogisticInt8(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;

View File

@@ -40,9 +40,9 @@ void TestMaxMinFloat(tflite::BuiltinOperator op,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
@@ -96,12 +96,9 @@ void TestMaxMinQuantized(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
-                            input1_min, input1_max),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
-                            input2_min, input2_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input1_data, input1_dims, input1_min, input1_max),
+      CreateQuantizedTensor(input2_data, input2_dims, input2_min, input2_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;
@@ -153,12 +150,9 @@ void TestMaxMinQuantizedInt32(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(input1_data, input1_dims, "input1_tensor",
-                              input1_scale),
-      CreateQuantized32Tensor(input2_data, input2_dims, "input2_tensor",
-                              input2_scale),
-      CreateQuantized32Tensor(output_data, output_dims, "output_tensor",
-                              output_scale),
+      CreateQuantized32Tensor(input1_data, input1_dims, input1_scale),
+      CreateQuantized32Tensor(input2_data, input2_dims, input2_scale),
+      CreateQuantized32Tensor(output_data, output_dims, output_scale),
   };
   TfLiteContext context;

View File

@@ -41,9 +41,9 @@ void TestMulFloat(std::initializer_list<int> input1_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
@@ -113,12 +113,9 @@ void TestMulQuantized(std::initializer_list<int> input1_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor",
-                            input_min, input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input1_data, input1_dims, input_min, input_max),
+      CreateQuantizedTensor(input2_data, input2_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;

View File

@@ -35,8 +35,8 @@ void TestNegFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;

View File

@@ -39,9 +39,9 @@ void TestPackTwoInputsFloat(std::initializer_list<int> input1_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims)};

   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -114,10 +114,10 @@ void TestPackThreeInputsFloat(std::initializer_list<int> input1_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(input3_data, input3_dims, "input3_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(input3_data, input3_dims),
+      CreateFloatTensor(output_data, output_dims)};

   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -189,9 +189,9 @@ void TestPackTwoInputsQuantized(
   TfLiteTensor tensors[tensors_size] = {
       // CreateQuantizedTensor needs min/max values as input, but these values
       // don't matter as to the functionality of PACK, so just set as 0 and 10.
-      CreateQuantizedTensor(input1_data, input1_dims, "input1_tensor", 0, 10),
-      CreateQuantizedTensor(input2_data, input2_dims, "input2_tensor", 0, 10),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor", 0, 10)};
+      CreateQuantizedTensor(input1_data, input1_dims, 0, 10),
+      CreateQuantizedTensor(input2_data, input2_dims, 0, 10),
+      CreateQuantizedTensor(output_data, output_dims, 0, 10)};

   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -259,9 +259,9 @@ void TestPackTwoInputsQuantized32(
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(input1_data, input1_dims, "input1_tensor", 1.0),
-      CreateQuantized32Tensor(input2_data, input2_dims, "input2_tensor", 1.0),
-      CreateQuantized32Tensor(output_data, output_dims, "output_tensor", 1.0)};
+      CreateQuantized32Tensor(input1_data, input1_dims, 1.0),
+      CreateQuantized32Tensor(input2_data, input2_dims, 1.0),
+      CreateQuantized32Tensor(output_data, output_dims, 1.0)};

   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {

View File

@@ -121,9 +121,9 @@ void TestPadFloat(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateInt32Tensor(pad_data, pad_dims),
+      CreateFloatTensor(output_data, output_dims)};

   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
@@ -149,10 +149,10 @@ void TestPadV2Float(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
-      CreateFloatTensor(&pad_value, pad_value_dims, "pad value tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateInt32Tensor(pad_data, pad_dims),
+      CreateFloatTensor(&pad_value, pad_value_dims),
+      CreateFloatTensor(output_data, output_dims)};

   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
@@ -179,10 +179,10 @@ void TestPadQuantized(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(pad_data, pad_dims),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor")};
+                            output_zero_point)};

   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
@@ -218,13 +218,12 @@ void TestPadV2Quantized(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
-      CreateInt32Tensor(pad_data, pad_dims, "padding tensor"),
+                            input_scale, input_zero_point),
+      CreateInt32Tensor(pad_data, pad_dims),
       CreateQuantizedTensor(&pad_value, &pad_value_quantized, pad_value_dims,
-                            pad_value_scale, pad_value_zero_point,
-                            "pad value tensor"),
+                            pad_value_scale, pad_value_zero_point),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor")};
+                            output_zero_point)};

   // Pad tensor must be constant.
   tensors[1].allocation_type = kTfLiteMmapRo;
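The allocation_type assignments above are untouched by this cleanup but explain the shape of the PAD tests: the padding tensor must be constant, and setting kTfLiteMmapRo is how these tests present it as read-only data. A sketch of the pattern against the new helper signature (names illustrative):

  TfLiteTensor pad = CreateInt32Tensor(pad_data, pad_dims);
  // Mark the padding tensor constant, as the kernel requires.
  pad.allocation_type = kTfLiteMmapRo;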

View File

@@ -42,8 +42,8 @@ void TestAveragePoolingFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
@@ -114,10 +114,8 @@ void TestAveragePoolingQuantized(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;
@@ -183,8 +181,8 @@ void TestMaxPoolFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
@@ -257,10 +255,8 @@ void TestMaxPoolQuantized(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;

View File

@@ -38,9 +38,9 @@ void TestPreluFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(alpha_data, alpha_dims, "alpha_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(alpha_data, alpha_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
@@ -102,12 +102,9 @@ void TestPreluQuantized(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(alpha_data, alpha_dims, "alpha_tensor", alpha_min,
-                            alpha_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(alpha_data, alpha_dims, alpha_min, alpha_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@@ -89,8 +89,8 @@ void TestQuantizeFloat(const int* input_dims_data, const float* input_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
   const int output_dims_count = ElementCount(*output_dims);

-  TfLiteTensor output_tensor = CreateQuantizedTensor(
-      output_data, output_dims, scale, zero_point, "output_tensor");
+  TfLiteTensor output_tensor =
+      CreateQuantizedTensor(output_data, output_dims, scale, zero_point);

   TfLiteAffineQuantization quant;
   float scales[] = {1, scale};
@@ -102,7 +102,7 @@ void TestQuantizeFloat(const int* input_dims_data, const float* input_data,
   // 1 input, 1 output.
   constexpr int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
+      CreateFloatTensor(input_data, input_dims),
       output_tensor,
   };
@@ -121,9 +121,8 @@ void TestRequantize(const int* input_dims_data, const float* input_data,
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
   const int output_dims_count = ElementCount(*output_dims);

-  TfLiteTensor output_tensor =
-      CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            output_zero_point, "output_tensor");
+  TfLiteTensor output_tensor = CreateQuantizedTensor(
+      output_data, output_dims, output_scale, output_zero_point);

   TfLiteAffineQuantization quant;
   float scales[] = {1, output_scale};
@@ -136,7 +135,7 @@ void TestRequantize(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = 2;
   TfLiteTensor tensors[tensors_size] = {
       CreateQuantizedTensor(input_data, input_quantized, input_dims,
-                            input_scale, input_zero_point, "input_tensor"),
+                            input_scale, input_zero_point),
       output_tensor,
   };
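Note that CreateQuantizedTensor only fills the per-tensor scale and zero point; the TfLiteAffineQuantization arrays are still attached by hand, as in the scales[]/quant lines above (the SVDF TODO later in this commit tracks folding that into tensor creation). A sketch of the recurring pattern, assuming the FloatArrayFromFloats/IntArrayFromInts test helpers and illustrative names:

  TfLiteAffineQuantization quant;
  float scales[] = {1, scale};          // element 0 holds the array length
  int zero_points[] = {1, zero_point};
  quant.scale = FloatArrayFromFloats(scales);
  quant.zero_point = IntArrayFromInts(zero_points);
  output_tensor.quantization = {kTfLiteAffineQuantization, &quant};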

View File

@@ -117,9 +117,9 @@ void TestMeanFloatInput4D(const int* input_dims_data, const float* input_data,
   constexpr int tensors_size = num_of_inputs + num_of_outputs;

   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(axis_data, axis_dims, "axis_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateInt32Tensor(axis_data, axis_dims),
+      CreateFloatTensor(output_data, output_dims),
   };

   TF_LITE_MICRO_EXPECT_EQ(

View File

@@ -116,18 +116,18 @@ void TestReshape(std::initializer_list<int> input_dims_data,
                  bool expect_failure = false) {
   TfLiteIntArray* input_dims = IntArrayFromInitializer(input_dims_data);
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
-  TfLiteTensor input_tensor = CreateTensor<T, tensor_input_type>(
-      input_data, input_dims, "input_tensor");
+  TfLiteTensor input_tensor =
+      CreateTensor<T, tensor_input_type>(input_data, input_dims);
   T* output_data = reinterpret_cast<T*>(output_data_raw);
-  TfLiteTensor output_tensor = CreateTensor<T, tensor_input_type>(
-      output_data, output_dims, "input_tensor");
+  TfLiteTensor output_tensor =
+      CreateTensor<T, tensor_input_type>(output_data, output_dims);
   // Reshape param is passed as op's param.
   TestReshapeImpl<T>(&input_tensor, nullptr, &output_tensor, expected_output,
                      expected_dims, expect_failure);
   // Reshape param is passed as a tensor.
   TfLiteIntArray* shape_dims = IntArrayFromInitializer(shape_dims_data);
-  auto shape_tensor = CreateTensor<int32_t, kTfLiteInt32>(
-      shape_data, shape_dims, "shape_tensor");
+  auto shape_tensor =
+      CreateTensor<int32_t, kTfLiteInt32>(shape_data, shape_dims);
   TestReshapeImpl<T>(&input_tensor, &shape_tensor, &output_tensor,
                      expected_output, expected_dims, expect_failure);
 }
@@ -194,12 +194,11 @@ TF_LITE_MICRO_TEST(InvalidShape) {
   using tflite::testing::IntArrayFromInts;
   TfLiteIntArray* input_dims = IntArrayFromInitializer({3, 1, 2, 2});
   auto input_data = {3.0f};
-  auto input_tensor = CreateFloatTensor(input_data, input_dims, "input_tensor");
+  auto input_tensor = CreateFloatTensor(input_data, input_dims);
   float output_data[4];
   int output_dims_data[6] = {2, 2, 1, 2, 2, 1};
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
-  auto output_tensor =
-      CreateFloatTensor(output_data, output_dims, "input_tensor");
+  auto output_tensor = CreateFloatTensor(output_data, output_dims);
   tflite::testing::TestReshapeImpl<float>(&input_tensor,   // input_tensor
                                           nullptr,         // shape_tensor
                                           &output_tensor,  // output_tensor
@@ -258,15 +257,14 @@ TF_LITE_MICRO_TEST(LegacyScalarOutput) {
   using tflite::testing::IntArrayFromInts;
   TfLiteIntArray* input_dims = IntArrayFromInitializer({1, 1});
   auto input_data = {3.0f};
-  auto input_tensor = CreateFloatTensor(input_data, input_dims, "input_tensor");
+  auto input_tensor = CreateFloatTensor(input_data, input_dims);
   float output_data[1];
   int output_dims_data[2] = {1, 0};
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
-  auto output_tensor =
-      CreateFloatTensor(output_data, output_dims, "input_tensor");
+  auto output_tensor = CreateFloatTensor(output_data, output_dims);
   TfLiteIntArray* shape_dims = tflite::testing::IntArrayFromInitializer({1, 0});
-  auto shape_tensor = tflite::testing::CreateTensor<int32_t, kTfLiteInt32>(
-      {0}, shape_dims, "shape_tensor");
+  auto shape_tensor =
+      tflite::testing::CreateTensor<int32_t, kTfLiteInt32>({0}, shape_dims);
   tflite::testing::TestReshapeImpl<float>(&input_tensor,   // input_tensor
                                           &shape_tensor,   // shape_tensor
                                           &output_tensor,  // output_tensor

View File

@@ -26,22 +26,18 @@ namespace {
 using uint8 = std::uint8_t;
 using int32 = std::int32_t;

-TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims,
-                              const char* name) {
-  return CreateFloatTensor(data, dims, name);
+TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims) {
+  return CreateFloatTensor(data, dims);
 }

-TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims,
-                              const char* name) {
-  return CreateQuantizedTensor(data, dims, name, 0, 255);
+TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims) {
+  return CreateQuantizedTensor(data, dims, 0, 255);
 }

-TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims,
-                              const char* name) {
-  return CreateQuantizedTensor(data, dims, name, -128, 127);
+TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims) {
+  return CreateQuantizedTensor(data, dims, -128, 127);
 }

 // Input data expects a 4-D tensor of [batch, height, width, channels]
 // Output data should match input data's batch and channels
 // Expected sizes should be a 1-D tensor with 2 elements: new_height & new_width
@@ -62,9 +58,9 @@ void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data,
   constexpr int tensors_size = 3;
   TfLiteTensor tensors[tensors_size] = {
-      TestCreateTensor(input_data, input_dims, "input_tensor"),
-      CreateInt32Tensor(expected_size_data, expected_size_dims, "size_tensor"),
-      TestCreateTensor(output_data, output_dims, "output_tensor"),
+      TestCreateTensor(input_data, input_dims),
+      CreateInt32Tensor(expected_size_data, expected_size_dims),
+      TestCreateTensor(output_data, output_dims),
   };
   TfLiteContext context;

View File

@@ -32,8 +32,8 @@ void TestRound(const int* input_dims_data, const float* input_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);

View File

@@ -36,8 +36,8 @@ void TestSoftmaxFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
@@ -100,10 +100,8 @@ void TestSoftmaxQuantized(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;
@@ -166,10 +164,8 @@ void TestSoftmaxQuantizedSigned(
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;

View File

@@ -45,10 +45,10 @@ void TestSplitTwoOutputsFloat(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
-      CreateFloatTensor(output2_data, output2_dims, "output2_tensor")};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output1_data, output1_dims),
+      CreateFloatTensor(output2_data, output2_dims)};

   // Currently only support constant axis tensor.
   tensors[0].allocation_type = kTfLiteMmapRo;
@@ -141,12 +141,12 @@ void TestSplitFourOutputsFloat(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
-      CreateFloatTensor(output2_data, output2_dims, "output2_tensor"),
-      CreateFloatTensor(output3_data, output1_dims, "output3_tensor"),
-      CreateFloatTensor(output4_data, output1_dims, "output4_tensor")};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output1_data, output1_dims),
+      CreateFloatTensor(output2_data, output2_dims),
+      CreateFloatTensor(output3_data, output1_dims),
+      CreateFloatTensor(output4_data, output1_dims)};

   // Currently only support constant axis tensor.
   tensors[0].allocation_type = kTfLiteMmapRo;
@@ -243,12 +243,10 @@ void TestSplitTwoOutputsQuantized(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", 0, 10),
-      CreateQuantizedTensor(output1_data, output1_dims, "output1_tensor", 0,
-                            10),
-      CreateQuantizedTensor(output2_data, output2_dims, "output2_tensor", 0,
-                            10)};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateQuantizedTensor(input_data, input_dims, 0, 10),
+      CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
+      CreateQuantizedTensor(output2_data, output2_dims, 0, 10)};

   // Currently only support constant axis tensor.
   tensors[0].allocation_type = kTfLiteMmapRo;
@@ -332,12 +330,10 @@ void TestSplitTwoOutputsQuantized32(
   constexpr int axis_size = 1;
   constexpr int tensors_size = input_size + output_size + axis_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(axis_data, axis_dims, "axis_tensor", 1.0),
-      CreateQuantized32Tensor(input_data, input_dims, "input_tensor", 1.0),
-      CreateQuantized32Tensor(output1_data, output1_dims, "output1_tensor",
-                              1.0),
-      CreateQuantized32Tensor(output2_data, output2_dims, "output2_tensor",
-                              1.0)};
+      CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
+      CreateQuantized32Tensor(input_data, input_dims, 1.0),
+      CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
+      CreateQuantized32Tensor(output2_data, output2_dims, 1.0)};

   // Currently only support constant axis tensor.
   tensors[0].allocation_type = kTfLiteMmapRo;

View File

@@ -25,15 +25,13 @@ namespace {
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
-                                 const char* name, bool is_variable = false) {
+                                 bool is_variable = false) {
   TfLiteTensor result;
   result.type = tensor_input_type;
   result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
   result.dims = dims;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(input_type);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
@@ -41,9 +39,9 @@ inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
-                                 TfLiteIntArray* dims, const char* name,
+                                 TfLiteIntArray* dims,
                                  bool is_variable = false) {
-  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
+  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims,
                                                      is_variable);
 }
@@ -73,15 +71,11 @@ void TestStrideSlide(std::initializer_list<int> input_shape,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateTensor<input_type, tensor_input_type>(input_data, input_dims,
-                                                  "input_tensor"),
-      CreateTensor<int32_t, kTfLiteInt32>(begin_data, begin_dims,
-                                          "begin_tensor"),
-      CreateTensor<int32_t, kTfLiteInt32>(end_data, end_dims, "end_tensor"),
-      CreateTensor<int32_t, kTfLiteInt32>(strides_data, strides_dims,
-                                          "stride_tensor"),
-      CreateTensor<input_type, tensor_input_type>(output_data, output_dims,
-                                                  "output_tensor"),
+      CreateTensor<input_type, tensor_input_type>(input_data, input_dims),
+      CreateTensor<int32_t, kTfLiteInt32>(begin_data, begin_dims),
+      CreateTensor<int32_t, kTfLiteInt32>(end_data, end_dims),
+      CreateTensor<int32_t, kTfLiteInt32>(strides_data, strides_dims),
+      CreateTensor<input_type, tensor_input_type>(output_data, output_dims),
   };
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
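The local CreateTensor above is the clearest picture of the commit's intent: with allocation and name gone, the helper initializes only the fields TFLM still reads. Hypothetical call sites mirroring TestStrideSlide:

  // Default: not a variable tensor.
  TfLiteTensor begin =
      CreateTensor<int32_t, kTfLiteInt32>(begin_data, begin_dims);
  // The optional flag survives the cleanup for stateful tensors.
  TfLiteTensor state = CreateTensor<float, kTfLiteFloat32>(
      state_data, state_dims, /*is_variable=*/true);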

View File

@@ -129,9 +129,9 @@ void TestSubFloat(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input1_data, input1_dims, "input1_tensor"),
-      CreateFloatTensor(input2_data, input2_dims, "input2_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input1_data, input1_dims),
+      CreateFloatTensor(input2_data, input2_dims),
+      CreateFloatTensor(output_data, output_dims),
   };

   ValidateSubGoldens(tensors, tensors_size, expected_output, output_data,
@@ -156,15 +156,14 @@ void TestSubQuantized(const int* input1_dims_data, const float* input1_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      tflite::testing::CreateQuantizedTensor(
-          input1_data, input1_quantized, input1_dims, input1_scale,
-          input1_zero_point, "input1_tensor"),
-      tflite::testing::CreateQuantizedTensor(
-          input2_data, input2_quantized, input2_dims, input2_scale,
-          input2_zero_point, "input2_tensor"),
-      tflite::testing::CreateQuantizedTensor(output_data, output_dims,
-                                             output_scale, output_zero_point,
-                                             "output_tensor"),
+      tflite::testing::CreateQuantizedTensor(input1_data, input1_quantized,
+                                             input1_dims, input1_scale,
+                                             input1_zero_point),
+      tflite::testing::CreateQuantizedTensor(input2_data, input2_quantized,
+                                             input2_dims, input2_scale,
+                                             input2_zero_point),
+      tflite::testing::CreateQuantizedTensor(output_data, output_dims,
+                                             output_scale, output_zero_point),
   };

   tflite::AsymmetricQuantize(golden, golden_quantized,
                              ElementCount(*output_dims), output_scale,

View File

@@ -341,13 +341,12 @@ void TestSVDF(const int batch_size, const int num_units, const int input_size,
   const int tensor_count = 5;  // 4 inputs, 1 output

   TfLiteTensor tensors[] = {
-      CreateFloatTensor(input_data, input_dims, "input"),
-      CreateFloatTensor(weights_feature_data, weights_feature_dims,
-                        "weights_feature"),
-      CreateFloatTensor(weights_time_data, weights_time_dims, "weights_time"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(weights_feature_data, weights_feature_dims),
+      CreateFloatTensor(weights_time_data, weights_time_dims),
       CreateFloatTensor(activation_state_data, activation_state_dims,
-                        "activation_state", true /* is_variable */),
-      CreateFloatTensor(output_data, output_dims, "output"),
+                        /*is_variable=*/true),
+      CreateFloatTensor(output_data, output_dims),
   };

   ValidateSVDFGoldens(batch_size, num_units, input_size, rank, tensors,
@@ -393,19 +392,17 @@ inline void TestIntegerSVDF(
   TfLiteTensor tensors[] = {
       CreateQuantizedTensor(input_data, input_dims, input_scale,
-                            0 /* zero-point */, "input"),
+                            /*zero_point=*/0),
       CreateQuantizedTensor(weights_feature_data, weights_feature_dims,
-                            weights_feature_scale, 0 /* zero-point */,
-                            "weights_feature"),
+                            weights_feature_scale, /*zero_point=*/0),
       CreateQuantizedTensor(weights_time_data, weights_time_dims,
-                            weights_time_scale, 0 /* zero-point */,
-                            "weights_time"),
-      CreateQuantized32Tensor(bias_data, bias_dims, "bias", bias_scale),
+                            weights_time_scale, /*zero_point=*/0),
+      CreateQuantized32Tensor(bias_data, bias_dims, bias_scale),
       CreateQuantizedTensor(activation_state_data, activation_state_dims,
-                            activation_scale, 0 /* zero-point */,
-                            "activation_state", true /* is_variable */),
+                            activation_scale, /*zero_point=*/0,
+                            /*is_variable=*/true),
       CreateQuantizedTensor(output_data, output_dims, output_scale,
-                            0 /* zero-point */, "output")};
+                            /*zero_point=*/0)};

   // TODO(b/147839421): Affine Quantization Params should be set on tensor
   // creation.
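SVDF is the one kernel in this commit with a stateful tensor, so the activation state keeps the optional trailing is_variable flag, now the last parameter since the name is gone. A sketch with the updated signature (arguments other than the flags are illustrative):

  TfLiteTensor state =
      CreateQuantizedTensor(state_data, state_dims, activation_scale,
                            /*zero_point=*/0, /*is_variable=*/true);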

View File

@@ -36,8 +36,8 @@ void TestTanhFloat(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor"),
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims),
   };
   TfLiteContext context;
@@ -96,10 +96,8 @@ void TestTanhInt8(std::initializer_list<int> input_dims_data,
   constexpr int outputs_size = 1;
   constexpr int tensors_size = inputs_size + outputs_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", input_min,
-                            input_max),
-      CreateQuantizedTensor(output_data, output_dims, "output_tensor",
-                            output_min, output_max),
+      CreateQuantizedTensor(input_data, input_dims, input_min, input_max),
+      CreateQuantizedTensor(output_data, output_dims, output_min, output_max),
   };
   TfLiteContext context;


@@ -45,10 +45,10 @@ void TestUnpackThreeOutputsFloat(
   constexpr int output_size = 3;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output1_data, output1_dims, "output1_tensor"),
-      CreateFloatTensor(output2_data, output2_dims, "output2_tensor"),
-      CreateFloatTensor(output3_data, output3_dims, "output3_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output1_data, output1_dims),
+      CreateFloatTensor(output2_data, output2_dims),
+      CreateFloatTensor(output3_data, output3_dims)};
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output1_dims_count; ++i) {
@@ -132,8 +132,8 @@ void TestUnpackOneOutputFloat(std::initializer_list<int> input_dims_data,
   constexpr int output_size = 1;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateFloatTensor(input_data, input_dims, "input_tensor"),
-      CreateFloatTensor(output_data, output_dims, "output_tensor")};
+      CreateFloatTensor(input_data, input_dims),
+      CreateFloatTensor(output_data, output_dims)};
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output_dims_count; ++i) {
@@ -211,13 +211,10 @@ void TestUnpackThreeOutputsQuantized(
       // CreateQuantizedTensor needs min/max values as input, but these values
       // don't matter as to the functionality of UNPACK, so just set as 0
       // and 10.
-      CreateQuantizedTensor(input_data, input_dims, "input_tensor", 0, 10),
-      CreateQuantizedTensor(output1_data, output1_dims, "output1_tensor", 0,
-                            10),
-      CreateQuantizedTensor(output2_data, output2_dims, "output2_tensor", 0,
-                            10),
-      CreateQuantizedTensor(output3_data, output3_dims, "output3_tensor", 0,
-                            10)};
+      CreateQuantizedTensor(input_data, input_dims, 0, 10),
+      CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
+      CreateQuantizedTensor(output2_data, output2_dims, 0, 10),
+      CreateQuantizedTensor(output3_data, output3_dims, 0, 10)};
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output1_dims_count; ++i) {
@@ -307,13 +304,10 @@ void TestUnpackThreeOutputsQuantized32(
   constexpr int output_size = 3;
   constexpr int tensors_size = input_size + output_size;
   TfLiteTensor tensors[tensors_size] = {
-      CreateQuantized32Tensor(input_data, input_dims, "input_tensor", 1.0),
-      CreateQuantized32Tensor(output1_data, output1_dims, "output1_tensor",
-                              1.0),
-      CreateQuantized32Tensor(output2_data, output2_dims, "output2_tensor",
-                              1.0),
-      CreateQuantized32Tensor(output3_data, output3_dims, "output3_tensor",
-                              1.0)};
+      CreateQuantized32Tensor(input_data, input_dims, 1.0),
+      CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
+      CreateQuantized32Tensor(output2_data, output2_dims, 1.0),
+      CreateQuantized32Tensor(output3_data, output3_dims, 1.0)};
   // Place a unique value in the uninitialized output buffer.
   for (int i = 0; i < output1_dims_count; ++i) {


@@ -426,9 +426,6 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
     result->quantization = {kTfLiteAffineQuantization, quantization};
   }
-  if (flatbuffer_tensor.name() != nullptr) {
-    result->name = flatbuffer_tensor.name()->c_str();
-  }
   return kTfLiteOk;
 }
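
With the name no longer copied onto TfLiteTensor, code that wants a tensor's name for debugging can read it from the flatbuffer model directly. A hedged sketch using the generated flatbuffer accessors; `model` and `tensor_index` are assumed to be in scope:

// Assumes `model` is a const tflite::Model* and `tensor_index` indexes
// the tensors of subgraph 0; falls back when the model carries no names.
const tflite::SubGraph* subgraph = model->subgraphs()->Get(0);
const tflite::Tensor* fb_tensor = subgraph->tensors()->Get(tensor_index);
const char* name =
    (fb_tensor->name() != nullptr) ? fb_tensor->name()->c_str() : "<unnamed>";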


@@ -124,10 +124,9 @@ void PrintInterpreterState(MicroInterpreter* interpreter) {
   for (size_t tensor_index = 0; tensor_index < interpreter->tensors_size();
        tensor_index++) {
     TfLiteTensor* tensor = interpreter->tensor(static_cast<int>(tensor_index));
-    printf("Tensor %3zu %-20s %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
-           tensor->name, TensorTypeName(tensor->type),
-           AllocTypeName(tensor->allocation_type), tensor->bytes,
-           static_cast<double>(tensor->bytes / (1 << 20)));
+    printf("Tensor %3zu %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
+           TensorTypeName(tensor->type), AllocTypeName(tensor->allocation_type),
+           tensor->bytes, static_cast<double>(tensor->bytes / (1 << 20)));
     PrintTfLiteIntVector(tensor->dims);
   }
   printf("\n");


@@ -758,22 +758,19 @@ TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
   return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
 }
-TfLiteTensor CreateTensor(TfLiteIntArray* dims, const char* name,
-                          bool is_variable) {
+TfLiteTensor CreateTensor(TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   result.dims = dims;
-  result.name = name;
   result.params = {};
   result.quantization = {kTfLiteNoQuantization, nullptr};
   result.is_variable = is_variable;
   result.allocation_type = kTfLiteMemNone;
-  result.allocation = nullptr;
   return result;
 }
 TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
-                               const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                               bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteFloat32;
   result.data.f = const_cast<float*>(data);
   result.bytes = ElementCount(*dims) * sizeof(float);
@@ -789,8 +786,8 @@ void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end) {
 }
 TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
-                              const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                              bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteBool;
   result.data.b = const_cast<bool*>(data);
   result.bytes = ElementCount(*dims) * sizeof(bool);
@@ -798,8 +795,8 @@ TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
 }
 TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                               const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                               bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(data);
   result.bytes = ElementCount(*dims) * sizeof(int32_t);
@@ -808,8 +805,8 @@ TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                                   bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteUInt8;
   result.data.uint8 = const_cast<uint8_t*>(data);
   result.params = {scale, zero_point};
@@ -820,8 +817,8 @@ TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                                   bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt8;
   result.data.int8 = const_cast<int8_t*>(data);
   result.params = {scale, zero_point};
@@ -832,8 +829,8 @@ TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
 TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+                                   bool is_variable) {
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt16;
   result.data.i16 = const_cast<int16_t*>(data);
   result.params = {scale, zero_point};
@@ -842,38 +839,30 @@ TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
   return result;
 }
-TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                                     float scale, const char* name,
-                                     bool is_variable) {
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
+                                       TfLiteIntArray* dims, float input_scale,
+                                       float weights_scale, bool is_variable) {
+  float bias_scale = input_scale * weights_scale;
+  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt32;
-  result.data.i32 = const_cast<int32_t*>(data);
+  result.data.i32 = const_cast<int32_t*>(quantized);
   // Quantized int32 tensors always have a zero point of 0, since the range of
   // int32 values is large, and because zero point costs extra cycles during
   // processing.
-  result.params = {scale, 0};
+  result.params = {bias_scale, 0};
   result.quantization = {kTfLiteAffineQuantization, nullptr};
   result.bytes = ElementCount(*dims) * sizeof(int32_t);
   return result;
 }
-TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
-                                       TfLiteIntArray* dims, float input_scale,
-                                       float weights_scale, const char* name,
-                                       bool is_variable) {
-  float bias_scale = input_scale * weights_scale;
-  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
-  return CreateQuantized32Tensor(quantized, dims, bias_scale, name,
-                                 is_variable);
-}
 // Quantizes int32 bias tensor with per-channel weights determined by input
 // scale multiplied by weight scale for each channel.
 TfLiteTensor CreatePerChannelQuantizedBiasTensor(
     const float* input, int32_t* quantized, TfLiteIntArray* dims,
     float input_scale, float* weight_scales, float* scales, int* zero_points,
     TfLiteAffineQuantization* affine_quant, int quantized_dimension,
-    const char* name, bool is_variable) {
+    bool is_variable) {
   int input_size = ElementCount(*dims);
   int num_channels = dims->data[quantized_dimension];
   // First element is reserved for array length
@@ -892,7 +881,7 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
   affine_quant->zero_point = IntArrayFromInts(zero_points);
   affine_quant->quantized_dimension = quantized_dimension;
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(quantized);
   result.quantization = {kTfLiteAffineQuantization, affine_quant};
@@ -903,7 +892,7 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
 TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
     const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
     int* zero_points, TfLiteAffineQuantization* affine_quant,
-    int quantized_dimension, const char* name, bool is_variable) {
+    int quantized_dimension, bool is_variable) {
   int channel_count = dims->data[quantized_dimension];
   scales[0] = static_cast<float>(channel_count);
   zero_points[0] = channel_count;
@@ -919,7 +908,7 @@ TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
   affine_quant->zero_point = IntArrayFromInts(zero_points);
   affine_quant->quantized_dimension = quantized_dimension;
-  TfLiteTensor result = CreateTensor(dims, name, is_variable);
+  TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt8;
   result.data.int8 = const_cast<int8_t*>(quantized);
   result.quantization = {kTfLiteAffineQuantization, affine_quant};
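
The consolidated CreateQuantizedBiasTensor makes the bias scale explicit: bias_scale = input_scale * weights_scale, and each float value quantizes to round(value / bias_scale). A hedged worked example; the arrays are illustrative, and the 0.5 * 0.5 scales mirror the unit test further down:

float input_scale = 0.5f;
float weight_scale = 0.5f;  // bias_scale = 0.5f * 0.5f = 0.25f
int dims_data[] = {1, 4};   // {rank, dim0}
TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);
float bias_values[] = {-10.0f, -1.0f, 0.0f, 10.0f};
int32_t bias_quantized[4];
// -10.0f / 0.25f == -40, so bias.data.i32[0] == -40 after quantization.
TfLiteTensor bias = tflite::testing::CreateQuantizedBiasTensor(
    bias_values, bias_quantized, dims, input_scale, weight_scale);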


@@ -117,42 +117,40 @@ TfLiteIntArray* IntArrayFromInts(const int* int_array);
 TfLiteFloatArray* FloatArrayFromFloats(const float* floats);
 TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
-                               const char* name, bool is_variable = false);
+                               bool is_variable = false);
 void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end);
 TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
-                              const char* name, bool is_variable = false);
+                              bool is_variable = false);
 TfLiteTensor CreateInt32Tensor(const int32_t*, TfLiteIntArray* dims,
-                               const char* name, bool is_variable = false);
+                               bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable = false);
+                                   bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable = false);
+                                   bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
                                    float scale, int zero_point,
-                                   const char* name, bool is_variable = false);
+                                   bool is_variable = false);
 template <typename T>
 TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized,
                                    TfLiteIntArray* dims, float scale,
-                                   int zero_point, const char* name,
-                                   bool is_variable = false) {
+                                   int zero_point, bool is_variable = false) {
   int input_size = ElementCount(*dims);
   tflite::AsymmetricQuantize(input, quantized, input_size, scale, zero_point);
-  return CreateQuantizedTensor(quantized, dims, scale, zero_point, name,
-                               is_variable);
+  return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable);
 }
 TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                        TfLiteIntArray* dims, float input_scale,
-                                       float weights_scale, const char* name,
+                                       float weights_scale,
                                        bool is_variable = false);
 // Quantizes int32 bias tensor with per-channel weights determined by input
@@ -161,12 +159,12 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
     const float* input, int32_t* quantized, TfLiteIntArray* dims,
     float input_scale, float* weight_scales, float* scales, int* zero_points,
     TfLiteAffineQuantization* affine_quant, int quantized_dimension,
-    const char* name, bool is_variable = false);
+    bool is_variable = false);
 TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
     const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
     int* zero_points, TfLiteAffineQuantization* affine_quant,
-    int quantized_dimension, const char* name, bool is_variable = false);
+    int quantized_dimension, bool is_variable = false);
 }  // namespace testing
 }  // namespace tflite
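
The templated overload quantizes float input and forwards to the matching typed overload, now without a name at either step. A short hedged usage sketch; the data and scale are illustrative:

float values[] = {-1.0f, -0.5f, 0.5f, 1.0f};
int8_t quantized[4];
int dims_data[] = {1, 4};  // {rank, dim0}
TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);
// T is deduced as int8_t; dispatches to the int8 overload after quantizing.
TfLiteTensor t = tflite::testing::CreateQuantizedTensor(
    values, quantized, dims, /*scale=*/0.01f, /*zero_point=*/0);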


@@ -149,20 +149,17 @@ void PopulateContext(TfLiteTensor* tensors, int tensors_size,
 }
 TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
-                               TfLiteIntArray* dims, const char* name,
-                               bool is_variable) {
-  return CreateFloatTensor(data.begin(), dims, name, is_variable);
+                               TfLiteIntArray* dims, bool is_variable) {
+  return CreateFloatTensor(data.begin(), dims, is_variable);
 }
 TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
-                              TfLiteIntArray* dims, const char* name,
-                              bool is_variable) {
-  return CreateBoolTensor(data.begin(), dims, name, is_variable);
+                              TfLiteIntArray* dims, bool is_variable) {
+  return CreateBoolTensor(data.begin(), dims, is_variable);
 }
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
-                                   bool is_variable) {
+                                   float min, float max, bool is_variable) {
   TfLiteTensor result;
   result.type = kTfLiteUInt8;
   result.data.uint8 = const_cast<uint8_t*>(data);
@@ -171,21 +168,18 @@ TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                       ZeroPointFromMinMax<uint8_t>(min, max)};
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(uint8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = false;
   return result;
 }
 TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   float min, float max, bool is_variable) {
-  return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
+                                   TfLiteIntArray* dims, float min, float max,
+                                   bool is_variable) {
+  return CreateQuantizedTensor(data.begin(), dims, min, max, is_variable);
 }
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
-                                   bool is_variable) {
+                                   float min, float max, bool is_variable) {
   TfLiteTensor result;
   result.type = kTfLiteInt8;
   result.data.int8 = const_cast<int8_t*>(data);
@@ -194,21 +188,18 @@ TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                       ZeroPointFromMinMax<int8_t>(min, max)};
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   float min, float max, bool is_variable) {
-  return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
+                                   TfLiteIntArray* dims, float min, float max,
+                                   bool is_variable) {
+  return CreateQuantizedTensor(data.begin(), dims, min, max, is_variable);
 }
 TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   bool is_variable) {
+                                   TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   SymmetricQuantize(data, dims, quantized_data, &result.params.scale);
   result.data.uint8 = quantized_data;
@@ -217,15 +208,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
   result.params.zero_point = 128;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(uint8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   bool is_variable) {
+                                   TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
   result.data.int8 = quantized_data;
@@ -234,15 +222,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
   result.params.zero_point = 0;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int8_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   bool is_variable) {
+                                   TfLiteIntArray* dims, bool is_variable) {
   TfLiteTensor result;
   SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
   result.data.i16 = quantized_data;
@@ -251,15 +236,12 @@ TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
   result.params.zero_point = 0;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int16_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                                     const char* name, float scale,
-                                     bool is_variable) {
+                                     float scale, bool is_variable) {
   TfLiteTensor result;
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(data);
@@ -270,16 +252,14 @@ TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
   result.params = {scale, 0};
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(int32_t);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
 TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
-                                     TfLiteIntArray* dims, const char* name,
-                                     float scale, bool is_variable) {
-  return CreateQuantized32Tensor(data.begin(), dims, name, scale, is_variable);
+                                     TfLiteIntArray* dims, float scale,
+                                     bool is_variable) {
+  return CreateQuantized32Tensor(data.begin(), dims, scale, is_variable);
 }
 }  // namespace testing


@@ -80,63 +80,56 @@ void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                      ErrorReporter* error_reporter, TfLiteContext* context);
 TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
-                               TfLiteIntArray* dims, const char* name,
-                               bool is_variable = false);
+                               TfLiteIntArray* dims, bool is_variable = false);
 TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
-                              TfLiteIntArray* dims, const char* name,
-                              bool is_variable = false);
+                              TfLiteIntArray* dims, bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
+                                   float min, float max,
                                    bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
-                                   float min, float max,
+                                   TfLiteIntArray* dims, float min, float max,
                                    bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
-                                   const char* name, float min, float max,
-                                   bool is_variable = false);
-TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
-                                   TfLiteIntArray* dims, const char* name,
                                    float min, float max,
                                    bool is_variable = false);
+TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
+                                   TfLiteIntArray* dims, float min, float max,
+                                   bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
+                                   TfLiteIntArray* dims,
                                    bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
+                                   TfLiteIntArray* dims,
                                    bool is_variable = false);
 TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
-                                   TfLiteIntArray* dims, const char* name,
+                                   TfLiteIntArray* dims,
                                    bool is_variable = false);
 TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
-                                     const char* name, float scale,
-                                     bool is_variable = false);
+                                     float scale, bool is_variable = false);
 TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
-                                     TfLiteIntArray* dims, const char* name,
-                                     float scale, bool is_variable = false);
+                                     TfLiteIntArray* dims, float scale,
+                                     bool is_variable = false);
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
-                                 const char* name, bool is_variable = false) {
+                                 bool is_variable = false) {
   TfLiteTensor result;
   result.type = tensor_input_type;
   result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
   result.dims = dims;
   result.allocation_type = kTfLiteMemNone;
   result.bytes = ElementCount(*dims) * sizeof(input_type);
-  result.allocation = nullptr;
-  result.name = name;
   result.is_variable = is_variable;
   return result;
 }
@@ -144,9 +137,9 @@ inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
 template <typename input_type = int32_t,
           TfLiteType tensor_input_type = kTfLiteInt32>
 inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
-                                 TfLiteIntArray* dims, const char* name,
+                                 TfLiteIntArray* dims,
                                  bool is_variable = false) {
-  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
+  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims,
                                                      is_variable);
 }
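
For completeness, a hedged sketch of the trimmed CreateTensor template in use; the data and dims are illustrative, and IntArrayFromInts is assumed to be available as in the other helpers:

const int32_t data[] = {1, 2, 3, 4};
int dims_data[] = {1, 4};  // {rank, dim0}
TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);
// input_type is deduced as int32_t; tensor_input_type defaults to kTfLiteInt32.
TfLiteTensor t = tflite::testing::CreateTensor(data, dims);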


@@ -23,7 +23,6 @@ TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) {
   float weight_scale = 0.5;
   constexpr int tensor_size = 12;
   int dims_arr[] = {4, 2, 3, 2, 1};
-  const char* tensor_name = "test_tensor";
   int32_t quantized[tensor_size];
   float pre_quantized[] = {-10, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 10};
   int32_t expected_quantized_values[] = {-40, -20, -16, -12, -8, -4,
@@ -31,11 +30,10 @@ TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) {
   TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);
   TfLiteTensor result = tflite::testing::CreateQuantizedBiasTensor(
-      pre_quantized, quantized, dims, input_scale, weight_scale, tensor_name);
+      pre_quantized, quantized, dims, input_scale, weight_scale);
   TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
   TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
-  TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
   TF_LITE_MICRO_EXPECT_EQ(result.params.scale, input_scale * weight_scale);
   for (int i = 0; i < tensor_size; i++) {
     TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
@@ -48,7 +46,6 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
   constexpr int tensor_size = 12;
   const int channels = 4;
   int dims_arr[] = {4, 4, 3, 1, 1};
-  const char* tensor_name = "test_tensor";
   int32_t quantized[tensor_size];
   float scales[channels + 1];
   int zero_points[] = {4, 0, 0, 0, 0};
@@ -60,7 +57,7 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
   TfLiteAffineQuantization quant;
   TfLiteTensor result = tflite::testing::CreatePerChannelQuantizedBiasTensor(
       pre_quantized, quantized, dims, input_scale, weight_scales, scales,
-      zero_points, &quant, 0, tensor_name);
+      zero_points, &quant, 0);
   // Values in scales array start at index 1 since index 0 is dedicated to
   // tracking the tensor size.
@@ -70,7 +67,6 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
   TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
   TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
-  TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
   for (int i = 0; i < tensor_size; i++) {
     TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
   }
@@ -80,7 +76,6 @@ TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) {
   const int tensor_size = 12;
   constexpr int channels = 2;
   const int dims_arr[] = {4, channels, 3, 2, 1};
-  const char* tensor_name = "test_tensor";
   int8_t quantized[12];
   const float pre_quantized[] = {-127, -55, -4, -3, -2, -1,
                                  0,    1,  2,  3,  4,  63.5};
@@ -94,12 +89,10 @@ TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) {
   TfLiteAffineQuantization quant;
   TfLiteTensor result =
       tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
-          pre_quantized, quantized, dims, scales, zero_points, &quant, 0,
-          "test_tensor");
+          pre_quantized, quantized, dims, scales, zero_points, &quant, 0);
   TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int8_t));
   TF_LITE_MICRO_EXPECT_EQ(result.dims, dims);
-  TF_LITE_MICRO_EXPECT_EQ(result.name, tensor_name);
   TfLiteFloatArray* result_scales =
       static_cast<TfLiteAffineQuantization*>(result.quantization.params)->scale;
   for (int i = 0; i < channels; i++) {