Refactor micro_utils and test_helpers to use template methods.
PiperOrigin-RevId: 337845815
Change-Id: I013df3bf64b289fcde4a7c661fec53eaadbbb313
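The pattern repeated throughout the diff below: the type-specific test and quantization helpers (CreateFloatTensor, CreateInt32Tensor, CreateBoolTensor, AsymmetricQuantize, AsymmetricDequantize, FloatToAsymmetricQuantizedInt8/UInt8) are replaced by templates whose element type is deduced from the arguments (CreateTensor, Quantize, Dequantize<T>, FloatToQuantizedType<T>). A minimal before/after sketch of a typical call site, assembled from the hunks below (variable names here are illustrative only):

// Before: one helper per tensor type, one quantize routine per direction.
TfLiteTensor tensors[] = {
    CreateFloatTensor(input_data, input_dims),
    CreateInt32Tensor(axis_data, axis_dims),
    CreateFloatTensor(output_data, output_dims),
};
AsymmetricQuantize(golden, golden_quantized, output_len, scale, zero_point);

// After: a single template, specialized by the pointer's element type.
TfLiteTensor tensors[] = {
    CreateTensor(input_data, input_dims),
    CreateTensor(axis_data, axis_dims),
    CreateTensor(output_data, output_dims),
};
Quantize(golden, golden_quantized, output_len, scale, zero_point);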
parent ba79107f74
commit 4a988e4792

Changed files:
tensorflow/lite/micro/BUILD
tensorflow/lite/micro/kernels/BUILD
tensorflow/lite/micro/kernels/: activations.cc, activations_test.cc, add_test.cc, arg_min_max_test.cc, ceil_test.cc, comparisons_test.cc, concatenation_test.cc, conv_test.cc, depthwise_conv_test.cc, dequantize_test.cc, elementwise_test.cc, floor_test.cc, fully_connected_test.cc, hard_swish_test.cc, l2norm_test.cc, logical_test.cc, logistic_test.cc, maximum_minimum_test.cc, mul_test.cc, neg_test.cc, pack_test.cc, pad_test.cc, pooling_test.cc, prelu_test.cc, quantize_test.cc, reduce_test.cc, reshape_test.cc, resize_nearest_neighbor_test.cc, round_test.cc, shape_test.cc, softmax_test.cc, split_test.cc, split_v_test.cc, strided_slice_test.cc, sub_test.cc, svdf_test.cc, tanh_test.cc, unpack_test.cc
tensorflow/lite/micro/: memory_helpers_test.cc, micro_utils.cc, micro_utils.h, micro_utils_test.cc, test_helpers.cc, test_helpers.h

tensorflow/lite/micro/BUILD
@@ -81,6 +81,7 @@ cc_library(
deps = [
":micro_utils",
":op_resolvers",
"//tensorflow/lite:type_to_tflitetype",
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
"//tensorflow/lite/kernels:kernel_util",

tensorflow/lite/micro/kernels/BUILD
@@ -216,7 +216,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -231,7 +230,6 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels/internal:tensor",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -243,10 +241,8 @@ tflite_micro_cc_test(
"fully_connected_test.cc",
],
deps = [
":fully_connected",
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_utils",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",

@@ -290,7 +286,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -305,7 +300,6 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_utils",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -319,7 +313,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -389,7 +382,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -403,7 +395,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -431,7 +422,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -488,7 +478,6 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:debug_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -503,7 +492,6 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:debug_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -582,8 +570,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -597,8 +583,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -644,9 +628,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels/internal:tensor",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:micro_utils",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -674,7 +656,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -688,7 +669,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",

@@ -758,8 +738,6 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],

@@ -771,7 +749,18 @@ tflite_micro_cc_test(
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)

cc_test(
name = "shape_test",
srcs = ["shape_test.cc"],
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
tensorflow/lite/micro/kernels/activations.cc
@@ -205,12 +205,12 @@ TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, input != nullptr);

if (input->type == kTfLiteInt8) {
- data->six_int8 = FloatToAsymmetricQuantizedInt8(6.0f, input->params.scale,
-                  input->params.zero_point);
+ data->six_int8 = FloatToQuantizedType<int8_t>(6.0f, input->params.scale,
+                  input->params.zero_point);
data->zero_int8 = input->params.zero_point;
} else if (input->type == kTfLiteUInt8) {
- data->six_uint8 = FloatToAsymmetricQuantizedUInt8(6.0f, input->params.scale,
-                   input->params.zero_point);
+ data->six_uint8 = FloatToQuantizedType<uint8_t>(6.0f, input->params.scale,
+                   input->params.zero_point);
data->zero_uint8 = input->params.zero_point;
}
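Relu6Prepare now quantizes its clamp value through FloatToQuantizedType<T> for both int8 and uint8. A rough sketch of what such a templated helper could look like; this is an assumption inferred from the call sites, and the checked-in version in micro_utils.h may handle rounding and saturation differently:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

template <typename T>
T FloatToQuantizedType(float value, float scale, int zero_point) {
  // Round to nearest, shift by the zero point, then clamp to T's range.
  int32_t result =
      zero_point + static_cast<int32_t>(std::round(value / scale));
  result = std::max<int32_t>(std::numeric_limits<T>::min(), result);
  result = std::min<int32_t>(std::numeric_limits<T>::max(), result);
  return static_cast<T>(result);
}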
tensorflow/lite/micro/kernels/activations_test.cc
@@ -35,8 +35,8 @@ void TestReluFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {1, 0};

@@ -68,8 +68,8 @@ void TestRelu6Float(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {1, 0};

@@ -123,8 +123,8 @@ void TestReluUint8(const int* input_dims_data, const float* input_data,
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

- AsymmetricQuantize(golden, golden_quantized, output_elements_count,
-                    output_scale, output_zero_point);
+ Quantize(golden, golden_quantized, output_elements_count, output_scale,
+          output_zero_point);

for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);

@@ -164,8 +164,8 @@ void TestRelu6Uint8(const int* input_dims_data, const float* input_data,
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

- AsymmetricQuantize(golden, golden_quantized, output_elements_count,
-                    output_scale, output_zero_point);
+ Quantize(golden, golden_quantized, output_elements_count, output_scale,
+          output_zero_point);

for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);

@@ -204,8 +204,8 @@ void TestReluInt8(const int* input_dims_data, const float* input_data,
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

- AsymmetricQuantize(golden, golden_quantized, output_elements_count,
-                    output_scale, output_zero_point);
+ Quantize(golden, golden_quantized, output_elements_count, output_scale,
+          output_zero_point);

for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);

@@ -244,8 +244,8 @@ void TestRelu6Int8(const int* input_dims_data, const float* input_data,
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

- AsymmetricQuantize(golden, golden_quantized, output_elements_count,
-                    output_scale, output_zero_point);
+ Quantize(golden, golden_quantized, output_elements_count, output_scale,
+          output_zero_point);

for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);
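The test hunks above (and all the ones that follow) swap CreateFloatTensor/CreateInt32Tensor/CreateBoolTensor for a single CreateTensor call. The "//tensorflow/lite:type_to_tflitetype" dependency added to the cc_library in the BUILD hunk suggests the element type is mapped to a TfLiteType at compile time. A sketch of the unified helper under that assumption; field-by-field construction here is hypothetical and the real test_helpers.h version may differ:

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_utils.h"  // for ElementCount
#include "tensorflow/lite/type_to_tflitetype.h"

namespace tflite {
namespace testing {

template <typename T>
TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims,
                          bool is_variable = false) {
  TfLiteTensor result = {};
  result.dims = dims;
  result.data.data = const_cast<T*>(data);
  // Deduce the tensor type from the buffer's element type
  // (float -> kTfLiteFloat32, int32_t -> kTfLiteInt32, bool -> kTfLiteBool).
  result.type = typeToTfLiteType<T>();
  result.bytes = ElementCount(*dims) * sizeof(T);
  result.is_variable = is_variable;
  return result;
}

}  // namespace testing
}  // namespace tflite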
tensorflow/lite/micro/kernels/add_test.cc
@@ -100,9 +100,9 @@ void TestAddFloat(const int* input1_dims_data, const float* input1_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input1_data, input1_dims),
- CreateFloatTensor(input2_data, input2_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

ValidateAddGoldens(tensors, tensors_size, expected_output, output_data,

@@ -136,9 +136,8 @@ void TestAddQuantized(const int* input1_dims_data, const float* input1_data,
tflite::testing::CreateQuantizedTensor(output_data, output_dims,
                                       output_scale, output_zero_point),
};
- tflite::AsymmetricQuantize(golden, golden_quantized,
-                            ElementCount(*output_dims), output_scale,
-                            output_zero_point);
+ tflite::Quantize(golden, golden_quantized, ElementCount(*output_dims),
+                  output_scale, output_zero_point);

ValidateAddGoldens(tensors, tensors_size, golden_quantized, output_data,
ElementCount(*output_dims), activation);
tensorflow/lite/micro/kernels/arg_min_max_test.cc
@@ -60,9 +60,9 @@ void TestArgMinMaxFloat(const int* input_dims_data, const float* input_values,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_values, input_dims),
- CreateInt32Tensor(axis_values, axis_dims),
- CreateInt32Tensor(output, output_dims),
+ CreateTensor(input_values, input_dims),
+ CreateTensor(axis_values, axis_dims),
+ CreateTensor(output, output_dims),
};

ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,

@@ -88,8 +88,8 @@ void TestArgMinMaxQuantized(const int* input_dims_data,
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_values, input_quantized, input_dims,
                      input_scale, input_zero_point),
- CreateInt32Tensor(axis_values, axis_dims),
- CreateInt32Tensor(output, output_dims),
+ CreateTensor(axis_values, axis_dims),
+ CreateTensor(output, output_dims),
};

ValidateArgMinMaxGoldens(tensors, tensors_size, goldens, output,
tensorflow/lite/micro/kernels/ceil_test.cc
@@ -33,8 +33,8 @@ void TestCeil(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {1, 0};
tensorflow/lite/micro/kernels/comparisons_test.cc
@@ -61,9 +61,9 @@ void TestComparisonFloat(const TfLiteRegistration& registration,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);

TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input1_data, input1_dims),
- CreateFloatTensor(input2_data, input2_dims),
- CreateBoolTensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

TestComparison(registration, tensors, expected_output_data, output_data);

@@ -79,9 +79,9 @@ void TestComparisonBool(const TfLiteRegistration& registration,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);

TfLiteTensor tensors[tensors_size] = {
- CreateBoolTensor(input1_data, input1_dims),
- CreateBoolTensor(input2_data, input2_dims),
- CreateBoolTensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

TestComparison(registration, tensors, expected_output_data, output_data);

@@ -97,9 +97,9 @@ void TestComparisonInt(const TfLiteRegistration& registration,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);

TfLiteTensor tensors[tensors_size] = {
- CreateInt32Tensor(input1_data, input1_dims),
- CreateInt32Tensor(input2_data, input2_dims),
- CreateBoolTensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

TestComparison(registration, tensors, expected_output_data, output_data);

@@ -122,7 +122,7 @@ void TestComparisonQuantizedUInt8(const TfLiteRegistration& registration,
input1_scale, input1_zero_point),
CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
input2_scale, input2_zero_point),
- CreateBoolTensor(output_data, output_dims),
+ CreateTensor(output_data, output_dims),
};

TestComparison(registration, tensors, expected_output_data, output_data);

@@ -145,7 +145,7 @@ void TestComparisonQuantizedInt8(const TfLiteRegistration& registration,
input1_scale, input1_zero_point),
CreateQuantizedTensor(input2_data, input2_quantized, input2_dims,
input2_scale, input2_zero_point),
- CreateBoolTensor(output_data, output_dims),
+ CreateTensor(output_data, output_dims),
};

TestComparison(registration, tensors, expected_output_data, output_data);
tensorflow/lite/micro/kernels/concatenation_test.cc
@@ -38,10 +38,9 @@ void TestConcatenateTwoInputs(const int* input1_dims_data,
constexpr int input_size = 2;
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
- TfLiteTensor tensors[tensors_size] = {
-     CreateFloatTensor(input1_data, input1_dims),
-     CreateFloatTensor(input2_data, input2_dims),
-     CreateFloatTensor(output_data, output_dims)};
+ TfLiteTensor tensors[tensors_size] = {CreateTensor(input1_data, input1_dims),
+                                       CreateTensor(input2_data, input2_dims),
+                                       CreateTensor(output_data, output_dims)};

int inputs_array_data[] = {2, 0, 1};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
tensorflow/lite/micro/kernels/conv_test.cc
@@ -107,10 +107,10 @@ void TestConvFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(filter_data, filter_dims),
- CreateFloatTensor(bias_data, bias_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(filter_data, filter_dims),
+ CreateTensor(bias_data, bias_dims),
+ CreateTensor(output_data, output_dims),
};

TF_LITE_MICRO_EXPECT_EQ(

@@ -133,8 +133,8 @@ void TestConvQuantizedPerLayer(
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_dims_count = ElementCount(*output_dims);

- tflite::AsymmetricQuantize(expected_output_data, expected_output_quantized,
-                            output_dims_count, output_scale, 128);
+ tflite::Quantize(expected_output_data, expected_output_quantized,
+                  output_dims_count, output_scale, 128);

constexpr int inputs_size = 3;
constexpr int outputs_size = 1;

@@ -218,9 +218,8 @@ void TestConvQuantizedPerChannel(
output_tensor,
};

- tflite::AsymmetricQuantize(expected_output_data,
-                            expected_output_data_quantized, output_dims_count,
-                            output_scale, output_zero_point);
+ tflite::Quantize(expected_output_data, expected_output_data_quantized,
+                  output_dims_count, output_scale, output_zero_point);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
ValidateConvGoldens(tensors, tensors_size, expected_output_data_quantized,

@@ -286,8 +285,8 @@ TF_LITE_MICRO_TEST(SimpleTestQuantized) {
}

TF_LITE_MICRO_TEST(InputOutputDifferentTypeIsError) {
- using tflite::testing::CreateFloatTensor;
using tflite::testing::CreateQuantizedTensor;
+ using tflite::testing::CreateTensor;
using tflite::testing::IntArrayFromInts;

TfLiteIntArray* input_dims = IntArrayFromInts(tflite::testing::kInputShape);

@@ -301,9 +300,9 @@ TF_LITE_MICRO_TEST(InputOutputDifferentTypeIsError) {

int8_t output_data[tflite::testing::kOutputElements];
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(tflite::testing::kInputData, input_dims),
- CreateFloatTensor(tflite::testing::kFilterData, filter_dims),
- CreateFloatTensor(tflite::testing::kBiasData, bias_dims),
+ CreateTensor(tflite::testing::kInputData, input_dims),
+ CreateTensor(tflite::testing::kFilterData, filter_dims),
+ CreateTensor(tflite::testing::kBiasData, bias_dims),
CreateQuantizedTensor(output_data, output_dims, /*scale=*/0.0f,
                      /*zero_point=*/0),
};

@@ -314,8 +313,8 @@ TF_LITE_MICRO_TEST(InputOutputDifferentTypeIsError) {
}

TF_LITE_MICRO_TEST(HybridModeIsError) {
- using tflite::testing::CreateFloatTensor;
using tflite::testing::CreateQuantizedTensor;
+ using tflite::testing::CreateTensor;
using tflite::testing::IntArrayFromInts;

TfLiteIntArray* input_dims = IntArrayFromInts(tflite::testing::kInputShape);

@@ -330,12 +329,12 @@ TF_LITE_MICRO_TEST(HybridModeIsError) {
int8_t filter_data[tflite::testing::kFilterElements] = {};
float output_data[tflite::testing::kOutputElements];
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(tflite::testing::kInputData, input_dims),
+ CreateTensor(tflite::testing::kInputData, input_dims),
CreateQuantizedTensor(filter_data, filter_dims,
                      /*scale=*/0.0f,
                      /*zero_point=*/0),
- CreateFloatTensor(tflite::testing::kBiasData, bias_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(tflite::testing::kBiasData, bias_dims),
+ CreateTensor(output_data, output_dims),
};
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteError, tflite::testing::InvokeConv(

@@ -632,8 +631,8 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) {
output_tensor,
};

- tflite::AsymmetricQuantize(tflite::testing::kGoldenData, golden_quantized,
-                            output_dims_count, output_scale, 0);
+ tflite::Quantize(tflite::testing::kGoldenData, golden_quantized,
+                  output_dims_count, output_scale, 0);

// Set filter quant to mismatched dimension.
TfLiteAffineQuantization* quant = reinterpret_cast<TfLiteAffineQuantization*>(

@@ -706,7 +705,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
tflite::testing::kBiasElements,
input_scale * output_scale);
TfLiteTensor bias_tensor =
- tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
+ tflite::testing::CreateTensor(bias_quantized, bias_dims);

int bias_zero_points[2] = {1, 0};
float bias_scales[2] = {1, input_scale * filter_scale};

@@ -735,8 +734,8 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
output_tensor,
};

- tflite::AsymmetricQuantize(tflite::testing::kGoldenData, golden_quantized,
-                            output_dims_count, output_scale, 0);
+ tflite::Quantize(tflite::testing::kGoldenData, golden_quantized,
+                  output_dims_count, output_scale, 0);

TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::testing::ValidateConvGoldens(

@@ -832,7 +831,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
tflite::SymmetricQuantize(bias_values, bias_quantized, kSampleSize,
input_scale * output_scale);
TfLiteTensor bias_tensor =
- tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
+ tflite::testing::CreateTensor(bias_quantized, bias_dims);

// There is a single zero point of 0, and a single scale of
// input_scale * filter_scale.

@@ -867,9 +866,8 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
};

int8_t golden_quantized[kSampleSize];
- tflite::AsymmetricQuantize(expected_output, golden_quantized,
-                            output_dims_count, output_scale,
-                            output_zero_point);
+ tflite::Quantize(expected_output, golden_quantized, output_dims_count,
+                  output_scale, output_zero_point);

// Rounding errors due to quantization should not exceed 1.
constexpr int kQuantizationTolerance = 1;
tensorflow/lite/micro/kernels/depthwise_conv_test.cc
@@ -96,10 +96,10 @@ void TestDepthwiseConvFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(filter_data, filter_dims),
- CreateFloatTensor(bias_data, bias_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(filter_data, filter_dims),
+ CreateTensor(bias_data, bias_dims),
+ CreateTensor(output_data, output_dims),
};

ValidateDepthwiseConvGoldens(expected_output_data, output_dims_count,

@@ -151,8 +151,8 @@ void TestDepthwiseConvQuantizedPerLayer(
IntArrayFromInts(bias_zero_points), 0};
tensors[2].quantization = {kTfLiteAffineQuantization, &bias_quant};

- AsymmetricQuantize(golden, golden_quantized, output_dims_count, output_scale,
-                    output_zero_point);
+ Quantize(golden, golden_quantized, output_dims_count, output_scale,
+          output_zero_point);
ValidateDepthwiseConvGoldens(golden_quantized, output_dims_count, conv_params,
1.0, tensors_size, tensors);
}

@@ -217,8 +217,8 @@ void TestDepthwiseConvQuantizedPerChannel(
output_tensor,
};

- AsymmetricQuantize(expected_output_data, expected_output_data_quantized,
-                    output_dims_count, output_scale, output_zero_point);
+ Quantize(expected_output_data, expected_output_data_quantized,
+          output_dims_count, output_scale, output_zero_point);

TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, ValidateDepthwiseConvGoldens(expected_output_data_quantized,

@@ -810,7 +810,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
input_scale * output_scale);
TfLiteTensor bias_tensor =
- tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
+ tflite::testing::CreateTensor(bias_quantized, bias_dims);

int bias_zero_points[2] = {1, 0};
float bias_scales[2] = {1, input_scale * filter_scale};

@@ -839,8 +839,8 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
output_tensor,
};

- tflite::AsymmetricQuantize(golden, golden_quantized, output_dims_count,
-                            output_scale, 0);
+ tflite::Quantize(golden, golden_quantized, output_dims_count, output_scale,
+                  0);

TfLiteDepthwiseConvParams conv_params;
conv_params.activation = kTfLiteActNone;

@@ -954,7 +954,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
input_scale * output_scale);
TfLiteTensor bias_tensor =
- tflite::testing::CreateInt32Tensor(bias_quantized, bias_dims);
+ tflite::testing::CreateTensor(bias_quantized, bias_dims);

// Set zero point and scale arrays with a single element for each.
int bias_zero_points[] = {1, 0};

@@ -989,8 +989,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
};

int8_t golden_quantized[output_elements];
- tflite::AsymmetricQuantize(golden, golden_quantized, output_elements,
-                            output_scale, 0);
+ tflite::Quantize(golden, golden_quantized, output_elements, output_scale, 0);

// Errors due to quantization should not exceed 1.
constexpr int kQuantizationTolerance = 1;
tensorflow/lite/micro/kernels/dequantize_test.cc
@@ -61,7 +61,7 @@ void TestDequantizeToFloat(const int* input_dims_data, const float* input_data,
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims, scale,
                      zero_point),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(output_data, output_dims),
};

ValidateDequantizeGoldens(tensors, tensors_size, expected_output_data,

@@ -84,7 +84,7 @@ void TestDequantizeToInt32(const int* input_dims_data, const float* input_data,
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_data_quantized, input_dims,
                      input_scale, input_zero_point),
- CreateInt32Tensor(output_data, output_dims),
+ CreateTensor(output_data, output_dims),
};

tensors[1].params.scale = output_scale;
tensorflow/lite/micro/kernels/elementwise_test.cc
@@ -35,9 +35,8 @@ void TestElementwiseFloat(const TfLiteRegistration& registration,
constexpr int input_size = 1;
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
- TfLiteTensor tensors[tensors_size] = {
-     CreateFloatTensor(input_data, input_dims),
-     CreateFloatTensor(output_data, output_dims)};
+ TfLiteTensor tensors[tensors_size] = {CreateTensor(input_data, input_dims),
+                                       CreateTensor(output_data, output_dims)};

// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {

@@ -72,9 +71,8 @@ void TestElementwiseBool(const TfLiteRegistration& registration,
constexpr int input_size = 1;
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
- TfLiteTensor tensors[tensors_size] = {
-     CreateBoolTensor(input_data, input_dims),
-     CreateBoolTensor(output_data, output_dims)};
+ TfLiteTensor tensors[tensors_size] = {CreateTensor(input_data, input_dims),
+                                       CreateTensor(output_data, output_dims)};

// Place false in the uninitialized output buffer.
for (int i = 0; i < output_dims_count; ++i) {
tensorflow/lite/micro/kernels/floor_test.cc
@@ -34,8 +34,8 @@ void TestFloor(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {1, 0};
tensorflow/lite/micro/kernels/fully_connected_test.cc
@@ -276,10 +276,10 @@ TfLiteStatus TestFullyConnectedFloat(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(weights_data, weights_dims),
- CreateFloatTensor(bias_data, bias_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(weights_data, weights_dims),
+ CreateTensor(bias_data, bias_dims),
+ CreateTensor(output_data, output_dims),
};

return ValidateFullyConnectedGoldens(tensors, tensors_size, activation, 1e-4f,

@@ -317,8 +317,8 @@ TfLiteStatus TestFullyConnectedQuantized(
output_zero_point),
};

- AsymmetricQuantize(golden, golden_quantized, output_dims_count, output_scale,
-                    output_zero_point);
+ Quantize(golden, golden_quantized, output_dims_count, output_scale,
+          output_zero_point);

return ValidateFullyConnectedGoldens(tensors, tensors_size, activation, 0.0f,
output_dims_count, golden_quantized,
tensorflow/lite/micro/kernels/hard_swish_test.cc
@@ -114,8 +114,8 @@ void TestHardSwishQuantized(int size, const T* output_data,
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

- AsymmetricDequantize<T>(output_data, output_elements_count, output_scale,
-                         output_zero_point, dequantized_output);
+ Dequantize<T>(output_data, output_elements_count, output_scale,
+               output_zero_point, dequantized_output);

for (int i = 0; i < output_elements_count; ++i) {
TF_LITE_MICRO_EXPECT_NEAR(float_ref_output_values[i], dequantized_output[i],

@@ -194,8 +194,8 @@ void TestHardSwishQuantizedBias(const int size, const T* output_data,
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

- AsymmetricDequantize<T>(output_data, output_elements_count, output_scale,
-                         output_zero_point, dequantized_output);
+ Dequantize<T>(output_data, output_elements_count, output_scale,
+               output_zero_point, dequantized_output);

float sum_diff = 0;
for (int i = 0; i < size; i++) {

@@ -229,8 +229,8 @@ void TestHardSwishFloat(const int size, float* output_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(float_input_values, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(float_input_values, input_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {1, 0};
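The hard_swish hunks above call Dequantize<T>(...) where the tests elsewhere call Quantize(...). Plausible shapes for these templated reference helpers in micro_utils.h, inferred only from the call sites (the signatures and bodies here are assumptions, not the checked-in code):

template <typename T>
void Quantize(const float* input, T* output, int num_elements, float scale,
              int zero_point) {
  for (int i = 0; i < num_elements; i++) {
    // Reuses the single-value helper sketched earlier for activations.cc.
    output[i] = FloatToQuantizedType<T>(input[i], scale, zero_point);
  }
}

template <typename T>
void Dequantize(const T* values, int size, float scale, int zero_point,
                float* dequantized_values) {
  for (int i = 0; i < size; i++) {
    dequantized_values[i] =
        (static_cast<int32_t>(values[i]) - zero_point) * scale;
  }
}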
tensorflow/lite/micro/kernels/l2norm_test.cc
@@ -32,7 +32,7 @@ constexpr float kOutputMax = 127.0 / 128.0;

TfLiteTensor CreateL2NormTensor(const float* data, TfLiteIntArray* dims,
                                bool is_input) {
- return CreateFloatTensor(data, dims);
+ return CreateTensor(data, dims);
}

template <typename T>
tensorflow/lite/micro/kernels/logical_test.cc
@@ -38,9 +38,9 @@ void TestLogicalOp(const TfLiteRegistration& registration,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateBoolTensor(input1_data, input1_dims),
- CreateBoolTensor(input2_data, input2_dims),
- CreateBoolTensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {2, 0, 1};
tensorflow/lite/micro/kernels/logistic_test.cc
@@ -79,8 +79,8 @@ void TestLogisticFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

ValidateLogisticGoldens(tensors, tensors_size, output_data, golden,

@@ -108,8 +108,8 @@ void TestLogisticQuantized(const int* input_dims_data, const float* input_data,
output_zero_point),
};

- tflite::AsymmetricQuantize(golden, golden_quantized, output_elements_count,
-                            output_scale, output_zero_point);
+ tflite::Quantize(golden, golden_quantized, output_elements_count,
+                  output_scale, output_zero_point);
ValidateLogisticGoldens(tensors, tensors_size, output_data, golden_quantized,
output_elements_count, 1.0);
}
tensorflow/lite/micro/kernels/maximum_minimum_test.cc
@@ -38,9 +38,9 @@ void TestMaxMinFloat(const TfLiteRegistration& registration,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input1_data, input1_dims),
- CreateFloatTensor(input2_data, input2_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {2, 0, 1};

@@ -118,9 +118,9 @@ void TestMaxMinQuantizedInt32(
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateInt32Tensor(input1_data, input1_dims),
- CreateInt32Tensor(input2_data, input2_dims),
- CreateInt32Tensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {2, 0, 1};
tensorflow/lite/micro/kernels/mul_test.cc
@@ -80,9 +80,9 @@ void TestMulFloat(const int* input1_dims_data, const float* input1_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input1_data, input1_dims),
- CreateFloatTensor(input2_data, input2_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input1_data, input1_dims),
+ CreateTensor(input2_data, input2_dims),
+ CreateTensor(output_data, output_dims),
};

ValidateMulGoldens(tensors, tensors_size, activation, golden,

@@ -114,8 +114,8 @@ void TestMulQuantized(const int* input1_dims_data, const float* input1_data,
CreateQuantizedTensor(output_data, output_dims, output_scale,
                      output_zero_point)};

- AsymmetricQuantize(golden, golden_quantized, output_dims_count, output_scale,
-                    output_zero_point);
+ Quantize(golden, golden_quantized, output_dims_count, output_scale,
+          output_zero_point);

ValidateMulGoldens(tensors, tensors_size, activation, golden_quantized,
output_dims_count, 1.0f, output_data);
tensorflow/lite/micro/kernels/neg_test.cc
@@ -34,8 +34,8 @@ void TestNegFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {1, 0};
tensorflow/lite/micro/kernels/pack_test.cc
@@ -61,10 +61,9 @@ void TestPackTwoInputsFloat(const int* input1_dims_data,
constexpr int input_size = 2;
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
- TfLiteTensor tensors[tensors_size] = {
-     CreateFloatTensor(input1_data, input1_dims),
-     CreateFloatTensor(input2_data, input2_dims),
-     CreateFloatTensor(output_data, output_dims)};
+ TfLiteTensor tensors[tensors_size] = {CreateTensor(input1_data, input1_dims),
+                                       CreateTensor(input2_data, input2_dims),
+                                       CreateTensor(output_data, output_dims)};

TfLitePackParams builtin_data = {
.values_count = 2,

@@ -95,11 +94,10 @@ void TestPackThreeInputsFloat(
constexpr int input_size = 3;
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
- TfLiteTensor tensors[tensors_size] = {
-     CreateFloatTensor(input1_data, input1_dims),
-     CreateFloatTensor(input2_data, input2_dims),
-     CreateFloatTensor(input3_data, input3_dims),
-     CreateFloatTensor(output_data, output_dims)};
+ TfLiteTensor tensors[tensors_size] = {CreateTensor(input1_data, input1_dims),
+                                       CreateTensor(input2_data, input2_dims),
+                                       CreateTensor(input3_data, input3_dims),
+                                       CreateTensor(output_data, output_dims)};

TfLitePackParams builtin_data = {
.values_count = 3,

@@ -167,10 +165,9 @@ void TestPackTwoInputsQuantized32(const int* input1_dims_data,
constexpr int input_size = 2;
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
- TfLiteTensor tensors[tensors_size] = {
-     CreateInt32Tensor(input1_data, input1_dims),
-     CreateInt32Tensor(input2_data, input2_dims),
-     CreateInt32Tensor(output_data, output_dims)};
+ TfLiteTensor tensors[tensors_size] = {CreateTensor(input1_data, input1_dims),
+                                       CreateTensor(input2_data, input2_dims),
+                                       CreateTensor(output_data, output_dims)};

TfLitePackParams builtin_data = {
.values_count = 2,
tensorflow/lite/micro/kernels/pad_test.cc
@@ -101,10 +101,9 @@ void TestPadFloat(const int* input_dims_data, const float* input_data,
constexpr int inputs_size = 2;
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
- TfLiteTensor tensors[tensors_size] = {
-     CreateFloatTensor(input_data, input_dims),
-     CreateInt32Tensor(pad_data, pad_dims),
-     CreateFloatTensor(output_data, output_dims)};
+ TfLiteTensor tensors[tensors_size] = {CreateTensor(input_data, input_dims),
+                                       CreateTensor(pad_data, pad_dims),
+                                       CreateTensor(output_data, output_dims)};

// Pad tensor must be constant.
tensors[1].allocation_type = kTfLiteMmapRo;

@@ -130,10 +129,9 @@ void TestPadV2Float(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateInt32Tensor(pad_data, pad_dims),
- CreateFloatTensor(&pad_value, pad_value_dims),
- CreateFloatTensor(output_data, output_dims)};
+ CreateTensor(input_data, input_dims), CreateTensor(pad_data, pad_dims),
+ CreateTensor(&pad_value, pad_value_dims),
+ CreateTensor(output_data, output_dims)};

// Pad tensor must be constant.
tensors[1].allocation_type = kTfLiteMmapRo;

@@ -161,15 +159,15 @@ void TestPadQuantized(const int* input_dims_data, const float* input_data,
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_quantized, input_dims,
                      input_scale, input_zero_point),
- CreateInt32Tensor(pad_data, pad_dims),
+ CreateTensor(pad_data, pad_dims),
CreateQuantizedTensor(output_data, output_dims, output_scale,
                      output_zero_point)};

// Pad tensor must be constant.
tensors[1].allocation_type = kTfLiteMmapRo;

- tflite::AsymmetricQuantize(golden, golden_quantized, output_dims_count,
-                            output_scale, output_zero_point);
+ tflite::Quantize(golden, golden_quantized, output_dims_count, output_scale,
+                  output_zero_point);
TF_LITE_MICRO_EXPECT_EQ(
expected_status,
ValidatePadGoldens(tensors, tensors_size, golden_quantized, output_data,

@@ -200,7 +198,7 @@ void TestPadV2Quantized(const int* input_dims_data, const float* input_data,
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_quantized, input_dims,
                      input_scale, input_zero_point),
- CreateInt32Tensor(pad_data, pad_dims),
+ CreateTensor(pad_data, pad_dims),
CreateQuantizedTensor(&pad_value, &pad_value_quantized, pad_value_dims,
                      pad_value_scale, pad_value_zero_point),
CreateQuantizedTensor(output_data, output_dims, output_scale,

@@ -211,8 +209,8 @@ void TestPadV2Quantized(const int* input_dims_data, const float* input_data,
tensors[2].params.scale = pad_value_scale;
tensors[3].params.scale = output_scale;

- tflite::AsymmetricQuantize(golden, golden_quantized, output_dims_count,
-                            output_scale, output_zero_point);
+ tflite::Quantize(golden, golden_quantized, output_dims_count, output_scale,
+                  output_zero_point);
TF_LITE_MICRO_EXPECT_EQ(
expected_status,
ValidatePadV2Goldens(tensors, tensors_size, golden_quantized, output_data,
tensorflow/lite/micro/kernels/pooling_test.cc
@@ -73,8 +73,8 @@ void TestAveragePoolFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

const TfLiteRegistration registration =

@@ -131,8 +131,8 @@ void TestMaxPoolFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

const TfLiteRegistration registration =
tensorflow/lite/micro/kernels/prelu_test.cc
@@ -57,9 +57,9 @@ void TestPreluFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(alpha_data, alpha_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(alpha_data, alpha_dims),
+ CreateTensor(output_data, output_dims),
};

ValidatePreluGoldens(tensors, tensors_size, expected_output_data,

@@ -93,8 +93,8 @@ void TestPreluQuantized(const int* input_dims_data, const float* input_data,
output_zero_point),
};

- AsymmetricQuantize(golden, golden_quantized, output_dims_count, output_scale,
-                    output_zero_point);
+ Quantize(golden, golden_quantized, output_dims_count, output_scale,
+          output_zero_point);

ValidatePreluGoldens(tensors, tensors_size, golden_quantized,
output_dims_count, output_data);
tensorflow/lite/micro/kernels/quantize_test.cc
@@ -43,7 +43,7 @@ void ValidateQuantizeGoldens(TfLiteTensor* tensors, int tensors_size,
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

// Use reference quantization from test utils to compare against op output.
- AsymmetricQuantize(golden, golden_quantized, output_len, scale, zero_point);
+ Quantize(golden, golden_quantized, output_len, scale, zero_point);
for (int i = 0; i < output_len; ++i) {
TF_LITE_MICRO_EXPECT_EQ(golden_quantized[i], output_data[i]);
}

@@ -71,7 +71,7 @@ void TestQuantizeFloat(const int* input_dims_data, const float* input_data,
// 1 input, 1 output.
constexpr int tensors_size = 2;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
+ CreateTensor(input_data, input_dims),
output_tensor,
};
tensorflow/lite/micro/kernels/reduce_test.cc
@@ -106,9 +106,9 @@ void TestMeanFloatInput4D(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = num_of_inputs + num_of_outputs;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateInt32Tensor(axis_data, axis_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(axis_data, axis_dims),
+ CreateTensor(output_data, output_dims),
};

TF_LITE_MICRO_EXPECT_EQ(

@@ -133,9 +133,9 @@ void TestReduceOpFloat(const int* input_dims_data, const float* input_data,
constexpr int tensors_size = num_of_inputs + num_of_outputs;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateInt32Tensor(axis_data, axis_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(axis_data, axis_dims),
+ CreateTensor(output_data, output_dims),
};

TF_LITE_MICRO_EXPECT_EQ(

@@ -165,15 +165,14 @@ void TestReduceOpQuantized(
TfLiteTensor tensors[] = {
CreateQuantizedTensor(input_data, input_data_quant, input_dims,
                      input_scale, input_zero_point),
- CreateInt32Tensor(axis_data, axis_dims),
+ CreateTensor(axis_data, axis_dims),
CreateQuantizedTensor(output_data_quant, output_dims, output_scale,
                      output_zero_point),
};

// Quantize expected output
- tflite::AsymmetricQuantize(expected_output_data, expected_output_data_quant,
-                            output_dims_count, output_scale,
-                            output_zero_point);
+ tflite::Quantize(expected_output_data, expected_output_data_quant,
+                  output_dims_count, output_scale, output_zero_point);

TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,

@@ -204,15 +203,14 @@ void TestMeanOpQuantized(const int* input_dims_data, const float* input_data,
TfLiteTensor tensors[] = {
CreateQuantizedTensor(input_data, input_data_quant, input_dims,
                      input_scale, input_zero_point),
- CreateInt32Tensor(axis_data, axis_dims),
+ CreateTensor(axis_data, axis_dims),
CreateQuantizedTensor(output_data_quant, output_dims, output_scale,
                      output_zero_point),
};

// Quantize expected output
- tflite::AsymmetricQuantize(expected_output_data, expected_output_data_quant,
-                            output_dims_count, output_scale,
-                            output_zero_point);
+ tflite::Quantize(expected_output_data, expected_output_data_quant,
+                  output_dims_count, output_scale, output_zero_point);

TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
tensorflow/lite/micro/kernels/reshape_test.cc
@@ -121,9 +121,9 @@ void TestReshape(const int* input_dims_data, const float* input_data,
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* shape_dims = IntArrayFromInts(shape_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
- TfLiteTensor input_tensor = CreateFloatTensor(input_data, input_dims);
- TfLiteTensor shape_tensor = CreateInt32Tensor(shape_data, shape_dims);
- TfLiteTensor output_tensor = CreateFloatTensor(output_data, output_dims);
+ TfLiteTensor input_tensor = CreateTensor(input_data, input_dims);
+ TfLiteTensor shape_tensor = CreateTensor(shape_data, shape_dims);
+ TfLiteTensor output_tensor = CreateTensor(output_data, output_dims);

TestReshapeWithShape(&input_tensor, &shape_tensor, &output_tensor,
expected_output, expected_output_len, expected_dims,

@@ -144,7 +144,7 @@ void TestReshapeQuantized(const int* input_dims_data, const T* input_data,
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
TfLiteTensor input_tensor = CreateQuantizedTensor(
input_data, input_dims, /*scale=*/1.f, /*zero_point=*/0);
- TfLiteTensor shape_tensor = CreateInt32Tensor(shape_data, shape_dims);
+ TfLiteTensor shape_tensor = CreateTensor(shape_data, shape_dims);
TfLiteTensor output_tensor = CreateQuantizedTensor(
output_data, output_dims, /*scale=*/1.f, /*zero_point=*/0);

@@ -213,14 +213,12 @@ TF_LITE_MICRO_TEST(ReshapeWithInvalidShapeShouldFail) {
TfLiteIntArray* input_dims =
tflite::testing::IntArrayFromInts(input_dims_data);
const float input_data[] = {3.0f};
- auto input_tensor =
-     tflite::testing::CreateFloatTensor(input_data, input_dims);
+ auto input_tensor = tflite::testing::CreateTensor(input_data, input_dims);
float output_data[4];
int output_dims_data[6] = {2, 2, 1, 2, 2, 1};
TfLiteIntArray* output_dims =
tflite::testing::IntArrayFromInts(output_dims_data);
- auto output_tensor =
-     tflite::testing::CreateFloatTensor(output_data, output_dims);
+ auto output_tensor = tflite::testing::CreateTensor(output_data, output_dims);
const int expected_output[] = {};
const int expected_output_len = 0;
const int expected_dims[] = {};

@@ -328,25 +326,24 @@ TF_LITE_MICRO_TEST(ReshapeWithScalarOutputShouldSucceed) {
// Some old models specify '[0]' as the new shape, indicating that both input
// and output are scalars.
TF_LITE_MICRO_TEST(ReshapeWithLegacyScalarOutputShouldSucceed) {
- using tflite::testing::CreateFloatTensor;
+ using tflite::testing::CreateTensor;
using tflite::testing::IntArrayFromInts;

int input_dims_data[] = {1, 1};
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
const float input_data[] = {3.0f};
- auto input_tensor = CreateFloatTensor(input_data, input_dims);
+ auto input_tensor = CreateTensor(input_data, input_dims);

float output_data[1];
int output_dims_data[2] = {1, 0};
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
- auto output_tensor = CreateFloatTensor(output_data, output_dims);
+ auto output_tensor = CreateTensor(output_data, output_dims);

int shape_dims_data[] = {1, 0};
TfLiteIntArray* shape_dims = IntArrayFromInts(shape_dims_data);

const int32_t shape_data[] = {0};
- auto shape_tensor =
-     tflite::testing::CreateInt32Tensor(shape_data, shape_dims);
+ auto shape_tensor = tflite::testing::CreateTensor(shape_data, shape_dims);
const float expected_output_with_shape[] = {};
const int expected_output_with_shape_len = 0;
const float expected_output_no_shape[] = {3};
tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
@@ -27,7 +27,7 @@ using uint8_t = std::uint8_t;
using int32_t = std::int32_t;

TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims) {
- return CreateFloatTensor(data, dims);
+ return CreateTensor(data, dims);
}

TfLiteTensor TestCreateTensor(const uint8_t* data, TfLiteIntArray* dims) {

@@ -59,7 +59,7 @@ void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data,
constexpr int tensors_size = 3;
TfLiteTensor tensors[tensors_size] = {
TestCreateTensor(input_data, input_dims),
- CreateInt32Tensor(expected_size_data, expected_size_dims),
+ CreateTensor(expected_size_data, expected_size_dims),
TestCreateTensor(output_data, output_dims),
};
tensorflow/lite/micro/kernels/round_test.cc
@@ -33,8 +33,8 @@ void TestRound(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

int inputs_array_data[] = {1, 0};
tensorflow/lite/micro/kernels/shape_test.cc
@@ -55,8 +55,8 @@ void TestShape(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateInt32Tensor(output_data, output_dims, true),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims, true),
};

ValidateShape(tensors, tensors_size, output_data, expected_output_data,
tensorflow/lite/micro/kernels/softmax_test.cc
@@ -281,8 +281,8 @@ void TestSoftmaxFloat(const int* input_dims_data, const float* input_data,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output_data, output_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output_data, output_dims),
};

ValidateSoftmaxGoldens(tensors, tensors_size, output_data,

@@ -310,8 +310,8 @@ void TestSoftmaxQuantized(const int* input_dims_data, const float* input_data,
output_zero_point),
};

- AsymmetricQuantize(golden, golden_quantized, output_dims_count, output_scale,
-                    output_zero_point);
+ Quantize(golden, golden_quantized, output_dims_count, output_scale,
+          output_zero_point);

ValidateSoftmaxGoldens(tensors, tensors_size, output_data, golden_quantized,
output_dims_count, tolerance);
tensorflow/lite/micro/kernels/split_test.cc
@@ -42,10 +42,9 @@ void TestSplitTwoOutputsFloat(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
- CreateInt32Tensor(axis_data, axis_dims),
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output1_data, output1_dims),
- CreateFloatTensor(output2_data, output2_dims)};
+ CreateTensor(axis_data, axis_dims), CreateTensor(input_data, input_dims),
+ CreateTensor(output1_data, output1_dims),
+ CreateTensor(output2_data, output2_dims)};

// Currently only support constant axis tensor.
tensors[0].allocation_type = kTfLiteMmapRo;

@@ -104,12 +103,12 @@ void TestSplitFourOutputsFloat(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
- CreateInt32Tensor(axis_data, axis_dims),
- CreateFloatTensor(input_data, input_dims),
- CreateFloatTensor(output1_data, output1_dims),
- CreateFloatTensor(output2_data, output2_dims),
- CreateFloatTensor(output3_data, output1_dims),
- CreateFloatTensor(output4_data, output1_dims)};
+ CreateTensor(axis_data, axis_dims),
+ CreateTensor(input_data, input_dims),
+ CreateTensor(output1_data, output1_dims),
+ CreateTensor(output2_data, output2_dims),
+ CreateTensor(output3_data, output1_dims),
+ CreateTensor(output4_data, output1_dims)};

// Currently only support constant axis tensor.
tensors[0].allocation_type = kTfLiteMmapRo;

@@ -171,7 +170,7 @@ void TestSplitTwoOutputsQuantized(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
- CreateInt32Tensor(axis_data, axis_dims),
+ CreateTensor(axis_data, axis_dims),
CreateQuantizedTensor(input_data, input_dims, 0, 10),
CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
CreateQuantizedTensor(output2_data, output2_dims, 0, 10)};

@@ -227,10 +226,9 @@ void TestSplitTwoOutputsQuantized32(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
- CreateInt32Tensor(axis_data, axis_dims),
- CreateInt32Tensor(input_data, input_dims),
- CreateInt32Tensor(output1_data, output1_dims),
- CreateInt32Tensor(output2_data, output2_dims)};
+ CreateTensor(axis_data, axis_dims), CreateTensor(input_data, input_dims),
+ CreateTensor(output1_data, output1_dims),
+ CreateTensor(output2_data, output2_dims)};

// Currently only support constant axis tensor.
tensors[0].allocation_type = kTfLiteMmapRo;
@ -63,13 +63,13 @@ void TestSplitVFloat(const int* input_dims_data, const float* input_data,
|
||||
// then come outputs
|
||||
|
||||
TfLiteTensor tensors[tensors_size];
|
||||
tensors[0] = CreateFloatTensor(input_data, input_dims);
|
||||
tensors[1] = CreateInt32Tensor(split_data, split_dims);
|
||||
tensors[2] = CreateInt32Tensor(axis_data, axis_dims);
|
||||
tensors[0] = CreateTensor(input_data, input_dims);
|
||||
tensors[1] = CreateTensor(split_data, split_dims);
|
||||
tensors[2] = CreateTensor(axis_data, axis_dims);
|
||||
|
||||
// add output tensors
|
||||
for (int i = 0; i < N; i++)
|
||||
tensors[3 + i] = CreateFloatTensor(output_tensors.data[i], output_dims[i]);
|
||||
tensors[3 + i] = CreateTensor(output_tensors.data[i], output_dims[i]);
|
||||
|
||||
tensors[2].allocation_type = kTfLiteMmapRo;
|
||||
tensors[1].allocation_type = kTfLiteMmapRo;
|
||||
|
@ -74,11 +74,11 @@ void TestStridedSliceFloat(const int* input_shape, const int* begin_shape,
|
||||
constexpr int outputs_size = 1;
|
||||
constexpr int tensors_size = inputs_size + outputs_size;
|
||||
TfLiteTensor tensors[tensors_size] = {
|
||||
CreateFloatTensor(input_data, input_dims),
|
||||
CreateInt32Tensor(begin_data, begin_dims),
|
||||
CreateInt32Tensor(end_data, end_dims),
|
||||
CreateInt32Tensor(strides_data, strides_dims),
|
||||
CreateFloatTensor(output_data, output_dims),
|
||||
CreateTensor(input_data, input_dims),
|
||||
CreateTensor(begin_data, begin_dims),
|
||||
CreateTensor(end_data, end_dims),
|
||||
CreateTensor(strides_data, strides_dims),
|
||||
CreateTensor(output_data, output_dims),
|
||||
};
|
||||
|
||||
ValidateStridedSliceGoldens(tensors, tensors_size, expected_output,
|
||||
@ -106,9 +106,9 @@ void TestStridedSliceQuantized(
|
||||
std::numeric_limits<T>::max() + std::numeric_limits<T>::min() / 2;
|
||||
TfLiteTensor tensors[tensors_size] = {
|
||||
CreateQuantizedTensor(input_data, input_dims, 1.0, zero_point),
|
||||
CreateInt32Tensor(begin_data, begin_dims),
|
||||
CreateInt32Tensor(end_data, end_dims),
|
||||
CreateInt32Tensor(strides_data, strides_dims),
|
||||
CreateTensor(begin_data, begin_dims),
|
||||
CreateTensor(end_data, end_dims),
|
||||
CreateTensor(strides_data, strides_dims),
|
||||
CreateQuantizedTensor(output_data, output_dims, 1.0, zero_point),
|
||||
};
|
||||
|
||||
|
@ -99,9 +99,9 @@ void TestSubFloat(const int* input1_dims_data, const float* input1_data,
|
||||
constexpr int outputs_size = 1;
|
||||
constexpr int tensors_size = inputs_size + outputs_size;
|
||||
TfLiteTensor tensors[tensors_size] = {
|
||||
CreateFloatTensor(input1_data, input1_dims),
|
||||
CreateFloatTensor(input2_data, input2_dims),
|
||||
CreateFloatTensor(output_data, output_dims),
|
||||
CreateTensor(input1_data, input1_dims),
|
||||
CreateTensor(input2_data, input2_dims),
|
||||
CreateTensor(output_data, output_dims),
|
||||
};
|
||||
|
||||
ValidateSubGoldens(tensors, tensors_size, expected_output, output_data,
|
||||
@ -135,9 +135,8 @@ void TestSubQuantized(const int* input1_dims_data, const float* input1_data,
|
||||
tflite::testing::CreateQuantizedTensor(output_data, output_dims,
|
||||
output_scale, output_zero_point),
|
||||
};
|
||||
tflite::AsymmetricQuantize(golden, golden_quantized,
|
||||
ElementCount(*output_dims), output_scale,
|
||||
output_zero_point);
|
||||
tflite::Quantize(golden, golden_quantized, ElementCount(*output_dims),
|
||||
output_scale, output_zero_point);
|
||||
|
||||
ValidateSubGoldens(tensors, tensors_size, golden_quantized, output_data,
|
||||
ElementCount(*output_dims), activation);
|
||||
|
@ -565,13 +565,13 @@ void TestSVDF(const int batch_size, const int num_units, const int input_size,
|
||||
|
||||
const int tensor_count = 6; // 5 inputs, 1 output
|
||||
TfLiteTensor tensors[] = {
|
||||
CreateFloatTensor(input_data, input_dims),
|
||||
CreateFloatTensor(feature_weights_data, feature_weights_dims),
|
||||
CreateFloatTensor(time_weights_data, time_weights_dims),
|
||||
CreateFloatTensor(bias_data, bias_dims),
|
||||
CreateFloatTensor(activation_state_data, activation_state_dims,
|
||||
/*is_variable=*/true),
|
||||
CreateFloatTensor(output_data, output_dims),
|
||||
CreateTensor(input_data, input_dims),
|
||||
CreateTensor(feature_weights_data, feature_weights_dims),
|
||||
CreateTensor(time_weights_data, time_weights_dims),
|
||||
CreateTensor(bias_data, bias_dims),
|
||||
CreateTensor(activation_state_data, activation_state_dims,
|
||||
/*is_variable=*/true),
|
||||
CreateTensor(output_data, output_dims),
|
||||
};
|
||||
|
||||
ValidateSVDFGoldens(batch_size, num_units, input_size, rank, tensors,
|
||||
@ -640,12 +640,10 @@ inline void TestIntegerSVDF(
|
||||
CreateQuantizedTensor(output_data, output_dims, output_scale,
|
||||
output_zero_point)};
|
||||
|
||||
tflite::AsymmetricQuantize(golden_output, golden_output_quantized,
|
||||
golden_output_len, output_scale,
|
||||
output_zero_point);
|
||||
tflite::AsymmetricQuantize(input_sequences_data, input_sequences_quantized,
|
||||
input_sequences_len, input_scale,
|
||||
input_zero_point);
|
||||
tflite::Quantize(golden_output, golden_output_quantized, golden_output_len,
|
||||
output_scale, output_zero_point);
|
||||
tflite::Quantize(input_sequences_data, input_sequences_quantized,
|
||||
input_sequences_len, input_scale, input_zero_point);
|
||||
|
||||
ValidateSVDFGoldens(batch_size, num_units, input_size, rank, tensors,
|
||||
tensor_count, activation, input_sequences_quantized,
|
||||
|
@ -77,8 +77,8 @@ void TestTanhFloat(const int input_dims_data[], const float* input_data,
|
||||
constexpr int outputs_size = 1;
|
||||
constexpr int tensors_size = inputs_size + outputs_size;
|
||||
TfLiteTensor tensors[tensors_size] = {
|
||||
CreateFloatTensor(input_data, input_dims),
|
||||
CreateFloatTensor(output_data, output_dims),
|
||||
CreateTensor(input_data, input_dims),
|
||||
CreateTensor(output_data, output_dims),
|
||||
};
|
||||
|
||||
int inputs_array_data[] = {1, 0};
|
||||
@ -113,9 +113,8 @@ void TestTanhQuantized(const int input_dims_data[], const float* input_data,
|
||||
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
|
||||
const int output_elements_count = ElementCount(*output_dims);
|
||||
|
||||
tflite::AsymmetricQuantize(expected_output_data, expected_output_quantized,
|
||||
output_elements_count, output_scale,
|
||||
output_zero_point);
|
||||
tflite::Quantize(expected_output_data, expected_output_quantized,
|
||||
output_elements_count, output_scale, output_zero_point);
|
||||
|
||||
constexpr int inputs_size = 1;
|
||||
constexpr int outputs_size = 1;
|
||||
|
@ -41,10 +41,10 @@ void TestUnpackThreeOutputsFloat(
|
||||
constexpr int output_size = 3;
|
||||
constexpr int tensors_size = input_size + output_size;
|
||||
TfLiteTensor tensors[tensors_size] = {
|
||||
CreateFloatTensor(input_data, input_dims),
|
||||
CreateFloatTensor(output1_data, output1_dims),
|
||||
CreateFloatTensor(output2_data, output2_dims),
|
||||
CreateFloatTensor(output3_data, output3_dims)};
|
||||
CreateTensor(input_data, input_dims),
|
||||
CreateTensor(output1_data, output1_dims),
|
||||
CreateTensor(output2_data, output2_dims),
|
||||
CreateTensor(output3_data, output3_dims)};
|
||||
|
||||
// Place a unique value in the uninitialized output buffer.
|
||||
for (int i = 0; i < output1_dims_count; ++i) {
|
||||
@ -102,9 +102,8 @@ void TestUnpackOneOutputFloat(const int* input_dims_data,
|
||||
constexpr int input_size = 1;
|
||||
constexpr int output_size = 1;
|
||||
constexpr int tensors_size = input_size + output_size;
|
||||
TfLiteTensor tensors[tensors_size] = {
|
||||
CreateFloatTensor(input_data, input_dims),
|
||||
CreateFloatTensor(output_data, output_dims)};
|
||||
TfLiteTensor tensors[tensors_size] = {CreateTensor(input_data, input_dims),
|
||||
CreateTensor(output_data, output_dims)};
|
||||
|
||||
// Place a unique value in the uninitialized output buffer.
|
||||
for (int i = 0; i < output_dims_count; ++i) {
|
||||
@ -222,10 +221,10 @@ void TestUnpackThreeOutputsQuantized32(
|
||||
constexpr int output_size = 3;
|
||||
constexpr int tensors_size = input_size + output_size;
|
||||
TfLiteTensor tensors[tensors_size] = {
|
||||
CreateInt32Tensor(input_data, input_dims),
|
||||
CreateInt32Tensor(output1_data, output1_dims),
|
||||
CreateInt32Tensor(output2_data, output2_dims),
|
||||
CreateInt32Tensor(output3_data, output3_dims)};
|
||||
CreateTensor(input_data, input_dims),
|
||||
CreateTensor(output1_data, output1_dims),
|
||||
CreateTensor(output2_data, output2_dims),
|
||||
CreateTensor(output3_data, output3_dims)};
|
||||
|
||||
// Place a unique value in the uninitialized output buffer.
|
||||
for (int i = 0; i < output1_dims_count; ++i) {
|
||||
|
@ -180,11 +180,11 @@ TF_LITE_MICRO_TEST(TestAllocateOutputDimensionsFromInput) {
|
||||
const int input1_dims[] = {1, 1};
|
||||
const int input2_dims[] = {kDimsLen, 5, 5, 5, 5};
|
||||
int output_dims[] = {0, 0, 0, 0, 0};
|
||||
TfLiteTensor input_tensor1 = tflite::testing::CreateInt32Tensor(
|
||||
TfLiteTensor input_tensor1 = tflite::testing::CreateTensor<int32_t>(
|
||||
nullptr, tflite::testing::IntArrayFromInts(input1_dims));
|
||||
TfLiteTensor input_tensor2 = tflite::testing::CreateInt32Tensor(
|
||||
TfLiteTensor input_tensor2 = tflite::testing::CreateTensor<int32_t>(
|
||||
nullptr, tflite::testing::IntArrayFromInts(input2_dims));
|
||||
TfLiteTensor output_tensor = tflite::testing::CreateInt32Tensor(
|
||||
TfLiteTensor output_tensor = tflite::testing::CreateTensor<int32_t>(
|
||||
nullptr, tflite::testing::IntArrayFromInts(output_dims));
|
||||
TfLiteContext context;
|
||||
// Only need to allocate space for output_tensor.dims. Use a simple
|
||||
|
@ -15,34 +15,15 @@ limitations under the License.
|
||||
|
||||
#include "tensorflow/lite/micro/micro_utils.h"
|
||||
|
||||
#include <limits.h>
|
||||
#include <math.h>
|
||||
#include <stdint.h>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
#include <limits>
|
||||
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/kernels/op_macros.h"
|
||||
|
||||
namespace tflite {
|
||||
|
||||
namespace {
|
||||
|
||||
static const uint8_t kAsymmetricUInt8Min = 0;
|
||||
static const uint8_t kAsymmetricUInt8Max = UINT8_MAX;
|
||||
static const uint8_t kSymmetricUInt8Min = 1;
|
||||
static const uint8_t kSymmetricUInt8Max = UINT8_MAX;
|
||||
static const int8_t kAsymmetricInt8Min = INT8_MIN;
|
||||
static const int8_t kAsymmetricInt8Max = INT8_MAX;
|
||||
static const int kSymmetricInt8Scale = kAsymmetricInt8Max;
|
||||
|
||||
static const int16_t kAsymmetricInt16Min = INT16_MIN;
|
||||
static const int16_t kAsymmetricInt16Max = INT16_MAX;
|
||||
static const int kSymmetricInt16Scale = kAsymmetricInt16Max;
|
||||
|
||||
static const int32_t kAsymmetricInt32Max = INT32_MAX;
|
||||
static const int kSymmetricInt32Scale = kAsymmetricInt32Max;
|
||||
|
||||
} // namespace
|
||||
|
||||
int ElementCount(const TfLiteIntArray& dims) {
|
||||
int result = 1;
|
||||
for (int i = 0; i < dims.size; ++i) {
|
||||
@ -51,109 +32,6 @@ int ElementCount(const TfLiteIntArray& dims) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Converts a float value into an unsigned eight-bit quantized value.
|
||||
uint8_t FloatToAsymmetricQuantizedUInt8(const float value, const float scale,
|
||||
const int zero_point) {
|
||||
int32_t result = round(value / scale) + zero_point;
|
||||
if (result < kAsymmetricUInt8Min) {
|
||||
result = kAsymmetricUInt8Min;
|
||||
}
|
||||
if (result > kAsymmetricUInt8Max) {
|
||||
result = kAsymmetricUInt8Max;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
uint8_t FloatToSymmetricQuantizedUInt8(const float value, const float scale) {
|
||||
int32_t result = round(value / scale);
|
||||
if (result < kSymmetricUInt8Min) {
|
||||
result = kSymmetricUInt8Min;
|
||||
}
|
||||
if (result > kSymmetricUInt8Max) {
|
||||
result = kSymmetricUInt8Max;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int8_t FloatToAsymmetricQuantizedInt8(const float value, const float scale,
|
||||
const int zero_point) {
|
||||
int32_t result = round(value / scale) + zero_point;
|
||||
if (result < kAsymmetricInt8Min) {
|
||||
result = kAsymmetricInt8Min;
|
||||
}
|
||||
if (result > kAsymmetricInt8Max) {
|
||||
result = kAsymmetricInt8Max;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int16_t FloatToAsymmetricQuantizedInt16(const float value, const float scale,
|
||||
const int zero_point) {
|
||||
int32_t result = round(value / scale) + zero_point;
|
||||
if (result < kAsymmetricInt16Min) {
|
||||
result = kAsymmetricInt16Min;
|
||||
}
|
||||
if (result > kAsymmetricInt16Max) {
|
||||
result = kAsymmetricInt16Max;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int8_t FloatToSymmetricQuantizedInt8(const float value, const float scale) {
|
||||
return FloatToAsymmetricQuantizedInt8(value, scale, 0.0f);
|
||||
}
|
||||
|
||||
int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale) {
|
||||
float quantized = round(value / scale);
|
||||
if (static_cast<int>(quantized) > INT_MAX) {
|
||||
quantized = static_cast<float>(INT_MAX);
|
||||
} else if (quantized < INT_MIN) {
|
||||
quantized = static_cast<float>(INT_MIN);
|
||||
}
|
||||
|
||||
return static_cast<int>(quantized);
|
||||
}
|
||||
|
||||
void AsymmetricQuantize(const float* input, int8_t* output, int num_elements,
|
||||
float scale, int zero_point) {
|
||||
for (int i = 0; i < num_elements; i++) {
|
||||
output[i] = FloatToAsymmetricQuantizedInt8(input[i], scale, zero_point);
|
||||
}
|
||||
}
|
||||
|
||||
void AsymmetricQuantize(const float* input, uint8_t* output, int num_elements,
|
||||
float scale, int zero_point) {
|
||||
for (int i = 0; i < num_elements; i++) {
|
||||
output[i] = FloatToAsymmetricQuantizedUInt8(input[i], scale, zero_point);
|
||||
}
|
||||
}
|
||||
|
||||
void AsymmetricQuantize(const float* input, int16_t* output, int num_elements,
|
||||
float scale, int zero_point) {
|
||||
for (int i = 0; i < num_elements; i++) {
|
||||
output[i] = FloatToAsymmetricQuantizedInt16(input[i], scale, zero_point);
|
||||
}
|
||||
}
|
||||
|
||||
void SymmetricQuantize(const float* input, int32_t* output, int num_elements,
|
||||
float scale) {
|
||||
for (int i = 0; i < num_elements; i++) {
|
||||
output[i] = FloatToSymmetricQuantizedInt32(input[i], scale);
|
||||
}
|
||||
}
|
||||
|
||||
void SymmetricPerChannelQuantize(const float* input, int32_t* output,
|
||||
int num_elements, int num_channels,
|
||||
float* scales) {
|
||||
int elements_per_channel = num_elements / num_channels;
|
||||
for (int i = 0; i < num_channels; i++) {
|
||||
for (int j = 0; j < elements_per_channel; j++) {
|
||||
output[i * elements_per_channel + j] = FloatToSymmetricQuantizedInt32(
|
||||
input[i * elements_per_channel + j], scales[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SignedSymmetricPerChannelQuantize(const float* values,
|
||||
TfLiteIntArray* dims,
|
||||
int quantized_dimension,
|
||||
@ -186,94 +64,17 @@ void SignedSymmetricPerChannelQuantize(const float* values,
|
||||
max = fmaxf(max, values[idx]);
|
||||
}
|
||||
scaling_factors[channel] =
|
||||
fmaxf(fabs(min), fabs(max)) / kSymmetricInt8Scale;
|
||||
fmaxf(fabs(min), fabs(max)) / std::numeric_limits<int8_t>::max();
|
||||
for (int i = 0; i < per_channel_size; i++) {
|
||||
int idx = channel * channel_stride + i * stride;
|
||||
const int32_t quantized_value =
|
||||
static_cast<int32_t>(roundf(values[idx] / scaling_factors[channel]));
|
||||
// Clamp: just in case some odd numeric offset.
|
||||
quantized_values[idx] = fminf(
|
||||
kSymmetricInt8Scale, fmaxf(-kSymmetricInt8Scale, quantized_value));
|
||||
quantized_values[idx] =
|
||||
fminf(std::numeric_limits<int8_t>::max(),
|
||||
fmaxf(std::numeric_limits<int8_t>::min() + 1, quantized_value));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
|
||||
int8_t* quantized_values, float* scaling_factor) {
|
||||
int input_size = ElementCount(*dims);
|
||||
|
||||
float min = 0;
|
||||
float max = 0;
|
||||
for (int i = 0; i < input_size; i++) {
|
||||
min = fminf(min, values[i]);
|
||||
max = fmaxf(max, values[i]);
|
||||
}
|
||||
*scaling_factor = fmaxf(fabs(min), fabs(max)) / kSymmetricInt8Scale;
|
||||
for (int i = 0; i < input_size; i++) {
|
||||
const int32_t quantized_value =
|
||||
static_cast<int32_t>(roundf(values[i] / *scaling_factor));
|
||||
// Clamp: just in case some odd numeric offset.
|
||||
quantized_values[i] = fminf(kSymmetricInt8Scale,
|
||||
fmaxf(-kSymmetricInt8Scale, quantized_value));
|
||||
}
|
||||
}
|
||||
|
||||
void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
|
||||
int16_t* quantized_values, float* scaling_factor) {
|
||||
int input_size = ElementCount(*dims);
|
||||
|
||||
float min = 0;
|
||||
float max = 0;
|
||||
for (int i = 0; i < input_size; i++) {
|
||||
min = fminf(min, values[i]);
|
||||
max = fmaxf(max, values[i]);
|
||||
}
|
||||
*scaling_factor = fmaxf(fabs(min), fabs(max)) / kSymmetricInt16Scale;
|
||||
for (int i = 0; i < input_size; i++) {
|
||||
const int32_t quantized_value =
|
||||
static_cast<int32_t>(roundf(values[i] / *scaling_factor));
|
||||
// Clamp: just in case some odd numeric offset.
|
||||
quantized_values[i] = fminf(kSymmetricInt16Scale,
|
||||
fmaxf(-kSymmetricInt16Scale, quantized_value));
|
||||
}
|
||||
}
|
||||
|
||||
void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
|
||||
int32_t* quantized_values, float* scaling_factor) {
|
||||
int input_size = ElementCount(*dims);
|
||||
|
||||
float min = 0;
|
||||
float max = 0;
|
||||
for (int i = 0; i < input_size; i++) {
|
||||
min = fminf(min, values[i]);
|
||||
max = fmaxf(max, values[i]);
|
||||
}
|
||||
|
||||
*scaling_factor =
|
||||
fmaxf(fabs(min), fabs(max)) / static_cast<float>(kSymmetricInt32Scale);
|
||||
for (int i = 0; i < input_size; i++) {
|
||||
const int32_t quantized_value =
|
||||
static_cast<int32_t>(roundf(values[i] / *scaling_factor));
|
||||
// Clamp: just in case some odd numeric offset.
|
||||
quantized_values[i] = fminf(
|
||||
static_cast<float>(kSymmetricInt32Scale),
|
||||
fmaxf(static_cast<float>(-kSymmetricInt32Scale), quantized_value));
|
||||
}
|
||||
}
|
||||
|
||||
void SymmetricQuantize(const float* values, TfLiteIntArray* dims,
|
||||
uint8_t* quantized_values, float* scaling_factor) {
|
||||
SignedSymmetricQuantize(values, dims,
|
||||
reinterpret_cast<int8_t*>(quantized_values),
|
||||
scaling_factor);
|
||||
}
|
||||
|
||||
void SymmetricDequantize(const int8_t* values, const int size,
|
||||
const float dequantization_scale,
|
||||
float* dequantized_values) {
|
||||
for (int i = 0; i < size; ++i) {
|
||||
dequantized_values[i] = values[i] * dequantization_scale;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace tflite
|
||||
|
@ -16,7 +16,9 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_
#define TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_

#include <stdint.h>
#include <algorithm>
#include <cmath>
#include <cstdint>

#include "tensorflow/lite/c/common.h"

@ -26,23 +28,28 @@ namespace tflite {

int ElementCount(const TfLiteIntArray& dims);

uint8_t FloatToAsymmetricQuantizedUInt8(const float value, const float scale,
                                        const int zero_point);
// Converts a float value into a quantized value. Note that large values (close
// to max int and min int) may see significant error due to a lack of floating
// point granularity for large values.
template <typename T>
T FloatToQuantizedType(const float value, const float scale, int zero_point) {
  int32_t result = round(value / scale) + zero_point;
  result =
      std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), result);
  result =
      std::min(static_cast<int32_t>(std::numeric_limits<T>::max()), result);
  return result;
}

uint8_t FloatToSymmetricQuantizedUInt8(const float value, const float scale);

int8_t FloatToAsymmetricQuantizedInt8(const float value, const float scale,
                                      const int zero_point);

int16_t FloatToAsymmetricQuantizedInt16(const float value, const float scale,
                                        const int zero_point);

int8_t FloatToSymmetricQuantizedInt8(const float value, const float scale);

// Converts a float value into a signed thirty-two-bit quantized value. Note
// that values close to max int and min int may see significant error due to
// a lack of floating point granularity for large values.
int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale);
template <typename T>
T FloatToSymmetricQuantizedType(const float value, const float scale) {
  int32_t result = round(value / scale);
  result =
      std::max(static_cast<int32_t>(std::numeric_limits<T>::min() + 1), result);
  result =
      std::min(static_cast<int32_t>(std::numeric_limits<T>::max()), result);
  return result;
}
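
Roughly speaking, these two templates collapse the per-type conversion helpers above into a single clamped round-and-shift; a small sketch using values that the updated micro_utils_test.cc below asserts:

    // Values mirror the expectations in micro_utils_test.cc.
    int8_t a = tflite::FloatToQuantizedType<int8_t>(-5, 0.5, 0);          // -10
    uint8_t b = tflite::FloatToQuantizedType<uint8_t>(127.5, 0.5, 0);     // 255
    int8_t c = tflite::FloatToSymmetricQuantizedType<int8_t>(-64, 0.5);   // clamped to -127 (min() + 1)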

// Helper methods to quantize arrays of floats to the desired format.
//
@ -55,22 +62,34 @@ int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale);
//
// The per-op quantization spec can be found here:
// https://www.tensorflow.org/lite/performance/quantization_spec
template <typename T>
void Quantize(const float* input, T* output, int num_elements, float scale,
              int zero_point) {
  for (int i = 0; i < num_elements; i++) {
    output[i] = FloatToQuantizedType<T>(input[i], scale, zero_point);
  }
}

void AsymmetricQuantize(const float* input, int8_t* output, int num_elements,
                        float scale, int zero_point = 0);
template <typename T>
void SymmetricQuantize(const float* input, T* output, int num_elements,
                       float scale) {
  for (int i = 0; i < num_elements; i++) {
    output[i] = FloatToSymmetricQuantizedType<T>(input[i], scale);
  }
}
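
The array helpers are then just element-wise applications of the scalar templates; a brief sketch with made-up inputs (the real tests below use scale 0.5 and zero point 127 for uint8):

    // Illustrative only; buffers and length are hypothetical.
    const float values[] = {0.0f, 1.0f, -1.0f};
    uint8_t quantized[3];
    tflite::Quantize(values, quantized, 3, /*scale=*/0.5f, /*zero_point=*/127);
    // quantized now holds {127, 129, 125}.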

void AsymmetricQuantize(const float* input, uint8_t* output, int num_elements,
                        float scale, int zero_point = 128);

void AsymmetricQuantize(const float* input, int16_t* output, int num_elements,
                        float scale, int zero_point = 0);

void SymmetricQuantize(const float* input, int32_t* output, int num_elements,
                       float scale);

void SymmetricPerChannelQuantize(const float* input, int32_t* output,
template <typename T>
void SymmetricPerChannelQuantize(const float* input, T* output,
                                 int num_elements, int num_channels,
                                 float* scales);
                                 float* scales) {
  int elements_per_channel = num_elements / num_channels;
  for (int i = 0; i < num_channels; i++) {
    for (int j = 0; j < elements_per_channel; j++) {
      output[i * elements_per_channel + j] = FloatToSymmetricQuantizedType<T>(
          input[i * elements_per_channel + j], scales[i]);
    }
  }
}

void SignedSymmetricPerChannelQuantize(const float* values,
                                       TfLiteIntArray* dims,
@ -78,30 +97,35 @@ void SignedSymmetricPerChannelQuantize(const float* values,
                                       int8_t* quantized_values,
                                       float* scaling_factor);

void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
                             int8_t* quantized_values, float* scaling_factor);
// Quantizes inputs based on the values provided, choosing the smallest range
// which includes all input values.
template <typename T>
void SymmetricQuantizeCalculateScales(const float* values, TfLiteIntArray* dims,
                                      T* output, float* scale) {
  int input_size = ElementCount(*dims);

void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
                             int16_t* quantized_values, float* scaling_factor);

void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
                             int32_t* quantized_values, float* scaling_factor);

void SymmetricQuantize(const float* values, TfLiteIntArray* dims,
                       uint8_t* quantized_values, float* scaling_factor);

void SymmetricDequantize(const int8_t* values, const int size,
                         const float dequantization_scale,
                         float* dequantized_values);
  float min = 0;
  float max = 0;
  for (int i = 0; i < input_size; i++) {
    min = fminf(min, values[i]);
    max = fmaxf(max, values[i]);
  }
  *scale = fmaxf(std::abs(min), std::abs(max)) / std::numeric_limits<T>::max();
  for (int i = 0; i < input_size; i++) {
    int32_t quantized_value =
        static_cast<int32_t>(roundf(values[i] / *scale));
    // Clamp: just in case some odd numeric offset.
    quantized_value = fminf(std::numeric_limits<T>::max(), quantized_value);
    quantized_value = fmaxf(std::numeric_limits<T>::min() + 1, quantized_value);
    output[i] = quantized_value;
  }
}
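
A rough usage sketch for the scale-deriving variant (all values hypothetical): the scale becomes max(|min|, |max|) divided by the target type's max, so int8 data spanning [-1, 2] gets a scale of 2 / 127:

    // Hypothetical buffers; IntArrayFromInts is the test helper from test_helpers.h.
    const float values[] = {-1.0f, 0.5f, 2.0f};
    int dims_data[] = {1, 3};
    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);
    int8_t quantized[3];
    float scale;
    tflite::SymmetricQuantizeCalculateScales(values, dims, quantized, &scale);
    // scale == 2.0f / 127 and quantized[2] == 127.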

template <typename T>
void AsymmetricDequantize(const T* values, const int size,
                          const float dequantization_scale,
                          int dequantization_zero_point,
                          float* dequantized_values) {
void Dequantize(const T* values, const int size, const float scale,
                int zero_point, float* dequantized_values) {
  for (int i = 0; i < size; ++i) {
    dequantized_values[i] =
        (values[i] - dequantization_zero_point) * dequantization_scale;
    dequantized_values[i] = (values[i] - zero_point) * scale;
  }
}
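
The renamed dequantize helper simply inverts that affine mapping; with made-up numbers, an int8 value of 4 at scale 0.5 and zero point 1 maps back to (4 - 1) * 0.5 = 1.5f:

    // Hypothetical values for illustration.
    const int8_t q[] = {4};
    float f[1];
    tflite::Dequantize(q, 1, /*scale=*/0.5f, /*zero_point=*/1, f);  // f[0] == 1.5f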
@ -20,63 +20,68 @@ limitations under the License.
|
||||
TF_LITE_MICRO_TESTS_BEGIN
|
||||
|
||||
TF_LITE_MICRO_TEST(FloatToAsymmetricQuantizedUInt8Test) {
|
||||
using tflite::FloatToAsymmetricQuantizedUInt8;
|
||||
using tflite::FloatToQuantizedType;
|
||||
// [0, 127.5] -> zero_point=0, scale=0.5
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToAsymmetricQuantizedUInt8(0, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(254, FloatToAsymmetricQuantizedUInt8(127, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(255, FloatToAsymmetricQuantizedUInt8(127.5, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToQuantizedType<uint8_t>(0, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(254, FloatToQuantizedType<uint8_t>(127, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(255, FloatToQuantizedType<uint8_t>(127.5, 0.5, 0));
|
||||
// [-10, 245] -> zero_point=10, scale=1.0
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToAsymmetricQuantizedUInt8(-10, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(1, FloatToAsymmetricQuantizedUInt8(-9, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(128, FloatToAsymmetricQuantizedUInt8(118, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(253, FloatToAsymmetricQuantizedUInt8(243, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(254, FloatToAsymmetricQuantizedUInt8(244, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(255, FloatToAsymmetricQuantizedUInt8(245, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToQuantizedType<uint8_t>(-10, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(1, FloatToQuantizedType<uint8_t>(-9, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(128, FloatToQuantizedType<uint8_t>(118, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(253, FloatToQuantizedType<uint8_t>(243, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(254, FloatToQuantizedType<uint8_t>(244, 1.0, 10));
|
||||
TF_LITE_MICRO_EXPECT_EQ(255, FloatToQuantizedType<uint8_t>(245, 1.0, 10));
|
||||
}
|
||||
|
||||
TF_LITE_MICRO_TEST(FloatToAsymmetricQuantizedInt8Test) {
|
||||
using tflite::FloatToAsymmetricQuantizedInt8;
|
||||
using tflite::FloatToQuantizedType;
|
||||
// [-64, 63.5] -> zero_point=0, scale=0.5
|
||||
TF_LITE_MICRO_EXPECT_EQ(2, FloatToAsymmetricQuantizedInt8(1, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(4, FloatToAsymmetricQuantizedInt8(2, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(6, FloatToAsymmetricQuantizedInt8(3, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-10, FloatToAsymmetricQuantizedInt8(-5, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-128, FloatToAsymmetricQuantizedInt8(-64, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToAsymmetricQuantizedInt8(63.5, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(2, FloatToQuantizedType<int8_t>(1, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(4, FloatToQuantizedType<int8_t>(2, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(6, FloatToQuantizedType<int8_t>(3, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-10, FloatToQuantizedType<int8_t>(-5, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-128, FloatToQuantizedType<int8_t>(-64, 0.5, 0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToQuantizedType<int8_t>(63.5, 0.5, 0));
|
||||
// [-127, 128] -> zero_point=-1, scale=1.0
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToAsymmetricQuantizedInt8(1, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-1, FloatToAsymmetricQuantizedInt8(0, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(126, FloatToAsymmetricQuantizedInt8(127, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToAsymmetricQuantizedInt8(128, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-127, FloatToAsymmetricQuantizedInt8(-126, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-128, FloatToAsymmetricQuantizedInt8(-127, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToQuantizedType<int8_t>(1, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-1, FloatToQuantizedType<int8_t>(0, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(126, FloatToQuantizedType<int8_t>(127, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToQuantizedType<int8_t>(128, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-127, FloatToQuantizedType<int8_t>(-126, 1.0, -1));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-128, FloatToQuantizedType<int8_t>(-127, 1.0, -1));
|
||||
}
|
||||
|
||||
TF_LITE_MICRO_TEST(FloatToSymmetricQuantizedInt8Test) {
|
||||
using tflite::FloatToSymmetricQuantizedInt8;
|
||||
using tflite::FloatToSymmetricQuantizedType;
|
||||
// [-64, 63.5] -> zero_point=0, scale=0.5
|
||||
TF_LITE_MICRO_EXPECT_EQ(2, FloatToSymmetricQuantizedInt8(1, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(4, FloatToSymmetricQuantizedInt8(2, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(6, FloatToSymmetricQuantizedInt8(3, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-10, FloatToSymmetricQuantizedInt8(-5, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-128, FloatToSymmetricQuantizedInt8(-64, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToSymmetricQuantizedInt8(63.5, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(2, FloatToSymmetricQuantizedType<int8_t>(1, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(4, FloatToSymmetricQuantizedType<int8_t>(2, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(6, FloatToSymmetricQuantizedType<int8_t>(3, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-10, FloatToSymmetricQuantizedType<int8_t>(-5, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-127,
|
||||
FloatToSymmetricQuantizedType<int8_t>(-64, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127,
|
||||
FloatToSymmetricQuantizedType<int8_t>(63.5, 0.5));
|
||||
// [-127, 128] -> zero_point=-1, scale=1.0
|
||||
TF_LITE_MICRO_EXPECT_EQ(1, FloatToSymmetricQuantizedInt8(1, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToSymmetricQuantizedInt8(0, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToSymmetricQuantizedInt8(127, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToSymmetricQuantizedInt8(128, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-126, FloatToSymmetricQuantizedInt8(-126, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-127, FloatToSymmetricQuantizedInt8(-127, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(1, FloatToSymmetricQuantizedType<int8_t>(1, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToSymmetricQuantizedType<int8_t>(0, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToSymmetricQuantizedType<int8_t>(127, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(127, FloatToSymmetricQuantizedType<int8_t>(128, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-126,
|
||||
FloatToSymmetricQuantizedType<int8_t>(-126, 1.0));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-127,
|
||||
FloatToSymmetricQuantizedType<int8_t>(-127, 1.0));
|
||||
}
|
||||
|
||||
TF_LITE_MICRO_TEST(FloatToAsymmetricQuantizedInt32Test) {
|
||||
using tflite::FloatToSymmetricQuantizedInt32;
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToSymmetricQuantizedInt32(0, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(2, FloatToSymmetricQuantizedInt32(1, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-2, FloatToSymmetricQuantizedInt32(-1, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-100, FloatToSymmetricQuantizedInt32(-50, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(100, FloatToSymmetricQuantizedInt32(50, 0.5));
|
||||
using tflite::FloatToSymmetricQuantizedType;
|
||||
TF_LITE_MICRO_EXPECT_EQ(0, FloatToSymmetricQuantizedType<int32_t>(0, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(2, FloatToSymmetricQuantizedType<int32_t>(1, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-2, FloatToSymmetricQuantizedType<int32_t>(-1, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(-100,
|
||||
FloatToSymmetricQuantizedType<int32_t>(-50, 0.5));
|
||||
TF_LITE_MICRO_EXPECT_EQ(100, FloatToSymmetricQuantizedType<int32_t>(50, 0.5));
|
||||
}
|
||||
|
||||
TF_LITE_MICRO_TEST(AsymmetricQuantizeInt8) {
|
||||
@ -84,7 +89,7 @@ TF_LITE_MICRO_TEST(AsymmetricQuantizeInt8) {
|
||||
int8_t goldens[] = {-20, -5, -3, -3, -1, 1, 3, 5, 7, 9};
|
||||
constexpr int length = sizeof(values) / sizeof(float);
|
||||
int8_t quantized[length];
|
||||
tflite::AsymmetricQuantize(values, quantized, length, 0.5, 1);
|
||||
tflite::Quantize(values, quantized, length, 0.5, 1);
|
||||
for (int i = 0; i < length; i++) {
|
||||
TF_LITE_MICRO_EXPECT_EQ(quantized[i], goldens[i]);
|
||||
}
|
||||
@ -95,7 +100,7 @@ TF_LITE_MICRO_TEST(AsymmetricQuantizeUInt8) {
|
||||
uint8_t goldens[] = {106, 121, 123, 123, 125, 127, 129, 131, 133, 135};
|
||||
constexpr int length = sizeof(values) / sizeof(float);
|
||||
uint8_t quantized[length];
|
||||
tflite::AsymmetricQuantize(values, quantized, length, 0.5, 127);
|
||||
tflite::Quantize(values, quantized, length, 0.5, 127);
|
||||
for (int i = 0; i < length; i++) {
|
||||
TF_LITE_MICRO_EXPECT_EQ(quantized[i], goldens[i]);
|
||||
}
|
||||
|
@ -870,101 +870,17 @@ TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
|
||||
return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
|
||||
}
|
||||
|
||||
TfLiteTensor CreateTensor(TfLiteIntArray* dims, bool is_variable) {
|
||||
TfLiteTensor result;
|
||||
result.dims = dims;
|
||||
result.params = {};
|
||||
result.quantization = {kTfLiteNoQuantization, nullptr};
|
||||
result.is_variable = is_variable;
|
||||
result.allocation_type = kTfLiteMemNone;
|
||||
return result;
|
||||
}
|
||||
|
||||
TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
|
||||
bool is_variable) {
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteFloat32;
|
||||
result.data.f = const_cast<float*>(data);
|
||||
result.bytes = ElementCount(*dims) * sizeof(float);
|
||||
return result;
|
||||
}
|
||||
|
||||
void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end) {
|
||||
float* p = begin;
|
||||
float* v = tensor->data.f;
|
||||
while (p != end) {
|
||||
*v++ = *p++;
|
||||
}
|
||||
}
|
||||
|
||||
TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
|
||||
bool is_variable) {
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteBool;
|
||||
result.data.b = const_cast<bool*>(data);
|
||||
result.bytes = ElementCount(*dims) * sizeof(bool);
|
||||
return result;
|
||||
}
|
||||
|
||||
TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
|
||||
bool is_variable) {
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteInt32;
|
||||
result.data.i32 = const_cast<int32_t*>(data);
|
||||
result.bytes = ElementCount(*dims) * sizeof(int32_t);
|
||||
return result;
|
||||
}
|
||||
|
||||
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
|
||||
float scale, int zero_point,
|
||||
bool is_variable) {
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteUInt8;
|
||||
result.data.uint8 = const_cast<uint8_t*>(data);
|
||||
result.params = {scale, zero_point};
|
||||
result.quantization = {kTfLiteAffineQuantization, nullptr};
|
||||
result.bytes = ElementCount(*dims) * sizeof(uint8_t);
|
||||
return result;
|
||||
}
|
||||
|
||||
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
|
||||
float scale, int zero_point,
|
||||
bool is_variable) {
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteInt8;
|
||||
result.data.int8 = const_cast<int8_t*>(data);
|
||||
result.params = {scale, zero_point};
|
||||
result.quantization = {kTfLiteAffineQuantization, nullptr};
|
||||
result.bytes = ElementCount(*dims) * sizeof(int8_t);
|
||||
return result;
|
||||
}
|
||||
|
||||
TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
|
||||
float scale, int zero_point,
|
||||
bool is_variable) {
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteInt16;
|
||||
result.data.i16 = const_cast<int16_t*>(data);
|
||||
result.params = {scale, zero_point};
|
||||
result.quantization = {kTfLiteAffineQuantization, nullptr};
|
||||
result.bytes = ElementCount(*dims) * sizeof(int16_t);
|
||||
return result;
|
||||
}
|
||||
|
||||
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
|
||||
TfLiteIntArray* dims, float input_scale,
|
||||
float weights_scale, bool is_variable) {
|
||||
float bias_scale = input_scale * weights_scale;
|
||||
tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteInt32;
|
||||
result.data.i32 = const_cast<int32_t*>(quantized);
|
||||
|
||||
// Quantized int32_t tensors always have a zero point of 0, since the range of
|
||||
// int32_t values is large, and because zero point costs extra cycles during
|
||||
// processing.
|
||||
result.params = {bias_scale, 0};
|
||||
result.quantization = {kTfLiteAffineQuantization, nullptr};
|
||||
result.bytes = ElementCount(*dims) * sizeof(int32_t);
|
||||
TfLiteTensor result =
|
||||
CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable);
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -986,18 +902,15 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor(
|
||||
zero_points[i + 1] = 0;
|
||||
}
|
||||
|
||||
SymmetricPerChannelQuantize(input, quantized, input_size, num_channels,
|
||||
scales_array);
|
||||
SymmetricPerChannelQuantize<int32_t>(input, quantized, input_size,
|
||||
num_channels, scales_array);
|
||||
|
||||
affine_quant->scale = FloatArrayFromFloats(scales);
|
||||
affine_quant->zero_point = IntArrayFromInts(zero_points);
|
||||
affine_quant->quantized_dimension = quantized_dimension;
|
||||
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteInt32;
|
||||
result.data.i32 = const_cast<int32_t*>(quantized);
|
||||
TfLiteTensor result = CreateTensor(quantized, dims, is_variable);
|
||||
result.quantization = {kTfLiteAffineQuantization, affine_quant};
|
||||
result.bytes = ElementCount(*dims) * sizeof(int32_t);
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1020,11 +933,8 @@ TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
|
||||
affine_quant->zero_point = IntArrayFromInts(zero_points);
|
||||
affine_quant->quantized_dimension = quantized_dimension;
|
||||
|
||||
TfLiteTensor result = CreateTensor(dims, is_variable);
|
||||
result.type = kTfLiteInt8;
|
||||
result.data.int8 = const_cast<int8_t*>(quantized);
|
||||
TfLiteTensor result = CreateTensor(quantized, dims, is_variable);
|
||||
result.quantization = {kTfLiteAffineQuantization, affine_quant};
|
||||
result.bytes = ElementCount(*dims) * sizeof(int8_t);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -22,10 +22,12 @@ limitations under the License.
#include <limits>

#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite//kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
@ -140,35 +142,42 @@ TfLiteIntArray* IntArrayFromInts(const int* int_array);
// supplied array must be the size of the array expressed as a float.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats);

TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
                               bool is_variable = false);
template <typename T>
TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims,
                          const bool is_variable = false) {
  TfLiteTensor result;
  result.dims = dims;
  result.params = {};
  result.quantization = {kTfLiteNoQuantization, nullptr};
  result.is_variable = is_variable;
  result.allocation_type = kTfLiteMemNone;
  result.type = typeToTfLiteType<T>();
  // Const cast is used to allow passing in const and non-const arrays within a
  // single CreateTensor method. A Const array should be used for immutable
  // input tensors and non-const array should be used for mutable and output
  // tensors.
  result.data.data = const_cast<T*>(data);
  result.quantization = {kTfLiteAffineQuantization, nullptr};
  result.bytes = ElementCount(*dims) * sizeof(T);
  return result;
}
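
In the updated kernel tests earlier in this diff, call sites now look roughly like this, with the element type deduced from the data pointer or spelled out when the pointer is null:

    // Sketch based on the updated tests; input_data and input_dims_data mirror those tests.
    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(input_dims_data);
    TfLiteTensor float_tensor = tflite::testing::CreateTensor(input_data, dims);
    TfLiteTensor int32_tensor =
        tflite::testing::CreateTensor<int32_t>(nullptr, dims);  // explicit type, as in memory_helpers_test.cc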

void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end);

TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
                              bool is_variable = false);

TfLiteTensor CreateInt32Tensor(const int32_t*, TfLiteIntArray* dims,
                               bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                   float scale, int zero_point,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                   float scale, int zero_point,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(const int16_t* data, TfLiteIntArray* dims,
                                   float scale, int zero_point,
                                   bool is_variable = false);
template <typename T>
TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims,
                                   const float scale, const int zero_point = 0,
                                   const bool is_variable = false) {
  TfLiteTensor result = CreateTensor(data, dims, is_variable);
  result.params = {scale, zero_point};
  result.quantization = {kTfLiteAffineQuantization, nullptr};
  return result;
}

template <typename T>
TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized,
                                   TfLiteIntArray* dims, float scale,
                                   int zero_point, bool is_variable = false) {
  int input_size = ElementCount(*dims);
  tflite::AsymmetricQuantize(input, quantized, input_size, scale, zero_point);
  tflite::Quantize(input, quantized, input_size, scale, zero_point);
  return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable);
}
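
And a brief sketch of the two quantized overloads (scale and zero point are hypothetical; buffers mirror the kernel tests):

    // output_data is an int8_t buffer here; scale and zero point are made up.
    TfLiteTensor q_out = tflite::testing::CreateQuantizedTensor(
        output_data, output_dims, /*scale=*/0.5f, /*zero_point=*/0);
    // Quantize float input_data into a scratch buffer, then wrap it.
    int8_t scratch[kInputSize];  // kInputSize assumed equal to ElementCount(*input_dims)
    TfLiteTensor q_in = tflite::testing::CreateQuantizedTensor(
        input_data, scratch, input_dims, /*scale=*/0.5f, /*zero_point=*/0);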