Deprecate and remove testing/test_utils.h from TF Micro.

PiperOrigin-RevId: 335957483
Change-Id: I878ba86a6eaa01f331097240c019c57bb53688e8
This commit is contained in:
Nick Kreeger 2020-10-07 14:47:48 -07:00 committed by TensorFlower Gardener
parent e9947ca30a
commit d39ff65920
51 changed files with 126 additions and 445 deletions

View File

@ -328,6 +328,7 @@ tflite_micro_cc_test(
],
deps = [
":micro_framework",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)

View File

@ -16,7 +16,6 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/hello_world/output_handler.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -16,7 +16,6 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/magic_wand/output_handler.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -316,6 +316,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)

View File

@ -16,7 +16,6 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/micro_speech/command_responder.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -15,8 +15,8 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/micro_speech/recognize_commands.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -21,7 +21,6 @@ limitations under the License.
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

View File

@ -16,7 +16,6 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/person_detection/detection_responder.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -16,7 +16,6 @@ limitations under the License.
#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -136,6 +136,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:debug_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -149,6 +150,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -163,6 +165,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels/internal:tensor",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -207,6 +210,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -235,6 +239,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_utils",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -248,6 +253,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -261,6 +267,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -274,6 +281,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -287,6 +295,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -300,6 +309,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -313,6 +323,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -326,6 +337,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -339,6 +351,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -352,6 +365,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -365,6 +379,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -378,6 +393,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -391,6 +407,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -405,6 +422,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:debug_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -419,6 +437,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:debug_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -433,6 +452,22 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:debug_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
tflite_micro_cc_test(
name = "split_v_test",
srcs = [
"split_v_test.cc",
],
deps = [
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:debug_log",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -446,6 +481,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -558,6 +594,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -571,6 +608,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -599,6 +637,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -613,6 +652,7 @@ tflite_micro_cc_test(
":micro_ops",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -626,6 +666,7 @@ tflite_micro_cc_test(
":kernel_runner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -652,6 +693,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)
@ -664,6 +706,7 @@ tflite_micro_cc_test(
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_framework",
"//tensorflow/lite/micro:op_resolvers",
"//tensorflow/lite/micro:test_helpers",
"//tensorflow/lite/micro/testing:micro_test",
],
)

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -19,8 +19,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -18,7 +18,6 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/debug_log.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -24,7 +24,6 @@ limitations under the License.
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -19,8 +19,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -19,7 +19,6 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
@ -104,14 +104,11 @@ void TestMaxMinQuantized(const TfLiteRegistration& registration,
}
}
void TestMaxMinQuantizedInt32(const TfLiteRegistration& registration,
const int* input1_dims_data,
const int32_t* input1_data, float input1_scale,
const int* input2_dims_data,
const int32_t* input2_data, float input2_scale,
const int32_t* expected_output_data,
float output_scale, const int* output_dims_data,
int32_t* output_data) {
void TestMaxMinQuantizedInt32(
const TfLiteRegistration& registration, const int* input1_dims_data,
const int32_t* input1_data, const int* input2_dims_data,
const int32_t* input2_data, const int32_t* expected_output_data,
const int* output_dims_data, int32_t* output_data) {
TfLiteIntArray* input1_dims = IntArrayFromInts(input1_dims_data);
TfLiteIntArray* input2_dims = IntArrayFromInts(input2_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
@ -121,9 +118,9 @@ void TestMaxMinQuantizedInt32(const TfLiteRegistration& registration,
constexpr int outputs_size = 1;
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(input1_data, input1_dims, input1_scale),
CreateQuantized32Tensor(input2_data, input2_dims, input2_scale),
CreateQuantized32Tensor(output_data, output_dims, output_scale),
CreateInt32Tensor(input1_data, input1_dims),
CreateInt32Tensor(input2_data, input2_dims),
CreateInt32Tensor(output_data, output_dims),
};
int inputs_array_data[] = {2, 0, 1};
@ -210,9 +207,6 @@ TF_LITE_MICRO_TEST(FloatWithBroadcastTest) {
}
TF_LITE_MICRO_TEST(Int32WithBroadcastTest) {
const float input1_scale = 0.5;
const float input2_scale = 0.5;
const float output_scale = 0.5;
const int dims[] = {3, 3, 1, 2};
const int dims_scalar[] = {1, 1};
const int32_t data1[] = {1, 0, -1, -2, 3, 11};
@ -222,14 +216,12 @@ TF_LITE_MICRO_TEST(Int32WithBroadcastTest) {
int32_t output_data[6];
tflite::testing::TestMaxMinQuantizedInt32(
tflite::ops::micro::Register_MAXIMUM(), dims, data1, input1_scale,
dims_scalar, data2, input2_scale, golden_max, output_scale, dims,
output_data);
tflite::ops::micro::Register_MAXIMUM(), dims, data1, dims_scalar, data2,
golden_max, dims, output_data);
tflite::testing::TestMaxMinQuantizedInt32(
tflite::ops::micro::Register_MINIMUM(), dims, data1, input1_scale,
dims_scalar, data2, input2_scale, golden_min, output_scale, dims,
output_data);
tflite::ops::micro::Register_MINIMUM(), dims, data1, dims_scalar, data2,
golden_min, dims, output_data);
}
TF_LITE_MICRO_TESTS_END

View File

@ -16,8 +16,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/debug_log.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
@ -168,9 +168,9 @@ void TestPackTwoInputsQuantized32(const int* input1_dims_data,
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(input1_data, input1_dims, 1.0),
CreateQuantized32Tensor(input2_data, input2_dims, 1.0),
CreateQuantized32Tensor(output_data, output_dims, 1.0)};
CreateInt32Tensor(input1_data, input1_dims),
CreateInt32Tensor(input2_data, input2_dims),
CreateInt32Tensor(output_data, output_dims)};
TfLitePackParams builtin_data = {
.values_count = 2,

View File

@ -19,7 +19,6 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -16,8 +16,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -18,7 +18,6 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -16,8 +16,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/debug_log.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
@ -42,7 +42,7 @@ void TestSplitTwoOutputsFloat(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateInt32Tensor(axis_data, axis_dims),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output1_data, output1_dims),
CreateFloatTensor(output2_data, output2_dims)};
@ -104,7 +104,7 @@ void TestSplitFourOutputsFloat(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateInt32Tensor(axis_data, axis_dims),
CreateFloatTensor(input_data, input_dims),
CreateFloatTensor(output1_data, output1_dims),
CreateFloatTensor(output2_data, output2_dims),
@ -171,7 +171,7 @@ void TestSplitTwoOutputsQuantized(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateInt32Tensor(axis_data, axis_dims),
CreateQuantizedTensor(input_data, input_dims, 0, 10),
CreateQuantizedTensor(output1_data, output1_dims, 0, 10),
CreateQuantizedTensor(output2_data, output2_dims, 0, 10)};
@ -227,10 +227,10 @@ void TestSplitTwoOutputsQuantized32(
constexpr int axis_size = 1;
constexpr int tensors_size = input_size + output_size + axis_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(axis_data, axis_dims, 1.0),
CreateQuantized32Tensor(input_data, input_dims, 1.0),
CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
CreateQuantized32Tensor(output2_data, output2_dims, 1.0)};
CreateInt32Tensor(axis_data, axis_dims),
CreateInt32Tensor(input_data, input_dims),
CreateInt32Tensor(output1_data, output1_dims),
CreateInt32Tensor(output2_data, output2_dims)};
// Currently only support constant axis tensor.
tensors[0].allocation_type = kTfLiteMmapRo;

View File

@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/debug_log.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
@ -64,8 +64,8 @@ void TestSplitVFloat(const int* input_dims_data, const float* input_data,
TfLiteTensor tensors[tensors_size];
tensors[0] = CreateFloatTensor(input_data, input_dims);
tensors[1] = CreateQuantized32Tensor(split_data, split_dims, 1.0);
tensors[2] = CreateQuantized32Tensor(axis_data, axis_dims, 1.0);
tensors[1] = CreateInt32Tensor(split_data, split_dims);
tensors[2] = CreateInt32Tensor(axis_data, axis_dims);
// add output tensors
for (int i = 0; i < N; i++)

View File

@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
@ -75,9 +75,9 @@ void TestStridedSliceFloat(const int* input_shape, const int* begin_shape,
constexpr int tensors_size = inputs_size + outputs_size;
TfLiteTensor tensors[tensors_size] = {
CreateFloatTensor(input_data, input_dims),
CreateQuantized32Tensor(begin_data, begin_dims, 1.0),
CreateQuantized32Tensor(end_data, end_dims, 1.0),
CreateQuantized32Tensor(strides_data, strides_dims, 1.0),
CreateInt32Tensor(begin_data, begin_dims),
CreateInt32Tensor(end_data, end_dims),
CreateInt32Tensor(strides_data, strides_dims),
CreateFloatTensor(output_data, output_dims),
};
@ -106,9 +106,9 @@ void TestStridedSliceQuantized(
std::numeric_limits<T>::max() + std::numeric_limits<T>::min() / 2;
TfLiteTensor tensors[tensors_size] = {
CreateQuantizedTensor(input_data, input_dims, 1.0, zero_point),
CreateQuantized32Tensor(begin_data, begin_dims, 1.0),
CreateQuantized32Tensor(end_data, end_dims, 1.0),
CreateQuantized32Tensor(strides_data, strides_dims, 1.0),
CreateInt32Tensor(begin_data, begin_dims),
CreateInt32Tensor(end_data, end_dims),
CreateInt32Tensor(strides_data, strides_dims),
CreateQuantizedTensor(output_data, output_dims, 1.0, zero_point),
};

View File

@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -16,8 +16,8 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {

View File

@ -17,8 +17,8 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/debug_log.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
namespace tflite {
namespace testing {
@ -222,10 +222,10 @@ void TestUnpackThreeOutputsQuantized32(
constexpr int output_size = 3;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
CreateQuantized32Tensor(input_data, input_dims, 1.0),
CreateQuantized32Tensor(output1_data, output1_dims, 1.0),
CreateQuantized32Tensor(output2_data, output2_dims, 1.0),
CreateQuantized32Tensor(output3_data, output3_dims, 1.0)};
CreateInt32Tensor(input_data, input_dims),
CreateInt32Tensor(output1_data, output1_dims),
CreateInt32Tensor(output2_data, output2_dims),
CreateInt32Tensor(output3_data, output3_dims)};
// Place a unique value in the uninitialized output buffer.
for (int i = 0; i < output1_dims_count; ++i) {

View File

@ -19,6 +19,7 @@ limitations under the License.
// Useful functions for writing tests.
#include <cstdint>
#include <limits>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
@ -192,6 +193,21 @@ TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
// Returns the number of tensors in the default subgraph for a tflite::Model.
size_t GetModelTensorCount(const Model* model);
// Derives the quantization scaling factor from a min and max range.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  // Widen to double before subtracting so that the full integer range of T
  // (e.g. int32_t) cannot overflow; the denominator is the number of
  // representable quantized steps across the type.
  const double quantized_range = (std::numeric_limits<T>::max() * 1.0) -
                                 std::numeric_limits<T>::min();
  return (max - min) / static_cast<float>(quantized_range);
}
// Derives the quantization zero point from a min and max range.
template <typename T>
inline int ZeroPointFromMinMax(const float min, const float max) {
  const float scale = ScaleFromMinMax<T>(min, max);
  const int lowest_quantized = static_cast<int>(std::numeric_limits<T>::min());
  // Offset of real value 0.0 from `min`, rounded to the nearest step.
  return lowest_quantized + static_cast<int>(-min / scale + 0.5f);
}
} // namespace testing
} // namespace tflite

View File

@ -20,12 +20,8 @@ exports_files(["test_linux_binary.sh"])
cc_library(
name = "micro_test",
srcs = [
"test_utils.cc",
],
hdrs = [
"micro_test.h",
"test_utils.h",
],
visibility = [
":micro",

View File

@ -1,240 +0,0 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/testing/test_utils.h"
#include "tensorflow/lite/micro/simple_memory_allocator.h"
namespace tflite {
namespace testing {
namespace {
// TODO(b/141330728): Refactor out of test_utils.cc
// The variables below (and the AllocatePersistentBuffer function) are only
// needed for the kernel tests and benchmarks, i.e. where we do not have an
// interpreter object, and the fully featured MicroAllocator.
// Currently, these need to be sufficient for all the kernel_tests. If that
// becomes problematic, we can investigate allowing the arena_size to be
// specified for each call to PopulateContext.
constexpr size_t kArenaSize = 10000;
uint8_t raw_arena_[kArenaSize];
SimpleMemoryAllocator* simple_memory_allocator_ = nullptr;
constexpr size_t kBufferAlignment = 16;
// We store the pointer to the ith scratch buffer to implement the Request/Get
// ScratchBuffer API for the tests. scratch_buffers_[i] will be the ith scratch
// buffer and will still be allocated from within raw_arena_.
constexpr int kNumScratchBuffers = 5;
uint8_t* scratch_buffers_[kNumScratchBuffers];
int scratch_buffer_count_ = 0;
// Note that the context parameter in this function is only needed to match the
// signature of TfLiteContext::AllocatePersistentBuffer and isn't needed in the
// implementation because we are assuming a single global
// simple_memory_allocator_
// Serves TfLiteContext::AllocatePersistentBuffer for tests: all persistent
// allocations come from the tail of the single global arena. `context` is
// unused (kept only to match the callback signature).
void* AllocatePersistentBuffer(TfLiteContext* context, size_t bytes) {
  TFLITE_DCHECK(simple_memory_allocator_ != nullptr);
  void* buffer =
      simple_memory_allocator_->AllocateFromTail(bytes, kBufferAlignment);
  return buffer;
}
// Serves TfLiteContext::RequestScratchBufferInArena for tests: hands out a
// scratch buffer from the global arena and reports its slot index through
// `buffer_index`. Fails once all kNumScratchBuffers slots are in use.
TfLiteStatus RequestScratchBufferInArena(TfLiteContext* context, size_t bytes,
                                         int* buffer_index) {
  TFLITE_DCHECK(simple_memory_allocator_ != nullptr);
  TFLITE_DCHECK(buffer_index != nullptr);

  // Refuse the request once every slot is taken.
  if (scratch_buffer_count_ == kNumScratchBuffers) {
    TF_LITE_REPORT_ERROR(
        static_cast<ErrorReporter*>(context->impl_),
        "Exceeded the maximum number of scratch tensors allowed (%d).",
        kNumScratchBuffers);
    return kTfLiteError;
  }

  // Tests allocate scratch memory from the arena tail and keep it for the
  // lifetime of the model, so the test arena must be larger than one where
  // scratch buffers could share memory.
  const int slot = scratch_buffer_count_;
  uint8_t* buffer =
      simple_memory_allocator_->AllocateFromTail(bytes, kBufferAlignment);
  TFLITE_DCHECK(buffer != nullptr);
  scratch_buffers_[slot] = buffer;
  scratch_buffer_count_ = slot + 1;
  *buffer_index = slot;
  return kTfLiteOk;
}
// Serves TfLiteContext::GetScratchBuffer for tests: returns the buffer
// previously handed out for `buffer_index`, or nullptr if that index was
// never requested.
void* GetScratchBuffer(TfLiteContext* context, int buffer_index) {
  TFLITE_DCHECK(scratch_buffer_count_ <= kNumScratchBuffers);
  const bool in_range = buffer_index < scratch_buffer_count_;
  return in_range ? static_cast<void*>(scratch_buffers_[buffer_index])
                  : nullptr;
}
// Serves TfLiteContext::GetTensor for tests: hands back a pointer into the
// context's flat tensor array.
TfLiteTensor* GetTensor(const struct TfLiteContext* context, int subgraph_idx) {
  // TODO(b/160894903): Return this value from temp allocated memory.
  TfLiteTensor* tensors = context->tensors;
  return tensors + subgraph_idx;
}
} // namespace
// Converts a float value into an unsigned eight-bit quantized value, using
// the scale and zero point derived from the [min, max] range and saturating
// to the uint8_t limits.
uint8_t F2Q(float value, float min, float max) {
  // The whole expression is evaluated in float, then truncated toward zero by
  // the int32_t conversion. The +0.5f rounds-to-nearest for non-negative
  // sums; a negative sum truncates toward zero instead, but those saturate
  // to 0 just below, so the difference is not observable.
  int32_t result = ZeroPointFromMinMax<uint8_t>(min, max) +
                   (value / ScaleFromMinMax<uint8_t>(min, max)) + 0.5f;
  if (result < std::numeric_limits<uint8_t>::min()) {
    result = std::numeric_limits<uint8_t>::min();
  }
  if (result > std::numeric_limits<uint8_t>::max()) {
    result = std::numeric_limits<uint8_t>::max();
  }
  return result;
}
// Converts a float value into a signed eight-bit quantized value.
int8_t F2QS(float value, float min, float max) {
  // Quantize into the unsigned eight-bit space, then shift down by 128 to
  // land in the signed range.
  const int32_t shifted =
      F2Q(value, min, max) + std::numeric_limits<int8_t>::min();
  return static_cast<int8_t>(shifted);
}
// Converts a float value into a signed thirty-two-bit quantized value by
// dividing by `scale` and saturating to the int32_t range. Note that values
// close to max int and min int may see significant error due to a lack of
// floating point granularity for large values.
int32_t F2Q32(float value, float scale) {
  double quantized = static_cast<double>(value / scale);
  // Clamp before converting so the double-to-integer conversion cannot
  // overflow (which would be undefined behavior).
  if (quantized > std::numeric_limits<int32_t>::max()) {
    quantized = std::numeric_limits<int32_t>::max();
  } else if (quantized < std::numeric_limits<int32_t>::min()) {
    quantized = std::numeric_limits<int32_t>::min();
  }
  // Fix: cast to the declared int32_t return type instead of plain `int` for
  // consistency (identical width on supported targets, clearer intent).
  return static_cast<int32_t>(quantized);
}
// TODO(b/141330728): Move this method elsewhere as part of cleanup.
// Wires up a TfLiteContext for kernel tests/benchmarks that run without an
// interpreter: installs the test-only allocator callbacks backed by the
// global arena, registers `tensors`, and resets any variable tensors.
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     ErrorReporter* error_reporter, TfLiteContext* context) {
  // (Re)create the global arena allocator used by AllocatePersistentBuffer /
  // RequestScratchBufferInArena, and reset the scratch-buffer bookkeeping so
  // each test starts from a clean slate.
  simple_memory_allocator_ =
      SimpleMemoryAllocator::Create(error_reporter, raw_arena_, kArenaSize);
  TFLITE_DCHECK(simple_memory_allocator_ != nullptr);
  scratch_buffer_count_ = 0;
  context->tensors_size = tensors_size;
  context->tensors = tensors;
  // Stash the error reporter in impl_ so callbacks (e.g.
  // RequestScratchBufferInArena) can report errors through the context.
  context->impl_ = static_cast<void*>(error_reporter);
  // Interpreter-only callbacks are left null; kernels under test must not
  // call them.
  context->GetExecutionPlan = nullptr;
  context->ResizeTensor = nullptr;
  context->ReportError = ReportOpError;
  context->AddTensors = nullptr;
  context->GetNodeAndRegistration = nullptr;
  context->ReplaceNodeSubsetsWithDelegateKernels = nullptr;
  context->recommended_num_threads = 1;
  context->GetExternalContext = nullptr;
  context->SetExternalContext = nullptr;
  // Test-local implementations defined above in this file.
  context->GetTensor = GetTensor;
  context->GetEvalTensor = nullptr;
  context->AllocatePersistentBuffer = AllocatePersistentBuffer;
  context->RequestScratchBufferInArena = RequestScratchBufferInArena;
  context->GetScratchBuffer = GetScratchBuffer;
  // Variable tensors carry state between invocations; zero them so earlier
  // tests cannot leak state into this one.
  for (int i = 0; i < tensors_size; ++i) {
    if (context->tensors[i].is_variable) {
      ResetVariableTensor(&context->tensors[i]);
    }
  }
}
// Builds a kTfLiteUInt8 tensor over caller-owned `data`, deriving the
// quantization scale and zero point from the [min, max] float range. The
// tensor does not own `data` or `dims`.
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                   float min, float max, bool is_variable) {
  TfLiteTensor result;
  result.type = kTfLiteUInt8;
  result.data.uint8 = const_cast<uint8_t*>(data);
  result.dims = dims;
  result.params = {ScaleFromMinMax<uint8_t>(min, max),
                   ZeroPointFromMinMax<uint8_t>(min, max)};
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(uint8_t);
  // Fix: honor the caller's flag. This was hard-coded to `false`, silently
  // ignoring the parameter, unlike the int8_t overload of this function.
  result.is_variable = is_variable;
  return result;
}
// Builds a kTfLiteInt8 tensor over caller-owned `data`, deriving the
// quantization scale and zero point from the [min, max] float range. The
// tensor does not own `data` or `dims`.
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                   float min, float max, bool is_variable) {
  TfLiteTensor tensor;
  tensor.type = kTfLiteInt8;
  tensor.data.int8 = const_cast<int8_t*>(data);
  tensor.dims = dims;
  tensor.params.scale = ScaleFromMinMax<int8_t>(min, max);
  tensor.params.zero_point = ZeroPointFromMinMax<int8_t>(min, max);
  tensor.allocation_type = kTfLiteMemNone;
  tensor.bytes = ElementCount(*dims) * sizeof(int8_t);
  tensor.is_variable = is_variable;
  return tensor;
}
// Symmetrically quantizes `data` into caller-owned `quantized_data` and wraps
// it in a kTfLiteUInt8 tensor. The scale is produced by SymmetricQuantize;
// the zero point is fixed at 128.
TfLiteTensor CreateQuantizedTensor(const float* data, uint8_t* quantized_data,
                                   TfLiteIntArray* dims, bool is_variable) {
  TfLiteTensor tensor;
  // Fills quantized_data and writes the derived scale into the params.
  SymmetricQuantize(data, dims, quantized_data, &tensor.params.scale);
  tensor.type = kTfLiteUInt8;
  tensor.data.uint8 = quantized_data;
  tensor.dims = dims;
  tensor.params.zero_point = 128;
  tensor.allocation_type = kTfLiteMemNone;
  tensor.bytes = ElementCount(*dims) * sizeof(uint8_t);
  tensor.is_variable = is_variable;
  return tensor;
}
// Symmetrically quantizes `data` into caller-owned `quantized_data` and wraps
// it in a kTfLiteInt8 tensor. The scale is produced by
// SignedSymmetricQuantize; the zero point is fixed at 0.
TfLiteTensor CreateQuantizedTensor(const float* data, int8_t* quantized_data,
                                   TfLiteIntArray* dims, bool is_variable) {
  TfLiteTensor tensor;
  // Fills quantized_data and writes the derived scale into the params.
  SignedSymmetricQuantize(data, dims, quantized_data, &tensor.params.scale);
  tensor.type = kTfLiteInt8;
  tensor.data.int8 = quantized_data;
  tensor.dims = dims;
  tensor.params.zero_point = 0;
  tensor.allocation_type = kTfLiteMemNone;
  tensor.bytes = ElementCount(*dims) * sizeof(int8_t);
  tensor.is_variable = is_variable;
  return tensor;
}
// Symmetrically quantizes `data` into caller-owned `quantized_data` and wraps
// it in a kTfLiteInt16 tensor. The scale is produced by
// SignedSymmetricQuantize; the zero point is fixed at 0.
TfLiteTensor CreateQuantizedTensor(const float* data, int16_t* quantized_data,
                                   TfLiteIntArray* dims, bool is_variable) {
  TfLiteTensor tensor;
  // Fills quantized_data and writes the derived scale into the params.
  SignedSymmetricQuantize(data, dims, quantized_data, &tensor.params.scale);
  tensor.type = kTfLiteInt16;
  tensor.data.i16 = quantized_data;
  tensor.dims = dims;
  tensor.params.zero_point = 0;
  tensor.allocation_type = kTfLiteMemNone;
  tensor.bytes = ElementCount(*dims) * sizeof(int16_t);
  tensor.is_variable = is_variable;
  return tensor;
}
// Wraps caller-owned int32_t `data` in a quantized tensor with the given
// scale. Quantized int32_t tensors always have a zero point of 0, since the
// range of int32_t values is large, and because zero point costs extra cycles
// during processing.
TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
                                     float scale, bool is_variable) {
  TfLiteTensor tensor;
  tensor.type = kTfLiteInt32;
  tensor.data.i32 = const_cast<int32_t*>(data);
  tensor.dims = dims;
  tensor.params.scale = scale;
  tensor.params.zero_point = 0;
  tensor.allocation_type = kTfLiteMemNone;
  tensor.bytes = ElementCount(*dims) * sizeof(int32_t);
  tensor.is_variable = is_variable;
  return tensor;
}
} // namespace testing
} // namespace tflite

View File

@ -1,115 +0,0 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_
#define TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_
#include <cmath>
#include <cstdint>
#include <limits>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace tflite {
namespace testing {
// Note: These methods are deprecated, do not use. See b/141332970.
// Derives the quantization range max from scaling factor and zero point.
template <typename T>
inline float MaxFromZeroPointScale(const int zero_point, const float scale) {
  // Distance (in quantized steps) from the zero point to T's upper limit,
  // scaled back into float space.
  const int steps_above_zero = std::numeric_limits<T>::max() - zero_point;
  return steps_above_zero * scale;
}
// Derives the quantization range min from scaling factor and zero point.
template <typename T>
inline float MinFromZeroPointScale(const int zero_point, const float scale) {
  // Distance (in quantized steps) from the zero point to T's lower limit,
  // scaled back into float space (negative when zero_point > T::min).
  const int steps_below_zero = std::numeric_limits<T>::min() - zero_point;
  return steps_below_zero * scale;
}
// Derives the quantization scaling factor from a min and max range.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  // Width of T's representable range, evaluated in double to match the
  // original (T::max * 1.0 - T::min) promotion before narrowing to float.
  const float type_span =
      static_cast<float>((std::numeric_limits<T>::max() * 1.0) -
                         std::numeric_limits<T>::min());
  return (max - min) / type_span;
}

// Derives the quantization zero point from a min and max range.
template <typename T>
inline int ZeroPointFromMinMax(const float min, const float max) {
  const int lowest = static_cast<int>(std::numeric_limits<T>::min());
  // Steps from `min` up to float 0.0, rounded via add-0.5-and-truncate.
  const int offset =
      static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
  return lowest + offset;
}
// Converts a float value into an unsigned eight-bit quantized value.
uint8_t F2Q(float value, float min, float max);
// Converts a float value into a signed eight-bit quantized value.
int8_t F2QS(const float value, const float min, const float max);
// Converts a float value into a signed thirty-two-bit quantized value. Note
// that values close to max int and min int may see significant error due to
// a lack of floating point granularity for large values.
int32_t F2Q32(const float value, const float scale);
// TODO(b/141330728): Move this method elsewhere as part of cleanup.
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
ErrorReporter* error_reporter, TfLiteContext* context);
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
float min, float max,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
float min, float max,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const float* data, uint8_t* quantized_data,
TfLiteIntArray* dims,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const float* data, int8_t* quantized_data,
TfLiteIntArray* dims,
bool is_variable = false);
TfLiteTensor CreateQuantizedTensor(const float* data, int16_t* quantized_data,
TfLiteIntArray* dims,
bool is_variable = false);
TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
float scale, bool is_variable = false);
// Builds a non-quantized tensor view over caller-owned `data`. The element
// type and its TfLiteType tag default to int32_t/kTfLiteInt32 and may be
// overridden via the template arguments. The tensor does not own `data` or
// `dims`.
template <typename input_type = int32_t,
          TfLiteType tensor_input_type = kTfLiteInt32>
inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
                                 bool is_variable = false) {
  TfLiteTensor tensor;
  tensor.type = tensor_input_type;
  tensor.dims = dims;
  tensor.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
  tensor.bytes = ElementCount(*dims) * sizeof(input_type);
  tensor.allocation_type = kTfLiteMemNone;
  tensor.is_variable = is_variable;
  return tensor;
}
} // namespace testing
} // namespace tflite
#endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_

View File

@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_utils.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -224,7 +224,6 @@ tensorflow/lite/core/api/op_resolver.cc \
tensorflow/lite/core/api/tensor_utils.cc \
tensorflow/lite/kernels/internal/quantization_util.cc \
tensorflow/lite/kernels/kernel_util.cc \
tensorflow/lite/micro/testing/test_utils.cc \
tensorflow/lite/schema/schema_utils.cc
MICROLITE_CC_SRCS := $(filter-out $(MICROLITE_TEST_SRCS), $(MICROLITE_CC_BASE_SRCS))