From 72499dfa3a7302e296a42f6751a326bc3c3b20dc Mon Sep 17 00:00:00 2001 From: Advait Jain Date: Wed, 3 Feb 2021 13:48:36 -0800 Subject: [PATCH] Add a MicroPrintf function that is independent of the ErrorReporter. Additionally, * remove the global error reporter from micro_test.h * change all the kernel tests to make use of MicroPrintf * add a GetMicroErrorReporter() function that returns a pointer to a singleton MicroErrorReporter object. - This enables the current change to not spread beyond the tests. - Even if we move large parts of the TFLM code to make use of MicroPrintf (in favor of error_reporter), there is still going to be shared TfLite/TFLM code that will need an error_reporter. Next steps, if we want to continue down this path * remove the error_reporter from the TFLM functions and class implementations and instead use either MicroPrintf or GetMicroErrorReporter() * Add new APIs that do not have error_reporter to the TFLM classes and functions. * Ask users to switch to the new error_reporter-free APIs and deprecate the APIs that do make use of the error_reporter. * Remove the error_reporter APIs completely. Prior to this change, we would have to use the ErrorReporter interface for all the logging. This was problematic on a few fronts: * The name ErrorReporter was often misleading since sometimes we just want to log, even when there isn't an error. * For even the simplest logging, we need to have access to an ErrorReporter object which means that pointers to an ErrorReporter are part of most classes in TFLM. With this change, we can simply call MicroPrintf(), and it can be a no-op if binary size is important. If we find this approach useful, we can consider incrementally reducing the usage of ErrorReporter from TFLM. Progress towards http://b/158205789 Starting to address review comments. 
re-do micro_test.h --- tensorflow/lite/micro/kernels/BUILD | 1 + .../lite/micro/kernels/activations_test.cc | 12 +- tensorflow/lite/micro/kernels/add_test.cc | 3 +- .../kernels/arc_mli/conv_slicing_test.cc | 2 +- .../arc_mli/depthwise_conv_slicing_test.cc | 2 +- .../arc_mli/fully_connected_slicing_test.cc | 2 +- .../kernels/arc_mli/pooling_slicing_test.cc | 4 +- .../lite/micro/kernels/arg_min_max_test.cc | 2 +- .../micro/kernels/batch_to_space_nd_test.cc | 2 +- tensorflow/lite/micro/kernels/cast_test.cc | 4 +- tensorflow/lite/micro/kernels/ceil_test.cc | 2 +- .../micro/kernels/circular_buffer_test.cc | 4 +- .../lite/micro/kernels/comparisons_test.cc | 3 +- .../lite/micro/kernels/concatenation_test.cc | 12 +- .../lite/micro/kernels/conv_test_common.cc | 7 +- .../lite/micro/kernels/depthwise_conv_test.cc | 6 +- .../lite/micro/kernels/dequantize_test.cc | 2 +- .../kernels/detection_postprocess_test.cc | 2 +- .../lite/micro/kernels/elementwise_test.cc | 4 +- tensorflow/lite/micro/kernels/exp_test.cc | 2 +- tensorflow/lite/micro/kernels/floor_test.cc | 3 +- .../micro/kernels/fully_connected_test.cc | 6 +- .../lite/micro/kernels/hard_swish_test.cc | 9 +- .../lite/micro/kernels/kernel_runner.cc | 27 +- tensorflow/lite/micro/kernels/kernel_runner.h | 7 +- tensorflow/lite/micro/kernels/l2norm_test.cc | 6 +- tensorflow/lite/micro/kernels/logical_test.cc | 2 +- .../lite/micro/kernels/logistic_test.cc | 2 +- .../micro/kernels/maximum_minimum_test.cc | 6 +- tensorflow/lite/micro/kernels/mul_test.cc | 6 +- tensorflow/lite/micro/kernels/neg_test.cc | 2 +- tensorflow/lite/micro/kernels/pack_test.cc | 3 +- tensorflow/lite/micro/kernels/pad_test.cc | 4 +- tensorflow/lite/micro/kernels/pooling_test.cc | 6 +- tensorflow/lite/micro/kernels/prelu_test.cc | 2 +- .../lite/micro/kernels/quantize_test.cc | 2 +- tensorflow/lite/micro/kernels/reduce_test.cc | 2 +- tensorflow/lite/micro/kernels/reshape_test.cc | 2 +- .../kernels/resize_nearest_neighbor_test.cc | 3 +- 
tensorflow/lite/micro/kernels/round_test.cc | 2 +- tensorflow/lite/micro/kernels/shape_test.cc | 2 +- tensorflow/lite/micro/kernels/softmax_test.cc | 3 +- .../micro/kernels/space_to_batch_nd_test.cc | 2 +- tensorflow/lite/micro/kernels/split_test.cc | 8 +- tensorflow/lite/micro/kernels/split_v_test.cc | 2 +- .../lite/micro/kernels/strided_slice_test.cc | 3 +- tensorflow/lite/micro/kernels/sub_test.cc | 3 +- tensorflow/lite/micro/kernels/svdf_test.cc | 2 +- tensorflow/lite/micro/kernels/tanh_test.cc | 6 +- tensorflow/lite/micro/kernels/unpack_test.cc | 24 +- .../lite/micro/kernels/zeros_like_test.cc | 6 +- .../lite/micro/memory_arena_threshold_test.cc | 8 +- tensorflow/lite/micro/memory_helpers_test.cc | 13 +- tensorflow/lite/micro/memory_planner/BUILD | 1 + .../greedy_memory_planner_test.cc | 1 + .../linear_memory_planner_test.cc | 1 + tensorflow/lite/micro/micro_allocator_test.cc | 104 ++++---- tensorflow/lite/micro/micro_error_reporter.cc | 43 +++- tensorflow/lite/micro/micro_error_reporter.h | 13 + .../lite/micro/micro_interpreter_test.cc | 50 ++-- .../micro/recording_micro_allocator_test.cc | 13 +- .../recording_simple_memory_allocator_test.cc | 25 +- .../micro/simple_memory_allocator_test.cc | 53 ++-- tensorflow/lite/micro/testing/micro_test.h | 231 ++++++++---------- .../tools/make/targets/bluepill_makefile.inc | 3 + .../tools/make/targets/stm32f4_makefile.inc | 6 +- 66 files changed, 421 insertions(+), 385 deletions(-) diff --git a/tensorflow/lite/micro/kernels/BUILD b/tensorflow/lite/micro/kernels/BUILD index 758fcbdb937..857ca367017 100644 --- a/tensorflow/lite/micro/kernels/BUILD +++ b/tensorflow/lite/micro/kernels/BUILD @@ -668,6 +668,7 @@ cc_library( deps = [ "//tensorflow/lite/c:common", "//tensorflow/lite/kernels/internal:compatibility", + "//tensorflow/lite/micro:micro_error_reporter", "//tensorflow/lite/micro:micro_framework", ], ) diff --git a/tensorflow/lite/micro/kernels/activations_test.cc b/tensorflow/lite/micro/kernels/activations_test.cc 
index 3a51472f9bb..8e6dec4f477 100644 --- a/tensorflow/lite/micro/kernels/activations_test.cc +++ b/tensorflow/lite/micro/kernels/activations_test.cc @@ -47,7 +47,7 @@ void TestReluFloat(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = ops::micro::Register_RELU(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -80,7 +80,7 @@ void TestRelu6Float(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = ops::micro::Register_RELU6(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -118,7 +118,7 @@ void TestReluUint8(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = ops::micro::Register_RELU(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -159,7 +159,7 @@ void TestRelu6Uint8(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = ops::micro::Register_RELU6(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -199,7 +199,7 @@ void TestReluInt8(const int* 
input_dims_data, const float* input_data, const TfLiteRegistration registration = ops::micro::Register_RELU(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -239,7 +239,7 @@ void TestRelu6Int8(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = ops::micro::Register_RELU6(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/add_test.cc b/tensorflow/lite/micro/kernels/add_test.cc index a11b73c3290..66645a08477 100644 --- a/tensorflow/lite/micro/kernels/add_test.cc +++ b/tensorflow/lite/micro/kernels/add_test.cc @@ -77,8 +77,7 @@ void ValidateAddGoldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = ops::micro::Register_ADD(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, &builtin_data, - micro_test::reporter); + outputs_array, &builtin_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/arc_mli/conv_slicing_test.cc b/tensorflow/lite/micro/kernels/arc_mli/conv_slicing_test.cc index 25d5377009e..8a44e020104 100644 --- a/tensorflow/lite/micro/kernels/arc_mli/conv_slicing_test.cc +++ b/tensorflow/lite/micro/kernels/arc_mli/conv_slicing_test.cc @@ -134,7 +134,7 @@ TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, TfLiteConvParams* conv_params, float tolerance = 1e-5) { TfLiteContext context; - 
PopulateContext(tensors, tensors_size, micro_test::reporter, &context); + PopulateContext(tensors, tensors_size, &context); ::tflite::AllOpsResolver resolver; diff --git a/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv_slicing_test.cc b/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv_slicing_test.cc index d9efb3ae709..3d39bc518ac 100644 --- a/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv_slicing_test.cc +++ b/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv_slicing_test.cc @@ -53,7 +53,7 @@ TfLiteStatus ValidateDepthwiseConvGoldens(const T* expected_output_data, float tolerance, int tensors_size, TfLiteTensor* tensors) { TfLiteContext context; - PopulateContext(tensors, tensors_size, micro_test::reporter, &context); + PopulateContext(tensors, tensors_size, &context); ::tflite::AllOpsResolver resolver; const TfLiteRegistration* registration = diff --git a/tensorflow/lite/micro/kernels/arc_mli/fully_connected_slicing_test.cc b/tensorflow/lite/micro/kernels/arc_mli/fully_connected_slicing_test.cc index d0c7143b18b..8a6749f3285 100644 --- a/tensorflow/lite/micro/kernels/arc_mli/fully_connected_slicing_test.cc +++ b/tensorflow/lite/micro/kernels/arc_mli/fully_connected_slicing_test.cc @@ -67,7 +67,7 @@ void TestFullyConnectedQuantized( tensors[3].params.zero_point = 0; TfLiteContext context; - PopulateContext(tensors, tensors_size, micro_test::reporter, &context); + PopulateContext(tensors, tensors_size, &context); ::tflite::AllOpsResolver resolver; const TfLiteRegistration* registration = diff --git a/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc b/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc index 516b1bf63d6..e367bb2782a 100644 --- a/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc +++ b/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc @@ -58,7 +58,7 @@ void TestAveragePoolingQuantized( }; TfLiteContext context; - PopulateContext(tensors, tensors_size, micro_test::reporter, &context); + 
PopulateContext(tensors, tensors_size, &context); ::tflite::AllOpsResolver resolver; const TfLiteRegistration* registration = @@ -129,7 +129,7 @@ void TestMaxPoolQuantized(const int* input_dims_data, const T* input_data, }; TfLiteContext context; - PopulateContext(tensors, tensors_size, micro_test::reporter, &context); + PopulateContext(tensors, tensors_size, &context); ::tflite::AllOpsResolver resolver; const TfLiteRegistration* registration = diff --git a/tensorflow/lite/micro/kernels/arg_min_max_test.cc b/tensorflow/lite/micro/kernels/arg_min_max_test.cc index e1e87d39be3..0b9e7f15687 100644 --- a/tensorflow/lite/micro/kernels/arg_min_max_test.cc +++ b/tensorflow/lite/micro/kernels/arg_min_max_test.cc @@ -37,7 +37,7 @@ void ValidateArgMinMaxGoldens(TfLiteTensor* tensors, int tensors_size, : ops::micro::Register_ARG_MAX(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/batch_to_space_nd_test.cc b/tensorflow/lite/micro/kernels/batch_to_space_nd_test.cc index 9a800a152fd..0903e79630c 100644 --- a/tensorflow/lite/micro/kernels/batch_to_space_nd_test.cc +++ b/tensorflow/lite/micro/kernels/batch_to_space_nd_test.cc @@ -48,7 +48,7 @@ TfLiteStatus ValidateBatchToSpaceNdGoldens(TfLiteTensor* tensors, const TfLiteRegistration registration = Register_BATCH_TO_SPACE_ND(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_ENSURE_STATUS(runner.InitAndPrepare()); TF_LITE_ENSURE_STATUS(runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/cast_test.cc b/tensorflow/lite/micro/kernels/cast_test.cc index a4e28cf1f75..3633a61648a 100644 --- a/tensorflow/lite/micro/kernels/cast_test.cc 
+++ b/tensorflow/lite/micro/kernels/cast_test.cc @@ -46,7 +46,7 @@ void TestCastFloatToInt8(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = Register_CAST(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -78,7 +78,7 @@ void TestCastInt8ToFloat(const int* input_dims_data, const int8_t* input_data, const TfLiteRegistration registration = Register_CAST(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/ceil_test.cc b/tensorflow/lite/micro/kernels/ceil_test.cc index 286cbd2f194..52c39a21fff 100644 --- a/tensorflow/lite/micro/kernels/ceil_test.cc +++ b/tensorflow/lite/micro/kernels/ceil_test.cc @@ -45,7 +45,7 @@ void TestCeil(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = ops::micro::Register_CEIL(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/circular_buffer_test.cc b/tensorflow/lite/micro/kernels/circular_buffer_test.cc index 3e321ce070a..eea3e7a3b0f 100644 --- a/tensorflow/lite/micro/kernels/circular_buffer_test.cc +++ b/tensorflow/lite/micro/kernels/circular_buffer_test.cc @@ -79,7 +79,7 @@ TF_LITE_MICRO_TEST(OutputTensorLength4) { tflite::ops::micro::Register_CIRCULAR_BUFFER(); 
tflite::micro::KernelRunner runner = tflite::micro::KernelRunner( *registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); const int8_t goldens[5][16] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3}, @@ -147,7 +147,7 @@ TF_LITE_MICRO_TEST(OutputTensorLength5) { tflite::ops::micro::Register_CIRCULAR_BUFFER(); tflite::micro::KernelRunner runner = tflite::micro::KernelRunner( *registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); const int8_t goldens[6][20] = { diff --git a/tensorflow/lite/micro/kernels/comparisons_test.cc b/tensorflow/lite/micro/kernels/comparisons_test.cc index addb08aa4da..fe55a239962 100644 --- a/tensorflow/lite/micro/kernels/comparisons_test.cc +++ b/tensorflow/lite/micro/kernels/comparisons_test.cc @@ -40,8 +40,7 @@ void TestComparison(const TfLiteRegistration& registration, TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, /*builtin_data=*/nullptr, - micro_test::reporter); + outputs_array, /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/concatenation_test.cc b/tensorflow/lite/micro/kernels/concatenation_test.cc index cb7e0bff626..0fd24665c96 100644 --- a/tensorflow/lite/micro/kernels/concatenation_test.cc +++ b/tensorflow/lite/micro/kernels/concatenation_test.cc @@ -54,9 +54,9 @@ void TestConcatenateTwoInputs(const int* input1_dims_data, const TfLiteRegistration registration = tflite::ops::micro::Register_CONCATENATION(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, 
outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -101,9 +101,9 @@ void TestConcatenateQuantizedTwoInputs( const TfLiteRegistration registration = tflite::ops::micro::Register_CONCATENATION(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/conv_test_common.cc b/tensorflow/lite/micro/kernels/conv_test_common.cc index aea38d9d88e..3fc318d0da4 100644 --- a/tensorflow/lite/micro/kernels/conv_test_common.cc +++ b/tensorflow/lite/micro/kernels/conv_test_common.cc @@ -27,10 +27,9 @@ TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, int outputs_array_data[] = {1, 3}; TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); - tflite::MicroErrorReporter reporter; - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(conv_params), &reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(conv_params)); const char* init_data = reinterpret_cast(conv_params); TfLiteStatus status = runner.InitAndPrepare(init_data); diff --git a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc index dcf4c2d28c3..6e98d669b22 100644 --- a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc +++ 
b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc @@ -50,9 +50,9 @@ TfLiteStatus ValidateDepthwiseConvGoldens( TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = Register_DEPTHWISE_CONV_2D(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(conv_params), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(conv_params)); int input_depth = tensors[0].dims->data[3]; int output_depth = tensors[1].dims->data[3]; diff --git a/tensorflow/lite/micro/kernels/dequantize_test.cc b/tensorflow/lite/micro/kernels/dequantize_test.cc index fb6641186c0..5bee09fba24 100644 --- a/tensorflow/lite/micro/kernels/dequantize_test.cc +++ b/tensorflow/lite/micro/kernels/dequantize_test.cc @@ -36,7 +36,7 @@ void ValidateDequantizeGoldens(TfLiteTensor* tensors, int tensors_size, tflite::ops::micro::Register_DEQUANTIZE(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/detection_postprocess_test.cc b/tensorflow/lite/micro/kernels/detection_postprocess_test.cc index f58376d2043..bf114a4796d 100644 --- a/tensorflow/lite/micro/kernels/detection_postprocess_test.cc +++ b/tensorflow/lite/micro/kernels/detection_postprocess_test.cc @@ -162,7 +162,7 @@ void TestDetectionPostprocess( TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); micro::KernelRunner runner(*registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); // Using generated data as input to operator. 
int data_size = 0; diff --git a/tensorflow/lite/micro/kernels/elementwise_test.cc b/tensorflow/lite/micro/kernels/elementwise_test.cc index 665f8d4e0d6..a59106cd76c 100644 --- a/tensorflow/lite/micro/kernels/elementwise_test.cc +++ b/tensorflow/lite/micro/kernels/elementwise_test.cc @@ -50,7 +50,7 @@ void TestElementwiseFloat(const TfLiteRegistration& registration, micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -86,7 +86,7 @@ void TestElementwiseBool(const TfLiteRegistration& registration, micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/exp_test.cc b/tensorflow/lite/micro/kernels/exp_test.cc index dd995e9b517..536b7f491c4 100644 --- a/tensorflow/lite/micro/kernels/exp_test.cc +++ b/tensorflow/lite/micro/kernels/exp_test.cc @@ -46,7 +46,7 @@ void TestExp(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = Register_EXP(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/floor_test.cc b/tensorflow/lite/micro/kernels/floor_test.cc index 9e9da1ddd57..5b7f8f9b195 100644 --- a/tensorflow/lite/micro/kernels/floor_test.cc +++ b/tensorflow/lite/micro/kernels/floor_test.cc @@ -45,8 +45,7 @@ void TestFloor(const int* input_dims_data, const float* 
input_data, const TfLiteRegistration registration = tflite::ops::micro::Register_FLOOR(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, /*builtin_data=*/nullptr, - micro_test::reporter); + outputs_array, /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/fully_connected_test.cc b/tensorflow/lite/micro/kernels/fully_connected_test.cc index dd7b7e9e833..b1715fc2df3 100644 --- a/tensorflow/lite/micro/kernels/fully_connected_test.cc +++ b/tensorflow/lite/micro/kernels/fully_connected_test.cc @@ -240,9 +240,9 @@ TfLiteStatus ValidateFullyConnectedGoldens( TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = Register_FULLY_CONNECTED(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TfLiteStatus status = runner.InitAndPrepare(); if (status != kTfLiteOk) { diff --git a/tensorflow/lite/micro/kernels/hard_swish_test.cc b/tensorflow/lite/micro/kernels/hard_swish_test.cc index 2b92e902aa3..a877ff07452 100644 --- a/tensorflow/lite/micro/kernels/hard_swish_test.cc +++ b/tensorflow/lite/micro/kernels/hard_swish_test.cc @@ -108,8 +108,7 @@ void TestHardSwishQuantized(int size, const T* output_data, const TfLiteRegistration registration = tflite::ops::micro::Register_HARD_SWISH(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, /*builtin_data=*/nullptr, - micro_test::reporter); + outputs_array, /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -188,8 +187,7 @@ void 
TestHardSwishQuantizedBias(const int size, const T* output_data, const TfLiteRegistration registration = tflite::ops::micro::Register_HARD_SWISH(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, /*builtin_data=*/nullptr, - micro_test::reporter); + outputs_array, /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -241,8 +239,7 @@ void TestHardSwishFloat(const int size, float* output_data, const TfLiteRegistration registration = tflite::ops::micro::Register_HARD_SWISH(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, /*builtin_data=*/nullptr, - micro_test::reporter); + outputs_array, /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/kernel_runner.cc b/tensorflow/lite/micro/kernels/kernel_runner.cc index 5d034791655..dd0ba8ba48d 100644 --- a/tensorflow/lite/micro/kernels/kernel_runner.cc +++ b/tensorflow/lite/micro/kernels/kernel_runner.cc @@ -15,6 +15,8 @@ limitations under the License. 
#include "tensorflow/lite/micro/kernels/kernel_runner.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" + namespace tflite { namespace micro { @@ -30,12 +32,12 @@ uint8_t KernelRunner::kKernelRunnerBuffer_[]; KernelRunner::KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* inputs, TfLiteIntArray* outputs, - void* builtin_data, ErrorReporter* error_reporter) - : allocator_(SimpleMemoryAllocator::Create( - error_reporter, kKernelRunnerBuffer_, kKernelRunnerBufferSize_)), + void* builtin_data) + : allocator_(SimpleMemoryAllocator::Create(GetMicroErrorReporter(), + kKernelRunnerBuffer_, + kKernelRunnerBufferSize_)), registration_(registration), - tensors_(tensors), - error_reporter_(error_reporter) { + tensors_(tensors) { // Prepare TfLiteContext: context_.impl_ = static_cast(this); context_.ReportError = ReportOpError; @@ -65,8 +67,7 @@ TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data, TfLiteStatus KernelRunner::Invoke() { if (registration_.invoke == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "TfLiteRegistration missing invoke function pointer!"); + MicroPrintf("TfLiteRegistration missing invoke function pointer!"); return kTfLiteError; } return registration_.invoke(&context_, &node_); @@ -119,10 +120,8 @@ TfLiteStatus KernelRunner::RequestScratchBufferInArena(TfLiteContext* context, TFLITE_DCHECK(runner != nullptr); if (runner->scratch_buffer_count_ == kNumScratchBuffers_) { - TF_LITE_REPORT_ERROR( - runner->error_reporter_, - "Exceeded the maximum number of scratch tensors allowed (%d).", - kNumScratchBuffers_); + MicroPrintf("Exceeded the maximum number of scratch tensors allowed (%d).", + kNumScratchBuffers_); return kTfLiteError; } @@ -152,13 +151,9 @@ void* KernelRunner::GetScratchBuffer(TfLiteContext* context, int buffer_index) { void KernelRunner::ReportOpError(struct TfLiteContext* context, const char* format, ...) 
{ - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - va_list args; va_start(args, format); - TF_LITE_REPORT_ERROR(runner->error_reporter_, format, args); + GetMicroErrorReporter()->Report(format, args); va_end(args); } diff --git a/tensorflow/lite/micro/kernels/kernel_runner.h b/tensorflow/lite/micro/kernels/kernel_runner.h index 34a2149dd7e..b145097d100 100644 --- a/tensorflow/lite/micro/kernels/kernel_runner.h +++ b/tensorflow/lite/micro/kernels/kernel_runner.h @@ -33,12 +33,10 @@ class KernelRunner { public: KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* inputs, - TfLiteIntArray* outputs, void* builtin_data, - ErrorReporter* error_reporter); + TfLiteIntArray* outputs, void* builtin_data); // Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct. Any - // exceptions will be reported through the error_reporter and returned as a - // status code here. + // exceptions will be DebugLog'd and returned as a status code. 
TfLiteStatus InitAndPrepare(const char* init_data = nullptr, size_t length = 0); @@ -69,7 +67,6 @@ class KernelRunner { SimpleMemoryAllocator* allocator_ = nullptr; const TfLiteRegistration& registration_; TfLiteTensor* tensors_ = nullptr; - ErrorReporter* error_reporter_ = nullptr; TfLiteContext context_ = {}; TfLiteNode node_ = {}; diff --git a/tensorflow/lite/micro/kernels/l2norm_test.cc b/tensorflow/lite/micro/kernels/l2norm_test.cc index cac39278f10..9e2a48eda26 100644 --- a/tensorflow/lite/micro/kernels/l2norm_test.cc +++ b/tensorflow/lite/micro/kernels/l2norm_test.cc @@ -77,9 +77,9 @@ void TestL2Normalization(const int* input_dims_data, const T* input_data, const TfLiteRegistration registration = ops::micro::Register_L2_NORMALIZATION(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/logical_test.cc b/tensorflow/lite/micro/kernels/logical_test.cc index cca2e6a2eb7..a1e4eb5bfdc 100644 --- a/tensorflow/lite/micro/kernels/logical_test.cc +++ b/tensorflow/lite/micro/kernels/logical_test.cc @@ -50,7 +50,7 @@ void TestLogicalOp(const TfLiteRegistration& registration, micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/logistic_test.cc b/tensorflow/lite/micro/kernels/logistic_test.cc index 3099f2972dc..868af2c06ed 100644 --- a/tensorflow/lite/micro/kernels/logistic_test.cc +++ 
b/tensorflow/lite/micro/kernels/logistic_test.cc @@ -58,7 +58,7 @@ void ValidateLogisticGoldens(TfLiteTensor* tensors, const int tensor_count, const TfLiteRegistration registration = tflite::ops::micro::Register_LOGISTIC(); micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/maximum_minimum_test.cc b/tensorflow/lite/micro/kernels/maximum_minimum_test.cc index 9c0eac0726e..76a6a98d081 100644 --- a/tensorflow/lite/micro/kernels/maximum_minimum_test.cc +++ b/tensorflow/lite/micro/kernels/maximum_minimum_test.cc @@ -50,7 +50,7 @@ void TestMaxMinFloat(const TfLiteRegistration& registration, micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -94,7 +94,7 @@ void TestMaxMinQuantized(const TfLiteRegistration& registration, micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -130,7 +130,7 @@ void TestMaxMinQuantizedInt32( micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/mul_test.cc b/tensorflow/lite/micro/kernels/mul_test.cc index 5c0fe275e07..46d7f5d68c8 100644 --- 
a/tensorflow/lite/micro/kernels/mul_test.cc +++ b/tensorflow/lite/micro/kernels/mul_test.cc @@ -55,9 +55,9 @@ void ValidateMulGoldens(TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = tflite::ops::micro::Register_MUL(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/neg_test.cc b/tensorflow/lite/micro/kernels/neg_test.cc index 40111dca0d4..4490f2a72f9 100644 --- a/tensorflow/lite/micro/kernels/neg_test.cc +++ b/tensorflow/lite/micro/kernels/neg_test.cc @@ -46,7 +46,7 @@ void TestNegFloat(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = tflite::ops::micro::Register_NEG(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/pack_test.cc b/tensorflow/lite/micro/kernels/pack_test.cc index d523db3e983..e8c6a4d4a70 100644 --- a/tensorflow/lite/micro/kernels/pack_test.cc +++ b/tensorflow/lite/micro/kernels/pack_test.cc @@ -35,8 +35,7 @@ void ValidatePackGoldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = tflite::ops::micro::Register_PACK(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, reinterpret_cast(¶ms), - micro_test::reporter); + outputs_array, reinterpret_cast(¶ms)); 
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/pad_test.cc b/tensorflow/lite/micro/kernels/pad_test.cc index 859fc1b05e9..eeeb785e401 100644 --- a/tensorflow/lite/micro/kernels/pad_test.cc +++ b/tensorflow/lite/micro/kernels/pad_test.cc @@ -36,7 +36,7 @@ TfLiteStatus ValidatePadGoldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = tflite::ops::micro::Register_PAD(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); // Prepare should catch dimension mismatches. TfLiteStatus prepare_status = runner.InitAndPrepare(); @@ -68,7 +68,7 @@ TfLiteStatus ValidatePadV2Goldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = tflite::ops::micro::Register_PADV2(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); // Prepare should catch dimension mismatches. 
TfLiteStatus prepare_status = runner.InitAndPrepare(); diff --git a/tensorflow/lite/micro/kernels/pooling_test.cc b/tensorflow/lite/micro/kernels/pooling_test.cc index 2f384597e7c..6f4871023c5 100644 --- a/tensorflow/lite/micro/kernels/pooling_test.cc +++ b/tensorflow/lite/micro/kernels/pooling_test.cc @@ -46,9 +46,9 @@ void ValidatePoolingGoldens(TfLiteTensor* tensors, int tensors_size, activation, {}}; - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/prelu_test.cc b/tensorflow/lite/micro/kernels/prelu_test.cc index 92acecf052a..bbe8e2d8c08 100644 --- a/tensorflow/lite/micro/kernels/prelu_test.cc +++ b/tensorflow/lite/micro/kernels/prelu_test.cc @@ -35,7 +35,7 @@ void ValidatePreluGoldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = tflite::ops::micro::Register_PRELU(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/quantize_test.cc b/tensorflow/lite/micro/kernels/quantize_test.cc index 50d50214586..ad302f0438d 100644 --- a/tensorflow/lite/micro/kernels/quantize_test.cc +++ b/tensorflow/lite/micro/kernels/quantize_test.cc @@ -37,7 +37,7 @@ void ValidateQuantizeGoldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = Register_QUANTIZE(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - 
/*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/reduce_test.cc b/tensorflow/lite/micro/kernels/reduce_test.cc index 3666bc0b2fb..e06a1110d82 100644 --- a/tensorflow/lite/micro/kernels/reduce_test.cc +++ b/tensorflow/lite/micro/kernels/reduce_test.cc @@ -77,7 +77,7 @@ TfLiteStatus ValidateReduceGoldens(TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, params, micro_test::reporter); + outputs_array, params); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/reshape_test.cc b/tensorflow/lite/micro/kernels/reshape_test.cc index 9e1da3ca51d..2b7d13ca489 100644 --- a/tensorflow/lite/micro/kernels/reshape_test.cc +++ b/tensorflow/lite/micro/kernels/reshape_test.cc @@ -40,7 +40,7 @@ void ValidateReshapeGoldens( tflite::ops::micro::Register_RESHAPE(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); if (expect_failure) { TF_LITE_MICRO_EXPECT_NE(kTfLiteOk, runner.InitAndPrepare()); diff --git a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc index f1af763d9bb..0f511728f43 100644 --- a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc +++ b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc @@ -75,8 +75,7 @@ void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data, const TfLiteRegistration registration = tflite::ops::micro::Register_RESIZE_NEAREST_NEIGHBOR(); micro::KernelRunner 
runner(registration, tensors, tensors_size, inputs_array, - outputs_array, &builtin_data, - micro_test::reporter); + outputs_array, &builtin_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/round_test.cc b/tensorflow/lite/micro/kernels/round_test.cc index 412ecf5b539..534e3f24999 100644 --- a/tensorflow/lite/micro/kernels/round_test.cc +++ b/tensorflow/lite/micro/kernels/round_test.cc @@ -44,7 +44,7 @@ void TestRound(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = tflite::ops::micro::Register_ROUND(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/shape_test.cc b/tensorflow/lite/micro/kernels/shape_test.cc index 5bfdee5bb10..b0827ef3f5f 100755 --- a/tensorflow/lite/micro/kernels/shape_test.cc +++ b/tensorflow/lite/micro/kernels/shape_test.cc @@ -34,7 +34,7 @@ void ValidateShape(TfLiteTensor* tensors, const int tensor_count, const TfLiteRegistration registration = tflite::Register_SHAPE(); micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/softmax_test.cc b/tensorflow/lite/micro/kernels/softmax_test.cc index 16dc6e26dd0..8e3f205332b 100644 --- a/tensorflow/lite/micro/kernels/softmax_test.cc +++ b/tensorflow/lite/micro/kernels/softmax_test.cc @@ -263,8 +263,7 @@ void ValidateSoftmaxGoldens(TfLiteTensor* tensors, const int tensor_count, const TfLiteRegistration registration = Register_SOFTMAX(); 
micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array, - outputs_array, &builtin_data, - micro_test::reporter); + outputs_array, &builtin_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/space_to_batch_nd_test.cc b/tensorflow/lite/micro/kernels/space_to_batch_nd_test.cc index 8707facd345..a5ab5536111 100644 --- a/tensorflow/lite/micro/kernels/space_to_batch_nd_test.cc +++ b/tensorflow/lite/micro/kernels/space_to_batch_nd_test.cc @@ -48,7 +48,7 @@ TfLiteStatus ValidateSpaceToBatchNdGoldens(TfLiteTensor* tensors, const TfLiteRegistration registration = Register_SPACE_TO_BATCH_ND(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_ENSURE_STATUS(runner.InitAndPrepare()); TF_LITE_ENSURE_STATUS(runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/split_test.cc b/tensorflow/lite/micro/kernels/split_test.cc index b5d038cdc3a..1890d8e1fa0 100644 --- a/tensorflow/lite/micro/kernels/split_test.cc +++ b/tensorflow/lite/micro/kernels/split_test.cc @@ -64,7 +64,7 @@ void TestSplitTwoOutputsFloat( const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -133,7 +133,7 @@ void TestSplitFourOutputsFloat( const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, 
runner.Invoke()); @@ -194,7 +194,7 @@ void TestSplitTwoOutputsQuantized( const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -249,7 +249,7 @@ void TestSplitTwoOutputsQuantized32( const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/split_v_test.cc b/tensorflow/lite/micro/kernels/split_v_test.cc index 73c4e1c50f9..6fd3adc45a0 100755 --- a/tensorflow/lite/micro/kernels/split_v_test.cc +++ b/tensorflow/lite/micro/kernels/split_v_test.cc @@ -84,7 +84,7 @@ void TestSplitVFloat(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT_V(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, nullptr, micro_test::reporter); + outputs_array, nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/strided_slice_test.cc b/tensorflow/lite/micro/kernels/strided_slice_test.cc index 7f8446001eb..2225be1418a 100644 --- a/tensorflow/lite/micro/kernels/strided_slice_test.cc +++ b/tensorflow/lite/micro/kernels/strided_slice_test.cc @@ -39,8 +39,7 @@ void ValidateStridedSliceGoldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = tflite::ops::micro::Register_STRIDED_SLICE(); micro::KernelRunner 
runner(registration, tensors, tensors_size, inputs_array, - outputs_array, reinterpret_cast(params), - micro_test::reporter); + outputs_array, reinterpret_cast(params)); if (expect_prepare_err) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, runner.InitAndPrepare()); return; diff --git a/tensorflow/lite/micro/kernels/sub_test.cc b/tensorflow/lite/micro/kernels/sub_test.cc index badca6e14e4..83da86fcc15 100644 --- a/tensorflow/lite/micro/kernels/sub_test.cc +++ b/tensorflow/lite/micro/kernels/sub_test.cc @@ -76,8 +76,7 @@ void ValidateSubGoldens(TfLiteTensor* tensors, int tensors_size, const TfLiteRegistration registration = tflite::ops::micro::Register_SUB(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, &builtin_data, - micro_test::reporter); + outputs_array, &builtin_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/svdf_test.cc b/tensorflow/lite/micro/kernels/svdf_test.cc index aa00494349a..2f3b06e75f8 100644 --- a/tensorflow/lite/micro/kernels/svdf_test.cc +++ b/tensorflow/lite/micro/kernels/svdf_test.cc @@ -499,7 +499,7 @@ void ValidateSVDFGoldens(const int batch_size, const int num_units, const TfLiteRegistration registration = Register_SVDF(); micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array, - outputs_array, ¶ms, micro_test::reporter); + outputs_array, ¶ms); TfLiteStatus init_and_prepare_status = runner.InitAndPrepare(); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, init_and_prepare_status); diff --git a/tensorflow/lite/micro/kernels/tanh_test.cc b/tensorflow/lite/micro/kernels/tanh_test.cc index 52a03aedcff..20401f33f3b 100644 --- a/tensorflow/lite/micro/kernels/tanh_test.cc +++ b/tensorflow/lite/micro/kernels/tanh_test.cc @@ -88,8 +88,7 @@ void TestTanhFloat(const int input_dims_data[], const float* input_data, const TfLiteRegistration registration = tflite::ops::micro::Register_TANH(); 
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, /*builtin_data=*/nullptr, - micro_test::reporter); + outputs_array, /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -132,8 +131,7 @@ void TestTanhQuantized(const int input_dims_data[], const float* input_data, const TfLiteRegistration registration = tflite::ops::micro::Register_TANH(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, - outputs_array, /*builtin_data=*/nullptr, - micro_test::reporter); + outputs_array, /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/unpack_test.cc b/tensorflow/lite/micro/kernels/unpack_test.cc index 95846651cd0..90773a7a5f3 100644 --- a/tensorflow/lite/micro/kernels/unpack_test.cc +++ b/tensorflow/lite/micro/kernels/unpack_test.cc @@ -70,9 +70,9 @@ void TestUnpackThreeOutputsFloat( TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -121,9 +121,9 @@ void TestUnpackOneOutputFloat(const int* input_dims_data, TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), 
micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -183,9 +183,9 @@ void TestUnpackThreeOutputsQuantized( TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -250,9 +250,9 @@ void TestUnpackThreeOutputsQuantized32( TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK(); - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(&builtin_data), micro_test::reporter); + micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, + outputs_array, + reinterpret_cast(&builtin_data)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/kernels/zeros_like_test.cc b/tensorflow/lite/micro/kernels/zeros_like_test.cc index f9bd61532cd..20385ea3bea 100644 --- a/tensorflow/lite/micro/kernels/zeros_like_test.cc +++ b/tensorflow/lite/micro/kernels/zeros_like_test.cc @@ -45,7 +45,7 @@ void TestZerosLikeFloat(const int* input_dims_data, const float* input_data, const TfLiteRegistration registration = Register_ZEROS_LIKE(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, 
outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -77,7 +77,7 @@ void TestZerosLikeInt32(const int* input_dims_data, const int32_t* input_data, const TfLiteRegistration registration = Register_ZEROS_LIKE(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); @@ -109,7 +109,7 @@ void TestZerosLikeInt64(const int* input_dims_data, const int64_t* input_data, const TfLiteRegistration registration = Register_ZEROS_LIKE(); micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array, outputs_array, - /*builtin_data=*/nullptr, micro_test::reporter); + /*builtin_data=*/nullptr); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke()); diff --git a/tensorflow/lite/micro/memory_arena_threshold_test.cc b/tensorflow/lite/micro/memory_arena_threshold_test.cc index e6ee89a7b40..c828210d4ef 100644 --- a/tensorflow/lite/micro/memory_arena_threshold_test.cc +++ b/tensorflow/lite/micro/memory_arena_threshold_test.cc @@ -17,6 +17,7 @@ limitations under the License. 
#include "tensorflow/lite/micro/all_ops_resolver.h" #include "tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/recording_micro_allocator.h" #include "tensorflow/lite/micro/recording_micro_interpreter.h" #include "tensorflow/lite/micro/testing/micro_test.h" @@ -108,7 +109,7 @@ void EnsureAllocatedSizeThreshold(const char* allocation_type, size_t actual, TF_LITE_MICRO_EXPECT_NEAR(actual, expected, expected * kAllocationThreshold); if (actual != expected) { - TF_LITE_REPORT_ERROR(micro_test::reporter, + TF_LITE_REPORT_ERROR(tflite::GetMicroErrorReporter(), "%s threshold failed: %d != %d", allocation_type, actual, expected); } @@ -201,7 +202,7 @@ TF_LITE_MICRO_TEST(TestKeywordModelMemoryThreshold) { tflite::RecordingMicroInterpreter interpreter( tflite::GetModel(g_keyword_scrambled_model_data), all_ops_resolver, keyword_model_tensor_arena, kKeywordModelTensorArenaSize, - micro_test::reporter); + tflite::GetMicroErrorReporter()); interpreter.AllocateTensors(); @@ -229,7 +230,8 @@ TF_LITE_MICRO_TEST(TestConvModelMemoryThreshold) { tflite::AllOpsResolver all_ops_resolver; tflite::RecordingMicroInterpreter interpreter( tflite::GetModel(kTestConvModelData), all_ops_resolver, - test_conv_tensor_arena, kTestConvModelArenaSize, micro_test::reporter); + test_conv_tensor_arena, kTestConvModelArenaSize, + tflite::GetMicroErrorReporter()); interpreter.AllocateTensors(); diff --git a/tensorflow/lite/micro/memory_helpers_test.cc b/tensorflow/lite/micro/memory_helpers_test.cc index 8da02d7b2da..5f28dea3750 100644 --- a/tensorflow/lite/micro/memory_helpers_test.cc +++ b/tensorflow/lite/micro/memory_helpers_test.cc @@ -15,6 +15,7 @@ limitations under the License. 
#include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" @@ -172,17 +173,17 @@ TF_LITE_MICRO_TEST(TestBytesRequiredForTensor) { tflite::testing::Create1dFlatbufferTensor(100); size_t bytes; size_t type_size; - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, tflite::BytesRequiredForTensor(*tensor100, &bytes, &type_size, - micro_test::reporter)); + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::BytesRequiredForTensor( + *tensor100, &bytes, &type_size, + tflite::GetMicroErrorReporter())); TF_LITE_MICRO_EXPECT_EQ(static_cast(400), bytes); TF_LITE_MICRO_EXPECT_EQ(static_cast(4), type_size); const tflite::Tensor* tensor200 = tflite::testing::Create1dFlatbufferTensor(200); - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, tflite::BytesRequiredForTensor(*tensor200, &bytes, &type_size, - micro_test::reporter)); + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::BytesRequiredForTensor( + *tensor200, &bytes, &type_size, + tflite::GetMicroErrorReporter())); TF_LITE_MICRO_EXPECT_EQ(static_cast(800), bytes); TF_LITE_MICRO_EXPECT_EQ(static_cast(4), type_size); } diff --git a/tensorflow/lite/micro/memory_planner/BUILD b/tensorflow/lite/micro/memory_planner/BUILD index 3023e449fcd..e524e85f0ed 100644 --- a/tensorflow/lite/micro/memory_planner/BUILD +++ b/tensorflow/lite/micro/memory_planner/BUILD @@ -38,6 +38,7 @@ cc_library( ":memory_planner", "//tensorflow/lite/c:common", "//tensorflow/lite/micro:micro_compatibility", + "//tensorflow/lite/micro:micro_error_reporter", ], ) diff --git a/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc b/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc index 12e5b392cc5..48b1785ea8c 100644 --- a/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc +++ b/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc @@ -15,6 +15,7 @@ limitations under the License. 
#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/testing/micro_test.h" namespace tflite { diff --git a/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc b/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc index f0b50383dfd..dc136846779 100644 --- a/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc +++ b/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc @@ -15,6 +15,7 @@ limitations under the License. #include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/testing/micro_test.h" TF_LITE_MICRO_TESTS_BEGIN diff --git a/tensorflow/lite/micro/micro_allocator_test.cc b/tensorflow/lite/micro/micro_allocator_test.cc index 26322760b19..53bc55fdbc6 100644 --- a/tensorflow/lite/micro/micro_allocator_test.cc +++ b/tensorflow/lite/micro/micro_allocator_test.cc @@ -18,6 +18,7 @@ limitations under the License. 
#include #include "tensorflow/lite/micro/memory_helpers.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/simple_memory_allocator.h" #include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" @@ -126,8 +127,8 @@ TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = - tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), + arena, arena_size); const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100); const flatbuffers::Vector>* buffers = @@ -135,9 +136,10 @@ TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) { TfLiteTensor allocated_tensor; TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( - simple_allocator, /*allocate_temp=*/false, *tensor, - buffers, micro_test::reporter, &allocated_tensor)); + kTfLiteOk, + tflite::internal::InitializeTfLiteTensorFromFlatbuffer( + simple_allocator, /*allocate_temp=*/false, *tensor, buffers, + tflite::GetMicroErrorReporter(), &allocated_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); @@ -155,8 +157,8 @@ TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = - tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), + arena, arena_size); const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100); const flatbuffers::Vector>* buffers = @@ -166,7 +168,7 @@ TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) { 
TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( simple_allocator, /*allocate_temp=*/true, *tensor, buffers, - micro_test::reporter, &allocated_temp_tensor)); + tflite::GetMicroErrorReporter(), &allocated_temp_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_temp_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_temp_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_temp_tensor.dims->data[0]); @@ -183,8 +185,8 @@ TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = - tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), + arena, arena_size); const tflite::Tensor* tensor = tflite::testing::CreateQuantizedFlatbufferTensor(100); @@ -193,9 +195,10 @@ TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) { TfLiteTensor allocated_tensor; TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( - simple_allocator, /*allocate_temp=*/false, *tensor, - buffers, micro_test::reporter, &allocated_tensor)); + kTfLiteOk, + tflite::internal::InitializeTfLiteTensorFromFlatbuffer( + simple_allocator, /*allocate_temp=*/false, *tensor, buffers, + tflite::GetMicroErrorReporter(), &allocated_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); @@ -210,8 +213,8 @@ TF_LITE_MICRO_TEST(TestMissingQuantization) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = - tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), + arena, arena_size); const tflite::Tensor* tensor = 
tflite::testing::CreateMissingQuantizationFlatbufferTensor(100); @@ -220,9 +223,10 @@ TF_LITE_MICRO_TEST(TestMissingQuantization) { TfLiteTensor allocated_tensor; TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( - simple_allocator, /*allocate_temp=*/false, *tensor, - buffers, micro_test::reporter, &allocated_tensor)); + kTfLiteOk, + tflite::internal::InitializeTfLiteTensorFromFlatbuffer( + simple_allocator, /*allocate_temp=*/false, *tensor, buffers, + tflite::GetMicroErrorReporter(), &allocated_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); @@ -237,8 +241,8 @@ TF_LITE_MICRO_TEST(TestFailsWhenModelStartsTwice) { tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -258,8 +262,8 @@ TF_LITE_MICRO_TEST(TestFailsWithWrongSequence) { tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); // We can't finish allocation before it ever got started. 
@@ -286,8 +290,8 @@ TF_LITE_MICRO_TEST(TestMockModelAllocation) { tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -326,8 +330,8 @@ TF_LITE_MICRO_TEST(TestMultiTenantAllocation) { // Create a shared allocator. constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; @@ -369,8 +373,8 @@ TF_LITE_MICRO_TEST(TestAllocationForModelsWithBranches) { tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -410,8 +414,8 @@ TF_LITE_MICRO_TEST(TestAllocationForComplexModelAllocation) { tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 2048; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); 
TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -485,8 +489,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerBranchesAllOnline) { constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -537,8 +541,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerBasic) { tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -583,8 +587,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerOverlappingAllocation) { tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -635,8 +639,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerOfflineOnline) { tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -658,8 +662,8 @@ TF_LITE_MICRO_TEST(TestAllocatePersistentTfLiteTensor) { const tflite::Model* 
model = tflite::GetModel(kTestConvModelData); constexpr size_t arena_size = 1024 * 12; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(allocator, nullptr); TfLiteTensor* tensor1 = allocator->AllocatePersistentTfLiteTensor( @@ -683,8 +687,8 @@ TF_LITE_MICRO_TEST(TestAllocateSingleTempTfLiteTensor) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(allocator, nullptr); TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor( @@ -696,8 +700,8 @@ TF_LITE_MICRO_TEST(TestAllocateChainOfTfLiteTensor) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(allocator, nullptr); TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor( @@ -717,8 +721,8 @@ TF_LITE_MICRO_TEST(TestAllocateTfLiteTensorWithReset) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, 
tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT(allocator != nullptr); TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor( @@ -770,8 +774,8 @@ TF_LITE_MICRO_TEST(TestOperatorInputsNotInSubgraphInputs) { tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, @@ -826,8 +830,8 @@ TF_LITE_MICRO_TEST(TestTypicalFirstOpAndSecondOpWithScratchTensors) { tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; - tflite::MicroAllocator* allocator = - tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); + tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, diff --git a/tensorflow/lite/micro/micro_error_reporter.cc b/tensorflow/lite/micro/micro_error_reporter.cc index 6d8361cd25a..14f7f28ca0a 100644 --- a/tensorflow/lite/micro/micro_error_reporter.cc +++ b/tensorflow/lite/micro/micro_error_reporter.cc @@ -16,16 +16,20 @@ limitations under the License. 
#include "tensorflow/lite/micro/micro_error_reporter.h" #include +#include +#include -#ifndef TF_LITE_STRIP_ERROR_STRINGS +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) #include "tensorflow/lite/micro/debug_log.h" #include "tensorflow/lite/micro/micro_string.h" #endif -namespace tflite { +namespace { +uint8_t micro_error_reporter_buffer[sizeof(tflite::MicroErrorReporter)]; +tflite::MicroErrorReporter* error_reporter_ = nullptr; -int MicroErrorReporter::Report(const char* format, va_list args) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS +void Log(const char* format, va_list args) { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) // Only pulling in the implementation of this function for builds where we // expect to make use of it to be extra cautious about not increasing the code // size. @@ -35,6 +39,37 @@ int MicroErrorReporter::Report(const char* format, va_list args) { DebugLog(log_buffer); DebugLog("\r\n"); #endif +} + +} // namespace + +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +void MicroPrintf(const char* format, ...) { + va_list args; + va_start(args, format); + Log(format, args); + va_end(args); +} +#endif + +namespace tflite { +ErrorReporter* GetMicroErrorReporter() { +#if !defined(RENODE) + if (error_reporter_ == nullptr) { + error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter(); + } +#else + // TODO(#46937): Until we resolve the global variable issue with Renode, we + // will be creating a new ErrorReporter object each time. While this is + // inefficient, it still allows us to make progress. 
+ error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter(); +#endif + + return error_reporter_; +} + +int MicroErrorReporter::Report(const char* format, va_list args) { + Log(format, args); return 0; } diff --git a/tensorflow/lite/micro/micro_error_reporter.h b/tensorflow/lite/micro/micro_error_reporter.h index e2c073a465d..ac45224ab86 100644 --- a/tensorflow/lite/micro/micro_error_reporter.h +++ b/tensorflow/lite/micro/micro_error_reporter.h @@ -20,8 +20,21 @@ limitations under the License. #include "tensorflow/lite/core/api/error_reporter.h" #include "tensorflow/lite/micro/compatibility.h" +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +// This function can be used independently of the MicroErrorReporter to get +// printf-like functionality, and it is common to all target platforms. +void MicroPrintf(const char* format, ...); +#else +// We use a #define to ensure that the strings are completely stripped, to +// prevent an unnecessary increase in the binary size. +#define MicroPrintf(format, ...) +#endif + namespace tflite { +// Get a pointer to a singleton global error reporter. +ErrorReporter* GetMicroErrorReporter(); + class MicroErrorReporter : public ErrorReporter { public: ~MicroErrorReporter() override {} diff --git a/tensorflow/lite/micro/micro_interpreter_test.cc b/tensorflow/lite/micro/micro_interpreter_test.cc index 3f4bb813d2d..fbc7a99912d 100644 --- a/tensorflow/lite/micro/micro_interpreter_test.cc +++ b/tensorflow/lite/micro/micro_interpreter_test.cc @@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/lite/core/api/flatbuffer_conversions.h" #include "tensorflow/lite/micro/all_ops_resolver.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/micro_utils.h" #include "tensorflow/lite/micro/recording_micro_allocator.h" #include "tensorflow/lite/micro/test_helpers.h" @@ -78,7 +79,7 @@ TF_LITE_MICRO_TEST(TestInterpreter) { { tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer, allocator_buffer_size, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100); TF_LITE_MICRO_EXPECT_EQ(static_cast(1), interpreter.inputs_size()); @@ -128,11 +129,11 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) { // Get simple_model_head_usage. { tflite::RecordingMicroAllocator* allocator = - tflite::RecordingMicroAllocator::Create(arena, arena_size, - micro_test::reporter); + tflite::RecordingMicroAllocator::Create( + arena, arena_size, tflite::GetMicroErrorReporter()); const tflite::Model* model0 = tflite::testing::GetSimpleMockModel(); tflite::MicroInterpreter interpreter0(model0, op_resolver, allocator, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter0.AllocateTensors()); simple_model_head_usage = allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes(); @@ -147,13 +148,13 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) { // Shared allocator for various models. tflite::RecordingMicroAllocator* allocator = tflite::RecordingMicroAllocator::Create(arena, arena_size, - micro_test::reporter); + tflite::GetMicroErrorReporter()); // Get complex_model_head_usage. No head space reuse since it's the first // model allocated in the `allocator`. 
const tflite::Model* model1 = tflite::testing::GetComplexMockModel(); tflite::MicroInterpreter interpreter1(model1, op_resolver, allocator, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter1.AllocateTensors()); TfLiteTensor* input1 = interpreter1.input(0); TfLiteTensor* output1 = interpreter1.output(0); @@ -165,7 +166,7 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) { // the output is correct. const tflite::Model* model2 = tflite::testing::GetSimpleMockModel(); tflite::MicroInterpreter interpreter2(model2, op_resolver, allocator, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter2.AllocateTensors()); TfLiteTensor* input2 = interpreter2.input(0); TfLiteTensor* output2 = interpreter2.output(0); @@ -195,7 +196,7 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) { // head space usage. const tflite::Model* model3 = tflite::testing::GetComplexMockModel(); tflite::MicroInterpreter interpreter3(model3, op_resolver, allocator, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter3.AllocateTensors()); TfLiteTensor* input3 = interpreter3.input(0); TfLiteTensor* output3 = interpreter3.output(0); @@ -221,13 +222,14 @@ TF_LITE_MICRO_TEST(TestKernelMemoryPlanning) { uint8_t allocator_buffer[allocator_buffer_size]; tflite::RecordingMicroAllocator* allocator = - tflite::RecordingMicroAllocator::Create( - allocator_buffer, allocator_buffer_size, micro_test::reporter); + tflite::RecordingMicroAllocator::Create(allocator_buffer, + allocator_buffer_size, + tflite::GetMicroErrorReporter()); // Make sure kernel memory planning works in multi-tenant context. 
for (int i = 0; i < 3; i++) { tflite::MicroInterpreter interpreter(model, op_resolver, allocator, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); TF_LITE_MICRO_EXPECT_EQ(static_cast(1), interpreter.inputs_size()); TF_LITE_MICRO_EXPECT_EQ(static_cast(2), interpreter.outputs_size()); @@ -271,7 +273,7 @@ TF_LITE_MICRO_TEST(TestVariableTensorReset) { uint8_t allocator_buffer[allocator_buffer_size]; tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer, allocator_buffer_size, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 2096 + 100); TF_LITE_MICRO_EXPECT_EQ(static_cast(1), interpreter.inputs_size()); @@ -349,7 +351,7 @@ TF_LITE_MICRO_TEST(TestIncompleteInitialization) { tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer, allocator_buffer_size, - micro_test::reporter); + tflite::GetMicroErrorReporter()); } // Test that an interpreter with a supplied profiler correctly calls the @@ -363,9 +365,9 @@ TF_LITE_MICRO_TEST(InterpreterWithProfilerShouldProfileOps) { constexpr size_t allocator_buffer_size = 2048; uint8_t allocator_buffer[allocator_buffer_size]; tflite::MockProfiler profiler; - tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer, - allocator_buffer_size, - micro_test::reporter, &profiler); + tflite::MicroInterpreter interpreter( + model, op_resolver, allocator_buffer, allocator_buffer_size, + tflite::GetMicroErrorReporter(), &profiler); TF_LITE_MICRO_EXPECT_EQ(profiler.event_starts(), 0); TF_LITE_MICRO_EXPECT_EQ(profiler.event_ends(), 0); @@ -390,12 +392,13 @@ TF_LITE_MICRO_TEST(TestIncompleteInitializationAllocationsWithSmallArena) { uint8_t allocator_buffer[allocator_buffer_size]; tflite::RecordingMicroAllocator* allocator = - tflite::RecordingMicroAllocator::Create( - 
allocator_buffer, allocator_buffer_size, micro_test::reporter); + tflite::RecordingMicroAllocator::Create(allocator_buffer, + allocator_buffer_size, + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); tflite::MicroInterpreter interpreter(model, op_resolver, allocator, - micro_test::reporter); + tflite::GetMicroErrorReporter()); // Interpreter fails because arena is too small: TF_LITE_MICRO_EXPECT_EQ(interpreter.Invoke(), kTfLiteError); @@ -434,12 +437,13 @@ TF_LITE_MICRO_TEST(TestInterpreterDoesNotAllocateUntilInvoke) { uint8_t allocator_buffer[allocator_buffer_size]; tflite::RecordingMicroAllocator* allocator = - tflite::RecordingMicroAllocator::Create( - allocator_buffer, allocator_buffer_size, micro_test::reporter); + tflite::RecordingMicroAllocator::Create(allocator_buffer, + allocator_buffer_size, + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); tflite::MicroInterpreter interpreter(model, op_resolver, allocator, - micro_test::reporter); + tflite::GetMicroErrorReporter()); // Ensure allocations are zero (ignore tail since some internal structs are // initialized with this space): @@ -507,7 +511,7 @@ TF_LITE_MICRO_TEST(TestInterpreterMultipleInputs) { { tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer, allocator_buffer_size, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100); diff --git a/tensorflow/lite/micro/recording_micro_allocator_test.cc b/tensorflow/lite/micro/recording_micro_allocator_test.cc index 6854341de90..b5f4080b2b9 100644 --- a/tensorflow/lite/micro/recording_micro_allocator_test.cc +++ b/tensorflow/lite/micro/recording_micro_allocator_test.cc @@ -16,6 +16,7 @@ limitations under the License. 
#include "tensorflow/lite/micro/recording_micro_allocator.h" #include "tensorflow/lite/micro/all_ops_resolver.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" #include "tensorflow/lite/micro/testing/test_conv_model.h" @@ -44,7 +45,7 @@ TF_LITE_MICRO_TEST(TestRecordsTfLiteEvalTensorArrayData) { tflite::RecordingMicroAllocator* micro_allocator = tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize, - micro_test::reporter); + tflite::GetMicroErrorReporter()); // TODO(b/158102673): ugly workaround for not having fatal assertions. Same // throughout this file. TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr); @@ -88,7 +89,7 @@ TF_LITE_MICRO_TEST(TestRecordsNodeAndRegistrationArrayData) { tflite::RecordingMicroAllocator* micro_allocator = tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr); if (micro_allocator == nullptr) return 1; @@ -128,7 +129,7 @@ TF_LITE_MICRO_TEST(TestRecordsMultiTenantAllocations) { tflite::RecordingMicroAllocator* micro_allocator = tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize * 2, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr); if (micro_allocator == nullptr) return 1; @@ -172,7 +173,7 @@ TF_LITE_MICRO_TEST(TestRecordsPersistentTfLiteTensorData) { tflite::RecordingMicroAllocator* micro_allocator = tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr); if (micro_allocator == nullptr) return 1; @@ -198,7 +199,7 @@ TF_LITE_MICRO_TEST(TestRecordsPersistentTfLiteTensorQuantizationData) { tflite::RecordingMicroAllocator* micro_allocator = tflite::RecordingMicroAllocator::Create(arena, 
kTestConvArenaSize, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr); if (micro_allocator == nullptr) return 1; @@ -244,7 +245,7 @@ TF_LITE_MICRO_TEST(TestRecordsPersistentBufferData) { tflite::RecordingMicroAllocator* micro_allocator = tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize, - micro_test::reporter); + tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr); if (micro_allocator == nullptr) return 1; diff --git a/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc b/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc index 910c991978d..cf9078c86eb 100644 --- a/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc +++ b/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc @@ -17,6 +17,7 @@ limitations under the License. #include +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" @@ -25,8 +26,8 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestRecordsTailAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::RecordingSimpleMemoryAllocator allocator( + tflite::GetMicroErrorReporter(), arena, arena_size); uint8_t* result = allocator.AllocateFromTail(/*size=*/10, /*alignment=*/1); TF_LITE_MICRO_EXPECT_NE(result, nullptr); @@ -48,8 +49,8 @@ TF_LITE_MICRO_TEST(TestRecordsTailAllocations) { TF_LITE_MICRO_TEST(TestRecordsMisalignedTailAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::RecordingSimpleMemoryAllocator allocator( + tflite::GetMicroErrorReporter(), arena, arena_size); uint8_t* result = allocator.AllocateFromTail(/*size=*/10, 
/*alignment=*/12); TF_LITE_MICRO_EXPECT_NE(result, nullptr); @@ -65,8 +66,8 @@ TF_LITE_MICRO_TEST(TestRecordsMisalignedTailAllocations) { TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::RecordingSimpleMemoryAllocator allocator( + tflite::GetMicroErrorReporter(), arena, arena_size); uint8_t* result = allocator.AllocateFromTail(/*size=*/2048, /*alignment=*/1); TF_LITE_MICRO_EXPECT(result == nullptr); @@ -80,8 +81,8 @@ TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) { TF_LITE_MICRO_TEST(TestRecordsHeadSizeAdjustment) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::RecordingSimpleMemoryAllocator allocator( + tflite::GetMicroErrorReporter(), arena, arena_size); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator.SetHeadBufferSize(/*size=*/5, /*alignment=*/1)); @@ -104,8 +105,8 @@ TF_LITE_MICRO_TEST(TestRecordsHeadSizeAdjustment) { TF_LITE_MICRO_TEST(TestRecordsMisalignedHeadSizeAdjustments) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::RecordingSimpleMemoryAllocator allocator( + tflite::GetMicroErrorReporter(), arena, arena_size); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator.SetHeadBufferSize(/*size=*/10, /*alignment=*/12)); @@ -122,8 +123,8 @@ TF_LITE_MICRO_TEST(TestRecordsMisalignedHeadSizeAdjustments) { TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::RecordingSimpleMemoryAllocator allocator( + tflite::GetMicroErrorReporter(), arena, arena_size); 
TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, allocator.SetHeadBufferSize( /*size=*/2048, /*alignment=*/1)); diff --git a/tensorflow/lite/micro/simple_memory_allocator_test.cc b/tensorflow/lite/micro/simple_memory_allocator_test.cc index eea7a7fad86..5b7c2606173 100644 --- a/tensorflow/lite/micro/simple_memory_allocator_test.cc +++ b/tensorflow/lite/micro/simple_memory_allocator_test.cc @@ -17,6 +17,7 @@ limitations under the License. #include +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" @@ -25,8 +26,8 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestEnsureHeadSizeSimpleAlignment) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator.SetHeadBufferSize(/*size=*/100, /*alignment=*/1)); @@ -47,8 +48,8 @@ TF_LITE_MICRO_TEST(TestEnsureHeadSizeSimpleAlignment) { TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignment) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); // First head adjustment of 100 bytes (aligned 12): TF_LITE_MICRO_EXPECT_EQ( @@ -73,8 +74,8 @@ TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignment) { TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignedHandlesCorrectBytesAvailable) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); // First head adjustment of 100 bytes (aligned 12): TF_LITE_MICRO_EXPECT_EQ( @@ -104,8 +105,8 @@ 
TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignedHandlesCorrectBytesAvailable) { TF_LITE_MICRO_TEST(TestGetAvailableMemory) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); constexpr size_t allocation_size = 100; allocator.SetHeadBufferSize(/*size=*/allocation_size, @@ -120,8 +121,8 @@ TF_LITE_MICRO_TEST(TestGetAvailableMemory) { TF_LITE_MICRO_TEST(TestGetAvailableMemoryWithTempAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); constexpr size_t allocation_size = 100; allocator.AllocateTemp(/*size=*/allocation_size, @@ -141,8 +142,8 @@ TF_LITE_MICRO_TEST(TestGetAvailableMemoryWithTempAllocations) { TF_LITE_MICRO_TEST(TestGetUsedBytes) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast(0)); constexpr size_t allocation_size = 100; @@ -157,8 +158,8 @@ TF_LITE_MICRO_TEST(TestGetUsedBytes) { TF_LITE_MICRO_TEST(TestGetUsedBytesTempAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); constexpr size_t allocation_size = 100; allocator.AllocateTemp(/*size=*/allocation_size, @@ -176,8 +177,8 @@ TF_LITE_MICRO_TEST(TestGetUsedBytesTempAllocations) { TF_LITE_MICRO_TEST(TestJustFits) { constexpr size_t arena_size = 1024; 
uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); uint8_t* result = allocator.AllocateFromTail(arena_size, 1); TF_LITE_MICRO_EXPECT(nullptr != result); @@ -186,8 +187,8 @@ TF_LITE_MICRO_TEST(TestJustFits) { TF_LITE_MICRO_TEST(TestAligned) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); uint8_t* result = allocator.AllocateFromTail(1, 1); TF_LITE_MICRO_EXPECT(nullptr != result); @@ -201,8 +202,8 @@ TF_LITE_MICRO_TEST(TestAligned) { TF_LITE_MICRO_TEST(TestMultipleTooLarge) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); uint8_t* result = allocator.AllocateFromTail(768, 1); TF_LITE_MICRO_EXPECT(nullptr != result); @@ -214,8 +215,8 @@ TF_LITE_MICRO_TEST(TestMultipleTooLarge) { TF_LITE_MICRO_TEST(TestTempAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); uint8_t* temp1 = allocator.AllocateTemp(100, 1); TF_LITE_MICRO_EXPECT(nullptr != temp1); @@ -230,8 +231,8 @@ TF_LITE_MICRO_TEST(TestTempAllocations) { TF_LITE_MICRO_TEST(TestResetTempAllocations) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); uint8_t* temp1 = 
allocator.AllocateTemp(100, 1); TF_LITE_MICRO_EXPECT(nullptr != temp1); @@ -248,8 +249,8 @@ TF_LITE_MICRO_TEST(TestResetTempAllocations) { TF_LITE_MICRO_TEST(TestEnsureHeadSizeWithoutResettingTemp) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; - tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena, - arena_size); + tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(), + arena, arena_size); uint8_t* temp = allocator.AllocateTemp(100, 1); TF_LITE_MICRO_EXPECT(nullptr != temp); diff --git a/tensorflow/lite/micro/testing/micro_test.h b/tensorflow/lite/micro/testing/micro_test.h index d74d8f4f1a6..8b08d0f0751 100644 --- a/tensorflow/lite/micro/testing/micro_test.h +++ b/tensorflow/lite/micro/testing/micro_test.h @@ -62,177 +62,162 @@ extern int tests_passed; extern int tests_failed; extern bool is_test_complete; extern bool did_test_fail; -extern tflite::ErrorReporter* reporter; } // namespace micro_test -#define TF_LITE_MICRO_TESTS_BEGIN \ - namespace micro_test { \ - int tests_passed; \ - int tests_failed; \ - bool is_test_complete; \ - bool did_test_fail; \ - tflite::ErrorReporter* reporter; \ - } \ - \ - int main(int argc, char** argv) { \ - micro_test::tests_passed = 0; \ - micro_test::tests_failed = 0; \ - tflite::MicroErrorReporter error_reporter; \ - micro_test::reporter = &error_reporter; +#define TF_LITE_MICRO_TESTS_BEGIN \ + namespace micro_test { \ + int tests_passed; \ + int tests_failed; \ + bool is_test_complete; \ + bool did_test_fail; \ + } \ + \ + int main(int argc, char** argv) { \ + micro_test::tests_passed = 0; \ + micro_test::tests_failed = 0; -#define TF_LITE_MICRO_TESTS_END \ - micro_test::reporter->Report( \ - "%d/%d tests passed", micro_test::tests_passed, \ - (micro_test::tests_failed + micro_test::tests_passed)); \ - if (micro_test::tests_failed == 0) { \ - micro_test::reporter->Report("~~~ALL TESTS PASSED~~~\n"); \ - return kTfLiteOk; \ - } else { \ - micro_test::reporter->Report("~~~SOME 
TESTS FAILED~~~\n"); \ - return kTfLiteError; \ - } \ +#define TF_LITE_MICRO_TESTS_END \ + MicroPrintf("%d/%d tests passed", micro_test::tests_passed, \ + (micro_test::tests_failed + micro_test::tests_passed)); \ + if (micro_test::tests_failed == 0) { \ + MicroPrintf("~~~ALL TESTS PASSED~~~\n"); \ + return kTfLiteOk; \ + } else { \ + MicroPrintf("~~~SOME TESTS FAILED~~~\n"); \ + return kTfLiteError; \ + } \ } // TODO(petewarden): I'm going to hell for what I'm doing to this poor for loop. #define TF_LITE_MICRO_TEST(name) \ - micro_test::reporter->Report("Testing " #name); \ + MicroPrintf("Testing " #name); \ for (micro_test::is_test_complete = false, \ micro_test::did_test_fail = false; \ !micro_test::is_test_complete; micro_test::is_test_complete = true, \ micro_test::tests_passed += (micro_test::did_test_fail) ? 0 : 1, \ micro_test::tests_failed += (micro_test::did_test_fail) ? 1 : 0) -#define TF_LITE_MICRO_EXPECT(x) \ - do { \ - if (!(x)) { \ - micro_test::reporter->Report(#x " failed at %s:%d", __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT(x) \ + do { \ + if (!(x)) { \ + MicroPrintf(#x " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ } while (false) // TODO(b/139142772): this macro is used with types other than ints even though // the printf specifier is %d. 
-#define TF_LITE_MICRO_EXPECT_EQ(x, y) \ - do { \ - auto vx = x; \ - auto vy = y; \ - if ((vx) != (vy)) { \ - micro_test::reporter->Report(#x " == " #y " failed at %s:%d (%d vs %d)", \ - __FILE__, __LINE__, static_cast(vx), \ - static_cast(vy)); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT_EQ(x, y) \ + do { \ + auto vx = x; \ + auto vy = y; \ + if ((vx) != (vy)) { \ + MicroPrintf(#x " == " #y " failed at %s:%d (%d vs %d)", __FILE__, \ + __LINE__, static_cast(vx), static_cast(vy)); \ + micro_test::did_test_fail = true; \ + } \ } while (false) -#define TF_LITE_MICRO_EXPECT_NE(x, y) \ - do { \ - if ((x) == (y)) { \ - micro_test::reporter->Report(#x " != " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -// TODO(wangtz): Making it more generic once needed. -#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \ - epsilon) \ +#define TF_LITE_MICRO_EXPECT_NE(x, y) \ do { \ - auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \ - ? ((arr1)[(idx1)] - (arr2)[(idx2)]) \ - : ((arr2)[(idx2)] - (arr1)[(idx1)]); \ - if (delta > epsilon) { \ - micro_test::reporter->Report( \ - #arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \ - static_cast(idx1), static_cast((arr1)[(idx1)]), \ - static_cast(idx2), static_cast((arr2)[(idx2)]), \ - __FILE__, __LINE__); \ + if ((x) == (y)) { \ + MicroPrintf(#x " != " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ - do { \ - auto vx = (x); \ - auto vy = (y); \ - auto delta = ((vx) > (vy)) ? 
((vx) - (vy)) : ((vy) - (vx)); \ - if (delta > epsilon) { \ - micro_test::reporter->Report( \ - #x " (%f) near " #y " (%f) failed at %s:%d", \ - static_cast(vx), static_cast(vy), __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_GT(x, y) \ - do { \ - if ((x) <= (y)) { \ - micro_test::reporter->Report(#x " > " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_LT(x, y) \ - do { \ - if ((x) >= (y)) { \ - micro_test::reporter->Report(#x " < " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_GE(x, y) \ +// TODO(wangtz): Making it more generic once needed. +#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \ + epsilon) \ do { \ - if ((x) < (y)) { \ - micro_test::reporter->Report(#x " >= " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ + auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \ + ? ((arr1)[(idx1)] - (arr2)[(idx2)]) \ + : ((arr2)[(idx2)] - (arr1)[(idx1)]); \ + if (delta > epsilon) { \ + MicroPrintf(#arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \ + static_cast(idx1), static_cast((arr1)[(idx1)]), \ + static_cast(idx2), static_cast((arr2)[(idx2)]), \ + __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_LE(x, y) \ +#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ do { \ - if ((x) > (y)) { \ - micro_test::reporter->Report(#x " <= " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ + auto vx = (x); \ + auto vy = (y); \ + auto delta = ((vx) > (vy)) ? 
((vx) - (vy)) : ((vy) - (vx)); \ + if (delta > epsilon) { \ + MicroPrintf(#x " (%f) near " #y " (%f) failed at %s:%d", \ + static_cast(vx), static_cast(vy), __FILE__, \ + __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_TRUE(x) \ +#define TF_LITE_MICRO_EXPECT_GT(x, y) \ do { \ - if (!(x)) { \ - micro_test::reporter->Report(#x " was not true failed at %s:%d", \ - __FILE__, __LINE__); \ + if ((x) <= (y)) { \ + MicroPrintf(#x " > " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_FALSE(x) \ +#define TF_LITE_MICRO_EXPECT_LT(x, y) \ + do { \ + if ((x) >= (y)) { \ + MicroPrintf(#x " < " #y " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } while (false) + +#define TF_LITE_MICRO_EXPECT_GE(x, y) \ do { \ - if (x) { \ - micro_test::reporter->Report(#x " was not false failed at %s:%d", \ - __FILE__, __LINE__); \ + if ((x) < (y)) { \ + MicroPrintf(#x " >= " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_FAIL(msg) \ - do { \ - micro_test::reporter->Report("FAIL: %s", msg, __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ +#define TF_LITE_MICRO_EXPECT_LE(x, y) \ + do { \ + if ((x) > (y)) { \ + MicroPrintf(#x " <= " #y " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ } while (false) -#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \ +#define TF_LITE_MICRO_EXPECT_TRUE(x) \ do { \ - for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \ - if (string1[i] != string2[i]) { \ - micro_test::reporter->Report("FAIL: %s did not match %s", string1, \ - string2, __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ + if (!(x)) { \ + MicroPrintf(#x " was not true failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ } \ } while (false) 
+#define TF_LITE_MICRO_EXPECT_FALSE(x) \ + do { \ + if (x) { \ + MicroPrintf(#x " was not false failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } while (false) + +#define TF_LITE_MICRO_FAIL(msg) \ + do { \ + MicroPrintf("FAIL: %s", msg, __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } while (false) + +#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \ + do { \ + for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \ + if (string1[i] != string2[i]) { \ + MicroPrintf("FAIL: %s did not match %s", string1, string2, __FILE__, \ + __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } \ + } while (false) + #endif // TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_ diff --git a/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc b/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc index 6356a1166c6..0a04d9b16f6 100644 --- a/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc @@ -34,6 +34,9 @@ PLATFORM_FLAGS = \ # broken w/o it. Remove this workaround once the issue is resolved. PLATFORM_FLAGS += -DNDEBUG +# TODO(#46937): Remove once initialization of global variables is sorted out. 
+PLATFORM_FLAGS += -DRENODE + CXXFLAGS += $(PLATFORM_FLAGS) -fno-use-cxa-atexit CCFLAGS += $(PLATFORM_FLAGS) diff --git a/tensorflow/lite/micro/tools/make/targets/stm32f4_makefile.inc b/tensorflow/lite/micro/tools/make/targets/stm32f4_makefile.inc index de8c0d735d1..9ad23667c57 100644 --- a/tensorflow/lite/micro/tools/make/targets/stm32f4_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/stm32f4_makefile.inc @@ -22,7 +22,7 @@ ifneq ($(DOWNLOAD_RESULT), SUCCESS) $(error Something went wrong with the CMSIS download: $(DOWNLOAD_RESULT)) endif -# TODO(b/161478030) : change - Wno - vla to - Wvla and remove - Wno-shadow once +# TODO(b/161478030): change -Wno-vla to -Wvla and remove -Wno-shadow once # we have a solution for fixing / avoiding being tripped up by these warnings. PLATFORM_FLAGS = \ -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \ @@ -52,6 +52,10 @@ PLATFORM_FLAGS = \ -fomit-frame-pointer \ -g \ -Os + +# TODO(#46937): Remove once initialization of global variables is sorted out. +PLATFORM_FLAGS += -DRENODE + CXXFLAGS += $(PLATFORM_FLAGS) -std=gnu++11 -fno-rtti -fno-use-cxa-atexit CCFLAGS += $(PLATFORM_FLAGS) LDFLAGS += \