Add a MicroPrintf function that is independent of the ErrorReporter.

Additionally,
  * remove the global error reporter from micro_test.h
  * change all the kernel tests to make use of MicroPrintf
  * add a GetMicroErrorReporter() function that returns a pointer to a
    singleton MicroErrorReporter object.
      - This enables the current change to not spread beyond the tests.
      - Even if we move large parts of the TFLM code to make use of
        MicroPrintf (instead of error_reporter), there is still going to
        be shared TfLite/TFLM code that will need an error_reporter.

Next steps, if we want to continue down this path
  * remove the error_reporter from the TFLM functions and class
    implementations and instead use either MicroPrintf or
    GetMicroErrorReporter()
  * Add new APIs that do not have error_reporter to the TFLM classes and
    functions.
  * Ask users to switch to the new error_reporter-free APIs and
    deprecate the APIs that do make use of the error_reporter.
  * Remove the error_reporter APIs completely.

Prior to this change, we would have to use the ErrorReporter interface
for all the logging.

This was problematic on a few fronts:
 * The name ErrorReporter was often misleading since sometimes we just
   want to log, even when there isn't an error.
 * For even the simplest logging, we need to have access to an
   ErrorReporter object which means that pointers to an ErrorReporter
   are part of most classes in TFLM.

With this change, we can simply call MicroPrintf(), and it can be a no-op
if binary size is important.

If we find this approach useful, we can consider incrementally reducing
the usage of ErrorReporter from TFLM.

Progress towards http://b/158205789

starting to address review comments.

re-do micro_test.h
This commit is contained in:
Advait Jain 2021-02-03 13:48:36 -08:00
parent aeeafe8f66
commit 72499dfa3a
66 changed files with 421 additions and 385 deletions

View File

@ -668,6 +668,7 @@ cc_library(
deps = [
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels/internal:compatibility",
"//tensorflow/lite/micro:micro_error_reporter",
"//tensorflow/lite/micro:micro_framework",
],
)

View File

@ -47,7 +47,7 @@ void TestReluFloat(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = ops::micro::Register_RELU();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -80,7 +80,7 @@ void TestRelu6Float(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = ops::micro::Register_RELU6();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -118,7 +118,7 @@ void TestReluUint8(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = ops::micro::Register_RELU();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -159,7 +159,7 @@ void TestRelu6Uint8(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = ops::micro::Register_RELU6();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -199,7 +199,7 @@ void TestReluInt8(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = ops::micro::Register_RELU();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -239,7 +239,7 @@ void TestRelu6Int8(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = ops::micro::Register_RELU6();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -77,8 +77,7 @@ void ValidateAddGoldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration = ops::micro::Register_ADD();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, &builtin_data,
micro_test::reporter);
outputs_array, &builtin_data);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -134,7 +134,7 @@ TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
TfLiteConvParams* conv_params,
float tolerance = 1e-5) {
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
PopulateContext(tensors, tensors_size, &context);
::tflite::AllOpsResolver resolver;

View File

@ -53,7 +53,7 @@ TfLiteStatus ValidateDepthwiseConvGoldens(const T* expected_output_data,
float tolerance, int tensors_size,
TfLiteTensor* tensors) {
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
PopulateContext(tensors, tensors_size, &context);
::tflite::AllOpsResolver resolver;
const TfLiteRegistration* registration =

View File

@ -67,7 +67,7 @@ void TestFullyConnectedQuantized(
tensors[3].params.zero_point = 0;
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
PopulateContext(tensors, tensors_size, &context);
::tflite::AllOpsResolver resolver;
const TfLiteRegistration* registration =

View File

@ -58,7 +58,7 @@ void TestAveragePoolingQuantized(
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
PopulateContext(tensors, tensors_size, &context);
::tflite::AllOpsResolver resolver;
const TfLiteRegistration* registration =
@ -129,7 +129,7 @@ void TestMaxPoolQuantized(const int* input_dims_data, const T* input_data,
};
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
PopulateContext(tensors, tensors_size, &context);
::tflite::AllOpsResolver resolver;
const TfLiteRegistration* registration =

View File

@ -37,7 +37,7 @@ void ValidateArgMinMaxGoldens(TfLiteTensor* tensors, int tensors_size,
: ops::micro::Register_ARG_MAX();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -48,7 +48,7 @@ TfLiteStatus ValidateBatchToSpaceNdGoldens(TfLiteTensor* tensors,
const TfLiteRegistration registration = Register_BATCH_TO_SPACE_ND();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_ENSURE_STATUS(runner.InitAndPrepare());
TF_LITE_ENSURE_STATUS(runner.Invoke());

View File

@ -46,7 +46,7 @@ void TestCastFloatToInt8(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = Register_CAST();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -78,7 +78,7 @@ void TestCastInt8ToFloat(const int* input_dims_data, const int8_t* input_data,
const TfLiteRegistration registration = Register_CAST();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -45,7 +45,7 @@ void TestCeil(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = ops::micro::Register_CEIL();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -79,7 +79,7 @@ TF_LITE_MICRO_TEST(OutputTensorLength4) {
tflite::ops::micro::Register_CIRCULAR_BUFFER();
tflite::micro::KernelRunner runner = tflite::micro::KernelRunner(
*registration, tensors, tensors_size, inputs_array, outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
const int8_t goldens[5][16] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3},
@ -147,7 +147,7 @@ TF_LITE_MICRO_TEST(OutputTensorLength5) {
tflite::ops::micro::Register_CIRCULAR_BUFFER();
tflite::micro::KernelRunner runner = tflite::micro::KernelRunner(
*registration, tensors, tensors_size, inputs_array, outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
const int8_t goldens[6][20] = {

View File

@ -40,8 +40,7 @@ void TestComparison(const TfLiteRegistration& registration,
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, /*builtin_data=*/nullptr,
micro_test::reporter);
outputs_array, /*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -54,9 +54,9 @@ void TestConcatenateTwoInputs(const int* input1_dims_data,
const TfLiteRegistration registration =
tflite::ops::micro::Register_CONCATENATION();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -101,9 +101,9 @@ void TestConcatenateQuantizedTwoInputs(
const TfLiteRegistration registration =
tflite::ops::micro::Register_CONCATENATION();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -27,10 +27,9 @@ TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
int outputs_array_data[] = {1, 3};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
tflite::MicroErrorReporter reporter;
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<TfLiteStatus*>(conv_params), &reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<TfLiteStatus*>(conv_params));
const char* init_data = reinterpret_cast<const char*>(conv_params);
TfLiteStatus status = runner.InitAndPrepare(init_data);

View File

@ -50,9 +50,9 @@ TfLiteStatus ValidateDepthwiseConvGoldens(
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = Register_DEPTHWISE_CONV_2D();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(conv_params), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(conv_params));
int input_depth = tensors[0].dims->data[3];
int output_depth = tensors[1].dims->data[3];

View File

@ -36,7 +36,7 @@ void ValidateDequantizeGoldens(TfLiteTensor* tensors, int tensors_size,
tflite::ops::micro::Register_DEQUANTIZE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -162,7 +162,7 @@ void TestDetectionPostprocess(
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
micro::KernelRunner runner(*registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
// Using generated data as input to operator.
int data_size = 0;

View File

@ -50,7 +50,7 @@ void TestElementwiseFloat(const TfLiteRegistration& registration,
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -86,7 +86,7 @@ void TestElementwiseBool(const TfLiteRegistration& registration,
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -46,7 +46,7 @@ void TestExp(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = Register_EXP();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -45,8 +45,7 @@ void TestFloor(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = tflite::ops::micro::Register_FLOOR();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, /*builtin_data=*/nullptr,
micro_test::reporter);
outputs_array, /*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -240,9 +240,9 @@ TfLiteStatus ValidateFullyConnectedGoldens(
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = Register_FULLY_CONNECTED();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TfLiteStatus status = runner.InitAndPrepare();
if (status != kTfLiteOk) {

View File

@ -108,8 +108,7 @@ void TestHardSwishQuantized(int size, const T* output_data,
const TfLiteRegistration registration =
tflite::ops::micro::Register_HARD_SWISH();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, /*builtin_data=*/nullptr,
micro_test::reporter);
outputs_array, /*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -188,8 +187,7 @@ void TestHardSwishQuantizedBias(const int size, const T* output_data,
const TfLiteRegistration registration =
tflite::ops::micro::Register_HARD_SWISH();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, /*builtin_data=*/nullptr,
micro_test::reporter);
outputs_array, /*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -241,8 +239,7 @@ void TestHardSwishFloat(const int size, float* output_data,
const TfLiteRegistration registration =
tflite::ops::micro::Register_HARD_SWISH();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, /*builtin_data=*/nullptr,
micro_test::reporter);
outputs_array, /*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -15,6 +15,8 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
namespace tflite {
namespace micro {
@ -30,12 +32,12 @@ uint8_t KernelRunner::kKernelRunnerBuffer_[];
KernelRunner::KernelRunner(const TfLiteRegistration& registration,
TfLiteTensor* tensors, int tensors_size,
TfLiteIntArray* inputs, TfLiteIntArray* outputs,
void* builtin_data, ErrorReporter* error_reporter)
: allocator_(SimpleMemoryAllocator::Create(
error_reporter, kKernelRunnerBuffer_, kKernelRunnerBufferSize_)),
void* builtin_data)
: allocator_(SimpleMemoryAllocator::Create(GetMicroErrorReporter(),
kKernelRunnerBuffer_,
kKernelRunnerBufferSize_)),
registration_(registration),
tensors_(tensors),
error_reporter_(error_reporter) {
tensors_(tensors) {
// Prepare TfLiteContext:
context_.impl_ = static_cast<void*>(this);
context_.ReportError = ReportOpError;
@ -65,8 +67,7 @@ TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data,
TfLiteStatus KernelRunner::Invoke() {
if (registration_.invoke == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter_,
"TfLiteRegistration missing invoke function pointer!");
MicroPrintf("TfLiteRegistration missing invoke function pointer!");
return kTfLiteError;
}
return registration_.invoke(&context_, &node_);
@ -119,10 +120,8 @@ TfLiteStatus KernelRunner::RequestScratchBufferInArena(TfLiteContext* context,
TFLITE_DCHECK(runner != nullptr);
if (runner->scratch_buffer_count_ == kNumScratchBuffers_) {
TF_LITE_REPORT_ERROR(
runner->error_reporter_,
"Exceeded the maximum number of scratch tensors allowed (%d).",
kNumScratchBuffers_);
MicroPrintf("Exceeded the maximum number of scratch tensors allowed (%d).",
kNumScratchBuffers_);
return kTfLiteError;
}
@ -152,13 +151,9 @@ void* KernelRunner::GetScratchBuffer(TfLiteContext* context, int buffer_index) {
void KernelRunner::ReportOpError(struct TfLiteContext* context,
const char* format, ...) {
TFLITE_DCHECK(context != nullptr);
KernelRunner* runner = reinterpret_cast<KernelRunner*>(context->impl_);
TFLITE_DCHECK(runner != nullptr);
va_list args;
va_start(args, format);
TF_LITE_REPORT_ERROR(runner->error_reporter_, format, args);
GetMicroErrorReporter()->Report(format, args);
va_end(args);
}

View File

@ -33,12 +33,10 @@ class KernelRunner {
public:
KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors,
int tensors_size, TfLiteIntArray* inputs,
TfLiteIntArray* outputs, void* builtin_data,
ErrorReporter* error_reporter);
TfLiteIntArray* outputs, void* builtin_data);
// Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct. Any
// exceptions will be reported through the error_reporter and returned as a
// status code here.
// exceptions will be DebugLog'd and returned as a status code.
TfLiteStatus InitAndPrepare(const char* init_data = nullptr,
size_t length = 0);
@ -69,7 +67,6 @@ class KernelRunner {
SimpleMemoryAllocator* allocator_ = nullptr;
const TfLiteRegistration& registration_;
TfLiteTensor* tensors_ = nullptr;
ErrorReporter* error_reporter_ = nullptr;
TfLiteContext context_ = {};
TfLiteNode node_ = {};

View File

@ -77,9 +77,9 @@ void TestL2Normalization(const int* input_dims_data, const T* input_data,
const TfLiteRegistration registration =
ops::micro::Register_L2_NORMALIZATION();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -50,7 +50,7 @@ void TestLogicalOp(const TfLiteRegistration& registration,
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -58,7 +58,7 @@ void ValidateLogisticGoldens(TfLiteTensor* tensors, const int tensor_count,
const TfLiteRegistration registration =
tflite::ops::micro::Register_LOGISTIC();
micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -50,7 +50,7 @@ void TestMaxMinFloat(const TfLiteRegistration& registration,
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -94,7 +94,7 @@ void TestMaxMinQuantized(const TfLiteRegistration& registration,
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -130,7 +130,7 @@ void TestMaxMinQuantizedInt32(
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -55,9 +55,9 @@ void ValidateMulGoldens(TfLiteTensor* tensors, int tensors_size,
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = tflite::ops::micro::Register_MUL();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -46,7 +46,7 @@ void TestNegFloat(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = tflite::ops::micro::Register_NEG();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -35,8 +35,7 @@ void ValidatePackGoldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration = tflite::ops::micro::Register_PACK();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, reinterpret_cast<void*>(&params),
micro_test::reporter);
outputs_array, reinterpret_cast<void*>(&params));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -36,7 +36,7 @@ TfLiteStatus ValidatePadGoldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration = tflite::ops::micro::Register_PAD();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
// Prepare should catch dimension mismatches.
TfLiteStatus prepare_status = runner.InitAndPrepare();
@ -68,7 +68,7 @@ TfLiteStatus ValidatePadV2Goldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration = tflite::ops::micro::Register_PADV2();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
// Prepare should catch dimension mismatches.
TfLiteStatus prepare_status = runner.InitAndPrepare();

View File

@ -46,9 +46,9 @@ void ValidatePoolingGoldens(TfLiteTensor* tensors, int tensors_size,
activation,
{}};
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -35,7 +35,7 @@ void ValidatePreluGoldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration = tflite::ops::micro::Register_PRELU();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -37,7 +37,7 @@ void ValidateQuantizeGoldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration = Register_QUANTIZE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -77,7 +77,7 @@ TfLiteStatus ValidateReduceGoldens(TfLiteTensor* tensors, int tensors_size,
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, params, micro_test::reporter);
outputs_array, params);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -40,7 +40,7 @@ void ValidateReshapeGoldens(
tflite::ops::micro::Register_RESHAPE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
if (expect_failure) {
TF_LITE_MICRO_EXPECT_NE(kTfLiteOk, runner.InitAndPrepare());

View File

@ -75,8 +75,7 @@ void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data,
const TfLiteRegistration registration =
tflite::ops::micro::Register_RESIZE_NEAREST_NEIGHBOR();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, &builtin_data,
micro_test::reporter);
outputs_array, &builtin_data);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -44,7 +44,7 @@ void TestRound(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = tflite::ops::micro::Register_ROUND();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -34,7 +34,7 @@ void ValidateShape(TfLiteTensor* tensors, const int tensor_count,
const TfLiteRegistration registration = tflite::Register_SHAPE();
micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -263,8 +263,7 @@ void ValidateSoftmaxGoldens(TfLiteTensor* tensors, const int tensor_count,
const TfLiteRegistration registration = Register_SOFTMAX();
micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array,
outputs_array, &builtin_data,
micro_test::reporter);
outputs_array, &builtin_data);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -48,7 +48,7 @@ TfLiteStatus ValidateSpaceToBatchNdGoldens(TfLiteTensor* tensors,
const TfLiteRegistration registration = Register_SPACE_TO_BATCH_ND();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_ENSURE_STATUS(runner.InitAndPrepare());
TF_LITE_ENSURE_STATUS(runner.Invoke());

View File

@ -64,7 +64,7 @@ void TestSplitTwoOutputsFloat(
const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -133,7 +133,7 @@ void TestSplitFourOutputsFloat(
const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -194,7 +194,7 @@ void TestSplitTwoOutputsQuantized(
const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -249,7 +249,7 @@ void TestSplitTwoOutputsQuantized32(
const TfLiteRegistration registration = tflite::ops::micro::Register_SPLIT();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -84,7 +84,7 @@ void TestSplitVFloat(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration =
tflite::ops::micro::Register_SPLIT_V();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, nullptr, micro_test::reporter);
outputs_array, nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -39,8 +39,7 @@ void ValidateStridedSliceGoldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration =
tflite::ops::micro::Register_STRIDED_SLICE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, reinterpret_cast<void*>(params),
micro_test::reporter);
outputs_array, reinterpret_cast<void*>(params));
if (expect_prepare_err) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, runner.InitAndPrepare());
return;

View File

@ -76,8 +76,7 @@ void ValidateSubGoldens(TfLiteTensor* tensors, int tensors_size,
const TfLiteRegistration registration = tflite::ops::micro::Register_SUB();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, &builtin_data,
micro_test::reporter);
outputs_array, &builtin_data);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -499,7 +499,7 @@ void ValidateSVDFGoldens(const int batch_size, const int num_units,
const TfLiteRegistration registration = Register_SVDF();
micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array,
outputs_array, &params, micro_test::reporter);
outputs_array, &params);
TfLiteStatus init_and_prepare_status = runner.InitAndPrepare();
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, init_and_prepare_status);

View File

@ -88,8 +88,7 @@ void TestTanhFloat(const int input_dims_data[], const float* input_data,
const TfLiteRegistration registration = tflite::ops::micro::Register_TANH();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, /*builtin_data=*/nullptr,
micro_test::reporter);
outputs_array, /*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -132,8 +131,7 @@ void TestTanhQuantized(const int input_dims_data[], const float* input_data,
const TfLiteRegistration registration = tflite::ops::micro::Register_TANH();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array, /*builtin_data=*/nullptr,
micro_test::reporter);
outputs_array, /*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -70,9 +70,9 @@ void TestUnpackThreeOutputsFloat(
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -121,9 +121,9 @@ void TestUnpackOneOutputFloat(const int* input_dims_data,
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -183,9 +183,9 @@ void TestUnpackThreeOutputsQuantized(
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -250,9 +250,9 @@ void TestUnpackThreeOutputsQuantized32(
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration = tflite::ops::micro::Register_UNPACK();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(&builtin_data), micro_test::reporter);
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
reinterpret_cast<void*>(&builtin_data));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -45,7 +45,7 @@ void TestZerosLikeFloat(const int* input_dims_data, const float* input_data,
const TfLiteRegistration registration = Register_ZEROS_LIKE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -77,7 +77,7 @@ void TestZerosLikeInt32(const int* input_dims_data, const int32_t* input_data,
const TfLiteRegistration registration = Register_ZEROS_LIKE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
@ -109,7 +109,7 @@ void TestZerosLikeInt64(const int* input_dims_data, const int64_t* input_data,
const TfLiteRegistration registration = Register_ZEROS_LIKE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
outputs_array,
/*builtin_data=*/nullptr, micro_test::reporter);
/*builtin_data=*/nullptr);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

View File

@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/recording_micro_interpreter.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
@ -108,7 +109,7 @@ void EnsureAllocatedSizeThreshold(const char* allocation_type, size_t actual,
TF_LITE_MICRO_EXPECT_NEAR(actual, expected,
expected * kAllocationThreshold);
if (actual != expected) {
TF_LITE_REPORT_ERROR(micro_test::reporter,
TF_LITE_REPORT_ERROR(tflite::GetMicroErrorReporter(),
"%s threshold failed: %d != %d", allocation_type,
actual, expected);
}
@ -201,7 +202,7 @@ TF_LITE_MICRO_TEST(TestKeywordModelMemoryThreshold) {
tflite::RecordingMicroInterpreter interpreter(
tflite::GetModel(g_keyword_scrambled_model_data), all_ops_resolver,
keyword_model_tensor_arena, kKeywordModelTensorArenaSize,
micro_test::reporter);
tflite::GetMicroErrorReporter());
interpreter.AllocateTensors();
@ -229,7 +230,8 @@ TF_LITE_MICRO_TEST(TestConvModelMemoryThreshold) {
tflite::AllOpsResolver all_ops_resolver;
tflite::RecordingMicroInterpreter interpreter(
tflite::GetModel(kTestConvModelData), all_ops_resolver,
test_conv_tensor_arena, kTestConvModelArenaSize, micro_test::reporter);
test_conv_tensor_arena, kTestConvModelArenaSize,
tflite::GetMicroErrorReporter());
interpreter.AllocateTensors();

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
@ -172,17 +173,17 @@ TF_LITE_MICRO_TEST(TestBytesRequiredForTensor) {
tflite::testing::Create1dFlatbufferTensor(100);
size_t bytes;
size_t type_size;
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::BytesRequiredForTensor(*tensor100, &bytes, &type_size,
micro_test::reporter));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::BytesRequiredForTensor(
*tensor100, &bytes, &type_size,
tflite::GetMicroErrorReporter()));
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), bytes);
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), type_size);
const tflite::Tensor* tensor200 =
tflite::testing::Create1dFlatbufferTensor(200);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::BytesRequiredForTensor(*tensor200, &bytes, &type_size,
micro_test::reporter));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::BytesRequiredForTensor(
*tensor200, &bytes, &type_size,
tflite::GetMicroErrorReporter()));
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(800), bytes);
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), type_size);
}

View File

@ -38,6 +38,7 @@ cc_library(
":memory_planner",
"//tensorflow/lite/c:common",
"//tensorflow/lite/micro:micro_compatibility",
"//tensorflow/lite/micro:micro_error_reporter",
],
)

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace tflite {

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
TF_LITE_MICRO_TESTS_BEGIN

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <cstdint>
#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/simple_memory_allocator.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
@ -126,8 +127,8 @@ TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator* simple_allocator =
tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
arena, arena_size);
const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100);
const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
@ -135,9 +136,10 @@ TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) {
TfLiteTensor allocated_tensor;
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
simple_allocator, /*allocate_temp=*/false, *tensor,
buffers, micro_test::reporter, &allocated_tensor));
kTfLiteOk,
tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
simple_allocator, /*allocate_temp=*/false, *tensor, buffers,
tflite::GetMicroErrorReporter(), &allocated_tensor));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type);
TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size);
TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]);
@ -155,8 +157,8 @@ TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator* simple_allocator =
tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
arena, arena_size);
const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100);
const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
@ -166,7 +168,7 @@ TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) {
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
simple_allocator, /*allocate_temp=*/true, *tensor, buffers,
micro_test::reporter, &allocated_temp_tensor));
tflite::GetMicroErrorReporter(), &allocated_temp_tensor));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_temp_tensor.type);
TF_LITE_MICRO_EXPECT_EQ(1, allocated_temp_tensor.dims->size);
TF_LITE_MICRO_EXPECT_EQ(100, allocated_temp_tensor.dims->data[0]);
@ -183,8 +185,8 @@ TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator* simple_allocator =
tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
arena, arena_size);
const tflite::Tensor* tensor =
tflite::testing::CreateQuantizedFlatbufferTensor(100);
@ -193,9 +195,10 @@ TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) {
TfLiteTensor allocated_tensor;
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
simple_allocator, /*allocate_temp=*/false, *tensor,
buffers, micro_test::reporter, &allocated_tensor));
kTfLiteOk,
tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
simple_allocator, /*allocate_temp=*/false, *tensor, buffers,
tflite::GetMicroErrorReporter(), &allocated_tensor));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type);
TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size);
TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]);
@ -210,8 +213,8 @@ TF_LITE_MICRO_TEST(TestMissingQuantization) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator* simple_allocator =
tflite::SimpleMemoryAllocator::Create(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
arena, arena_size);
const tflite::Tensor* tensor =
tflite::testing::CreateMissingQuantizationFlatbufferTensor(100);
@ -220,9 +223,10 @@ TF_LITE_MICRO_TEST(TestMissingQuantization) {
TfLiteTensor allocated_tensor;
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
simple_allocator, /*allocate_temp=*/false, *tensor,
buffers, micro_test::reporter, &allocated_tensor));
kTfLiteOk,
tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
simple_allocator, /*allocate_temp=*/false, *tensor, buffers,
tflite::GetMicroErrorReporter(), &allocated_tensor));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type);
TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size);
TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]);
@ -237,8 +241,8 @@ TF_LITE_MICRO_TEST(TestFailsWhenModelStartsTwice) {
tflite::NodeAndRegistration* node_and_registration;
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT(nullptr != allocator);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -258,8 +262,8 @@ TF_LITE_MICRO_TEST(TestFailsWithWrongSequence) {
tflite::NodeAndRegistration* node_and_registration;
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);
// We can't finish allocation before it ever got started.
@ -286,8 +290,8 @@ TF_LITE_MICRO_TEST(TestMockModelAllocation) {
tflite::NodeAndRegistration* node_and_registration;
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT(nullptr != allocator);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -326,8 +330,8 @@ TF_LITE_MICRO_TEST(TestMultiTenantAllocation) {
// Create a shared allocator.
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);
TfLiteEvalTensor* eval_tensors = nullptr;
tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
@ -369,8 +373,8 @@ TF_LITE_MICRO_TEST(TestAllocationForModelsWithBranches) {
tflite::NodeAndRegistration* node_and_registration;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -410,8 +414,8 @@ TF_LITE_MICRO_TEST(TestAllocationForComplexModelAllocation) {
tflite::NodeAndRegistration* node_and_registration;
constexpr size_t arena_size = 2048;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT(nullptr != allocator);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -485,8 +489,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerBranchesAllOnline) {
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -537,8 +541,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerBasic) {
tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -583,8 +587,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerOverlappingAllocation) {
tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -635,8 +639,8 @@ TF_LITE_MICRO_TEST(OfflinePlannerOfflineOnline) {
tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -658,8 +662,8 @@ TF_LITE_MICRO_TEST(TestAllocatePersistentTfLiteTensor) {
const tflite::Model* model = tflite::GetModel(kTestConvModelData);
constexpr size_t arena_size = 1024 * 12;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(allocator, nullptr);
TfLiteTensor* tensor1 = allocator->AllocatePersistentTfLiteTensor(
@ -683,8 +687,8 @@ TF_LITE_MICRO_TEST(TestAllocateSingleTempTfLiteTensor) {
const tflite::Model* model = tflite::testing::GetSimpleMockModel();
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(allocator, nullptr);
TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor(
@ -696,8 +700,8 @@ TF_LITE_MICRO_TEST(TestAllocateChainOfTfLiteTensor) {
const tflite::Model* model = tflite::testing::GetSimpleMockModel();
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(allocator, nullptr);
TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor(
@ -717,8 +721,8 @@ TF_LITE_MICRO_TEST(TestAllocateTfLiteTensorWithReset) {
const tflite::Model* model = tflite::testing::GetSimpleMockModel();
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT(allocator != nullptr);
TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor(
@ -770,8 +774,8 @@ TF_LITE_MICRO_TEST(TestOperatorInputsNotInSubgraphInputs) {
tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,
@ -826,8 +830,8 @@ TF_LITE_MICRO_TEST(TestTypicalFirstOpAndSecondOpWithScratchTensors) {
tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter);
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk,

View File

@ -16,16 +16,20 @@ limitations under the License.
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include <cstdarg>
#include <cstdint>
#include <new>
#ifndef TF_LITE_STRIP_ERROR_STRINGS
#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
#include "tensorflow/lite/micro/debug_log.h"
#include "tensorflow/lite/micro/micro_string.h"
#endif
namespace tflite {
namespace {
uint8_t micro_error_reporter_buffer[sizeof(tflite::MicroErrorReporter)];
tflite::MicroErrorReporter* error_reporter_ = nullptr;
int MicroErrorReporter::Report(const char* format, va_list args) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
void Log(const char* format, va_list args) {
#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
// Only pulling in the implementation of this function for builds where we
// expect to make use of it to be extra cautious about not increasing the code
// size.
@ -35,6 +39,37 @@ int MicroErrorReporter::Report(const char* format, va_list args) {
DebugLog(log_buffer);
DebugLog("\r\n");
#endif
}
} // namespace
#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
void MicroPrintf(const char* format, ...) {
va_list args;
va_start(args, format);
Log(format, args);
va_end(args);
}
#endif
namespace tflite {
ErrorReporter* GetMicroErrorReporter() {
#if !defined(RENODE)
if (error_reporter_ == nullptr) {
error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter();
}
#else
// TODO(#46937): Until we resolve the global variable issue with Renode, we
// will be creating a new ErrorReporter object each time. While this is
// inefficient, it still allows us to make progress.
error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter();
#endif
return error_reporter_;
}
int MicroErrorReporter::Report(const char* format, va_list args) {
Log(format, args);
return 0;
}

View File

@ -20,8 +20,21 @@ limitations under the License.
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/micro/compatibility.h"
#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
// This function can be used independent of the MicroErrorReporter to get
// printf-like functionalitys and are common to all target platforms.
void MicroPrintf(const char* format, ...);
#else
// We use a #define to ensure that the strings are completely stripped, to
// prevent an unnecessary increase in the binary size.
#define MicroPrintf(format, ...)
#endif
namespace tflite {
// Get a pointer to a singleton global error reporter.
ErrorReporter* GetMicroErrorReporter();
class MicroErrorReporter : public ErrorReporter {
public:
~MicroErrorReporter() override {}

View File

@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/test_helpers.h"
@ -78,7 +79,7 @@ TF_LITE_MICRO_TEST(TestInterpreter) {
{
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
allocator_buffer_size,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100);
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
@ -128,11 +129,11 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) {
// Get simple_model_head_usage.
{
tflite::RecordingMicroAllocator* allocator =
tflite::RecordingMicroAllocator::Create(arena, arena_size,
micro_test::reporter);
tflite::RecordingMicroAllocator::Create(
arena, arena_size, tflite::GetMicroErrorReporter());
const tflite::Model* model0 = tflite::testing::GetSimpleMockModel();
tflite::MicroInterpreter interpreter0(model0, op_resolver, allocator,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter0.AllocateTensors());
simple_model_head_usage =
allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes();
@ -147,13 +148,13 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) {
// Shared allocator for various models.
tflite::RecordingMicroAllocator* allocator =
tflite::RecordingMicroAllocator::Create(arena, arena_size,
micro_test::reporter);
tflite::GetMicroErrorReporter());
// Get complex_model_head_usage. No head space reuse since it's the first
// model allocated in the `allocator`.
const tflite::Model* model1 = tflite::testing::GetComplexMockModel();
tflite::MicroInterpreter interpreter1(model1, op_resolver, allocator,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter1.AllocateTensors());
TfLiteTensor* input1 = interpreter1.input(0);
TfLiteTensor* output1 = interpreter1.output(0);
@ -165,7 +166,7 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) {
// the output is correct.
const tflite::Model* model2 = tflite::testing::GetSimpleMockModel();
tflite::MicroInterpreter interpreter2(model2, op_resolver, allocator,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter2.AllocateTensors());
TfLiteTensor* input2 = interpreter2.input(0);
TfLiteTensor* output2 = interpreter2.output(0);
@ -195,7 +196,7 @@ TF_LITE_MICRO_TEST(TestMultiTenantInterpreter) {
// head space usage.
const tflite::Model* model3 = tflite::testing::GetComplexMockModel();
tflite::MicroInterpreter interpreter3(model3, op_resolver, allocator,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter3.AllocateTensors());
TfLiteTensor* input3 = interpreter3.input(0);
TfLiteTensor* output3 = interpreter3.output(0);
@ -221,13 +222,14 @@ TF_LITE_MICRO_TEST(TestKernelMemoryPlanning) {
uint8_t allocator_buffer[allocator_buffer_size];
tflite::RecordingMicroAllocator* allocator =
tflite::RecordingMicroAllocator::Create(
allocator_buffer, allocator_buffer_size, micro_test::reporter);
tflite::RecordingMicroAllocator::Create(allocator_buffer,
allocator_buffer_size,
tflite::GetMicroErrorReporter());
// Make sure kernel memory planning works in multi-tenant context.
for (int i = 0; i < 3; i++) {
tflite::MicroInterpreter interpreter(model, op_resolver, allocator,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(2), interpreter.outputs_size());
@ -271,7 +273,7 @@ TF_LITE_MICRO_TEST(TestVariableTensorReset) {
uint8_t allocator_buffer[allocator_buffer_size];
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
allocator_buffer_size,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 2096 + 100);
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
@ -349,7 +351,7 @@ TF_LITE_MICRO_TEST(TestIncompleteInitialization) {
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
allocator_buffer_size,
micro_test::reporter);
tflite::GetMicroErrorReporter());
}
// Test that an interpreter with a supplied profiler correctly calls the
@ -363,9 +365,9 @@ TF_LITE_MICRO_TEST(InterpreterWithProfilerShouldProfileOps) {
constexpr size_t allocator_buffer_size = 2048;
uint8_t allocator_buffer[allocator_buffer_size];
tflite::MockProfiler profiler;
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
allocator_buffer_size,
micro_test::reporter, &profiler);
tflite::MicroInterpreter interpreter(
model, op_resolver, allocator_buffer, allocator_buffer_size,
tflite::GetMicroErrorReporter(), &profiler);
TF_LITE_MICRO_EXPECT_EQ(profiler.event_starts(), 0);
TF_LITE_MICRO_EXPECT_EQ(profiler.event_ends(), 0);
@ -390,12 +392,13 @@ TF_LITE_MICRO_TEST(TestIncompleteInitializationAllocationsWithSmallArena) {
uint8_t allocator_buffer[allocator_buffer_size];
tflite::RecordingMicroAllocator* allocator =
tflite::RecordingMicroAllocator::Create(
allocator_buffer, allocator_buffer_size, micro_test::reporter);
tflite::RecordingMicroAllocator::Create(allocator_buffer,
allocator_buffer_size,
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);
tflite::MicroInterpreter interpreter(model, op_resolver, allocator,
micro_test::reporter);
tflite::GetMicroErrorReporter());
// Interpreter fails because arena is too small:
TF_LITE_MICRO_EXPECT_EQ(interpreter.Invoke(), kTfLiteError);
@ -434,12 +437,13 @@ TF_LITE_MICRO_TEST(TestInterpreterDoesNotAllocateUntilInvoke) {
uint8_t allocator_buffer[allocator_buffer_size];
tflite::RecordingMicroAllocator* allocator =
tflite::RecordingMicroAllocator::Create(
allocator_buffer, allocator_buffer_size, micro_test::reporter);
tflite::RecordingMicroAllocator::Create(allocator_buffer,
allocator_buffer_size,
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);
tflite::MicroInterpreter interpreter(model, op_resolver, allocator,
micro_test::reporter);
tflite::GetMicroErrorReporter());
// Ensure allocations are zero (ignore tail since some internal structs are
// initialized with this space):
@ -507,7 +511,7 @@ TF_LITE_MICRO_TEST(TestInterpreterMultipleInputs) {
{
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
allocator_buffer_size,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100);

View File

@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/lite/micro/recording_micro_allocator.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_conv_model.h"
@ -44,7 +45,7 @@ TF_LITE_MICRO_TEST(TestRecordsTfLiteEvalTensorArrayData) {
tflite::RecordingMicroAllocator* micro_allocator =
tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize,
micro_test::reporter);
tflite::GetMicroErrorReporter());
// TODO(b/158102673): ugly workaround for not having fatal assertions. Same
// throughout this file.
TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr);
@ -88,7 +89,7 @@ TF_LITE_MICRO_TEST(TestRecordsNodeAndRegistrationArrayData) {
tflite::RecordingMicroAllocator* micro_allocator =
tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr);
if (micro_allocator == nullptr) return 1;
@ -128,7 +129,7 @@ TF_LITE_MICRO_TEST(TestRecordsMultiTenantAllocations) {
tflite::RecordingMicroAllocator* micro_allocator =
tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize * 2,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr);
if (micro_allocator == nullptr) return 1;
@ -172,7 +173,7 @@ TF_LITE_MICRO_TEST(TestRecordsPersistentTfLiteTensorData) {
tflite::RecordingMicroAllocator* micro_allocator =
tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr);
if (micro_allocator == nullptr) return 1;
@ -198,7 +199,7 @@ TF_LITE_MICRO_TEST(TestRecordsPersistentTfLiteTensorQuantizationData) {
tflite::RecordingMicroAllocator* micro_allocator =
tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr);
if (micro_allocator == nullptr) return 1;
@ -244,7 +245,7 @@ TF_LITE_MICRO_TEST(TestRecordsPersistentBufferData) {
tflite::RecordingMicroAllocator* micro_allocator =
tflite::RecordingMicroAllocator::Create(arena, kTestConvArenaSize,
micro_test::reporter);
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_NE(micro_allocator, nullptr);
if (micro_allocator == nullptr) return 1;

View File

@ -17,6 +17,7 @@ limitations under the License.
#include <cstdint>
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
@ -25,8 +26,8 @@ TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(TestRecordsTailAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::RecordingSimpleMemoryAllocator allocator(
tflite::GetMicroErrorReporter(), arena, arena_size);
uint8_t* result = allocator.AllocateFromTail(/*size=*/10, /*alignment=*/1);
TF_LITE_MICRO_EXPECT_NE(result, nullptr);
@ -48,8 +49,8 @@ TF_LITE_MICRO_TEST(TestRecordsTailAllocations) {
TF_LITE_MICRO_TEST(TestRecordsMisalignedTailAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::RecordingSimpleMemoryAllocator allocator(
tflite::GetMicroErrorReporter(), arena, arena_size);
uint8_t* result = allocator.AllocateFromTail(/*size=*/10, /*alignment=*/12);
TF_LITE_MICRO_EXPECT_NE(result, nullptr);
@ -65,8 +66,8 @@ TF_LITE_MICRO_TEST(TestRecordsMisalignedTailAllocations) {
TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::RecordingSimpleMemoryAllocator allocator(
tflite::GetMicroErrorReporter(), arena, arena_size);
uint8_t* result = allocator.AllocateFromTail(/*size=*/2048, /*alignment=*/1);
TF_LITE_MICRO_EXPECT(result == nullptr);
@ -80,8 +81,8 @@ TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) {
TF_LITE_MICRO_TEST(TestRecordsHeadSizeAdjustment) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::RecordingSimpleMemoryAllocator allocator(
tflite::GetMicroErrorReporter(), arena, arena_size);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, allocator.SetHeadBufferSize(/*size=*/5, /*alignment=*/1));
@ -104,8 +105,8 @@ TF_LITE_MICRO_TEST(TestRecordsHeadSizeAdjustment) {
TF_LITE_MICRO_TEST(TestRecordsMisalignedHeadSizeAdjustments) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::RecordingSimpleMemoryAllocator allocator(
tflite::GetMicroErrorReporter(), arena, arena_size);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, allocator.SetHeadBufferSize(/*size=*/10, /*alignment=*/12));
@ -122,8 +123,8 @@ TF_LITE_MICRO_TEST(TestRecordsMisalignedHeadSizeAdjustments) {
TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::RecordingSimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::RecordingSimpleMemoryAllocator allocator(
tflite::GetMicroErrorReporter(), arena, arena_size);
TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, allocator.SetHeadBufferSize(
/*size=*/2048, /*alignment=*/1));

View File

@ -17,6 +17,7 @@ limitations under the License.
#include <cstdint>
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
@ -25,8 +26,8 @@ TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(TestEnsureHeadSizeSimpleAlignment) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
TF_LITE_MICRO_EXPECT_EQ(
kTfLiteOk, allocator.SetHeadBufferSize(/*size=*/100, /*alignment=*/1));
@ -47,8 +48,8 @@ TF_LITE_MICRO_TEST(TestEnsureHeadSizeSimpleAlignment) {
TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignment) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
// First head adjustment of 100 bytes (aligned 12):
TF_LITE_MICRO_EXPECT_EQ(
@ -73,8 +74,8 @@ TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignment) {
TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignedHandlesCorrectBytesAvailable) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
// First head adjustment of 100 bytes (aligned 12):
TF_LITE_MICRO_EXPECT_EQ(
@ -104,8 +105,8 @@ TF_LITE_MICRO_TEST(TestAdjustHeadSizeMisalignedHandlesCorrectBytesAvailable) {
TF_LITE_MICRO_TEST(TestGetAvailableMemory) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
constexpr size_t allocation_size = 100;
allocator.SetHeadBufferSize(/*size=*/allocation_size,
@ -120,8 +121,8 @@ TF_LITE_MICRO_TEST(TestGetAvailableMemory) {
TF_LITE_MICRO_TEST(TestGetAvailableMemoryWithTempAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
constexpr size_t allocation_size = 100;
allocator.AllocateTemp(/*size=*/allocation_size,
@ -141,8 +142,8 @@ TF_LITE_MICRO_TEST(TestGetAvailableMemoryWithTempAllocations) {
TF_LITE_MICRO_TEST(TestGetUsedBytes) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast<size_t>(0));
constexpr size_t allocation_size = 100;
@ -157,8 +158,8 @@ TF_LITE_MICRO_TEST(TestGetUsedBytes) {
TF_LITE_MICRO_TEST(TestGetUsedBytesTempAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
constexpr size_t allocation_size = 100;
allocator.AllocateTemp(/*size=*/allocation_size,
@ -176,8 +177,8 @@ TF_LITE_MICRO_TEST(TestGetUsedBytesTempAllocations) {
TF_LITE_MICRO_TEST(TestJustFits) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
uint8_t* result = allocator.AllocateFromTail(arena_size, 1);
TF_LITE_MICRO_EXPECT(nullptr != result);
@ -186,8 +187,8 @@ TF_LITE_MICRO_TEST(TestJustFits) {
TF_LITE_MICRO_TEST(TestAligned) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
uint8_t* result = allocator.AllocateFromTail(1, 1);
TF_LITE_MICRO_EXPECT(nullptr != result);
@ -201,8 +202,8 @@ TF_LITE_MICRO_TEST(TestAligned) {
TF_LITE_MICRO_TEST(TestMultipleTooLarge) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
uint8_t* result = allocator.AllocateFromTail(768, 1);
TF_LITE_MICRO_EXPECT(nullptr != result);
@ -214,8 +215,8 @@ TF_LITE_MICRO_TEST(TestMultipleTooLarge) {
TF_LITE_MICRO_TEST(TestTempAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
uint8_t* temp1 = allocator.AllocateTemp(100, 1);
TF_LITE_MICRO_EXPECT(nullptr != temp1);
@ -230,8 +231,8 @@ TF_LITE_MICRO_TEST(TestTempAllocations) {
TF_LITE_MICRO_TEST(TestResetTempAllocations) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
uint8_t* temp1 = allocator.AllocateTemp(100, 1);
TF_LITE_MICRO_EXPECT(nullptr != temp1);
@ -248,8 +249,8 @@ TF_LITE_MICRO_TEST(TestResetTempAllocations) {
TF_LITE_MICRO_TEST(TestEnsureHeadSizeWithoutResettingTemp) {
constexpr size_t arena_size = 1024;
uint8_t arena[arena_size];
tflite::SimpleMemoryAllocator allocator(micro_test::reporter, arena,
arena_size);
tflite::SimpleMemoryAllocator allocator(tflite::GetMicroErrorReporter(),
arena, arena_size);
uint8_t* temp = allocator.AllocateTemp(100, 1);
TF_LITE_MICRO_EXPECT(nullptr != temp);

View File

@ -62,177 +62,162 @@ extern int tests_passed;
extern int tests_failed;
extern bool is_test_complete;
extern bool did_test_fail;
extern tflite::ErrorReporter* reporter;
} // namespace micro_test
#define TF_LITE_MICRO_TESTS_BEGIN \
namespace micro_test { \
int tests_passed; \
int tests_failed; \
bool is_test_complete; \
bool did_test_fail; \
tflite::ErrorReporter* reporter; \
} \
\
int main(int argc, char** argv) { \
micro_test::tests_passed = 0; \
micro_test::tests_failed = 0; \
tflite::MicroErrorReporter error_reporter; \
micro_test::reporter = &error_reporter;
#define TF_LITE_MICRO_TESTS_BEGIN \
namespace micro_test { \
int tests_passed; \
int tests_failed; \
bool is_test_complete; \
bool did_test_fail; \
} \
\
int main(int argc, char** argv) { \
micro_test::tests_passed = 0; \
micro_test::tests_failed = 0;
#define TF_LITE_MICRO_TESTS_END \
micro_test::reporter->Report( \
"%d/%d tests passed", micro_test::tests_passed, \
(micro_test::tests_failed + micro_test::tests_passed)); \
if (micro_test::tests_failed == 0) { \
micro_test::reporter->Report("~~~ALL TESTS PASSED~~~\n"); \
return kTfLiteOk; \
} else { \
micro_test::reporter->Report("~~~SOME TESTS FAILED~~~\n"); \
return kTfLiteError; \
} \
#define TF_LITE_MICRO_TESTS_END \
MicroPrintf("%d/%d tests passed", micro_test::tests_passed, \
(micro_test::tests_failed + micro_test::tests_passed)); \
if (micro_test::tests_failed == 0) { \
MicroPrintf("~~~ALL TESTS PASSED~~~\n"); \
return kTfLiteOk; \
} else { \
MicroPrintf("~~~SOME TESTS FAILED~~~\n"); \
return kTfLiteError; \
} \
}
// TODO(petewarden): I'm going to hell for what I'm doing to this poor for loop.
#define TF_LITE_MICRO_TEST(name) \
micro_test::reporter->Report("Testing " #name); \
MicroPrintf("Testing " #name); \
for (micro_test::is_test_complete = false, \
micro_test::did_test_fail = false; \
!micro_test::is_test_complete; micro_test::is_test_complete = true, \
micro_test::tests_passed += (micro_test::did_test_fail) ? 0 : 1, \
micro_test::tests_failed += (micro_test::did_test_fail) ? 1 : 0)
#define TF_LITE_MICRO_EXPECT(x) \
do { \
if (!(x)) { \
micro_test::reporter->Report(#x " failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
#define TF_LITE_MICRO_EXPECT(x) \
do { \
if (!(x)) { \
MicroPrintf(#x " failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
// TODO(b/139142772): this macro is used with types other than ints even though
// the printf specifier is %d.
#define TF_LITE_MICRO_EXPECT_EQ(x, y) \
do { \
auto vx = x; \
auto vy = y; \
if ((vx) != (vy)) { \
micro_test::reporter->Report(#x " == " #y " failed at %s:%d (%d vs %d)", \
__FILE__, __LINE__, static_cast<int>(vx), \
static_cast<int>(vy)); \
micro_test::did_test_fail = true; \
} \
#define TF_LITE_MICRO_EXPECT_EQ(x, y) \
do { \
auto vx = x; \
auto vy = y; \
if ((vx) != (vy)) { \
MicroPrintf(#x " == " #y " failed at %s:%d (%d vs %d)", __FILE__, \
__LINE__, static_cast<int>(vx), static_cast<int>(vy)); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_NE(x, y) \
do { \
if ((x) == (y)) { \
micro_test::reporter->Report(#x " != " #y " failed at %s:%d", __FILE__, \
__LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
// TODO(wangtz): Making it more generic once needed.
#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \
epsilon) \
#define TF_LITE_MICRO_EXPECT_NE(x, y) \
do { \
auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \
? ((arr1)[(idx1)] - (arr2)[(idx2)]) \
: ((arr2)[(idx2)] - (arr1)[(idx1)]); \
if (delta > epsilon) { \
micro_test::reporter->Report( \
#arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \
static_cast<int>(idx1), static_cast<float>((arr1)[(idx1)]), \
static_cast<int>(idx2), static_cast<float>((arr2)[(idx2)]), \
__FILE__, __LINE__); \
if ((x) == (y)) { \
MicroPrintf(#x " != " #y " failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \
do { \
auto vx = (x); \
auto vy = (y); \
auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \
if (delta > epsilon) { \
micro_test::reporter->Report( \
#x " (%f) near " #y " (%f) failed at %s:%d", \
static_cast<double>(vx), static_cast<double>(vy), __FILE__, \
__LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_GT(x, y) \
do { \
if ((x) <= (y)) { \
micro_test::reporter->Report(#x " > " #y " failed at %s:%d", __FILE__, \
__LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_LT(x, y) \
do { \
if ((x) >= (y)) { \
micro_test::reporter->Report(#x " < " #y " failed at %s:%d", __FILE__, \
__LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_GE(x, y) \
// TODO(wangtz): Making it more generic once needed.
#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \
epsilon) \
do { \
if ((x) < (y)) { \
micro_test::reporter->Report(#x " >= " #y " failed at %s:%d", __FILE__, \
__LINE__); \
auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \
? ((arr1)[(idx1)] - (arr2)[(idx2)]) \
: ((arr2)[(idx2)] - (arr1)[(idx1)]); \
if (delta > epsilon) { \
MicroPrintf(#arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \
static_cast<int>(idx1), static_cast<float>((arr1)[(idx1)]), \
static_cast<int>(idx2), static_cast<float>((arr2)[(idx2)]), \
__FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_LE(x, y) \
#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \
do { \
if ((x) > (y)) { \
micro_test::reporter->Report(#x " <= " #y " failed at %s:%d", __FILE__, \
__LINE__); \
auto vx = (x); \
auto vy = (y); \
auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \
if (delta > epsilon) { \
MicroPrintf(#x " (%f) near " #y " (%f) failed at %s:%d", \
static_cast<double>(vx), static_cast<double>(vy), __FILE__, \
__LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_TRUE(x) \
#define TF_LITE_MICRO_EXPECT_GT(x, y) \
do { \
if (!(x)) { \
micro_test::reporter->Report(#x " was not true failed at %s:%d", \
__FILE__, __LINE__); \
if ((x) <= (y)) { \
MicroPrintf(#x " > " #y " failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_FALSE(x) \
#define TF_LITE_MICRO_EXPECT_LT(x, y) \
do { \
if ((x) >= (y)) { \
MicroPrintf(#x " < " #y " failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_GE(x, y) \
do { \
if (x) { \
micro_test::reporter->Report(#x " was not false failed at %s:%d", \
__FILE__, __LINE__); \
if ((x) < (y)) { \
MicroPrintf(#x " >= " #y " failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_FAIL(msg) \
do { \
micro_test::reporter->Report("FAIL: %s", msg, __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
#define TF_LITE_MICRO_EXPECT_LE(x, y) \
do { \
if ((x) > (y)) { \
MicroPrintf(#x " <= " #y " failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \
#define TF_LITE_MICRO_EXPECT_TRUE(x) \
do { \
for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \
if (string1[i] != string2[i]) { \
micro_test::reporter->Report("FAIL: %s did not match %s", string1, \
string2, __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
if (!(x)) { \
MicroPrintf(#x " was not true failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_EXPECT_FALSE(x) \
do { \
if (x) { \
MicroPrintf(#x " was not false failed at %s:%d", __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} \
} while (false)
#define TF_LITE_MICRO_FAIL(msg) \
do { \
MicroPrintf("FAIL: %s", msg, __FILE__, __LINE__); \
micro_test::did_test_fail = true; \
} while (false)
#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \
do { \
for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \
if (string1[i] != string2[i]) { \
MicroPrintf("FAIL: %s did not match %s", string1, string2, __FILE__, \
__LINE__); \
micro_test::did_test_fail = true; \
} \
} \
} while (false)
#endif // TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_

View File

@ -34,6 +34,9 @@ PLATFORM_FLAGS = \
# broken w/o it. Remove this workaround once the issue is resolved.
PLATFORM_FLAGS += -DNDEBUG
# TODO(#46937): Remove once initialization of global variables is sorted out.
PLATFORM_FLAGS += -DRENODE
CXXFLAGS += $(PLATFORM_FLAGS) -fno-use-cxa-atexit
CCFLAGS += $(PLATFORM_FLAGS)

View File

@ -22,7 +22,7 @@ ifneq ($(DOWNLOAD_RESULT), SUCCESS)
$(error Something went wrong with the CMSIS download: $(DOWNLOAD_RESULT))
endif
# TODO(b/161478030) : change - Wno - vla to - Wvla and remove - Wno-shadow once
# TODO(b/161478030): change -Wno-vla to -Wvla and remove -Wno-shadow once
# we have a solution for fixing / avoiding being tripped up by these warnings.
PLATFORM_FLAGS = \
-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \
@ -52,6 +52,10 @@ PLATFORM_FLAGS = \
-fomit-frame-pointer \
-g \
-Os
# TODO(#46937): Remove once initialization of global variables is sorted out.
PLATFORM_FLAGS += -DRENODE
CXXFLAGS += $(PLATFORM_FLAGS) -std=gnu++11 -fno-rtti -fno-use-cxa-atexit
CCFLAGS += $(PLATFORM_FLAGS)
LDFLAGS += \