diff --git a/tensorflow/lite/experimental/microfrontend/lib/BUILD b/tensorflow/lite/experimental/microfrontend/lib/BUILD index 18bfdb24a84..57f8055e9df 100644 --- a/tensorflow/lite/experimental/microfrontend/lib/BUILD +++ b/tensorflow/lite/experimental/microfrontend/lib/BUILD @@ -135,6 +135,9 @@ tflite_micro_cc_test( tflite_micro_cc_test( name = "filterbank_test", srcs = ["filterbank_test.cc"], + # Setting copts for experimental code to [], but this code should be fixed + # to build with the default copts (micro_copts()) + copts = [], deps = [ ":filterbank", "//tensorflow/lite/micro/testing:micro_test", @@ -144,6 +147,9 @@ tflite_micro_cc_test( tflite_micro_cc_test( name = "frontend_test", srcs = ["frontend_test.cc"], + # Setting copts for experimental code to [], but this code should be fixed + # to build with the default copts (micro_copts()) + copts = [], deps = [ ":frontend", "//tensorflow/lite/micro/testing:micro_test", @@ -153,6 +159,9 @@ tflite_micro_cc_test( tflite_micro_cc_test( name = "log_scale_test", srcs = ["log_scale_test.cc"], + # Setting copts for experimental code to [], but this code should be fixed + # to build with the default copts (micro_copts()) + copts = [], deps = [ ":log_scale", "//tensorflow/lite/micro/testing:micro_test", @@ -162,6 +171,9 @@ tflite_micro_cc_test( tflite_micro_cc_test( name = "noise_reduction_test", srcs = ["noise_reduction_test.cc"], + # Setting copts for experimental code to [], but this code should be fixed + # to build with the default copts (micro_copts()) + copts = [], deps = [ ":noise_reduction", "//tensorflow/lite/micro/testing:micro_test", @@ -171,6 +183,9 @@ tflite_micro_cc_test( tflite_micro_cc_test( name = "pcan_gain_control_test", srcs = ["pcan_gain_control_test.cc"], + # Setting copts for experimental code to [], but this code should be fixed + # to build with the default copts (micro_copts()) + copts = [], deps = [ ":pcan_gain_control", "//tensorflow/lite/micro/testing:micro_test", @@ -180,6 +195,9 @@ 
tflite_micro_cc_test( tflite_micro_cc_test( name = "window_test", srcs = ["window_test.cc"], + # Setting copts for experimental code to [], but this code should be fixed + # to build with the default copts (micro_copts()) + copts = [], deps = [ ":window", "//tensorflow/lite/micro/testing:micro_test", diff --git a/tensorflow/lite/experimental/microfrontend/lib/frontend_test.cc b/tensorflow/lite/experimental/microfrontend/lib/frontend_test.cc index adf59a1b8b5..9c981decf48 100644 --- a/tensorflow/lite/experimental/microfrontend/lib/frontend_test.cc +++ b/tensorflow/lite/experimental/microfrontend/lib/frontend_test.cc @@ -123,7 +123,7 @@ TF_LITE_MICRO_TEST(FrontendTest_CheckNotEnoughSamples) { &num_samples_read); TF_LITE_MICRO_EXPECT_EQ(output.size, 0); - TF_LITE_MICRO_EXPECT_EQ(output.values, nullptr); + TF_LITE_MICRO_EXPECT(output.values == nullptr); FrontendFreeStateContents(&state); } diff --git a/tensorflow/lite/micro/examples/hello_world/hello_world_test.cc b/tensorflow/lite/micro/examples/hello_world/hello_world_test.cc index 4da4ba7fa94..c76491b6b7c 100644 --- a/tensorflow/lite/micro/examples/hello_world/hello_world_test.cc +++ b/tensorflow/lite/micro/examples/hello_world/hello_world_test.cc @@ -26,13 +26,12 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { // Set up logging tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // Map the model into a usable data structure. This doesn't involve any // copying or parsing, it's a very lightweight operation. 
const tflite::Model* model = ::tflite::GetModel(g_model); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "Model provided is schema version %d not equal " "to supported version %d.\n", model->version(), TFLITE_SCHEMA_VERSION); @@ -52,8 +51,8 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { uint8_t tensor_arena[tensor_arena_size]; // Build an interpreter to run the model with - tflite::MicroInterpreter interpreter(model, resolver, tensor_arena, - tensor_arena_size, error_reporter); + tflite::MicroInterpreter interpreter( + model, resolver, tensor_arena, tensor_arena_size, µ_error_reporter); // Allocate memory from the tensor_arena for the model's tensors TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); @@ -95,7 +94,7 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { // Obtain the output value from the tensor float value = output->data.f[0]; // Check that the output value is within 0.05 of the expected value - TF_LITE_MICRO_EXPECT_NEAR(0., value, 0.05); + TF_LITE_MICRO_EXPECT_NEAR(0.f, value, 0.05f); // Run inference on several more values and confirm the expected outputs input->data.f[0] = 1.; @@ -103,21 +102,21 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); value = output->data.f[0]; - TF_LITE_MICRO_EXPECT_NEAR(0.841, value, 0.05); + TF_LITE_MICRO_EXPECT_NEAR(0.841f, value, 0.05f); - input->data.f[0] = 3.; + input->data.f[0] = 3.f; invoke_status = interpreter.Invoke(); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); value = output->data.f[0]; - TF_LITE_MICRO_EXPECT_NEAR(0.141, value, 0.05); + TF_LITE_MICRO_EXPECT_NEAR(0.141f, value, 0.05f); - input->data.f[0] = 5.; + input->data.f[0] = 5.f; invoke_status = interpreter.Invoke(); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); value = output->data.f[0]; - TF_LITE_MICRO_EXPECT_NEAR(-0.959, value, 0.05); + TF_LITE_MICRO_EXPECT_NEAR(-0.959f, value, 
0.05f); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/hello_world/output_handler_test.cc b/tensorflow/lite/micro/examples/hello_world/output_handler_test.cc index cbed83e1c75..206113d1427 100644 --- a/tensorflow/lite/micro/examples/hello_world/output_handler_test.cc +++ b/tensorflow/lite/micro/examples/hello_world/output_handler_test.cc @@ -22,12 +22,11 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestCallability) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // This will have external side-effects (like printing to the debug console // or lighting an LED) that are hard to observe, so the most we can do is // make sure the call doesn't crash. - HandleOutput(error_reporter, 0, 0); + HandleOutput(µ_error_reporter, 0, 0); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_test.cc b/tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_test.cc index 61071fd5696..ff9ed498137 100644 --- a/tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_test.cc +++ b/tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_test.cc @@ -31,11 +31,10 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestImageRecognitionInvoke) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; const tflite::Model* model = ::tflite::GetModel(image_recognition_model_data); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "Model provided is schema version %d not equal " "to supported version %d.\n", model->version(), TFLITE_SCHEMA_VERSION); @@ -52,7 +51,8 @@ TF_LITE_MICRO_TEST(TestImageRecognitionInvoke) { uint8_t tensor_arena[tensor_arena_size]; tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - 
tensor_arena_size, error_reporter); + tensor_arena_size, + µ_error_reporter); interpreter.AllocateTensors(); TfLiteTensor* input = interpreter.input(0); @@ -83,7 +83,7 @@ TF_LITE_MICRO_TEST(TestImageRecognitionInvoke) { TfLiteStatus invoke_status = interpreter.Invoke(); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TfLiteTensor* output = interpreter.output(0); diff --git a/tensorflow/lite/micro/examples/magic_wand/magic_wand_test.cc b/tensorflow/lite/micro/examples/magic_wand/magic_wand_test.cc index 96a2b971d9b..920440509f7 100644 --- a/tensorflow/lite/micro/examples/magic_wand/magic_wand_test.cc +++ b/tensorflow/lite/micro/examples/magic_wand/magic_wand_test.cc @@ -28,13 +28,12 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { // Set up logging tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // Map the model into a usable data structure. This doesn't involve any // copying or parsing, it's a very lightweight operation. 
const tflite::Model* model = ::tflite::GetModel(g_magic_wand_model_data); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "Model provided is schema version %d not equal " "to supported version %d.\n", model->version(), TFLITE_SCHEMA_VERSION); @@ -59,7 +58,8 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { // Build an interpreter to run the model with tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - tensor_arena_size, error_reporter); + tensor_arena_size, + µ_error_reporter); // Allocate memory from the tensor_arena for the model's tensors interpreter.AllocateTensors(); @@ -80,15 +80,15 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { // Provide an input value const float* ring_features_data = g_ring_micro_f9643d42_nohash_4_data; - TF_LITE_REPORT_ERROR(error_reporter, "%d", input->bytes); - for (int i = 0; i < (input->bytes / sizeof(float)); ++i) { + TF_LITE_REPORT_ERROR(µ_error_reporter, "%d", input->bytes); + for (size_t i = 0; i < (input->bytes / sizeof(float)); ++i) { input->data.f[i] = ring_features_data[i]; } // Run the model on this input and check that it succeeds TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -118,14 +118,14 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) { // Now test with a different input, from a recording of "Slope". const float* slope_features_data = g_slope_micro_f2e59fea_nohash_1_data; - for (int i = 0; i < (input->bytes / sizeof(float)); ++i) { + for (size_t i = 0; i < (input->bytes / sizeof(float)); ++i) { input->data.f[i] = slope_features_data[i]; } // Run the model on this "Slope" input. 
invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); diff --git a/tensorflow/lite/micro/examples/magic_wand/output_handler_test.cc b/tensorflow/lite/micro/examples/magic_wand/output_handler_test.cc index 6ac5468531d..133d62427a1 100644 --- a/tensorflow/lite/micro/examples/magic_wand/output_handler_test.cc +++ b/tensorflow/lite/micro/examples/magic_wand/output_handler_test.cc @@ -22,11 +22,10 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestCallability) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - HandleOutput(error_reporter, 0); - HandleOutput(error_reporter, 1); - HandleOutput(error_reporter, 2); - HandleOutput(error_reporter, 3); + HandleOutput(µ_error_reporter, 0); + HandleOutput(µ_error_reporter, 1); + HandleOutput(µ_error_reporter, 2); + HandleOutput(µ_error_reporter, 3); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/micro_speech/Makefile.inc b/tensorflow/lite/micro/examples/micro_speech/Makefile.inc index 18d5fa52505..70ad3e94238 100644 --- a/tensorflow/lite/micro/examples/micro_speech/Makefile.inc +++ b/tensorflow/lite/micro/examples/micro_speech/Makefile.inc @@ -238,9 +238,17 @@ $(MICRO_FEATURES_GENERATOR_HDRS) include $(wildcard tensorflow/lite/micro/examples/micro_speech/*/Makefile.inc) # Test the code for feature generation. +#TEMP_CXXFLAGS := CXXFLAGS +#CXXFLAGS := $(filter-out $(CC_WARNINGS),$(CXXFLAGS)) + +TEMP_CCFLAGS := CCFLAGS +CCFLAGS := $(filter-out $(CC_WARNINGS),$(CCFLAGS)) + $(eval $(call microlite_test,micro_features_generator_test,\ $(MICRO_FEATURES_GENERATOR_TEST_SRCS), $(MICRO_FEATURES_GENERATOR_TEST_HDRS))) +#CXXFLAGS := TEMP_CXXFLAGS + # Tests loading and running a speech model. 
$(eval $(call microlite_test,micro_speech_test,\ $(MICRO_SPEECH_TEST_SRCS),$(MICRO_SPEECH_TEST_HDRS))) diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc b/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc index d874210ccea..91419035048 100644 --- a/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/audio_provider_mock_test.cc @@ -27,12 +27,11 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestAudioProviderMock) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; int audio_samples_size = 0; int16_t* audio_samples = nullptr; TfLiteStatus get_status = - GetAudioSamples(error_reporter, 0, kFeatureSliceDurationMs, + GetAudioSamples(µ_error_reporter, 0, kFeatureSliceDurationMs, &audio_samples_size, &audio_samples); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); @@ -41,8 +40,9 @@ TF_LITE_MICRO_TEST(TestAudioProviderMock) { TF_LITE_MICRO_EXPECT_EQ(g_yes_1000ms_sample_data[i], audio_samples[i]); } - get_status = GetAudioSamples(error_reporter, 500, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); + get_status = + GetAudioSamples(µ_error_reporter, 500, kFeatureSliceDurationMs, + &audio_samples_size, &audio_samples); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); @@ -51,8 +51,9 @@ TF_LITE_MICRO_TEST(TestAudioProviderMock) { audio_samples[i]); } - get_status = GetAudioSamples(error_reporter, 1500, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); + get_status = + GetAudioSamples(µ_error_reporter, 1500, kFeatureSliceDurationMs, + &audio_samples_size, &audio_samples); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); 
TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); @@ -60,8 +61,9 @@ TF_LITE_MICRO_TEST(TestAudioProviderMock) { TF_LITE_MICRO_EXPECT_EQ(0, audio_samples[i]); } - get_status = GetAudioSamples(error_reporter, 12250, kFeatureSliceDurationMs, - &audio_samples_size, &audio_samples); + get_status = + GetAudioSamples(µ_error_reporter, 12250, kFeatureSliceDurationMs, + &audio_samples_size, &audio_samples); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); TF_LITE_MICRO_EXPECT_NE(audio_samples, nullptr); diff --git a/tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc b/tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc index 065f0f6f996..8e32c92b8a2 100644 --- a/tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/audio_provider_test.cc @@ -26,12 +26,11 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestAudioProvider) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; int audio_samples_size = 0; int16_t* audio_samples = nullptr; TfLiteStatus get_status = - GetAudioSamples(error_reporter, 0, kFeatureSliceDurationMs, + GetAudioSamples(µ_error_reporter, 0, kFeatureSliceDurationMs, &audio_samples_size, &audio_samples); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); TF_LITE_MICRO_EXPECT_LE(audio_samples_size, kMaxAudioSampleSize); diff --git a/tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc b/tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc index fe811ea52bc..818b0840d08 100644 --- a/tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/command_responder_test.cc @@ -22,12 +22,11 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestCallability) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // 
This will have external side-effects (like printing to the debug console // or lighting an LED) that are hard to observe, so the most we can do is // make sure the call doesn't crash. - RespondToCommand(error_reporter, 0, "foo", 0, true); + RespondToCommand(&micro_error_reporter, 0, "foo", 0, true); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/micro_speech/feature_provider_mock_test.cc b/tensorflow/lite/micro/examples/micro_speech/feature_provider_mock_test.cc index aae556bf6e0..c093f31ad10 100644 --- a/tensorflow/lite/micro/examples/micro_speech/feature_provider_mock_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/feature_provider_mock_test.cc @@ -25,14 +25,13 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestFeatureProviderMockYes) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = &micro_error_reporter; int8_t feature_data[kFeatureElementCount]; FeatureProvider feature_provider(kFeatureElementCount, feature_data); int how_many_new_slices = 0; TfLiteStatus populate_status = feature_provider.PopulateFeatureData( - error_reporter, /* last_time_in_ms= */ 0, /* time_in_ms= */ 970, + &micro_error_reporter, /* last_time_in_ms= */ 0, /* time_in_ms= */ 970, &how_many_new_slices); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, populate_status); TF_LITE_MICRO_EXPECT_EQ(kFeatureSliceCount, how_many_new_slices); @@ -45,15 +44,14 @@ TF_LITE_MICRO_TEST(TestFeatureProviderMockYes) { TF_LITE_MICRO_TEST(TestFeatureProviderMockNo) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = &micro_error_reporter; int8_t feature_data[kFeatureElementCount]; FeatureProvider feature_provider(kFeatureElementCount, feature_data); int how_many_new_slices = 0; TfLiteStatus populate_status = feature_provider.PopulateFeatureData( - error_reporter, /* last_time_in_ms= */ 4000, /* time_in_ms= */ 4970, - &how_many_new_slices); + &micro_error_reporter, /* last_time_in_ms= */ 4000, + /* time_in_ms= */ 4970, 
&how_many_new_slices); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, populate_status); TF_LITE_MICRO_EXPECT_EQ(kFeatureSliceCount, how_many_new_slices); diff --git a/tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc b/tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc index 5d6816a91e4..e0fc95c6336 100644 --- a/tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/feature_provider_test.cc @@ -24,14 +24,13 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestFeatureProvider) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; int8_t feature_data[kFeatureElementCount]; FeatureProvider feature_provider(kFeatureElementCount, feature_data); int how_many_new_slices = 0; TfLiteStatus populate_status = feature_provider.PopulateFeatureData( - error_reporter, /* last_time_in_ms= */ 0, /* time_in_ms= */ 10000, + µ_error_reporter, /* last_time_in_ms= */ 0, /* time_in_ms= */ 10000, &how_many_new_slices); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, populate_status); TF_LITE_MICRO_EXPECT_EQ(kFeatureSliceCount, how_many_new_slices); diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc index fbb6e6e4a9f..9e076431288 100644 --- a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc +++ b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.cc @@ -81,7 +81,7 @@ TfLiteStatus GenerateMicroFeatures(tflite::ErrorReporter* error_reporter, FrontendOutput frontend_output = FrontendProcessSamples( &g_micro_features_state, frontend_input, input_size, num_samples_read); - for (int i = 0; i < frontend_output.size; ++i) { + for (size_t i = 0; i < frontend_output.size; ++i) { // These scaling values are derived from those used in input_data.py in the // training 
pipeline. // The feature pipeline outputs 16-bit signed integers in roughly a 0 to 670 diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc index ee3ee03763f..083c3cc479d 100644 --- a/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator_test.cc @@ -30,9 +30,9 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorYes) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, InitializeMicroFeatures(error_reporter)); + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, + InitializeMicroFeatures(µ_error_reporter)); // The micro features pipeline retains state from previous calls to help // estimate the background noise. Unfortunately this makes it harder to @@ -51,8 +51,9 @@ TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorYes) { int8_t yes_calculated_data[g_yes_feature_data_slice_size]; size_t num_samples_read; TfLiteStatus yes_status = GenerateMicroFeatures( - error_reporter, g_yes_30ms_sample_data, g_yes_30ms_sample_data_size, - g_yes_feature_data_slice_size, yes_calculated_data, &num_samples_read); + µ_error_reporter, g_yes_30ms_sample_data, + g_yes_30ms_sample_data_size, g_yes_feature_data_slice_size, + yes_calculated_data, &num_samples_read); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, yes_status); for (int i = 0; i < g_yes_feature_data_slice_size; ++i) { @@ -60,17 +61,17 @@ TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorYes) { const int actual = yes_calculated_data[i]; TF_LITE_MICRO_EXPECT_EQ(expected, actual); if (expected != actual) { - TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d", - expected, actual); + TF_LITE_REPORT_ERROR(µ_error_reporter, + "Expected value %d but found %d", expected, 
actual); } } } TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorNo) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, InitializeMicroFeatures(error_reporter)); + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, + InitializeMicroFeatures(µ_error_reporter)); // As we did for the previous features, set known good noise state // parameters. const uint32_t no_estimate_presets[] = { @@ -85,17 +86,17 @@ TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorNo) { int8_t no_calculated_data[g_no_feature_data_slice_size]; size_t num_samples_read; TfLiteStatus no_status = GenerateMicroFeatures( - error_reporter, g_no_30ms_sample_data, g_no_30ms_sample_data_size, + µ_error_reporter, g_no_30ms_sample_data, g_no_30ms_sample_data_size, g_no_feature_data_slice_size, no_calculated_data, &num_samples_read); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, no_status); - for (int i = 0; i < g_no_feature_data_slice_size; ++i) { + for (size_t i = 0; i < g_no_feature_data_slice_size; ++i) { const int expected = g_no_feature_data_slice[i]; const int actual = no_calculated_data[i]; TF_LITE_MICRO_EXPECT_EQ(expected, actual); if (expected != actual) { - TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d", - expected, actual); + TF_LITE_REPORT_ERROR(µ_error_reporter, + "Expected value %d but found %d", expected, actual); } } } diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc b/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc index 4598dd3662f..b58515d1833 100644 --- a/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/micro_speech_test.cc @@ -28,13 +28,12 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestInvoke) { // Set up logging. tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // Map the model into a usable data structure. 
This doesn't involve any // copying or parsing, it's a very lightweight operation. const tflite::Model* model = ::tflite::GetModel(g_model); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "Model provided is schema version %d not equal " "to supported version %d.\n", model->version(), TFLITE_SCHEMA_VERSION); @@ -59,7 +58,8 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Build an interpreter to run the model with. tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - tensor_arena_size, error_reporter); + tensor_arena_size, + µ_error_reporter); interpreter.AllocateTensors(); // Get information about the memory area to use for the model's input. @@ -75,14 +75,14 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Copy a spectrogram created from a .wav audio file of someone saying "Yes", // into the memory area used for the input. const int8_t* yes_features_data = g_yes_micro_f2e59fea_nohash_1_data; - for (int i = 0; i < input->bytes; ++i) { + for (size_t i = 0; i < input->bytes; ++i) { input->data.int8[i] = yes_features_data[i]; } // Run the model on this input and make sure it succeeds. TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -111,14 +111,14 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Now test with a different input, from a recording of "No". const int8_t* no_features_data = g_no_micro_f9643d42_nohash_4_data; - for (int i = 0; i < input->bytes; ++i) { + for (size_t i = 0; i < input->bytes; ++i) { input->data.int8[i] = no_features_data[i]; } // Run the model on this "No" input. 
invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -139,7 +139,7 @@ TF_LITE_MICRO_TEST(TestInvoke) { TF_LITE_MICRO_EXPECT_GT(no_score, unknown_score); TF_LITE_MICRO_EXPECT_GT(no_score, yes_score); - TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Ran successfully\n"); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc b/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc index 9ad20b68c8c..eff7b4eb37b 100644 --- a/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/recognize_commands_test.cc @@ -22,9 +22,8 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(PreviousResultsQueueBasic) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - PreviousResultsQueue queue(error_reporter); + PreviousResultsQueue queue(µ_error_reporter); TF_LITE_MICRO_EXPECT_EQ(0, queue.size()); int8_t scores_a[4] = {0, 0, 0, 1}; @@ -54,9 +53,8 @@ TF_LITE_MICRO_TEST(PreviousResultsQueueBasic) { TF_LITE_MICRO_TEST(PreviousResultsQueuePushPop) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - PreviousResultsQueue queue(error_reporter); + PreviousResultsQueue queue(µ_error_reporter); TF_LITE_MICRO_EXPECT_EQ(0, queue.size()); for (int i = 0; i < 123; ++i) { @@ -74,9 +72,8 @@ TF_LITE_MICRO_TEST(PreviousResultsQueuePushPop) { TF_LITE_MICRO_TEST(RecognizeCommandsTestBasic) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - RecognizeCommands recognize_commands(error_reporter); + RecognizeCommands recognize_commands(µ_error_reporter); 
std::initializer_list result_data = {127, -128, -128, -128}; auto result_dims = {2, 1, 4}; @@ -94,9 +91,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBasic) { TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - RecognizeCommands recognize_commands(error_reporter, 1000, 51); + RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); std::initializer_list yes_data = {-128, -128, 127, -128}; auto yes_dims = {2, 1, 4}; @@ -157,9 +153,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestFindCommands) { TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputLength) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - RecognizeCommands recognize_commands(error_reporter, 1000, 51); + RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); std::initializer_list bad_data = {-128, -128, 127}; auto bad_dims = {2, 1, 3}; @@ -177,9 +172,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputLength) { TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputTimes) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - RecognizeCommands recognize_commands(error_reporter, 1000, 51); + RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); std::initializer_list result_data = {-128, -128, 127, -128}; auto result_dims = {2, 1, 4}; @@ -200,9 +194,8 @@ TF_LITE_MICRO_TEST(RecognizeCommandsTestBadInputTimes) { TF_LITE_MICRO_TEST(RecognizeCommandsTestTooFewInputs) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; - RecognizeCommands recognize_commands(error_reporter, 1000, 51); + RecognizeCommands recognize_commands(µ_error_reporter, 1000, 51); std::initializer_list result_data = {-128, -128, 127, -128}; auto result_dims = {2, 1, 4}; diff --git 
a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc index 22434c995c4..0de36b48e41 100644 --- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc +++ b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc @@ -67,7 +67,7 @@ void CalculateDiscreteFourierTransform(float* time_series, int time_series_size, // of the current sample window are weighted more heavily than those at the end. void CalculatePeriodicHann(int window_length, float* window_function) { for (int i = 0; i < window_length; ++i) { - window_function[i] = 0.5 - 0.5 * std::cos((2 * kPi * i) / window_length); + window_function[i] = 0.5f - 0.5f * std::cos((2 * kPi * i) / window_length); } } diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc index 9ac19b374da..f54feecadfa 100644 --- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc +++ b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator_test.cc @@ -27,34 +27,35 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; uint8_t yes_calculated_data[g_yes_power_spectrum_data_size]; TfLiteStatus yes_status = GenerateSimpleFeatures( - error_reporter, g_yes_30ms_sample_data, g_yes_30ms_sample_data_size, - g_yes_power_spectrum_data_size, yes_calculated_data); + µ_error_reporter, g_yes_30ms_sample_data, + g_yes_30ms_sample_data_size, g_yes_power_spectrum_data_size, + yes_calculated_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, yes_status); for (int i = 0; i < g_yes_power_spectrum_data_size; ++i) { 
TF_LITE_MICRO_EXPECT_EQ(g_yes_power_spectrum_data[i], yes_calculated_data[i]); if (g_yes_power_spectrum_data[i] != yes_calculated_data[i]) { - TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d", - g_yes_power_spectrum_data[i], - yes_calculated_data[i]); + TF_LITE_REPORT_ERROR( + µ_error_reporter, "Expected value %d but found %d", + g_yes_power_spectrum_data[i], yes_calculated_data[i]); } } uint8_t no_calculated_data[g_yes_power_spectrum_data_size]; TfLiteStatus no_status = GenerateSimpleFeatures( - error_reporter, g_no_30ms_sample_data, g_no_30ms_sample_data_size, + µ_error_reporter, g_no_30ms_sample_data, g_no_30ms_sample_data_size, g_no_power_spectrum_data_size, no_calculated_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, no_status); for (int i = 0; i < g_no_power_spectrum_data_size; ++i) { TF_LITE_MICRO_EXPECT_EQ(g_no_power_spectrum_data[i], no_calculated_data[i]); if (g_no_power_spectrum_data[i] != no_calculated_data[i]) { - TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d", + TF_LITE_REPORT_ERROR(µ_error_reporter, + "Expected value %d but found %d", g_no_power_spectrum_data[i], no_calculated_data[i]); } } diff --git a/tensorflow/lite/micro/examples/network_tester/expected_output_data.h b/tensorflow/lite/micro/examples/network_tester/expected_output_data.h index 934722bad94..18937a9b601 100644 --- a/tensorflow/lite/micro/examples/network_tester/expected_output_data.h +++ b/tensorflow/lite/micro/examples/network_tester/expected_output_data.h @@ -16,7 +16,6 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_EXPECTED_OUTPUT_DATA_H_ #define TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_EXPECTED_OUTPUT_DATA_H_ -static unsigned int expected_output_data_len = 4; static unsigned char expected_output_data[1][4] = {6, 8, 14, 16}; #endif // TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_EXPECTED_OUTPUT_DATA_H_ diff --git a/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc b/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc index 6ea02b3f4a5..563500f2115 100644 --- a/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc +++ b/tensorflow/lite/micro/examples/network_tester/network_tester_test.cc @@ -64,21 +64,20 @@ inline void print_output_data(TfLiteTensor* output) { #endif template -void check_output_elem(TfLiteTensor* output, const T* expected_output_data, +void check_output_elem(TfLiteTensor* output, const T* expected_output, const int index) { TF_LITE_MICRO_EXPECT_EQ(tflite::GetTensorData(output)[index], - expected_output_data[index]); + expected_output[index]); } TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestInvoke) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; const tflite::Model* model = ::tflite::GetModel(network_model); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "Model provided is schema version %d not equal " "to supported version %d.\n", model->version(), TFLITE_SCHEMA_VERSION); @@ -87,23 +86,23 @@ TF_LITE_MICRO_TEST(TestInvoke) { tflite::AllOpsResolver resolver; - tflite::MicroInterpreter interpreter(model, resolver, tensor_arena, - TENSOR_ARENA_SIZE, error_reporter); + tflite::MicroInterpreter interpreter( + model, resolver, tensor_arena, TENSOR_ARENA_SIZE, µ_error_reporter); TfLiteStatus allocate_status = interpreter.AllocateTensors(); if (allocate_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, 
"Tensor allocation failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Tensor allocation failed\n"); return kTfLiteError; } for (int n = 0; n < NUM_INFERENCES; n++) { - for (int i = 0; i < interpreter.inputs_size(); ++i) { + for (size_t i = 0; i < interpreter.inputs_size(); ++i) { TfLiteTensor* input = interpreter.input(i); memcpy(input->data.data, input_data[i], input->bytes); } TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); return kTfLiteError; } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -126,7 +125,7 @@ TF_LITE_MICRO_TEST(TestInvoke) { #endif #ifndef NO_COMPARE_OUTPUT_DATA - for (int i = 0; i < interpreter.outputs_size(); i++) { + for (size_t i = 0; i < interpreter.outputs_size(); i++) { TfLiteTensor* output = interpreter.output(i); for (int j = 0; j < tflite::ElementCount(*(output->dims)); ++j) { check_output_elem(output, expected_output_data[i], j); @@ -134,7 +133,7 @@ TF_LITE_MICRO_TEST(TestInvoke) { } #endif } - TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Ran successfully\n"); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/person_detection/detection_responder_test.cc b/tensorflow/lite/micro/examples/person_detection/detection_responder_test.cc index 6ef17d38dc9..1714079f39a 100644 --- a/tensorflow/lite/micro/examples/person_detection/detection_responder_test.cc +++ b/tensorflow/lite/micro/examples/person_detection/detection_responder_test.cc @@ -22,13 +22,12 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestCallability) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // This will have external side-effects (like printing to the debug console // or lighting an LED) that are hard to observe, so the most we can do is // make sure the call doesn't 
crash. - RespondToDetection(error_reporter, 100, 200); - RespondToDetection(error_reporter, 200, 100); + RespondToDetection(µ_error_reporter, 100, 200); + RespondToDetection(µ_error_reporter, 200, 100); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/person_detection/image_provider_test.cc b/tensorflow/lite/micro/examples/person_detection/image_provider_test.cc index 73695035d14..60c89c8aaf2 100644 --- a/tensorflow/lite/micro/examples/person_detection/image_provider_test.cc +++ b/tensorflow/lite/micro/examples/person_detection/image_provider_test.cc @@ -26,11 +26,10 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestImageProvider) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; uint8_t image_data[kMaxImageSize]; - TfLiteStatus get_status = - GetImage(error_reporter, kNumCols, kNumRows, kNumChannels, image_data); + TfLiteStatus get_status = GetImage(µ_error_reporter, kNumCols, kNumRows, + kNumChannels, image_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); TF_LITE_MICRO_EXPECT_NE(image_data, nullptr); diff --git a/tensorflow/lite/micro/examples/person_detection/person_detection_test.cc b/tensorflow/lite/micro/examples/person_detection/person_detection_test.cc index 149d0d50746..548b95e0acc 100644 --- a/tensorflow/lite/micro/examples/person_detection/person_detection_test.cc +++ b/tensorflow/lite/micro/examples/person_detection/person_detection_test.cc @@ -35,18 +35,17 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestInvoke) { // Set up logging. tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // Map the model into a usable data structure. This doesn't involve any // copying or parsing, it's a very lightweight operation. 
const tflite::Model* model = ::tflite::GetModel(g_person_detect_model_data); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "Model provided is schema version %d not equal " "to supported version %d.\n", model->version(), TFLITE_SCHEMA_VERSION); } - PrintModelData(model, error_reporter); + PrintModelData(model, µ_error_reporter); // Pull in only the operation implementations we need. // This relies on a complete list of all the ops needed by this graph. @@ -62,7 +61,8 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Build an interpreter to run the model with. tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - tensor_arena_size, error_reporter); + tensor_arena_size, + µ_error_reporter); interpreter.AllocateTensors(); // Get information about the memory area to use for the model's input. @@ -79,14 +79,14 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Copy an image with a person into the memory area used for the input. const uint8_t* person_data = g_person_data; - for (int i = 0; i < input->bytes; ++i) { + for (size_t i = 0; i < input->bytes; ++i) { input->data.uint8[i] = person_data[i]; } // Run the model on this input and make sure it succeeds. TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -103,21 +103,21 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Make sure that the expected "Person" score is higher than the other class. uint8_t person_score = output->data.uint8[kPersonIndex]; uint8_t no_person_score = output->data.uint8[kNotAPersonIndex]; - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "person data. 
person score: %d, no person score: %d\n", person_score, no_person_score); TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score); // Now test with a different input, from an image without a person. const uint8_t* no_person_data = g_no_person_data; - for (int i = 0; i < input->bytes; ++i) { + for (size_t i = 0; i < input->bytes; ++i) { input->data.uint8[i] = no_person_data[i]; } // Run the model on this "No Person" input. invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -135,12 +135,12 @@ TF_LITE_MICRO_TEST(TestInvoke) { person_score = output->data.uint8[kPersonIndex]; no_person_score = output->data.uint8[kNotAPersonIndex]; TF_LITE_REPORT_ERROR( - error_reporter, + µ_error_reporter, "no person data. person score: %d, no person score: %d\n", person_score, no_person_score); TF_LITE_MICRO_EXPECT_GT(no_person_score, person_score); - TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Ran successfully\n"); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/detection_responder_test.cc b/tensorflow/lite/micro/examples/person_detection_experimental/detection_responder_test.cc index 48dbe5e9f7c..3d86baa9d59 100644 --- a/tensorflow/lite/micro/examples/person_detection_experimental/detection_responder_test.cc +++ b/tensorflow/lite/micro/examples/person_detection_experimental/detection_responder_test.cc @@ -22,13 +22,12 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestCallability) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // This will have external side-effects (like printing to the debug console // or lighting an LED) that are hard to observe, so the most we can do is // make sure the call doesn't crash. 
- RespondToDetection(error_reporter, -100, 100); - RespondToDetection(error_reporter, 100, 50); + RespondToDetection(µ_error_reporter, -100, 100); + RespondToDetection(µ_error_reporter, 100, 50); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/image_provider_test.cc b/tensorflow/lite/micro/examples/person_detection_experimental/image_provider_test.cc index f282ed55651..cd5022446b6 100644 --- a/tensorflow/lite/micro/examples/person_detection_experimental/image_provider_test.cc +++ b/tensorflow/lite/micro/examples/person_detection_experimental/image_provider_test.cc @@ -26,11 +26,10 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestImageProvider) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; int8_t image_data[kMaxImageSize]; - TfLiteStatus get_status = - GetImage(error_reporter, kNumCols, kNumRows, kNumChannels, image_data); + TfLiteStatus get_status = GetImage(µ_error_reporter, kNumCols, kNumRows, + kNumChannels, image_data); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status); TF_LITE_MICRO_EXPECT_NE(image_data, nullptr); diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/person_detection_test.cc b/tensorflow/lite/micro/examples/person_detection_experimental/person_detection_test.cc index 4ceeb753283..78ac037be94 100644 --- a/tensorflow/lite/micro/examples/person_detection_experimental/person_detection_test.cc +++ b/tensorflow/lite/micro/examples/person_detection_experimental/person_detection_test.cc @@ -34,13 +34,12 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestInvoke) { // Set up logging. tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; // Map the model into a usable data structure. This doesn't involve any // copying or parsing, it's a very lightweight operation. 
const tflite::Model* model = ::tflite::GetModel(g_person_detect_model_data); if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "Model provided is schema version %d not equal " "to supported version %d.\n", model->version(), TFLITE_SCHEMA_VERSION); @@ -60,7 +59,8 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Build an interpreter to run the model with. tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena, - tensor_arena_size, error_reporter); + tensor_arena_size, + µ_error_reporter); interpreter.AllocateTensors(); // Get information about the memory area to use for the model's input. @@ -76,7 +76,7 @@ TF_LITE_MICRO_TEST(TestInvoke) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type); // Copy an image with a person into the memory area used for the input. - for (int i = 0; i < input->bytes; ++i) { + for (size_t i = 0; i < input->bytes; ++i) { // Subtract 128 to convert between uint8 and int8. input->data.int8[i] = g_person_data[i] - 128; } @@ -84,7 +84,7 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Run the model on this input and make sure it succeeds. TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -99,20 +99,20 @@ TF_LITE_MICRO_TEST(TestInvoke) { // Make sure that the expected "Person" score is higher than the other class. int8_t person_score = output->data.int8[kPersonIndex]; int8_t no_person_score = output->data.int8[kNotAPersonIndex]; - TF_LITE_REPORT_ERROR(error_reporter, + TF_LITE_REPORT_ERROR(µ_error_reporter, "person data. person score: %d, no person score: %d\n", person_score, no_person_score); TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score); // Now test with a blank image. 
- for (int i = 0; i < input->bytes; ++i) { + for (size_t i = 0; i < input->bytes; ++i) { input->data.int8[i] = 0; } // Run the model on this "No Person" input. invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Invoke failed\n"); } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status); @@ -128,12 +128,12 @@ TF_LITE_MICRO_TEST(TestInvoke) { person_score = output->data.int8[kPersonIndex]; no_person_score = output->data.int8[kNotAPersonIndex]; TF_LITE_REPORT_ERROR( - error_reporter, + µ_error_reporter, "no person data. person score: %d, no person score: %d\n", person_score, no_person_score); TF_LITE_MICRO_EXPECT_GT(no_person_score, person_score); - TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n"); + TF_LITE_REPORT_ERROR(µ_error_reporter, "Ran successfully\n"); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/kernels/add_test.cc b/tensorflow/lite/micro/kernels/add_test.cc index 2d703600f56..332f3edf865 100644 --- a/tensorflow/lite/micro/kernels/add_test.cc +++ b/tensorflow/lite/micro/kernels/add_test.cc @@ -431,12 +431,6 @@ TF_LITE_MICRO_TEST(QuantizedAddWithScalarBroadcastUint8) { } } TF_LITE_MICRO_TEST(QuantizedAddWithScalarBroadcastFloat) { - const float scales[] = {0.1, 0.05, 0.1}; - const int zero_points[] = {127, 131, 139}; - uint8_t input1_quantized[tflite::testing::broadcast_output_dims_count]; - uint8_t input2_quantized[tflite::testing::broadcast_output_dims_count]; - uint8_t golden_quantized[tflite::testing::broadcast_output_dims_count]; - uint8_t output[tflite::testing::broadcast_output_dims_count]; float output_float[tflite::testing::broadcast_output_dims_count]; for (int i = 0; i < tflite::testing::broadcast_num_shapes; ++i) { @@ -491,7 +485,6 @@ TF_LITE_MICRO_TEST(QuantizedAddWithMixedBroadcastUint8) { uint8_t input2_quantized[tflite::testing::broadcast_output_dims_count]; uint8_t 
golden_quantized[tflite::testing::broadcast_output_dims_count]; uint8_t output[tflite::testing::broadcast_output_dims_count]; - float output_float[tflite::testing::broadcast_output_dims_count]; for (int i = 0; i < tflite::testing::broadcast_num_shapes; ++i) { tflite::testing::TestAddQuantized( @@ -512,7 +505,6 @@ TF_LITE_MICRO_TEST(QuantizedAddWithMixedBroadcastInt8) { int8_t input2_quantized[tflite::testing::broadcast_output_dims_count]; int8_t golden_quantized[tflite::testing::broadcast_output_dims_count]; int8_t output[tflite::testing::broadcast_output_dims_count]; - float output_float[tflite::testing::broadcast_output_dims_count]; for (int i = 0; i < tflite::testing::broadcast_num_shapes; ++i) { tflite::testing::TestAddQuantized( diff --git a/tensorflow/lite/micro/kernels/comparisons_test.cc b/tensorflow/lite/micro/kernels/comparisons_test.cc index 64c39c5d2c7..c8a1e2646b3 100644 --- a/tensorflow/lite/micro/kernels/comparisons_test.cc +++ b/tensorflow/lite/micro/kernels/comparisons_test.cc @@ -132,7 +132,6 @@ void TestComparisonQuantizedUInt8(tflite::BuiltinOperator op, TfLiteIntArray* input1_dims = IntArrayFromInts(input1_dims_data); TfLiteIntArray* input2_dims = IntArrayFromInts(input2_dims_data); TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); - const int output_dims_count = ElementCount(*output_dims); TfLiteTensor tensors[tensors_size] = { CreateQuantizedTensor(input1_data, input1_quantized, input1_dims, @@ -156,7 +155,6 @@ void TestComparisonQuantizedInt8(tflite::BuiltinOperator op, TfLiteIntArray* input1_dims = IntArrayFromInts(input1_dims_data); TfLiteIntArray* input2_dims = IntArrayFromInts(input2_dims_data); TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); - const int output_dims_count = ElementCount(*output_dims); TfLiteTensor tensors[tensors_size] = { CreateQuantizedTensor(input1_data, input1_quantized, input1_dims, @@ -749,8 +747,6 @@ TF_LITE_MICRO_TEST(GreaterUInt8EqualQuantized) { const float input1_scale = 0.5; 
const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[4]; uint8_t input2_quantized[4]; @@ -774,8 +770,6 @@ TF_LITE_MICRO_TEST(LessQuantizedUInt8) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[4]; uint8_t input2_quantized[4]; @@ -799,8 +793,6 @@ TF_LITE_MICRO_TEST(LessEqualQuantizedUInt8) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[4]; uint8_t input2_quantized[4]; @@ -829,8 +821,6 @@ TF_LITE_MICRO_TEST(EqualQuantizedUInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[6]; uint8_t input2_quantized[6]; @@ -860,8 +850,6 @@ TF_LITE_MICRO_TEST(NotEqualQuantizedUInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[6]; uint8_t input2_quantized[6]; @@ -891,8 +879,6 @@ TF_LITE_MICRO_TEST(NotEqualQuantizedInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = -9; - const float input2_scale = 0.25; - const int input2_zero_point = 9; int8_t input1_quantized[6]; int8_t input2_quantized[6]; @@ -922,8 +908,6 @@ TF_LITE_MICRO_TEST(GreaterQuantizedUInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[6]; uint8_t input2_quantized[6]; @@ -953,8 +937,6 @@ TF_LITE_MICRO_TEST(GreaterQuantizedInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = -9; - const float input2_scale = 0.25; - const int input2_zero_point = 9; int8_t 
input1_quantized[6]; int8_t input2_quantized[6]; @@ -984,8 +966,6 @@ TF_LITE_MICRO_TEST(GreaterEqualQuantizedUInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[6]; uint8_t input2_quantized[6]; @@ -1015,8 +995,6 @@ TF_LITE_MICRO_TEST(GreaterEqualQuantizedInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = -9; - const float input2_scale = 0.25; - const int input2_zero_point = 9; int8_t input1_quantized[6]; int8_t input2_quantized[6]; @@ -1046,8 +1024,6 @@ TF_LITE_MICRO_TEST(LessQuantizedUInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[6]; uint8_t input2_quantized[6]; @@ -1077,8 +1053,6 @@ TF_LITE_MICRO_TEST(LessQuantizedInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = -9; - const float input2_scale = 0.25; - const int input2_zero_point = 9; int8_t input1_quantized[6]; int8_t input2_quantized[6]; @@ -1108,8 +1082,6 @@ TF_LITE_MICRO_TEST(LessEqualQuantizedUInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = 128; - const float input2_scale = 0.25; - const int input2_zero_point = 125; uint8_t input1_quantized[6]; uint8_t input2_quantized[6]; @@ -1139,8 +1111,6 @@ TF_LITE_MICRO_TEST(LessEqualQuantizedInt8WithBroadcast) { const float input1_scale = 0.5; const int input1_zero_point = -9; - const float input2_scale = 0.25; - const int input2_zero_point = 9; int8_t input1_quantized[6]; int8_t input2_quantized[6]; diff --git a/tensorflow/lite/micro/kernels/conv_test.cc b/tensorflow/lite/micro/kernels/conv_test.cc index 686b3f98ff5..6343496dd5a 100644 --- a/tensorflow/lite/micro/kernels/conv_test.cc +++ b/tensorflow/lite/micro/kernels/conv_test.cc @@ -163,9 +163,9 @@ void TestConvQuantizedPerLayer( // TODO(njeff): 
Affine Quantization Params should be set on tensor creation. float filter_scales[] = {1, filter_scale}; int filter_zero_points[] = {1, 128}; - TfLiteAffineQuantization filter_quant = { - FloatArrayFromFloats(filter_scales), - IntArrayFromInts(filter_zero_points)}; + TfLiteAffineQuantization filter_quant = {FloatArrayFromFloats(filter_scales), + IntArrayFromInts(filter_zero_points), + 0}; tensors[1].quantization = {kTfLiteAffineQuantization, &filter_quant}; TF_LITE_MICRO_EXPECT_EQ( @@ -209,14 +209,15 @@ void TestConvQuantizedPerChannel( float input_scales[] = {1, input_scale}; int input_zero_points[] = {1, input_zero_point}; TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales), - IntArrayFromInts(input_zero_points)}; + IntArrayFromInts(input_zero_points), + 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; float output_scales[] = {1, output_scale}; int output_zero_points[] = {1, output_zero_point}; - TfLiteAffineQuantization output_quant = { - FloatArrayFromFloats(output_scales), - IntArrayFromInts(output_zero_points)}; + TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales), + IntArrayFromInts(output_zero_points), + 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; constexpr int inputs_size = 3; @@ -401,9 +402,6 @@ TF_LITE_MICRO_TEST(SimpleTestDilatedQuantizedPerChannel) { } TF_LITE_MICRO_TEST(SimpleTestQuantizedPerChannelRelu6) { - // conv params: - // padding, stride_, dilation_, activation - TfLiteConvParams conv_params = {kTfLitePaddingValid, 1, 1, kTfLiteActRelu6}; const int output_dims_count = 12; int8_t output_data[output_dims_count]; @@ -565,7 +563,7 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) { int input_zero_points[] = {1, 128}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), - tflite::testing::IntArrayFromInts(input_zero_points)}; + tflite::testing::IntArrayFromInts(input_zero_points), 0}; 
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; constexpr int inputs_size = 3; @@ -633,7 +631,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) { float input_scales[2] = {1, input_scale}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), - tflite::testing::IntArrayFromInts(input_zero_points)}; + tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; // Create per-layer quantized int8 filter tensor. @@ -644,7 +642,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) { float filter_scales[2] = {1, filter_scale}; TfLiteAffineQuantization filter_quant = { tflite::testing::FloatArrayFromFloats(filter_scales), - tflite::testing::IntArrayFromInts(filter_zero_points)}; + tflite::testing::IntArrayFromInts(filter_zero_points), 0}; filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant}; // Create per-layer quantized int32 bias tensor. @@ -658,7 +656,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) { float bias_scales[2] = {1, input_scale * filter_scale}; TfLiteAffineQuantization bias_quant = { tflite::testing::FloatArrayFromFloats(bias_scales), - tflite::testing::IntArrayFromInts(bias_zero_points)}; + tflite::testing::IntArrayFromInts(bias_zero_points), 0}; bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant}; // Create per-layer quantized int8 output tensor. 
@@ -668,7 +666,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) { float output_scales[2] = {1, output_scale}; TfLiteAffineQuantization output_quant = { tflite::testing::FloatArrayFromFloats(output_scales), - tflite::testing::IntArrayFromInts(output_zero_points)}; + tflite::testing::IntArrayFromInts(output_zero_points), 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; constexpr int inputs_size = 3; @@ -757,7 +755,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) { float input_scales[] = {1, input_scale}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), - tflite::testing::IntArrayFromInts(input_zero_points)}; + tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; // Create per-tensor quantized int8 filter tensor. @@ -770,7 +768,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) { float filter_scales[] = {1, filter_scale}; TfLiteAffineQuantization filter_quant = { tflite::testing::FloatArrayFromFloats(filter_scales), - tflite::testing::IntArrayFromInts(filter_zero_points)}; + tflite::testing::IntArrayFromInts(filter_zero_points), 0}; filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant}; // Create per-tensor quantized int32 bias tensor. @@ -786,7 +784,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) { float bias_scales[] = {1, input_scale * filter_scale}; TfLiteAffineQuantization bias_quant = { tflite::testing::FloatArrayFromFloats(bias_scales), - tflite::testing::IntArrayFromInts(bias_zero_points)}; + tflite::testing::IntArrayFromInts(bias_zero_points), 0}; bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant}; // Create per-tensor quantized int8 output tensor. 
@@ -798,7 +796,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) { float output_scales[] = {1, output_scale}; TfLiteAffineQuantization output_quant = { tflite::testing::FloatArrayFromFloats(output_scales), - tflite::testing::IntArrayFromInts(output_zero_points)}; + tflite::testing::IntArrayFromInts(output_zero_points), 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; // The 3 inputs include the input, filter and bias tensors. diff --git a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc index cd62de0d17e..4b9ac7ee775 100644 --- a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc +++ b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc @@ -157,15 +157,15 @@ void TestDepthwiseConvQuantizedPerLayer( // TODO(njeff): Affine Quantization Params should be set on tensor creation. float filter_scales[] = {1, filter_scale}; int filter_zero_points[] = {1, 128}; - TfLiteAffineQuantization filter_quant = { - FloatArrayFromFloats(filter_scales), - IntArrayFromInts(filter_zero_points)}; + TfLiteAffineQuantization filter_quant = {FloatArrayFromFloats(filter_scales), + IntArrayFromInts(filter_zero_points), + 0}; tensors[1].quantization = {kTfLiteAffineQuantization, &filter_quant}; float bias_scales[] = {1, filter_scale * input_scale}; int bias_zero_points[] = {1, 128}; TfLiteAffineQuantization bias_quant = {FloatArrayFromFloats(bias_scales), - IntArrayFromInts(bias_zero_points)}; + IntArrayFromInts(bias_zero_points), 0}; tensors[2].quantization = {kTfLiteAffineQuantization, &bias_quant}; AsymmetricQuantize(golden, golden_quantized, output_dims_count, output_scale, @@ -213,14 +213,15 @@ void TestDepthwiseConvQuantizedPerChannel( float input_scales[] = {1, input_scale}; int input_zero_points[] = {1, input_zero_point}; TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales), - IntArrayFromInts(input_zero_points)}; + IntArrayFromInts(input_zero_points), + 0}; 
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; float output_scales[] = {1, output_scale}; int output_zero_points[] = {1, output_zero_point}; - TfLiteAffineQuantization output_quant = { - FloatArrayFromFloats(output_scales), - IntArrayFromInts(output_zero_points)}; + TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales), + IntArrayFromInts(output_zero_points), + 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; constexpr int inputs_size = 3; @@ -249,14 +250,11 @@ void TestDepthwiseConvQuantizedPerChannel( TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(SimpleTest) { - const int input_elements = 12; const int input_shape[] = {4, 1, 3, 2, 2}; const float input_values[] = {1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}; - const int filter_elements = 16; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}; - const int bias_elements = 4; const int bias_shape[] = {4, 1, 1, 1, 4}; const float bias_values[] = {1, 2, 3, 4}; const float golden[] = { @@ -367,16 +365,12 @@ TF_LITE_MICRO_TEST(SimpleTestDilatedQuantized) { } TF_LITE_MICRO_TEST(SimpleTestRelu) { - const int input_elements = 12; const int input_shape[] = {4, 1, 3, 2, 2}; const float input_values[] = {1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}; - const int filter_elements = 16; const int filter_shape[] = {4, 1, 2, 2, 4}; const float filter_values[] = {1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16}; - const int bias_elements = 4; const int bias_shape[] = {4, 1, 1, 1, 4}; - const int output_elements = 8; const float bias_values[] = {1, 2, 3, 4}; const int output_shape[] = {4, 1, 2, 1, 4}; const int output_dims_count = 8; @@ -505,8 +499,6 @@ TF_LITE_MICRO_TEST(SimpleTestQuantizedPerChannel) { int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; - int zero_points[bias_elements + 1]; - float 
scales[bias_elements + 1]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; @@ -550,8 +542,6 @@ TF_LITE_MICRO_TEST(SimpleTestQuantizedPerChannelDepthMultiplier1) { int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; - int zero_points[bias_elements + 1]; - float scales[bias_elements + 1]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; @@ -583,7 +573,6 @@ TF_LITE_MICRO_TEST(TestQuantizedPerChannelDepthMultiplier1Relu6) { }; const int output_shape[] = {4, 1, 2, 1, 4}; int8_t output_data[output_elements]; - float output_float[output_elements]; const float input_scale = 0.023529f; const float output_scale = 0.023529f; @@ -594,8 +583,6 @@ TF_LITE_MICRO_TEST(TestQuantizedPerChannelDepthMultiplier1Relu6) { int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; - int zero_points[bias_elements + 1]; - float scales[bias_elements + 1]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActRelu6; @@ -640,8 +627,6 @@ TF_LITE_MICRO_TEST(SimpleTestDilatedQuantizedPerChannel) { int8_t filter_quantized[filter_elements]; int32_t bias_quantized[bias_elements]; int8_t golden_quantized[output_elements]; - int zero_points[bias_elements + 1]; - float scales[bias_elements + 1]; TfLiteDepthwiseConvParams conv_params; conv_params.activation = kTfLiteActNone; @@ -673,8 +658,6 @@ TF_LITE_MICRO_TEST(TestQuantizedPerChannelCompareWithFloat) { int8_t filter_quantized[filter_size]; int32_t bias_quantized[bias_size]; int8_t golden_quantized[output_size]; - int zero_points[bias_size + 1]; - float scales[bias_size + 1]; int8_t output_data[output_size]; float output_float[output_size]; @@ -707,7 +690,6 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) { const int bias_shape[] = {4, 1, 1, 1, 4}; const float bias_data[] = {3, -2, 4, 6}; const int output_shape[] = {4, 1, 
1, 2, 4}; - const float golden[] = {43, 48, 18, 22, 3, -4, -28, -36}; const int input_size = 12; const int filter_size = 16; @@ -720,7 +702,6 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) { int zero_points[bias_size + 1]; float scales[bias_size + 1]; int8_t output_data[output_size]; - float output_float[output_size]; const float input_scale = 0.5; const float output_scale = 1.0; @@ -753,7 +734,7 @@ TF_LITE_MICRO_TEST(FilterDimsNotMatchingAffineQuantization) { int input_zero_points[] = {1, input_zero_point}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), - tflite::testing::IntArrayFromInts(input_zero_points)}; + tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; constexpr int inputs_size = 3; @@ -829,7 +810,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) { float input_scales[2] = {1, input_scale}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), - tflite::testing::IntArrayFromInts(input_zero_points)}; + tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; // Create per-layer quantized int8 filter tensor. @@ -839,7 +820,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) { float filter_scales[2] = {1, filter_scale}; TfLiteAffineQuantization filter_quant = { tflite::testing::FloatArrayFromFloats(filter_scales), - tflite::testing::IntArrayFromInts(filter_zero_points)}; + tflite::testing::IntArrayFromInts(filter_zero_points), 0}; filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant}; // Create per-layer quantized int32 bias tensor. 
@@ -852,7 +833,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) { float bias_scales[2] = {1, input_scale * filter_scale}; TfLiteAffineQuantization bias_quant = { tflite::testing::FloatArrayFromFloats(bias_scales), - tflite::testing::IntArrayFromInts(bias_zero_points)}; + tflite::testing::IntArrayFromInts(bias_zero_points), 0}; bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant}; // Create per-layer quantized int8 output tensor. @@ -862,7 +843,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) { float output_scales[2] = {1, output_scale}; TfLiteAffineQuantization output_quant = { tflite::testing::FloatArrayFromFloats(output_scales), - tflite::testing::IntArrayFromInts(output_zero_points)}; + tflite::testing::IntArrayFromInts(output_zero_points), 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; constexpr int inputs_size = 3; @@ -967,7 +948,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) { float input_scales[] = {1, input_scale}; TfLiteAffineQuantization input_quant = { tflite::testing::FloatArrayFromFloats(input_scales), - tflite::testing::IntArrayFromInts(input_zero_points)}; + tflite::testing::IntArrayFromInts(input_zero_points), 0}; input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; // Create per-tensor quantized int8 filter tensor. @@ -980,7 +961,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) { float filter_scales[] = {1, filter_scale}; TfLiteAffineQuantization filter_quant = { tflite::testing::FloatArrayFromFloats(filter_scales), - tflite::testing::IntArrayFromInts(filter_zero_points)}; + tflite::testing::IntArrayFromInts(filter_zero_points), 0}; filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant}; // Create per-tensor quantized int32 bias tensor. 
@@ -997,7 +978,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) { float bias_scales[] = {1, input_scale * filter_scale}; TfLiteAffineQuantization bias_quant = { tflite::testing::FloatArrayFromFloats(bias_scales), - tflite::testing::IntArrayFromInts(bias_zero_points)}; + tflite::testing::IntArrayFromInts(bias_zero_points), 0}; bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant}; // Create per-tensor quantized int8 output tensor. @@ -1010,7 +991,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) { float output_scales[] = {1, output_scale}; TfLiteAffineQuantization output_quant = { tflite::testing::FloatArrayFromFloats(output_scales), - tflite::testing::IntArrayFromInts(output_zero_points)}; + tflite::testing::IntArrayFromInts(output_zero_points), 0}; output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; // The 3 inputs include the input, filter and bias tensors. @@ -1035,7 +1016,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) { conv_params.activation = kTfLiteActNone; conv_params.dilation_width_factor = 1; conv_params.dilation_height_factor = 1; - TfLiteStatus status = tflite::testing::ValidateDepthwiseConvGoldens( + tflite::testing::ValidateDepthwiseConvGoldens( golden_quantized, output_elements, &conv_params, kQuantizationTolerance, kTensorsSize, tensors); } diff --git a/tensorflow/lite/micro/kernels/dequantize_test.cc b/tensorflow/lite/micro/kernels/dequantize_test.cc index 21b42aedc50..5eb3d80e41e 100644 --- a/tensorflow/lite/micro/kernels/dequantize_test.cc +++ b/tensorflow/lite/micro/kernels/dequantize_test.cc @@ -68,7 +68,7 @@ void ValidateDequantizeGoldens(TfLiteTensor* tensors, int tensors_size, } for (int i = 0; i < output_length; ++i) { - TF_LITE_MICRO_EXPECT_NEAR(expected_output_data[i], output_data[i], 0.001); + TF_LITE_MICRO_EXPECT_NEAR(expected_output_data[i], output_data[i], 0.001f); } } @@ -113,7 +113,6 @@ void TestDequantizeToInt32(const int* input_dims_data, const 
float* input_data, CreateInt32Tensor(output_data, output_dims), }; - TfLiteQuantizationParams output_quant; tensors[1].params.scale = output_scale; tensors[1].params.zero_point = output_zero_point; diff --git a/tensorflow/lite/micro/kernels/fully_connected_test.cc b/tensorflow/lite/micro/kernels/fully_connected_test.cc index 5723248a408..f977904a37c 100644 --- a/tensorflow/lite/micro/kernels/fully_connected_test.cc +++ b/tensorflow/lite/micro/kernels/fully_connected_test.cc @@ -59,9 +59,8 @@ TfLiteStatus TestFullyConnectedFloat( TF_LITE_MICRO_EXPECT_NE(nullptr, registration); TfLiteFullyConnectedParams builtin_data = { - activation, - kTfLiteFullyConnectedWeightsFormatDefault, - }; + activation, kTfLiteFullyConnectedWeightsFormatDefault, false, false}; + const char* init_data = reinterpret_cast(&builtin_data); size_t init_data_size = 0; void* user_data = nullptr; @@ -133,9 +132,7 @@ TfLiteStatus TestFullyConnectedQuantized( TF_LITE_MICRO_EXPECT_NE(nullptr, registration); TfLiteFullyConnectedParams builtin_data = { - activation, - kTfLiteFullyConnectedWeightsFormatDefault, - }; + activation, kTfLiteFullyConnectedWeightsFormatDefault, false, false}; const char* init_data = reinterpret_cast(&builtin_data); size_t init_data_size = 0; void* user_data = nullptr; diff --git a/tensorflow/lite/micro/kernels/hard_swish_test.cc b/tensorflow/lite/micro/kernels/hard_swish_test.cc index cfedd523512..50cafc9b5e5 100644 --- a/tensorflow/lite/micro/kernels/hard_swish_test.cc +++ b/tensorflow/lite/micro/kernels/hard_swish_test.cc @@ -151,10 +151,6 @@ void TestHardSwishQuantizedBias(const int size, const T* output_data, float output_max, float tolerated_bias, float* float_input_values, float* float_ref_output_values) { - const float quantized_type_range = - static_cast(std::numeric_limits::max()) - - static_cast(std::numeric_limits::min()); - const float input_scale = ScaleFromMinMax(input_min, input_max); const float output_scale = ScaleFromMinMax(output_min, output_max); @@ -188,13 
+184,6 @@ void TestHardSwishQuantizedBias(const int size, const T* output_data, const int input_dims_data[] = {2, 1, size}; const int output_dims_data[] = {2, 1, size}; - // The numerical error for any 8bit quantized function is at least one half - // times the quantization step: 0.5 * (kOutMax - kOutMin) / 256. - // To that we add again the quantization step (kOutMax - kOutMin) / 256 - // to allow for an off-by-one rounding error. - const float kTolerance = - std::max(input_max - input_min, output_max - output_min) * (1.5f / 256.f); - TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data); TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); const int output_elements_count = ElementCount(*output_dims); diff --git a/tensorflow/lite/micro/kernels/maximum_minimum_test.cc b/tensorflow/lite/micro/kernels/maximum_minimum_test.cc index 8635db3b60b..39b892a8212 100644 --- a/tensorflow/lite/micro/kernels/maximum_minimum_test.cc +++ b/tensorflow/lite/micro/kernels/maximum_minimum_test.cc @@ -74,7 +74,7 @@ void TestMaxMinFloat(tflite::BuiltinOperator op, for (int i = 0; i < output_dims_count; ++i) { TF_LITE_MICRO_EXPECT_NEAR(expected_output_data.begin()[i], output_data[i], - 1e-5); + 1e-5f); } } diff --git a/tensorflow/lite/micro/kernels/pooling_test.cc b/tensorflow/lite/micro/kernels/pooling_test.cc index d1f21da7533..23d4b506d8e 100644 --- a/tensorflow/lite/micro/kernels/pooling_test.cc +++ b/tensorflow/lite/micro/kernels/pooling_test.cc @@ -54,8 +54,13 @@ void TestAveragePoolingFloat(std::initializer_list input_dims_data, resolver.FindOp(tflite::BuiltinOperator_AVERAGE_POOL_2D); TF_LITE_MICRO_EXPECT_NE(nullptr, registration); - TfLitePoolParams builtin_data = {padding, stride_width, stride_height, - filter_width, filter_height, activation}; + TfLitePoolParams builtin_data = {padding, + stride_width, + stride_height, + filter_width, + filter_height, + activation, + {}}; const char* init_data = reinterpret_cast(&builtin_data); size_t init_data_size = 0; 
void* user_data = nullptr; @@ -122,8 +127,13 @@ void TestAveragePoolingQuantized( resolver.FindOp(tflite::BuiltinOperator_AVERAGE_POOL_2D); TF_LITE_MICRO_EXPECT_NE(nullptr, registration); - TfLitePoolParams builtin_data = {padding, stride_width, stride_height, - filter_width, filter_height, activation}; + TfLitePoolParams builtin_data = {padding, + stride_width, + stride_height, + filter_width, + filter_height, + activation, + {}}; const char* init_data = reinterpret_cast(&builtin_data); size_t init_data_size = 0; void* user_data = nullptr; @@ -185,10 +195,13 @@ void TestMaxPoolFloat(std::initializer_list input_dims_data, resolver.FindOp(tflite::BuiltinOperator_MAX_POOL_2D); TF_LITE_MICRO_EXPECT_NE(nullptr, registration); - TfLitePoolParams builtin_data = { - padding, stride_width, stride_height, - filter_width, filter_height, activation, - }; + TfLitePoolParams builtin_data = {padding, + stride_width, + stride_height, + filter_width, + filter_height, + activation, + {}}; const char* init_data = reinterpret_cast(&builtin_data); size_t init_data_size = 0; @@ -255,10 +268,13 @@ void TestMaxPoolQuantized(std::initializer_list input_dims_data, resolver.FindOp(tflite::BuiltinOperator_MAX_POOL_2D); TF_LITE_MICRO_EXPECT_NE(nullptr, registration); - TfLitePoolParams builtin_data = { - padding, stride_width, stride_height, - filter_width, filter_height, activation, - }; + TfLitePoolParams builtin_data = {padding, + stride_width, + stride_height, + filter_width, + filter_height, + activation, + {}}; const char* init_data = reinterpret_cast(&builtin_data); size_t init_data_size = 0; diff --git a/tensorflow/lite/micro/kernels/prelu_test.cc b/tensorflow/lite/micro/kernels/prelu_test.cc index 4b4bfd12e60..ae5bacca988 100644 --- a/tensorflow/lite/micro/kernels/prelu_test.cc +++ b/tensorflow/lite/micro/kernels/prelu_test.cc @@ -170,8 +170,6 @@ TF_LITE_MICRO_TEST(QuantizedUint8PreluActivationsOpTest) { using tflite::testing::F2Q; const float kMin = -4; const float kMax = 127.f / 
32.f; - const float kAlphaMin = -0.5f; - const float kAlphaMax = 0.5f; const int output_dims_count = 12; uint8_t output_data[output_dims_count]; tflite::testing::TestPreluQuantized( @@ -197,8 +195,6 @@ TF_LITE_MICRO_TEST(QuantizedInt8PreluActivationsOpTest) { using tflite::testing::F2QS; const float kMin = -1; const float kMax = 127.f / 128.f; - const float kAlphaMin = -0.5f; - const float kAlphaMax = 0.5f; const int output_dims_count = 12; int8_t output_data[output_dims_count]; tflite::testing::TestPreluQuantized( diff --git a/tensorflow/lite/micro/kernels/quantization_util_test.cc b/tensorflow/lite/micro/kernels/quantization_util_test.cc index e9b219128fe..5929f5fd7b5 100644 --- a/tensorflow/lite/micro/kernels/quantization_util_test.cc +++ b/tensorflow/lite/micro/kernels/quantization_util_test.cc @@ -27,40 +27,55 @@ void RunSafeCastTests() { const IntOut imin = std::numeric_limits::min(); const bool s = std::numeric_limits::is_signed; if (s) { - TF_LITE_MICRO_EXPECT_LT(imin, 0); + TF_LITE_MICRO_EXPECT_LT(static_cast(imin), 0); } else { - TF_LITE_MICRO_EXPECT_EQ(0, imin); + TF_LITE_MICRO_EXPECT_EQ(static_cast(0), imin); } // Some basic tests. 
- TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(0.0)), 0); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-0.0)), 0); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(0.99)), 0); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(1.0)), 1); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(1.01)), 1); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(1.99)), 1); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(2.0)), 2); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(2.01)), 2); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-0.99)), 0); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(0.0)), + static_cast(0)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-0.0)), + static_cast(0)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(0.99)), + static_cast(0)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(1.0)), + static_cast(1)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(1.01)), + static_cast(1)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(1.99)), + static_cast(1)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(2.0)), + static_cast(2)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(2.01)), + static_cast(2)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-0.99)), + static_cast(0)); TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-1.0)), - s ? -1 : 0); + s ? static_cast(-1) : static_cast(0)); TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-1.01)), - s ? -1 : 0); + s ? static_cast(-1) : static_cast(0)); TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-1.99)), - s ? -1 : 0); + s ? static_cast(-1) : static_cast(0)); TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-2.0)), - s ? -2 : 0); + s ? static_cast(-2) : static_cast(0)); TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-2.01)), - s ? -2 : 0); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(117.9)), 117); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(118.0)), 118); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(118.1)), 118); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-117.9)), - s ? 
-117 : 0); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-118.0)), - s ? -118 : 0); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(-118.1)), - s ? -118 : 0); + s ? static_cast(-2) : static_cast(0)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(117.9)), + static_cast(117)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(118.0)), + static_cast(118)); + TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(118.1)), + static_cast(118)); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(-117.9)), + s ? static_cast(-117) : static_cast(0)); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(-118.0)), + s ? static_cast(-118) : static_cast(0)); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(-118.1)), + s ? static_cast(-118) : static_cast(0)); // Some edge cases. TF_LITE_MICRO_EXPECT_EQ(SafeCast(std::numeric_limits::max()), @@ -72,52 +87,66 @@ void RunSafeCastTests() { TF_LITE_MICRO_EXPECT_EQ( SafeCast(-std::numeric_limits::infinity()), imin); TF_LITE_MICRO_EXPECT_EQ( - SafeCast(std::numeric_limits::quiet_NaN()), 0); + SafeCast(std::numeric_limits::quiet_NaN()), + static_cast(0)); // Some larger numbers. - if (sizeof(IntOut) >= 4 && sizeof(FloatIn) > 4) { + if (sizeof(IntOut) >= static_cast(4) && + sizeof(FloatIn) > static_cast(4)) { TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast(0x76543210)), - 0x76543210); + static_cast(0x76543210)); } if (sizeof(FloatIn) > sizeof(IntOut)) { // Check values near imax. 
- TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) + 0.1)), - imax); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) + 0.99)), - imax); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) + 1.0)), - imax); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) + 1.99)), - imax); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) + 2.0)), - imax); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) - 0.1)), - imax - 1); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) - 0.99)), - imax - 1); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) - 1.0)), - imax - 1); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) - 1.01)), - imax - 2); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) - 1.99)), - imax - 2); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) - 2.0)), - imax - 2); - TF_LITE_MICRO_EXPECT_EQ(SafeCast(static_cast( - static_cast(imax) - 2.01)), - imax - 3); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) + + static_cast(0.1))), + imax); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) + + static_cast(0.99))), + imax); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) + + static_cast(1.0))), + imax); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) + + static_cast(1.99))), + imax); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) + + static_cast(2.0))), + imax); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) - + static_cast(0.1))), + imax - 1); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) - + static_cast(0.99))), + imax - 1); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) - + static_cast(1.0))), + imax - 1); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) - + static_cast(1.01))), + imax - 2); + 
TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) - + static_cast(1.99))), + imax - 2); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) - + static_cast(2.0))), + imax - 2); + TF_LITE_MICRO_EXPECT_EQ( + SafeCast(static_cast(static_cast(imax) - + static_cast(2.01))), + imax - 3); } // Check values considerably larger in magnitude than imin and imax @@ -210,30 +239,30 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_IntegerFrExp) { TF_LITE_MICRO_EXPECT_EQ(0, shift); result = tflite::IntegerFrExp(1.0, &shift); - TF_LITE_MICRO_EXPECT_NEAR(0x40000000, result, 1); + TF_LITE_MICRO_EXPECT_NEAR(0x40000000, result, 1ll); TF_LITE_MICRO_EXPECT_EQ(1, shift); result = tflite::IntegerFrExp(0.25, &shift); - TF_LITE_MICRO_EXPECT_NEAR(0x40000000, result, 1); + TF_LITE_MICRO_EXPECT_NEAR(0x40000000, result, 1ll); TF_LITE_MICRO_EXPECT_EQ(-1, shift); result = tflite::IntegerFrExp(-1.0, &shift); - TF_LITE_MICRO_EXPECT_NEAR(-(1 << 30), result, 1); + TF_LITE_MICRO_EXPECT_NEAR(-(1 << 30), result, 1ll); TF_LITE_MICRO_EXPECT_EQ(1, shift); result = tflite::IntegerFrExp(123.45, &shift); - TF_LITE_MICRO_EXPECT_NEAR(2071147315, result, 1); + TF_LITE_MICRO_EXPECT_NEAR(2071147315, result, 1ll); TF_LITE_MICRO_EXPECT_EQ(7, shift); - result = tflite::IntegerFrExp(NAN, &shift); + result = tflite::IntegerFrExp(static_cast(NAN), &shift); TF_LITE_MICRO_EXPECT_NEAR(0, result, 1); TF_LITE_MICRO_EXPECT_EQ(0x7fffffff, shift); - result = tflite::IntegerFrExp(INFINITY, &shift); + result = tflite::IntegerFrExp(static_cast(INFINITY), &shift); TF_LITE_MICRO_EXPECT_NEAR(std::numeric_limits::max(), result, 1); TF_LITE_MICRO_EXPECT_EQ(0x7fffffff, shift); - result = tflite::IntegerFrExp(-INFINITY, &shift); + result = tflite::IntegerFrExp(-static_cast(INFINITY), &shift); TF_LITE_MICRO_EXPECT_NEAR(std::numeric_limits::min(), result, 1); TF_LITE_MICRO_EXPECT_EQ(0x7fffffff, shift); } @@ -301,11 +330,11 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_DoubleFromFractionAndShift) { result = 
tflite::DoubleFromFractionAndShift(fraction, shift); TF_LITE_MICRO_EXPECT_NEAR(-23.232323, result, 1e-5); - fraction = tflite::IntegerFrExp(NAN, &shift); + fraction = tflite::IntegerFrExp(static_cast(NAN), &shift); result = tflite::DoubleFromFractionAndShift(fraction, shift); TF_LITE_MICRO_EXPECT_TRUE(std::isnan(result)); - fraction = tflite::IntegerFrExp(INFINITY, &shift); + fraction = tflite::IntegerFrExp(static_cast(INFINITY), &shift); result = tflite::DoubleFromFractionAndShift(fraction, shift); TF_LITE_MICRO_EXPECT_FALSE(std::isfinite(result)); } @@ -326,10 +355,10 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_IntegerDoubleMultiply) { 1e-5); TF_LITE_MICRO_EXPECT_NEAR( 15000000.0, tflite::IntegerDoubleMultiply(3000.0, 5000.0), 1e-5); - TF_LITE_MICRO_EXPECT_TRUE( - std::isnan(tflite::IntegerDoubleMultiply(NAN, 5000.0))); - TF_LITE_MICRO_EXPECT_TRUE( - std::isnan(tflite::IntegerDoubleMultiply(3000.0, NAN))); + TF_LITE_MICRO_EXPECT_TRUE(std::isnan( + tflite::IntegerDoubleMultiply(static_cast(NAN), 5000.0))); + TF_LITE_MICRO_EXPECT_TRUE(std::isnan( + tflite::IntegerDoubleMultiply(3000.0, static_cast(NAN)))); } TF_LITE_MICRO_TEST(QuantizationUtilTest_IntegerDoubleCompare) { @@ -339,8 +368,12 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_IntegerDoubleCompare) { TF_LITE_MICRO_EXPECT_EQ(0, tflite::IntegerDoubleCompare(0.0, 0.0)); TF_LITE_MICRO_EXPECT_EQ(-1, tflite::IntegerDoubleCompare(-10.0, 10.0)); TF_LITE_MICRO_EXPECT_EQ(1, tflite::IntegerDoubleCompare(123.45, 10.0)); - TF_LITE_MICRO_EXPECT_EQ(1, tflite::IntegerDoubleCompare(NAN, INFINITY)); - TF_LITE_MICRO_EXPECT_EQ(1, tflite::IntegerDoubleCompare(INFINITY, NAN)); + TF_LITE_MICRO_EXPECT_EQ( + 1, tflite::IntegerDoubleCompare(static_cast(NAN), + static_cast(INFINITY))); + TF_LITE_MICRO_EXPECT_EQ( + 1, tflite::IntegerDoubleCompare(static_cast(INFINITY), + static_cast(NAN))); } TF_LITE_MICRO_TEST(QuantizationUtilTest_PreprocessSoftmaxScaling) { diff --git a/tensorflow/lite/micro/kernels/reduce_test.cc 
b/tensorflow/lite/micro/kernels/reduce_test.cc index 928dda287aa..65f32efaf5d 100644 --- a/tensorflow/lite/micro/kernels/reduce_test.cc +++ b/tensorflow/lite/micro/kernels/reduce_test.cc @@ -38,10 +38,6 @@ static const int kOutputElements = 4; static const int kOutputShape[] = {4, 2, 1, 1, 2}; static const float kGoldenData[] = {6, 7, 18, 19}; -static TfLiteReducerParams params = { - true // keep_dims -}; - template TfLiteStatus ValidateReduceGoldens(TfLiteTensor* tensors, int tensors_size, const T* expected_output_data, @@ -135,11 +131,15 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(MeanFloat4DKeepDims) { float output_data[tflite::testing::kOutputElements]; + TfLiteReducerParams params = { + true // keep_dims + }; + tflite::testing::TestMeanFloatInput4D( tflite::testing::kInputShape4D, tflite::testing::kInputData4D, tflite::testing::kAxisShape, tflite::testing::kAxisData, tflite::testing::kOutputShape, tflite::testing::kGoldenData, output_data, - &tflite::testing::params); + ¶ms); } TF_LITE_MICRO_TEST(MeanFloat4DWithoutKeepDims) { diff --git a/tensorflow/lite/micro/kernels/reshape_test.cc b/tensorflow/lite/micro/kernels/reshape_test.cc index 5913c7f86bb..2c84ac1ff04 100644 --- a/tensorflow/lite/micro/kernels/reshape_test.cc +++ b/tensorflow/lite/micro/kernels/reshape_test.cc @@ -46,8 +46,8 @@ void TestReshapeImpl(TfLiteContext* context, TfLiteNode* node, node->custom_initial_data = nullptr; node->custom_initial_data_size = 0; - TF_LITE_MICRO_EXPECT_EQ(registration->init, nullptr); - TF_LITE_MICRO_EXPECT_EQ(registration->free, nullptr); + TF_LITE_MICRO_EXPECT(registration->init == nullptr); + TF_LITE_MICRO_EXPECT(registration->free == nullptr); if (registration->prepare) { // Error can happen either in Prepare or eval stage. 
@@ -64,14 +64,14 @@ void TestReshapeImpl(TfLiteContext* context, TfLiteNode* node, } TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, registration->invoke(context, node)); - const int output_dims_count = ElementCount(*output_tensor->dims); const T* output_data = GetTensorData(output_tensor); - for (int i = 0; i < expected_output.size(); ++i) { + for (size_t i = 0; i < expected_output.size(); ++i) { TF_LITE_MICRO_EXPECT_NEAR(expected_output.begin()[i], output_data[i], 1e-5f); } - TF_LITE_MICRO_EXPECT_EQ(expected_dims.size(), output_tensor->dims->size); - for (int i = 0; i < expected_dims.size(); ++i) { + TF_LITE_MICRO_EXPECT_EQ(expected_dims.size(), + static_cast(output_tensor->dims->size)); + for (size_t i = 0; i < expected_dims.size(); ++i) { TF_LITE_MICRO_EXPECT_NEAR(expected_dims.begin()[i], output_tensor->dims->data[i], 1e-5f); } diff --git a/tensorflow/lite/micro/kernels/strided_slice_test.cc b/tensorflow/lite/micro/kernels/strided_slice_test.cc index 6ef162aea3d..4387e4bdde3 100644 --- a/tensorflow/lite/micro/kernels/strided_slice_test.cc +++ b/tensorflow/lite/micro/kernels/strided_slice_test.cc @@ -124,8 +124,7 @@ void TestStrideSlide(std::initializer_list input_shape, if (registration->free) { registration->free(&context, user_data); } - auto* output_tensor = &context.tensors[node.outputs->data[0]]; - for (int i = 0; i < expected_output.size(); ++i) { + for (size_t i = 0; i < expected_output.size(); ++i) { TF_LITE_MICRO_EXPECT_NEAR(expected_output.begin()[i], output_data[i], 1e-5f); } diff --git a/tensorflow/lite/micro/kernels/sub_test.cc b/tensorflow/lite/micro/kernels/sub_test.cc index 169f3ad9568..9c8d476352e 100644 --- a/tensorflow/lite/micro/kernels/sub_test.cc +++ b/tensorflow/lite/micro/kernels/sub_test.cc @@ -431,12 +431,6 @@ TF_LITE_MICRO_TEST(QuantizedSubWithScalarBroadcastUint8) { } } TF_LITE_MICRO_TEST(QuantizedSubWithScalarBroadcastFloat) { - const float scales[] = {0.1, 0.05, 0.1}; - const int zero_points[] = {127, 131, 139}; - uint8_t 
input1_quantized[tflite::testing::broadcast_output_dims_count]; - uint8_t input2_quantized[tflite::testing::broadcast_output_dims_count]; - uint8_t golden_quantized[tflite::testing::broadcast_output_dims_count]; - uint8_t output[tflite::testing::broadcast_output_dims_count]; float output_float[tflite::testing::broadcast_output_dims_count]; for (int i = 0; i < tflite::testing::broadcast_num_shapes; ++i) { @@ -491,7 +485,6 @@ TF_LITE_MICRO_TEST(QuantizedSubWithMixedBroadcastUint8) { uint8_t input2_quantized[tflite::testing::broadcast_output_dims_count]; uint8_t golden_quantized[tflite::testing::broadcast_output_dims_count]; uint8_t output[tflite::testing::broadcast_output_dims_count]; - float output_float[tflite::testing::broadcast_output_dims_count]; for (int i = 0; i < tflite::testing::broadcast_num_shapes; ++i) { tflite::testing::TestSubQuantized( @@ -512,7 +505,6 @@ TF_LITE_MICRO_TEST(QuantizedSubWithMixedBroadcastInt8) { int8_t input2_quantized[tflite::testing::broadcast_output_dims_count]; int8_t golden_quantized[tflite::testing::broadcast_output_dims_count]; int8_t output[tflite::testing::broadcast_output_dims_count]; - float output_float[tflite::testing::broadcast_output_dims_count]; for (int i = 0; i < tflite::testing::broadcast_num_shapes; ++i) { tflite::testing::TestSubQuantized( diff --git a/tensorflow/lite/micro/kernels/svdf_test.cc b/tensorflow/lite/micro/kernels/svdf_test.cc index ea129efaaa8..fc0a91481fb 100644 --- a/tensorflow/lite/micro/kernels/svdf_test.cc +++ b/tensorflow/lite/micro/kernels/svdf_test.cc @@ -409,34 +409,35 @@ inline void TestIntegerSVDF( // Input quant params: float input_scales[] = {1, input_scale}; TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales), - IntArrayFromInts(zero_points)}; + IntArrayFromInts(zero_points), 0}; tensors[0].quantization = {kTfLiteAffineQuantization, &input_quant}; // Weights features quant params: float weights_features_scales[] = {1, weights_feature_scale}; TfLiteAffineQuantization 
weights_feature_quant = { FloatArrayFromFloats(weights_features_scales), - IntArrayFromInts(zero_points)}; + IntArrayFromInts(zero_points), 0}; tensors[1].quantization = {kTfLiteAffineQuantization, &weights_feature_quant}; // Weights time quant params: float weights_time_scales[] = {1, weights_time_scale}; TfLiteAffineQuantization weights_time_quant = { - FloatArrayFromFloats(weights_time_scales), IntArrayFromInts(zero_points)}; + FloatArrayFromFloats(weights_time_scales), IntArrayFromInts(zero_points), + 0}; tensors[2].quantization = {kTfLiteAffineQuantization, &weights_time_quant}; // Activation state quant params: float activation_state_scales[] = {1, activation_scale}; TfLiteAffineQuantization activation_state_quant = { FloatArrayFromFloats(activation_state_scales), - IntArrayFromInts(zero_points)}; + IntArrayFromInts(zero_points), 0}; tensors[4].quantization = {kTfLiteAffineQuantization, &activation_state_quant}; // Output quant params: float output_scales[] = {1, output_scale}; TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales), - IntArrayFromInts(zero_points)}; + IntArrayFromInts(zero_points), 0}; tensors[5].quantization = {kTfLiteAffineQuantization, &output_quant}; ValidateIntegerSVDFGoldens( @@ -627,7 +628,6 @@ TF_LITE_MICRO_TEST(SvdfIntegerInputSize2Rank1ShouldMatchGolden) { int8_t weights_feature_data[] = {-81, -92, 2, 96, 57, 32, 71, 70, 100, -92, -17, -27}; - const int weights_feature_dims_count = num_filters * input_size; int16_t weights_time_data[] = { -10464, 12324, 9142, -11842, -11836, 7273, 9029, -2175, 260, 4067, @@ -635,7 +635,6 @@ TF_LITE_MICRO_TEST(SvdfIntegerInputSize2Rank1ShouldMatchGolden) { -12098, 12461, -7072, 8870, 7739, 11447, 5954, 11765, -5733, 10643, -3534, 8912, 4693, -7761, -8886, -519, -4898, 5067, 3205, -1107, }; - const int weights_time_dims_count = num_filters * memory_size; int32_t bias_data[] = {-409707, 641518, 1662434, -113372}; @@ -669,12 +668,6 @@ 
TF_LITE_MICRO_TEST(SvdfIntegerInputSize2Rank1ShouldMatchGolden) { batch_size * memory_size * num_filters; int16_t activation_state_data[activation_state_dims_count]; - const int scratch_dims_count = batch_size * num_filters; - int32_t scratch_data[scratch_dims_count]; - - const int scratch_output_dims_count = batch_size * num_units; - int32_t scratch_output_data[scratch_output_dims_count]; - const int output_dims_count = batch_size * num_units; int8_t output_data[output_dims_count]; diff --git a/tensorflow/lite/micro/kernels/tanh_test.cc b/tensorflow/lite/micro/kernels/tanh_test.cc index 54c9816c9a9..4ad51a189ec 100644 --- a/tensorflow/lite/micro/kernels/tanh_test.cc +++ b/tensorflow/lite/micro/kernels/tanh_test.cc @@ -217,7 +217,7 @@ TF_LITE_MICRO_TEST(SimpleTestTanhUInt8) { const float input_scale = 16 / 256.f; const int input_zero_point = 128; - const float output_scale = 1.99999955 / 256.f; + const float output_scale = 1.99999955f / 256.f; const int output_zero_point = 128; const int input_shape[] = {2, 1, tanh_vec_size}; @@ -245,7 +245,7 @@ TF_LITE_MICRO_TEST(SimpleTestTanhUInt8) { const float input_scale = 16 / 256.f; const int input_zero_point = 0; - const float output_scale = 1.99999955 / 256.f; + const float output_scale = 1.99999955f / 256.f; const int output_zero_point = 0; const int input_shape[] = {2, 1, tanh_vec_size}; diff --git a/tensorflow/lite/micro/memory_helpers_test.cc b/tensorflow/lite/micro/memory_helpers_test.cc index 82096c6890d..25ade769b01 100644 --- a/tensorflow/lite/micro/memory_helpers_test.cc +++ b/tensorflow/lite/micro/memory_helpers_test.cc @@ -33,78 +33,78 @@ TF_LITE_MICRO_TEST(TestAlignPointerUp) { uint8_t* input0 = reinterpret_cast(0); uint8_t* input0_aligned1 = tflite::AlignPointerUp(input0, 1); - TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned1); + TF_LITE_MICRO_EXPECT(input0 == input0_aligned1); uint8_t* input0_aligned2 = tflite::AlignPointerUp(input0, 2); - TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned2); + 
TF_LITE_MICRO_EXPECT(input0 == input0_aligned2); uint8_t* input0_aligned3 = tflite::AlignPointerUp(input0, 3); - TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned3); + TF_LITE_MICRO_EXPECT(input0 == input0_aligned3); uint8_t* input0_aligned16 = tflite::AlignPointerUp(input0, 16); - TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned16); + TF_LITE_MICRO_EXPECT(input0 == input0_aligned16); uint8_t* input23 = reinterpret_cast(23); uint8_t* input23_aligned1 = tflite::AlignPointerUp(input23, 1); - TF_LITE_MICRO_EXPECT_EQ(input23, input23_aligned1); + TF_LITE_MICRO_EXPECT(input23 == input23_aligned1); uint8_t* input23_aligned2 = tflite::AlignPointerUp(input23, 2); uint8_t* expected23_aligned2 = reinterpret_cast(24); - TF_LITE_MICRO_EXPECT_EQ(expected23_aligned2, input23_aligned2); + TF_LITE_MICRO_EXPECT(expected23_aligned2 == input23_aligned2); uint8_t* input23_aligned3 = tflite::AlignPointerUp(input23, 3); uint8_t* expected23_aligned3 = reinterpret_cast(24); - TF_LITE_MICRO_EXPECT_EQ(expected23_aligned3, input23_aligned3); + TF_LITE_MICRO_EXPECT(expected23_aligned3 == input23_aligned3); uint8_t* input23_aligned16 = tflite::AlignPointerUp(input23, 16); uint8_t* expected23_aligned16 = reinterpret_cast(32); - TF_LITE_MICRO_EXPECT_EQ(expected23_aligned16, input23_aligned16); + TF_LITE_MICRO_EXPECT(expected23_aligned16 == input23_aligned16); } TF_LITE_MICRO_TEST(TestAlignPointerDown) { uint8_t* input0 = reinterpret_cast(0); uint8_t* input0_aligned1 = tflite::AlignPointerDown(input0, 1); - TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned1); + TF_LITE_MICRO_EXPECT(input0 == input0_aligned1); uint8_t* input0_aligned2 = tflite::AlignPointerDown(input0, 2); - TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned2); + TF_LITE_MICRO_EXPECT(input0 == input0_aligned2); uint8_t* input0_aligned3 = tflite::AlignPointerDown(input0, 3); - TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned3); + TF_LITE_MICRO_EXPECT(input0 == input0_aligned3); uint8_t* input0_aligned16 = tflite::AlignPointerDown(input0, 16); - 
TF_LITE_MICRO_EXPECT_EQ(input0, input0_aligned16); + TF_LITE_MICRO_EXPECT(input0 == input0_aligned16); uint8_t* input23 = reinterpret_cast(23); uint8_t* input23_aligned1 = tflite::AlignPointerDown(input23, 1); - TF_LITE_MICRO_EXPECT_EQ(input23, input23_aligned1); + TF_LITE_MICRO_EXPECT(input23 == input23_aligned1); uint8_t* input23_aligned2 = tflite::AlignPointerDown(input23, 2); uint8_t* expected23_aligned2 = reinterpret_cast(22); - TF_LITE_MICRO_EXPECT_EQ(expected23_aligned2, input23_aligned2); + TF_LITE_MICRO_EXPECT(expected23_aligned2 == input23_aligned2); uint8_t* input23_aligned3 = tflite::AlignPointerDown(input23, 3); uint8_t* expected23_aligned3 = reinterpret_cast(21); - TF_LITE_MICRO_EXPECT_EQ(expected23_aligned3, input23_aligned3); + TF_LITE_MICRO_EXPECT(expected23_aligned3 == input23_aligned3); uint8_t* input23_aligned16 = tflite::AlignPointerDown(input23, 16); uint8_t* expected23_aligned16 = reinterpret_cast(16); - TF_LITE_MICRO_EXPECT_EQ(expected23_aligned16, input23_aligned16); + TF_LITE_MICRO_EXPECT(expected23_aligned16 == input23_aligned16); } TF_LITE_MICRO_TEST(TestAlignSizeUp) { - TF_LITE_MICRO_EXPECT_EQ(1, tflite::AlignSizeUp(1, 1)); - TF_LITE_MICRO_EXPECT_EQ(2, tflite::AlignSizeUp(1, 2)); - TF_LITE_MICRO_EXPECT_EQ(3, tflite::AlignSizeUp(1, 3)); - TF_LITE_MICRO_EXPECT_EQ(16, tflite::AlignSizeUp(1, 16)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(1), tflite::AlignSizeUp(1, 1)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(2), tflite::AlignSizeUp(1, 2)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(3), tflite::AlignSizeUp(1, 3)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(16), tflite::AlignSizeUp(1, 16)); - TF_LITE_MICRO_EXPECT_EQ(23, tflite::AlignSizeUp(23, 1)); - TF_LITE_MICRO_EXPECT_EQ(24, tflite::AlignSizeUp(23, 2)); - TF_LITE_MICRO_EXPECT_EQ(24, tflite::AlignSizeUp(23, 3)); - TF_LITE_MICRO_EXPECT_EQ(32, tflite::AlignSizeUp(23, 16)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(23), tflite::AlignSizeUp(23, 1)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(24), 
tflite::AlignSizeUp(23, 2)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(24), tflite::AlignSizeUp(23, 3)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(32), tflite::AlignSizeUp(23, 16)); } TF_LITE_MICRO_TEST(TestTypeSizeOf) { @@ -157,16 +157,16 @@ TF_LITE_MICRO_TEST(TestBytesRequiredForTensor) { TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::BytesRequiredForTensor(*tensor100, &bytes, &type_size, micro_test::reporter)); - TF_LITE_MICRO_EXPECT_EQ(400, bytes); - TF_LITE_MICRO_EXPECT_EQ(4, type_size); + TF_LITE_MICRO_EXPECT_EQ(static_cast(400), bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(4), type_size); const tflite::Tensor* tensor200 = tflite::testing::Create1dFlatbufferTensor(200); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::BytesRequiredForTensor(*tensor200, &bytes, &type_size, micro_test::reporter)); - TF_LITE_MICRO_EXPECT_EQ(800, bytes); - TF_LITE_MICRO_EXPECT_EQ(4, type_size); + TF_LITE_MICRO_EXPECT_EQ(static_cast(800), bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(4), type_size); } TF_LITE_MICRO_TEST(TestAllocateOutputDimensionsFromInput) { diff --git a/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc b/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc index 923013845fa..12e5b392cc5 100644 --- a/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc +++ b/tensorflow/lite/micro/memory_planner/greedy_memory_planner_test.cc @@ -32,7 +32,6 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestReverseSortInPlace) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; constexpr int a_size = 10; int a_values[a_size] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1}; @@ -92,179 +91,182 @@ TF_LITE_MICRO_TEST(TestReverseSortInPlace) { TF_LITE_MICRO_TEST(TestGreedyBasics) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; tflite::GreedyMemoryPlanner planner(g_scratch_buffer, kScratchBufferSize); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - 
planner.AddBuffer(error_reporter, 10, 0, 1)); + planner.AddBuffer(µ_error_reporter, 10, 0, 1)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 20, 2, 3)); + planner.AddBuffer(µ_error_reporter, 20, 2, 3)); - TF_LITE_MICRO_EXPECT_EQ(false, planner.DoAnyBuffersOverlap(error_reporter)); + TF_LITE_MICRO_EXPECT_EQ(false, + planner.DoAnyBuffersOverlap(µ_error_reporter)); - TF_LITE_MICRO_EXPECT_EQ(20, planner.GetMaximumMemorySize()); + TF_LITE_MICRO_EXPECT_EQ(static_cast(20), + planner.GetMaximumMemorySize()); int offset = -1; TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 0, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 0, &offset)); TF_LITE_MICRO_EXPECT_EQ(0, offset); TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 1, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 1, &offset)); TF_LITE_MICRO_EXPECT_EQ(0, offset); } TF_LITE_MICRO_TEST(TestGreedyMedium) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; tflite::GreedyMemoryPlanner planner(g_scratch_buffer, kScratchBufferSize); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 10, 0, 1)); + planner.AddBuffer(µ_error_reporter, 10, 0, 1)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 20, 1, 2)); + planner.AddBuffer(µ_error_reporter, 20, 1, 2)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 30, 2, 3)); + planner.AddBuffer(µ_error_reporter, 30, 2, 3)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 40, 3, 4)); + planner.AddBuffer(µ_error_reporter, 40, 3, 4)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 50, 0, 1)); + planner.AddBuffer(µ_error_reporter, 50, 0, 1)); int offset = -1; TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 0, &offset)); + kTfLiteOk, 
planner.GetOffsetForBuffer(µ_error_reporter, 0, &offset)); TF_LITE_MICRO_EXPECT_EQ(50, offset); TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 1, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 1, &offset)); TF_LITE_MICRO_EXPECT_EQ(70, offset); TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 2, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 2, &offset)); TF_LITE_MICRO_EXPECT_EQ(40, offset); TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 3, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 3, &offset)); TF_LITE_MICRO_EXPECT_EQ(0, offset); TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 4, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 4, &offset)); TF_LITE_MICRO_EXPECT_EQ(0, offset); - planner.PrintMemoryPlan(error_reporter); + planner.PrintMemoryPlan(µ_error_reporter); - TF_LITE_MICRO_EXPECT_EQ(false, planner.DoAnyBuffersOverlap(error_reporter)); + TF_LITE_MICRO_EXPECT_EQ(false, + planner.DoAnyBuffersOverlap(µ_error_reporter)); - TF_LITE_MICRO_EXPECT_EQ(90, planner.GetMaximumMemorySize()); + TF_LITE_MICRO_EXPECT_EQ(static_cast(90), + planner.GetMaximumMemorySize()); } TF_LITE_MICRO_TEST(TestPersonDetectionModel) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; tflite::GreedyMemoryPlanner planner(g_scratch_buffer, kScratchBufferSize); // These buffer sizes and time ranges are taken from the 250KB MobileNet model // used in the person detection example. 
+ TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 9216, 0, 29)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 0, 29)); + planner.AddBuffer(µ_error_reporter, 3, 28, 29)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 256, 27, 28)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 26, 27)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 25, 26)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 24, 25)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 1152, 23, 24)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 22, 23)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 21, 22)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 20, 21)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 19, 20)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 18, 19)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 17, 18)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 16, 17)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 15, 16)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 14, 15)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 13, 14)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 12, 13)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 11, 12)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 9216, 10, 11)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 9216, 9, 10)); 
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 3, 28, 29)); + planner.AddBuffer(µ_error_reporter, 9216, 8, 9)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 256, 27, 28)); + planner.AddBuffer(µ_error_reporter, 4608, 7, 8)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 6, 7)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 5, 6)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 4, 5)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 26, 27)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 25, 26)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 24, 25)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 1152, 23, 24)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 22, 23)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 21, 22)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 20, 21)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 19, 20)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 18, 19)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 17, 18)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 16, 17)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 15, 16)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 14, 15)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 13, 14)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 12, 13)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 11, 12)); - 
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 10, 11)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 9, 10)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 8, 9)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 7, 8)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 6, 7)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 5, 6)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 4, 5)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 3, 4)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 36864, 2, 3)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 1, 2)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 0, 1)); + planner.AddBuffer(µ_error_reporter, 9216, 3, 4)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 36864, 2, 3)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 1, 2)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 0, 1)); - planner.PrintMemoryPlan(error_reporter); + planner.PrintMemoryPlan(µ_error_reporter); - TF_LITE_MICRO_EXPECT_EQ(false, planner.DoAnyBuffersOverlap(error_reporter)); + TF_LITE_MICRO_EXPECT_EQ(false, + planner.DoAnyBuffersOverlap(µ_error_reporter)); // The sum of all the buffers is 241,027 bytes, so we at least expect the plan // to come up with something smaller than this. 
- TF_LITE_MICRO_EXPECT_GT(241027, planner.GetMaximumMemorySize()); + TF_LITE_MICRO_EXPECT_GT(static_cast(241027), + planner.GetMaximumMemorySize()); } TF_LITE_MICRO_TEST(TestOverlapCase) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; tflite::GreedyMemoryPlanner planner(g_scratch_buffer, kScratchBufferSize); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 100, 0, 1)); + planner.AddBuffer(µ_error_reporter, 100, 0, 1)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 50, 2, 3)); + planner.AddBuffer(µ_error_reporter, 50, 2, 3)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 20, 1, 2)); + planner.AddBuffer(µ_error_reporter, 20, 1, 2)); - planner.PrintMemoryPlan(error_reporter); + planner.PrintMemoryPlan(µ_error_reporter); - TF_LITE_MICRO_EXPECT_EQ(false, planner.DoAnyBuffersOverlap(error_reporter)); + TF_LITE_MICRO_EXPECT_EQ(false, + planner.DoAnyBuffersOverlap(µ_error_reporter)); - TF_LITE_MICRO_EXPECT_EQ(120, planner.GetMaximumMemorySize()); + TF_LITE_MICRO_EXPECT_EQ(static_cast(120), + planner.GetMaximumMemorySize()); } TF_LITE_MICRO_TEST(TestSmallScratch) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; constexpr int scratch_buffer_size = 40; unsigned char scratch_buffer[scratch_buffer_size]; tflite::GreedyMemoryPlanner planner(scratch_buffer, scratch_buffer_size); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 100, 0, 1)); + planner.AddBuffer(µ_error_reporter, 100, 0, 1)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, - planner.AddBuffer(error_reporter, 50, 2, 3)); + planner.AddBuffer(µ_error_reporter, 50, 2, 3)); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc b/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc index 61a914b5e91..f0b50383dfd 100644 --- 
a/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc +++ b/tensorflow/lite/micro/memory_planner/linear_memory_planner_test.cc @@ -21,104 +21,103 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestBasics) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; tflite::LinearMemoryPlanner planner; TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 10, 0, 1)); + planner.AddBuffer(µ_error_reporter, 10, 0, 1)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 20, 1, 2)); - TF_LITE_MICRO_EXPECT_EQ(30, planner.GetMaximumMemorySize()); + planner.AddBuffer(µ_error_reporter, 20, 1, 2)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(30), + planner.GetMaximumMemorySize()); int offset = -1; TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 0, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 0, &offset)); TF_LITE_MICRO_EXPECT_EQ(0, offset); TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, planner.GetOffsetForBuffer(error_reporter, 1, &offset)); + kTfLiteOk, planner.GetOffsetForBuffer(µ_error_reporter, 1, &offset)); TF_LITE_MICRO_EXPECT_EQ(10, offset); } TF_LITE_MICRO_TEST(TestErrorHandling) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; tflite::LinearMemoryPlanner planner; TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 10, 0, 1)); + planner.AddBuffer(µ_error_reporter, 10, 0, 1)); int offset = -1; - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteError, planner.GetOffsetForBuffer(error_reporter, 1, &offset)); + TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, planner.GetOffsetForBuffer( + µ_error_reporter, 1, &offset)); } TF_LITE_MICRO_TEST(TestPersonDetectionModel) { tflite::MicroErrorReporter micro_error_reporter; - tflite::ErrorReporter* error_reporter = µ_error_reporter; tflite::LinearMemoryPlanner planner; + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, 
planner.AddBuffer(µ_error_reporter, 9216, 0, 29)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 0, 29)); + planner.AddBuffer(µ_error_reporter, 3, 28, 29)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 256, 27, 28)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 26, 27)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 25, 26)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 24, 25)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 1152, 23, 24)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 22, 23)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 21, 22)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 20, 21)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 19, 20)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 18, 19)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 17, 18)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 16, 17)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 15, 16)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 14, 15)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 13, 14)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 4608, 12, 13)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 2304, 11, 12)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 9216, 10, 11)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 9216, 9, 10)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - 
planner.AddBuffer(error_reporter, 3, 28, 29)); + planner.AddBuffer(µ_error_reporter, 9216, 8, 9)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 256, 27, 28)); + planner.AddBuffer(µ_error_reporter, 4608, 7, 8)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 6, 7)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 5, 6)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 4, 5)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 26, 27)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 25, 26)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 24, 25)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 1152, 23, 24)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 22, 23)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 21, 22)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 20, 21)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 19, 20)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 18, 19)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 17, 18)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 16, 17)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 15, 16)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 14, 15)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 13, 14)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 12, 13)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 2304, 11, 12)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - 
planner.AddBuffer(error_reporter, 9216, 10, 11)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 9, 10)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 8, 9)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 4608, 7, 8)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 6, 7)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 5, 6)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 4, 5)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 9216, 3, 4)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 36864, 2, 3)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 1, 2)); - TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, - planner.AddBuffer(error_reporter, 18432, 0, 1)); - TF_LITE_MICRO_EXPECT_EQ(241027, planner.GetMaximumMemorySize()); + planner.AddBuffer(µ_error_reporter, 9216, 3, 4)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 36864, 2, 3)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 1, 2)); + TF_LITE_MICRO_EXPECT_EQ( + kTfLiteOk, planner.AddBuffer(µ_error_reporter, 18432, 0, 1)); + TF_LITE_MICRO_EXPECT_EQ(static_cast(241027), + planner.GetMaximumMemorySize()); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/micro_allocator_test.cc b/tensorflow/lite/micro/micro_allocator_test.cc index 6b63c8ceb4f..67da95c3b0a 100644 --- a/tensorflow/lite/micro/micro_allocator_test.cc +++ b/tensorflow/lite/micro/micro_allocator_test.cc @@ -32,9 +32,9 @@ void VerifyMockTensor(TfLiteTensor* tensor, bool is_variable = false) { TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(is_variable, tensor->is_variable); - TF_LITE_MICRO_EXPECT_EQ(4, tensor->bytes); + 
TF_LITE_MICRO_EXPECT_EQ(static_cast(4), tensor->bytes); TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw); - TF_LITE_MICRO_EXPECT_EQ(0, + TF_LITE_MICRO_EXPECT_EQ(static_cast(0), (reinterpret_cast(tensor->data.raw) % kExpectedAlignment)); } @@ -43,14 +43,14 @@ void VerifyMockWeightTensor(TfLiteTensor* tensor) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, tensor->type); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(1, tensor->bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(1), tensor->bytes); TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw); } void EnsureUniqueVariableTensorBuffer(TfLiteContext* context, const int variable_tensor_idx) { - for (int i = 0; i < context->tensors_size; i++) { - if (i != variable_tensor_idx) { + for (size_t i = 0; i < context->tensors_size; i++) { + if (i != static_cast(variable_tensor_idx)) { TF_LITE_MICRO_EXPECT_NE(context->tensors[variable_tensor_idx].data.raw, context->tensors[i].data.raw); } @@ -73,8 +73,6 @@ void VerifyRegistrationAndNodeAllocation( TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) { - const tflite::Model* model = tflite::testing::GetSimpleMockModel(); - TfLiteContext context; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = @@ -93,16 +91,14 @@ TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(400, allocated_tensor.bytes); - TF_LITE_MICRO_EXPECT_EQ(nullptr, allocated_tensor.data.i32); + TF_LITE_MICRO_EXPECT_EQ(static_cast(400), allocated_tensor.bytes); + TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32); TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw, allocated_tensor.allocation_type); simple_allocator->~SimpleMemoryAllocator(); } 
TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) { - const tflite::Model* model = tflite::testing::GetSimpleMockModel(); - TfLiteContext context; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = @@ -122,16 +118,14 @@ TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(400, allocated_tensor.bytes); - TF_LITE_MICRO_EXPECT_EQ(nullptr, allocated_tensor.data.i32); + TF_LITE_MICRO_EXPECT_EQ(static_cast(400), allocated_tensor.bytes); + TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32); TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw, allocated_tensor.allocation_type); simple_allocator->~SimpleMemoryAllocator(); } TF_LITE_MICRO_TEST(TestMissingQuantization) { - const tflite::Model* model = tflite::testing::GetSimpleMockModel(); - TfLiteContext context; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = @@ -151,8 +145,8 @@ TF_LITE_MICRO_TEST(TestMissingQuantization) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(400, allocated_tensor.bytes); - TF_LITE_MICRO_EXPECT_EQ(nullptr, allocated_tensor.data.i32); + TF_LITE_MICRO_EXPECT_EQ(static_cast(400), allocated_tensor.bytes); + TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32); } TF_LITE_MICRO_TEST(TestFailsWhenModelStartsTwice) { @@ -164,7 +158,7 @@ TF_LITE_MICRO_TEST(TestFailsWhenModelStartsTwice) { uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); - TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); + TF_LITE_MICRO_EXPECT(nullptr != allocator); 
TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, &context, op_resolver, &node_and_registration)); @@ -177,12 +171,11 @@ TF_LITE_MICRO_TEST(TestFailsWhenModelFinishesBeforeStart) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); TfLiteContext context; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); - tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); - TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); + TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ(kTfLiteError, allocator->FinishModelAllocation(model, &context)); } @@ -196,14 +189,14 @@ TF_LITE_MICRO_TEST(TestMockModelAllocation) { uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); - TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); + TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, &context, op_resolver, &node_and_registration)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, allocator->FinishModelAllocation(model, &context)); - TF_LITE_MICRO_EXPECT_EQ(4, context.tensors_size); + TF_LITE_MICRO_EXPECT_EQ(static_cast(4), context.tensors_size); // NOTE: Tensor indexes match the values in GetSimpleMockModel(). tflite::testing::VerifyMockTensor(&context.tensors[0]); @@ -251,7 +244,7 @@ TF_LITE_MICRO_TEST(TestAllocationForModelsWithBranches) { // t0 is the first tensor, so place it in offset 0. TF_LITE_MICRO_EXPECT_EQ(0, context.tensors[0].data.uint8 - start); // bytes = 2 * 2 * 3 * sizeof(float32) = 48, same for other tensors. - TF_LITE_MICRO_EXPECT_EQ(48, context.tensors[0].bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(48), context.tensors[0].bytes); // t1 can't reuse any memory, as n0 requires both t0 and t1. 
TF_LITE_MICRO_EXPECT_EQ(96, context.tensors[1].data.uint8 - start); // t2 can't reuse any memory, as n1 requires both t0 and t2. Also n2 requires @@ -274,14 +267,14 @@ TF_LITE_MICRO_TEST(TestAllocationForComplexModelAllocation) { uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); - TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); + TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, &context, op_resolver, &node_and_registration)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, allocator->FinishModelAllocation(model, &context)); - TF_LITE_MICRO_EXPECT_EQ(10, context.tensors_size); + TF_LITE_MICRO_EXPECT_EQ(static_cast(10), context.tensors_size); // NOTE: Tensor indexes match the values in GetComplexMockModel(). tflite::testing::VerifyMockTensor(&context.tensors[0]); @@ -356,7 +349,7 @@ TF_LITE_MICRO_TEST(OfflinePlannerBranchesAllOnline) { // the offsets be should identical to that test. 
uint8_t* start = context.tensors[0].data.uint8; TF_LITE_MICRO_EXPECT_EQ(0, context.tensors[0].data.uint8 - start); - TF_LITE_MICRO_EXPECT_EQ(48, context.tensors[0].bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(48), context.tensors[0].bytes); TF_LITE_MICRO_EXPECT_EQ(96, context.tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, context.tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, context.tensors[3].data.uint8 - start); @@ -464,7 +457,7 @@ TF_LITE_MICRO_TEST(OfflinePlannerOverlappingAllocation) { TF_LITE_MICRO_EXPECT_EQ(0, context.tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, context.tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, context.tensors[3].data.uint8 - start); - TF_LITE_MICRO_EXPECT_EQ(48, context.tensors[0].bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(48), context.tensors[0].bytes); } TF_LITE_MICRO_TEST(OfflinePlannerOfflineOnline) { @@ -562,21 +555,21 @@ TF_LITE_MICRO_TEST(TestAllocateTfLiteTensorWithReset) { uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(arena, arena_size, micro_test::reporter); - TF_LITE_MICRO_EXPECT_NE(allocator, nullptr); + TF_LITE_MICRO_EXPECT(allocator != nullptr); TfLiteTensor* tensor1 = allocator->AllocateTfLiteTensor(model, /*subgraph_idx=*/1); - TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr); + TF_LITE_MICRO_EXPECT(tensor1 != nullptr); allocator->ResetTempAllocations(); TfLiteTensor* tensor2 = allocator->AllocateTfLiteTensor(model, /*subgraph_idx=*/2); - TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr); + TF_LITE_MICRO_EXPECT(tensor1 != nullptr); // The address of tensor2 should be equal than the address of tensor1 since // allocations were not chained: - TF_LITE_MICRO_EXPECT_EQ(tensor2, tensor1); + TF_LITE_MICRO_EXPECT(tensor2 == tensor1); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/micro_interpreter_test.cc b/tensorflow/lite/micro/micro_interpreter_test.cc index a5be011e2f0..21c7e935f17 100644 --- 
a/tensorflow/lite/micro/micro_interpreter_test.cc +++ b/tensorflow/lite/micro/micro_interpreter_test.cc @@ -82,15 +82,15 @@ TF_LITE_MICRO_TEST(TestInterpreter) { micro_test::reporter); TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100); - TF_LITE_MICRO_EXPECT_EQ(1, interpreter.inputs_size()); - TF_LITE_MICRO_EXPECT_EQ(2, interpreter.outputs_size()); + TF_LITE_MICRO_EXPECT_EQ(static_cast(1), interpreter.inputs_size()); + TF_LITE_MICRO_EXPECT_EQ(static_cast(2), interpreter.outputs_size()); TfLiteTensor* input = interpreter.input(0); TF_LITE_MICRO_EXPECT_NE(nullptr, input); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input->type); TF_LITE_MICRO_EXPECT_EQ(1, input->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(4, input->bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(4), input->bytes); TF_LITE_MICRO_EXPECT_NE(nullptr, input->data.i32); input->data.i32[0] = 21; @@ -101,7 +101,7 @@ TF_LITE_MICRO_TEST(TestInterpreter) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type); TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(4, output->bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(4), output->bytes); TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32); TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]); @@ -110,7 +110,7 @@ TF_LITE_MICRO_TEST(TestInterpreter) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type); TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]); - TF_LITE_MICRO_EXPECT_EQ(4, output->bytes); + TF_LITE_MICRO_EXPECT_EQ(static_cast(4), output->bytes); TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32); TF_LITE_MICRO_EXPECT_EQ(42, output->data.i32[0]); @@ -133,8 +133,8 @@ TF_LITE_MICRO_TEST(TestKernelMemoryPlanning) { allocator_buffer_size, micro_test::reporter); TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); - 
TF_LITE_MICRO_EXPECT_EQ(1, interpreter.inputs_size()); - TF_LITE_MICRO_EXPECT_EQ(2, interpreter.outputs_size()); + TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size()); + TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(2), interpreter.outputs_size()); TfLiteTensor* input = interpreter.input(0); TF_LITE_MICRO_EXPECT_EQ(1, input->dims->size); @@ -177,8 +177,8 @@ TF_LITE_MICRO_TEST(TestVariableTensorReset) { micro_test::reporter); TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk); TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 2096 + 100); - TF_LITE_MICRO_EXPECT_EQ(1, interpreter.inputs_size()); - TF_LITE_MICRO_EXPECT_EQ(1, interpreter.outputs_size()); + TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size()); + TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size()); // Assign hard-code values: for (size_t i = 0; i < interpreter.tensors_size(); ++i) { @@ -306,25 +306,28 @@ TF_LITE_MICRO_TEST(TestIncompleteInitializationAllocationsWithSmallArena) { // Ensure allocations are zero (ignore tail since some internal structs are // initialized with this space): TF_LITE_MICRO_EXPECT_EQ( - 0, allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes()); + static_cast<size_t>(0), + allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes()); TF_LITE_MICRO_EXPECT_EQ( - 0, allocator - ->GetRecordedAllocation( - tflite::RecordedAllocationType::kTfLiteTensorArray) - .used_bytes); + static_cast<size_t>(0), + allocator + ->GetRecordedAllocation( + tflite::RecordedAllocationType::kTfLiteTensorArray) + .used_bytes); TF_LITE_MICRO_EXPECT_EQ( - 0, allocator - ->GetRecordedAllocation(tflite::RecordedAllocationType:: - kTfLiteTensorArrayQuantizationData) - .used_bytes); + static_cast<size_t>(0), + allocator + ->GetRecordedAllocation(tflite::RecordedAllocationType:: + kTfLiteTensorArrayQuantizationData) + .used_bytes); TF_LITE_MICRO_EXPECT_EQ( - 0, + static_cast<size_t>(0), allocator ->GetRecordedAllocation( tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData)
.used_bytes); TF_LITE_MICRO_EXPECT_EQ( - 0, + static_cast<size_t>(0), allocator->GetRecordedAllocation(tflite::RecordedAllocationType::kOpData) .used_bytes); } @@ -349,20 +352,22 @@ TF_LITE_MICRO_TEST(TestInterpreterDoesNotAllocateUntilInvoke) { // Ensure allocations are zero (ignore tail since some internal structs are // initialized with this space): TF_LITE_MICRO_EXPECT_EQ( - 0, allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes()); + static_cast<size_t>(0), + allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes()); TF_LITE_MICRO_EXPECT_EQ( - 0, allocator - ->GetRecordedAllocation( - tflite::RecordedAllocationType::kTfLiteTensorArray) - .used_bytes); + static_cast<size_t>(0), + allocator + ->GetRecordedAllocation( + tflite::RecordedAllocationType::kTfLiteTensorArray) + .used_bytes); TF_LITE_MICRO_EXPECT_EQ( - 0, + static_cast<size_t>(0), allocator ->GetRecordedAllocation( tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData) .used_bytes); TF_LITE_MICRO_EXPECT_EQ( - 0, + static_cast<size_t>(0), allocator->GetRecordedAllocation(tflite::RecordedAllocationType::kOpData) .used_bytes); @@ -372,28 +377,29 @@ TF_LITE_MICRO_TEST(TestInterpreterDoesNotAllocateUntilInvoke) { // Allocation sizes vary based on platform - check that allocations are now // non-zero: TF_LITE_MICRO_EXPECT_GT( - allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes(), 0); + allocator->GetSimpleMemoryAllocator()->GetHeadUsedBytes(), + static_cast<size_t>(0)); TF_LITE_MICRO_EXPECT_GT( allocator ->GetRecordedAllocation( tflite::RecordedAllocationType::kTfLiteTensorArray) .used_bytes, - 0); + static_cast<size_t>(0)); TF_LITE_MICRO_EXPECT_GT( allocator ->GetRecordedAllocation( tflite::RecordedAllocationType::kTfLiteTensorVariableBufferData) .used_bytes, - 0); + static_cast<size_t>(0)); // TODO(b/160160549): This check is mostly meaningless right now because the - // operator creation in our mock models is inconsistent. Revisit what this + // operator creation in our mock models is inconsistent. 
Revisit what this // check should be once the mock models are properly created. TF_LITE_MICRO_EXPECT_EQ( allocator->GetRecordedAllocation(tflite::RecordedAllocationType::kOpData) .used_bytes, - 0); + static_cast<size_t>(0)); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/micro_mutable_op_resolver_test.cc b/tensorflow/lite/micro/micro_mutable_op_resolver_test.cc index fe9c8de5959..efe41ff4e2f 100644 --- a/tensorflow/lite/micro/micro_mutable_op_resolver_test.cc +++ b/tensorflow/lite/micro/micro_mutable_op_resolver_test.cc @@ -65,8 +65,11 @@ TF_LITE_MICRO_TEST(TestOperations) { using tflite::MicroMutableOpResolver; using tflite::OpResolver; - static TfLiteRegistration r = {tflite::MockInit, tflite::MockFree, - tflite::MockPrepare, tflite::MockInvoke}; + static TfLiteRegistration r = {}; + r.init = tflite::MockInit; + r.free = tflite::MockFree; + r.prepare = tflite::MockPrepare; + r.invoke = tflite::MockInvoke; MicroMutableOpResolver<1> micro_op_resolver; TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, @@ -78,20 +81,21 @@ TF_LITE_MICRO_TEST(TestOperations) { tflite::MicroOpResolver* resolver = &micro_op_resolver; - TF_LITE_MICRO_EXPECT_EQ(1, micro_op_resolver.GetRegistrationLength()); + TF_LITE_MICRO_EXPECT_EQ(static_cast<unsigned int>(1), + micro_op_resolver.GetRegistrationLength()); const TfLiteRegistration* registration = resolver->FindOp(BuiltinOperator_RELU); - TF_LITE_MICRO_EXPECT_EQ(nullptr, registration); + TF_LITE_MICRO_EXPECT(nullptr == registration); registration = resolver->FindOp("mock_custom"); - TF_LITE_MICRO_EXPECT_NE(nullptr, registration); - TF_LITE_MICRO_EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0)); + TF_LITE_MICRO_EXPECT(nullptr != registration); + TF_LITE_MICRO_EXPECT(nullptr == registration->init(nullptr, nullptr, 0)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr)); registration = resolver->FindOp("nonexistent_custom"); - TF_LITE_MICRO_EXPECT_EQ(nullptr, 
registration); + TF_LITE_MICRO_EXPECT(nullptr == registration); } TF_LITE_MICRO_TEST(TestErrorReporting) { @@ -99,8 +103,11 @@ TF_LITE_MICRO_TEST(TestErrorReporting) { using tflite::BuiltinOperator_RELU; using tflite::MicroMutableOpResolver; - static TfLiteRegistration r = {tflite::MockInit, tflite::MockFree, - tflite::MockPrepare, tflite::MockInvoke}; + static TfLiteRegistration r = {}; + r.init = tflite::MockInit; + r.free = tflite::MockFree; + r.prepare = tflite::MockPrepare; + r.invoke = tflite::MockInvoke; tflite::MockErrorReporter mock_reporter; MicroMutableOpResolver<1> micro_op_resolver(&mock_reporter); diff --git a/tensorflow/lite/micro/micro_string_test.cc b/tensorflow/lite/micro/micro_string_test.cc index fb8183bb492..400f908f97f 100644 --- a/tensorflow/lite/micro/micro_string_test.cc +++ b/tensorflow/lite/micro/micro_string_test.cc @@ -24,7 +24,7 @@ TF_LITE_MICRO_TEST(FormatPositiveIntShouldMatchExpected) { char buffer[kBufferLen]; const char golden[] = "Int: 55"; int bytes_written = MicroSnprintf(buffer, kBufferLen, "Int: %d", 55); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -33,7 +33,7 @@ TF_LITE_MICRO_TEST(FormatNegativeIntShouldMatchExpected) { char buffer[kBufferLen]; const char golden[] = "Int: -55"; int bytes_written = MicroSnprintf(buffer, kBufferLen, "Int: %d", -55); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -42,7 +42,7 @@ TF_LITE_MICRO_TEST(FormatUnsignedIntShouldMatchExpected) { char buffer[kBufferLen]; const char golden[] = "UInt: 12345"; int bytes_written = MicroSnprintf(buffer, kBufferLen, "UInt: %u", 12345); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); 
TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -51,7 +51,7 @@ TF_LITE_MICRO_TEST(FormatHexShouldMatchExpected) { char buffer[kBufferLen]; const char golden[] = "Hex: 0x12345"; int bytes_written = MicroSnprintf(buffer, kBufferLen, "Hex: %x", 0x12345); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -59,8 +59,8 @@ TF_LITE_MICRO_TEST(FormatFloatShouldMatchExpected) { const int kBufferLen = 32; char buffer[kBufferLen]; const char golden[] = "Float: 1.0*2^4"; - int bytes_written = MicroSnprintf(buffer, kBufferLen, "Float: %f", 16.f); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + int bytes_written = MicroSnprintf(buffer, kBufferLen, "Float: %f", 16.); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -70,7 +70,7 @@ TF_LITE_MICRO_TEST(BadlyFormattedStringShouldProduceReasonableString) { const char golden[] = "Test Badly % formated % string"; int bytes_written = MicroSnprintf(buffer, kBufferLen, "Test Badly %% formated %% string%"); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -79,7 +79,7 @@ TF_LITE_MICRO_TEST(IntFormatOverrunShouldTruncate) { char buffer[kBufferLen]; const char golden[] = "Int: "; int bytes_written = MicroSnprintf(buffer, kBufferLen, "Int: %d", 12345); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -88,7 +88,7 @@ TF_LITE_MICRO_TEST(UnsignedIntFormatOverrunShouldTruncate) { char buffer[kBufferLen]; const char golden[] = "UInt: "; int bytes_written = MicroSnprintf(buffer, kBufferLen, "UInt: %u", 12345); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + 
TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -97,7 +97,7 @@ TF_LITE_MICRO_TEST(HexFormatOverrunShouldTruncate) { char buffer[kBufferLen]; const char golden[] = "Hex: "; int bytes_written = MicroSnprintf(buffer, kBufferLen, "Hex: %x", 0x12345); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -105,8 +105,8 @@ TF_LITE_MICRO_TEST(FloatFormatOverrunShouldTruncate) { const int kBufferLen = 12; char buffer[kBufferLen]; const char golden[] = "Float: "; - int bytes_written = MicroSnprintf(buffer, kBufferLen, "Float: %x", 12345.f); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + int bytes_written = MicroSnprintf(buffer, kBufferLen, "Float: %x", 12345.); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -115,9 +115,8 @@ TF_LITE_MICRO_TEST(FloatFormatShouldPrintFractionCorrectly) { char buffer[kBufferLen]; const char golden[] = "Float: 1.0625*2^0"; // Add small offset to float value to account for float rounding error. 
- int bytes_written = - MicroSnprintf(buffer, kBufferLen, "Float: %f", 1.0625001f); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + int bytes_written = MicroSnprintf(buffer, kBufferLen, "Float: %f", 1.0625001); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -127,7 +126,7 @@ TF_LITE_MICRO_TEST(StringFormatOverrunShouldTruncate) { const char golden[] = "String: h"; int bytes_written = MicroSnprintf(buffer, kBufferLen, "String: %s", "hello world"); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } @@ -136,7 +135,7 @@ TF_LITE_MICRO_TEST(StringFormatWithExactOutputSizeOverrunShouldTruncate) { char buffer[kBufferLen]; const char golden[] = "format st"; int bytes_written = MicroSnprintf(buffer, kBufferLen, "format str"); - TF_LITE_MICRO_EXPECT_EQ(sizeof(golden), bytes_written); + TF_LITE_MICRO_EXPECT_EQ(static_cast<int>(sizeof(golden)), bytes_written); TF_LITE_MICRO_EXPECT_STRING_EQ(golden, buffer); } diff --git a/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc b/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc index 8fc4745a70e..16dbdb74437 100644 --- a/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc +++ b/tensorflow/lite/micro/recording_simple_memory_allocator_test.cc @@ -30,15 +30,19 @@ TF_LITE_MICRO_TEST(TestRecordsTailAllocations) { uint8_t* result = allocator.AllocateFromTail(/*size=*/10, /*alignment=*/1); TF_LITE_MICRO_EXPECT_NE(result, nullptr); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), 10); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 10); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 1); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast<size_t>(10)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(10)); + 
TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(1)); result = allocator.AllocateFromTail(/*size=*/20, /*alignment=*/1); TF_LITE_MICRO_EXPECT_NE(result, nullptr); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), 30); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 30); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 2); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast<size_t>(30)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(30)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(2)); } TF_LITE_MICRO_TEST(TestRecordsMisalignedTailAllocations) { @@ -50,10 +54,12 @@ TF_LITE_MICRO_TEST(TestRecordsMisalignedTailAllocations) { uint8_t* result = allocator.AllocateFromTail(/*size=*/10, /*alignment=*/12); TF_LITE_MICRO_EXPECT_NE(result, nullptr); // Validate used bytes in 8 byte range that can included alignment of 12: - TF_LITE_MICRO_EXPECT_GE(allocator.GetUsedBytes(), 10); - TF_LITE_MICRO_EXPECT_LE(allocator.GetUsedBytes(), 20); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 10); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 1); + TF_LITE_MICRO_EXPECT_GE(allocator.GetUsedBytes(), static_cast<size_t>(10)); + TF_LITE_MICRO_EXPECT_LE(allocator.GetUsedBytes(), static_cast<size_t>(20)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(10)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(1)); } TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) { @@ -63,10 +69,12 @@ TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) { arena_size); uint8_t* result = allocator.AllocateFromTail(/*size=*/2048, /*alignment=*/1); - TF_LITE_MICRO_EXPECT_EQ(result, nullptr); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), 0); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 0); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 0); + TF_LITE_MICRO_EXPECT(result == nullptr); + 
TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast<size_t>(0)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(0)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(0)); } TF_LITE_MICRO_TEST(TestRecordsHeadAllocations) { @@ -77,15 +85,19 @@ TF_LITE_MICRO_TEST(TestRecordsHeadAllocations) { uint8_t* result = allocator.AllocateFromHead(/*size=*/5, /*alignment=*/1); TF_LITE_MICRO_EXPECT_NE(result, nullptr); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), 5); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 5); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 1); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast<size_t>(5)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(5)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(1)); result = allocator.AllocateFromTail(/*size=*/15, /*alignment=*/1); TF_LITE_MICRO_EXPECT_NE(result, nullptr); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), 20); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 20); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 2); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast<size_t>(20)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(20)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(2)); } TF_LITE_MICRO_TEST(TestRecordsMisalignedHeadAllocations) { @@ -97,10 +109,12 @@ TF_LITE_MICRO_TEST(TestRecordsMisalignedHeadAllocations) { uint8_t* result = allocator.AllocateFromHead(/*size=*/10, /*alignment=*/12); TF_LITE_MICRO_EXPECT_NE(result, nullptr); // Validate used bytes in 8 byte range that can included alignment of 12: - TF_LITE_MICRO_EXPECT_GE(allocator.GetUsedBytes(), 10); - TF_LITE_MICRO_EXPECT_LE(allocator.GetUsedBytes(), 20); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 10); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 1); + TF_LITE_MICRO_EXPECT_GE(allocator.GetUsedBytes(), 
static_cast<size_t>(10)); + TF_LITE_MICRO_EXPECT_LE(allocator.GetUsedBytes(), static_cast<size_t>(20)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(10)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(1)); } TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) { @@ -110,10 +124,12 @@ TF_LITE_MICRO_TEST(TestDoesNotRecordFailedTailAllocations) { arena_size); uint8_t* result = allocator.AllocateFromHead(/*size=*/2048, /*alignment=*/1); - TF_LITE_MICRO_EXPECT_EQ(result, nullptr); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), 0); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), 0); - TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), 0); + TF_LITE_MICRO_EXPECT(result == nullptr); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetUsedBytes(), static_cast<size_t>(0)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetRequestedBytes(), + static_cast<size_t>(0)); + TF_LITE_MICRO_EXPECT_EQ(allocator.GetAllocatedCount(), + static_cast<size_t>(0)); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/simple_memory_allocator_test.cc b/tensorflow/lite/micro/simple_memory_allocator_test.cc index d9ee979d5b0..ef97089b00b 100644 --- a/tensorflow/lite/micro/simple_memory_allocator_test.cc +++ b/tensorflow/lite/micro/simple_memory_allocator_test.cc @@ -29,7 +29,7 @@ TF_LITE_MICRO_TEST(TestJustFits) { arena_size); uint8_t* result = allocator.AllocateFromTail(arena_size, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, result); + TF_LITE_MICRO_EXPECT(nullptr != result); } TF_LITE_MICRO_TEST(TestAligned) { @@ -39,11 +39,12 @@ TF_LITE_MICRO_TEST(TestAligned) { arena_size); uint8_t* result = allocator.AllocateFromTail(1, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, result); + TF_LITE_MICRO_EXPECT(nullptr != result); result = allocator.AllocateFromTail(16, 4); - TF_LITE_MICRO_EXPECT_NE(nullptr, result); - TF_LITE_MICRO_EXPECT_EQ(0, reinterpret_cast<size_t>(result) & 3); + TF_LITE_MICRO_EXPECT(nullptr != result); + TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(0), + reinterpret_cast<size_t>(result) & 3); } 
TF_LITE_MICRO_TEST(TestMultipleTooLarge) { @@ -53,10 +54,10 @@ TF_LITE_MICRO_TEST(TestMultipleTooLarge) { arena_size); uint8_t* result = allocator.AllocateFromTail(768, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, result); + TF_LITE_MICRO_EXPECT(nullptr != result); result = allocator.AllocateFromTail(768, 1); - TF_LITE_MICRO_EXPECT_EQ(nullptr, result); + TF_LITE_MICRO_EXPECT(nullptr == result); } TF_LITE_MICRO_TEST(TestTempAllocations) { @@ -66,10 +67,10 @@ TF_LITE_MICRO_TEST(TestTempAllocations) { arena_size); uint8_t* temp1 = allocator.AllocateTemp(100, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, temp1); + TF_LITE_MICRO_EXPECT(nullptr != temp1); uint8_t* temp2 = allocator.AllocateTemp(100, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, temp2); + TF_LITE_MICRO_EXPECT(nullptr != temp2); // Expect that the next micro allocation is 100 bytes away from each other. TF_LITE_MICRO_EXPECT_EQ(temp2 - temp1, 100); @@ -82,12 +83,12 @@ TF_LITE_MICRO_TEST(TestResetTempAllocations) { arena_size); uint8_t* temp1 = allocator.AllocateTemp(100, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, temp1); + TF_LITE_MICRO_EXPECT(nullptr != temp1); allocator.ResetTempAllocations(); uint8_t* temp2 = allocator.AllocateTemp(100, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, temp2); + TF_LITE_MICRO_EXPECT(nullptr != temp2); // Reset temp allocations should have the same start address: TF_LITE_MICRO_EXPECT_EQ(temp2 - temp1, 0); @@ -100,21 +101,21 @@ TF_LITE_MICRO_TEST(TestAllocateHeadWithoutResettingTemp) { arena_size); uint8_t* temp = allocator.AllocateTemp(100, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, temp); + TF_LITE_MICRO_EXPECT(nullptr != temp); // Allocation should be null since temp allocation was not followed by a call // to ResetTempAllocations(). 
uint8_t* head = allocator.AllocateFromHead(100, 1); - TF_LITE_MICRO_EXPECT_EQ(nullptr, head); + TF_LITE_MICRO_EXPECT(nullptr == head); allocator.ResetTempAllocations(); head = allocator.AllocateFromHead(100, 1); - TF_LITE_MICRO_EXPECT_NE(nullptr, head); + TF_LITE_MICRO_EXPECT(nullptr != head); // The most recent head allocation should be in the same location as the // original temp allocation pointer. - TF_LITE_MICRO_EXPECT_EQ(temp, head); + TF_LITE_MICRO_EXPECT(temp == head); } // TODO(b/161171251): Add more coverage to this test - specifically around -1 diff --git a/tensorflow/lite/micro/testing/micro_test.bzl b/tensorflow/lite/micro/testing/micro_test.bzl index 532a1a16ac6..5e1a56fdc48 100644 --- a/tensorflow/lite/micro/testing/micro_test.bzl +++ b/tensorflow/lite/micro/testing/micro_test.bzl @@ -1,5 +1,10 @@ """Rules for simple testing without dependencies by parsing output logs.""" +load( + "//tensorflow/lite/micro:build_def.bzl", + "micro_copts", +) + def tflite_micro_cc_test( name, size = "medium", @@ -7,7 +12,7 @@ def tflite_micro_cc_test( srcs = [], includes = [], defines = [], - copts = ["-Werror", "-Wno-unused-variable"], + copts = micro_copts(), nocopts = "", linkopts = [], deps = [], diff --git a/tensorflow/lite/micro/testing/micro_test.h b/tensorflow/lite/micro/testing/micro_test.h index 95796e64ff1..d74d8f4f1a6 100644 --- a/tensorflow/lite/micro/testing/micro_test.h +++ b/tensorflow/lite/micro/testing/micro_test.h @@ -110,13 +110,16 @@ extern tflite::ErrorReporter* reporter; } \ } while (false) +// TODO(b/139142772): this macro is used with types other than ints even though +// the printf specifier is %d. 
#define TF_LITE_MICRO_EXPECT_EQ(x, y) \ do { \ auto vx = x; \ auto vy = y; \ if ((vx) != (vy)) { \ micro_test::reporter->Report(#x " == " #y " failed at %s:%d (%d vs %d)", \ - __FILE__, __LINE__, (vx), (vy)); \ + __FILE__, __LINE__, static_cast<int>(vx), \ + static_cast<int>(vy)); \ micro_test::did_test_fail = true; \ } \ } while (false) @@ -147,17 +150,18 @@ extern tflite::ErrorReporter* reporter; } \ } while (false) -#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ - do { \ - auto vx = (x); \ - auto vy = (y); \ - auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \ - if (delta > epsilon) { \ - micro_test::reporter->Report( \ - #x " (%f) near " #y " (%f) failed at %s:%d", static_cast<float>(vx), \ - static_cast<float>(vy), __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ + do { \ + auto vx = (x); \ + auto vy = (y); \ + auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \ + if (delta > epsilon) { \ + micro_test::reporter->Report( \ + #x " (%f) near " #y " (%f) failed at %s:%d", \ + static_cast<float>(vx), static_cast<float>(vy), __FILE__, \ + __LINE__); \ + micro_test::did_test_fail = true; \ + } \ } while (false) #define TF_LITE_MICRO_EXPECT_GT(x, y) \ diff --git a/tensorflow/lite/micro/testing/test_utils.h b/tensorflow/lite/micro/testing/test_utils.h index 0165cbb707a..053c4417f52 100644 --- a/tensorflow/lite/micro/testing/test_utils.h +++ b/tensorflow/lite/micro/testing/test_utils.h @@ -53,8 +53,9 @@ inline float MinFromZeroPointScale(const int zero_point, const float scale) { // Derives the quantization scaling factor from a min and max range. template <typename T> inline float ScaleFromMinMax(const float min, const float max) { - return (max - min) / ((std::numeric_limits<T>::max() * 1.0) - - std::numeric_limits<T>::min()); + return (max - min) / + static_cast<float>((std::numeric_limits<T>::max() * 1.0) - + std::numeric_limits<T>::min()); } // Derives the quantization zero point from a min and max range. 
diff --git a/tensorflow/lite/micro/testing/util_test.cc b/tensorflow/lite/micro/testing/util_test.cc index f4eb28e121a..261e9f29a25 100644 --- a/tensorflow/lite/micro/testing/util_test.cc +++ b/tensorflow/lite/micro/testing/util_test.cc @@ -21,10 +21,10 @@ TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(ArgumentsExecutedOnlyOnce) { float count = 0.; // Make sure either argument is executed once after macro expansion. - TF_LITE_MICRO_EXPECT_NEAR(0, count++, 0.1); - TF_LITE_MICRO_EXPECT_NEAR(1, count++, 0.1); - TF_LITE_MICRO_EXPECT_NEAR(count++, 2, 0.1); - TF_LITE_MICRO_EXPECT_NEAR(count++, 3, 0.1); + TF_LITE_MICRO_EXPECT_NEAR(0, count++, 0.1f); + TF_LITE_MICRO_EXPECT_NEAR(1, count++, 0.1f); + TF_LITE_MICRO_EXPECT_NEAR(count++, 2, 0.1f); + TF_LITE_MICRO_EXPECT_NEAR(count++, 3, 0.1f); } TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/testing_helpers_test.cc b/tensorflow/lite/micro/testing_helpers_test.cc index 710ca2a4a9e..885bd873b53 100644 --- a/tensorflow/lite/micro/testing_helpers_test.cc +++ b/tensorflow/lite/micro/testing_helpers_test.cc @@ -33,7 +33,7 @@ TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) { pre_quantized, quantized, dims, input_scale, weight_scale); TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t)); - TF_LITE_MICRO_EXPECT_EQ(result.dims, dims); + TF_LITE_MICRO_EXPECT(result.dims == dims); TF_LITE_MICRO_EXPECT_EQ(result.params.scale, input_scale * weight_scale); for (int i = 0; i < tensor_size; i++) { TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]); @@ -66,7 +66,7 @@ TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) { } TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t)); - TF_LITE_MICRO_EXPECT_EQ(result.dims, dims); + TF_LITE_MICRO_EXPECT(result.dims == dims); for (int i = 0; i < tensor_size; i++) { TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]); } @@ -92,7 +92,7 @@ TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) { pre_quantized, 
quantized, dims, scales, zero_points, &quant, 0); TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int8_t)); - TF_LITE_MICRO_EXPECT_EQ(result.dims, dims); + TF_LITE_MICRO_EXPECT(result.dims == dims); TfLiteFloatArray* result_scales = static_cast<TfLiteAffineQuantization*>(result.quantization.params)->scale; for (int i = 0; i < channels; i++) {