Optionally strip error message strings to reduce binary size

This initial change is limited to framework code. I'll follow up with a version that stubs out kernel error reporting calls too.

PiperOrigin-RevId: 294713603
Change-Id: Iaa7ee8be0e43e401b86de009287cc6d2d00ef91c
This commit is contained in:
Pete Warden 2020-02-12 11:28:44 -08:00 committed by TensorFlower Gardener
parent 56fc8fcc44
commit 6b161482c9
66 changed files with 546 additions and 374 deletions
tensorflow/lite
allocation.cc
core/api
g3doc/microcontrollers
micro
model.cc
tools

View File

@ -98,7 +98,8 @@ MemoryAllocation::MemoryAllocation(const void* ptr, size_t num_bytes,
//
// Note that 64-bit ARM may also suffer a performance impact, but no crash -
// that case is not checked.
error_reporter->Report("The supplied buffer is not 4-bytes aligned");
TF_LITE_REPORT_ERROR(error_reporter,
"The supplied buffer is not 4-bytes aligned");
buffer_ = nullptr;
buffer_size_bytes_ = 0;
return;

View File

@ -42,4 +42,18 @@ class ErrorReporter {
} // namespace tflite
// You should not make bare calls to the error reporter, instead use the
// TF_LITE_REPORT_ERROR macro, since this allows message strings to be
// stripped when the binary size has to be optimized. If you are looking to
// reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and
// every call will be stubbed out, taking no memory.
#ifndef TF_LITE_STRIP_ERROR_STRINGS
// Parenthesize `reporter` so callers may pass any pointer-valued expression
// (e.g. `&mock_reporter`): without the parentheses, `->` binds tighter than
// unary `&` and the expansion mis-parses. The do/while(false) wrapper makes
// the macro a single statement that is safe inside unbraced if/else bodies.
#define TF_LITE_REPORT_ERROR(reporter, ...) \
  do {                                      \
    (reporter)->Report(__VA_ARGS__);        \
  } while (false)
#else  // TF_LITE_STRIP_ERROR_STRINGS
// When stripping is enabled the macro expands to nothing, so the format
// strings never make it into the binary, saving read-only data space.
#define TF_LITE_REPORT_ERROR(reporter, ...)
#endif  // TF_LITE_STRIP_ERROR_STRINGS
#endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_

View File

@ -23,6 +23,7 @@ namespace tflite {
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() { buffer_[0] = 0; }
int Report(const char* format, va_list args) override {
vsnprintf(buffer_, kBufferSize, format, args);
return 0;
@ -41,6 +42,22 @@ TEST(ErrorReporter, TestReport) {
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
}
// Verifies that TF_LITE_REPORT_ERROR forwards its format arguments to the
// reporter when strings are kept, and expands to nothing (leaving the buffer
// empty) when TF_LITE_STRIP_ERROR_STRINGS is defined.
TEST(ErrorReporter, TestReportMacro) {
MockErrorReporter mock_reporter;
// Only define the reporter if it's used, to avoid warnings.
#ifndef TF_LITE_STRIP_ERROR_STRINGS
ErrorReporter* reporter = &mock_reporter;
#endif  // TF_LITE_STRIP_ERROR_STRINGS
TF_LITE_REPORT_ERROR(reporter, "Error: %d", 23);
#ifndef TF_LITE_STRIP_ERROR_STRINGS
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
#else  // TF_LITE_STRIP_ERROR_STRINGS
// With stripping enabled the macro emitted nothing, so no message was logged.
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), ""));
#endif  // TF_LITE_STRIP_ERROR_STRINGS
}
} // namespace tflite
int main(int argc, char** argv) {

View File

@ -64,13 +64,15 @@ TfLiteStatus FlatBufferIntVectorToArray(
int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
int* buffer, ErrorReporter* error_reporter, const char* op_name) {
if (!flat_vector) {
error_reporter->Report("Input array not provided for operation '%s'.\n",
op_name);
TF_LITE_REPORT_ERROR(error_reporter,
"Input array not provided for operation '%s'.\n",
op_name);
return kTfLiteError;
} else {
size_t num_dimensions = flat_vector->size();
if (num_dimensions > max_size_of_buffer / sizeof(int)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Found too many dimensions in the input array of operation '%s'.\n",
op_name);
return kTfLiteError;
@ -121,7 +123,8 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
break;
}
if (*type == kTfLiteNoType) {
error_reporter->Report("Unsupported data type %d in tensor\n", tensor_type);
TF_LITE_REPORT_ERROR(error_reporter, "Unsupported data type %d in tensor\n",
tensor_type);
return kTfLiteError;
}
return kTfLiteOk;
@ -329,7 +332,8 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
break;
default:
error_reporter->Report("Unhandled fully-connected weights format.");
TF_LITE_REPORT_ERROR(error_reporter,
"Unhandled fully-connected weights format.");
return kTfLiteError;
}
}
@ -431,12 +435,14 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
params->kernel_type = kTfLiteLSTMBasicKernel;
break;
default:
error_reporter->Report("Unhandled LSTM kernel type: %d",
lstm_params->kernel_type());
TF_LITE_REPORT_ERROR(error_reporter,
"Unhandled LSTM kernel type: %d",
lstm_params->kernel_type());
return kTfLiteError;
}
} else {
error_reporter->Report("No valid LSTM builtin options exist");
TF_LITE_REPORT_ERROR(error_reporter,
"No valid LSTM builtin options exist");
return kTfLiteError;
}
*builtin_data = reinterpret_cast<void*>(params.release());
@ -669,7 +675,8 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
}
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
error_reporter->Report("DELEGATE op shouldn't exist in model.");
TF_LITE_REPORT_ERROR(error_reporter,
"DELEGATE op shouldn't exist in model.");
return kTfLiteError;
}
case BuiltinOperator_FAKE_QUANT: {

View File

@ -27,7 +27,8 @@ TfLiteStatus GetRegistrationFromOpCode(
if (builtin_code > BuiltinOperator_MAX ||
builtin_code < BuiltinOperator_MIN) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Op builtin_code out of range: %d. Are you using old TFLite binary "
"with newer model?",
builtin_code);
@ -35,13 +36,15 @@ TfLiteStatus GetRegistrationFromOpCode(
} else if (builtin_code != BuiltinOperator_CUSTOM) {
*registration = op_resolver.FindOp(builtin_code, version);
if (*registration == nullptr) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Didn't find op for builtin opcode '%s' version '%d'\n",
EnumNameBuiltinOperator(builtin_code), version);
status = kTfLiteError;
}
} else if (!opcode->custom_code()) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Operator with CUSTOM builtin_code has no custom_code.\n");
status = kTfLiteError;
} else {

View File

@ -171,7 +171,7 @@ model to ensure its schema version is compatible with the version we are using:
```C++
const tflite::Model* model = ::tflite::GetModel(g_sine_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
@ -284,7 +284,7 @@ instance:
```C++
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
```

View File

@ -43,5 +43,5 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value,
analogWrite(led, brightness);
// Log the current brightness value for display in the Arduino plotter
error_reporter->Report("%d\n", brightness);
TF_LITE_REPORT_ERROR(error_reporter, "%d\n", brightness);
}

View File

@ -55,7 +55,8 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value,
}
// Log the current X and Y values
error_reporter->Report("x_value: %f, y_value: %f\n", x_value, y_value);
TF_LITE_REPORT_ERROR(error_reporter, "x_value: %f, y_value: %f\n", x_value,
y_value);
// Clear the previous drawing
lcd.Clear(background_color);

View File

@ -33,10 +33,10 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) {
// copying or parsing, it's a very lightweight operation.
const tflite::Model* model = ::tflite::GetModel(g_sine_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
}
// This pulls in all the operation implementations we need

View File

@ -51,10 +51,10 @@ void setup() {
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_sine_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
@ -70,7 +70,7 @@ void setup() {
// Allocate memory from the tensor_arena for the model's tensors.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
error_reporter->Report("AllocateTensors() failed");
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
return;
}
@ -98,8 +98,8 @@ void loop() {
// Run inference, and report any error
TfLiteStatus invoke_status = interpreter->Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed on x_val: %f\n",
static_cast<double>(x_val));
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed on x_val: %f\n",
static_cast<double>(x_val));
return;
}

View File

@ -18,7 +18,7 @@ limitations under the License.
void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value,
float y_value) {
// Log the current X and Y values
error_reporter->Report("x_value: %f, y_value: %f\n",
static_cast<double>(x_value),
static_cast<double>(y_value));
TF_LITE_REPORT_ERROR(error_reporter, "x_value: %f, y_value: %f\n",
static_cast<double>(x_value),
static_cast<double>(y_value));
}

View File

@ -75,5 +75,6 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value,
}
}
// Log the current X and Y values
error_reporter->Report("x_value: %f, y_value: %f\n", x_value, y_value);
TF_LITE_REPORT_ERROR(error_reporter, "x_value: %f, y_value: %f\n", x_value,
y_value);
}

View File

@ -38,7 +38,7 @@ TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) {
// Switch on the IMU
if (!IMU.begin()) {
error_reporter->Report("Failed to initialize IMU");
TF_LITE_REPORT_ERROR(error_reporter, "Failed to initialize IMU");
return kTfLiteError;
}
@ -47,7 +47,7 @@ TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) {
float sample_rate = IMU.accelerationSampleRate();
sample_every_n = static_cast<int>(roundf(sample_rate / kTargetHz));
error_reporter->Report("Magic starts!");
TF_LITE_REPORT_ERROR(error_reporter, "Magic starts!");
return kTfLiteOk;
}
@ -67,7 +67,7 @@ bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, float* input,
float x, y, z;
// Read each sample, removing it from the device's FIFO buffer
if (!IMU.readAcceleration(x, y, z)) {
error_reporter->Report("Failed to read data");
TF_LITE_REPORT_ERROR(error_reporter, "Failed to read data");
break;
}
// Throw away this sample unless it's the nth

View File

@ -34,17 +34,20 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
}
// Print some ASCII art for each gesture
if (kind == 0) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"WING:\n\r* * *\n\r * * * "
"*\n\r * * * *\n\r * * * *\n\r * * "
"* *\n\r * *\n\r");
} else if (kind == 1) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"RING:\n\r *\n\r * *\n\r * *\n\r "
" * *\n\r * *\n\r * *\n\r "
" *\n\r");
} else if (kind == 2) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"SLOPE:\n\r *\n\r *\n\r *\n\r *\n\r "
"*\n\r *\n\r *\n\r * * * * * * * *\n\r");
}

View File

@ -35,10 +35,10 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) {
// copying or parsing, it's a very lightweight operation.
const tflite::Model* model = ::tflite::GetModel(g_magic_wand_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
}
// Pull in only the operation implementations we need.
@ -87,7 +87,7 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) {
// Provide an input value
const float* ring_features_data = g_ring_micro_f9643d42_nohash_4_data;
error_reporter->Report("%d", input->bytes);
TF_LITE_REPORT_ERROR(error_reporter, "%d", input->bytes);
for (int i = 0; i < (input->bytes / sizeof(float)); ++i) {
input->data.f[i] = ring_features_data[i];
}
@ -95,7 +95,7 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) {
// Run the model on this input and check that it succeeds
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -132,7 +132,7 @@ TF_LITE_MICRO_TEST(LoadModelAndPerformInference) {
// Run the model on this "Slope" input.
invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);

View File

@ -55,10 +55,10 @@ void setup() {
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_magic_wand_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
@ -94,7 +94,8 @@ void setup() {
(model_input->dims->data[1] != 128) ||
(model_input->dims->data[2] != kChannelNumber) ||
(model_input->type != kTfLiteFloat32)) {
error_reporter->Report("Bad input tensor parameters in model");
TF_LITE_REPORT_ERROR(error_reporter,
"Bad input tensor parameters in model");
return;
}
@ -102,7 +103,7 @@ void setup() {
TfLiteStatus setup_status = SetupAccelerometer(error_reporter);
if (setup_status != kTfLiteOk) {
error_reporter->Report("Set up failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Set up failed\n");
}
}
@ -117,7 +118,8 @@ void loop() {
// Run inference, and report any error
TfLiteStatus invoke_status = interpreter->Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed on index: %d\n", begin_index);
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed on index: %d\n",
begin_index);
return;
}
// Analyze the results to obtain a prediction

View File

@ -18,17 +18,20 @@ limitations under the License.
void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
// light (red: wing, blue: ring, green: slope)
if (kind == 0) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"WING:\n\r* * *\n\r * * * "
"*\n\r * * * *\n\r * * * *\n\r * * "
"* *\n\r * *\n\r");
} else if (kind == 1) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"RING:\n\r *\n\r * *\n\r * *\n\r "
" * *\n\r * *\n\r * *\n\r "
" *\n\r");
} else if (kind == 2) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"SLOPE:\n\r *\n\r *\n\r *\n\r *\n\r "
"*\n\r *\n\r *\n\r * * * * * * * *\n\r");
}

View File

@ -117,8 +117,9 @@ TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) {
// Collecting data at 25Hz.
int accInitRes = initAccelerometer();
if (accInitRes != (int)AM_HAL_STATUS_SUCCESS) {
error_reporter->Report("Failed to initialize the accelerometer. (code %d)",
accInitRes);
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to initialize the accelerometer. (code %d)",
accInitRes);
}
// Enable the accelerometer's FIFO buffer.
@ -127,20 +128,20 @@ TfLiteStatus SetupAccelerometer(tflite::ErrorReporter* error_reporter) {
// it's not fetched in time, so we need to make sure that model inference is
// faster than 1/25Hz * 32 = 1.28s
if (lis2dh12_fifo_set(&dev_ctx, 1)) {
error_reporter->Report("Failed to enable FIFO buffer.");
TF_LITE_REPORT_ERROR(error_reporter, "Failed to enable FIFO buffer.");
}
if (lis2dh12_fifo_mode_set(&dev_ctx, LIS2DH12_BYPASS_MODE)) {
error_reporter->Report("Failed to clear FIFO buffer.");
TF_LITE_REPORT_ERROR(error_reporter, "Failed to clear FIFO buffer.");
return 0;
}
if (lis2dh12_fifo_mode_set(&dev_ctx, LIS2DH12_DYNAMIC_STREAM_MODE)) {
error_reporter->Report("Failed to set streaming mode.");
TF_LITE_REPORT_ERROR(error_reporter, "Failed to set streaming mode.");
return 0;
}
error_reporter->Report("Magic starts!");
TF_LITE_REPORT_ERROR(error_reporter, "Magic starts!");
return kTfLiteOk;
}
@ -158,7 +159,7 @@ bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, float* input,
// Check FIFO buffer for new samples
lis2dh12_fifo_src_reg_t status;
if (lis2dh12_fifo_status_get(&dev_ctx, &status)) {
error_reporter->Report("Failed to get FIFO status.");
TF_LITE_REPORT_ERROR(error_reporter, "Failed to get FIFO status.");
return false;
}
@ -179,7 +180,7 @@ bool ReadAccelerometer(tflite::ErrorReporter* error_reporter, float* input,
memset(data_raw_acceleration.u8bit, 0x00, 3 * sizeof(int16_t));
// If the return value is non-zero, sensor data was successfully read
if (lis2dh12_acceleration_raw_get(&dev_ctx, data_raw_acceleration.u8bit)) {
error_reporter->Report("Failed to get raw data.");
TF_LITE_REPORT_ERROR(error_reporter, "Failed to get raw data.");
} else {
// Convert each raw 16-bit value into floating point values representing
// milli-Gs, a unit of acceleration, and store in the current position of

View File

@ -36,7 +36,8 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
// Set the LED color and print a symbol (red: wing, blue: ring, green: slope)
if (kind == 0) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"WING:\n\r* * *\n\r * * * "
"*\n\r * * * *\n\r * * * *\n\r * * "
"* *\n\r * *\n\r");
@ -44,7 +45,8 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_BLUE);
am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_GREEN);
} else if (kind == 1) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"RING:\n\r *\n\r * *\n\r * *\n\r "
" * *\n\r * *\n\r * *\n\r "
" *\n\r");
@ -52,7 +54,8 @@ void HandleOutput(tflite::ErrorReporter* error_reporter, int kind) {
am_devices_led_on(am_bsp_psLEDs, AM_BSP_LED_BLUE);
am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_GREEN);
} else if (kind == 2) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"SLOPE:\n\r *\n\r *\n\r *\n\r *\n\r "
"*\n\r *\n\r *\n\r * * * * * * * *\n\r");
am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_RED);

View File

@ -63,10 +63,10 @@ TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) {
const tflite::Model* model =
::tflite::GetModel(g_tiny_conv_simple_features_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
}
// This pulls in all the operation implementations we need.
@ -102,7 +102,7 @@ TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) {
// Run the model on this input and make sure it succeeds.
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -126,7 +126,7 @@ TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) {
g_yes_score = output->data.uint8[kYesIndex];
g_no_score = output->data.uint8[kNoIndex];
error_reporter->Report("Ran successfully\n");
TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n");
}
TF_LITE_MICRO_TESTS_END

View File

@ -144,21 +144,25 @@ void enable_burst_mode(tflite::ErrorReporter* error_reporter) {
if (AM_HAL_STATUS_SUCCESS ==
am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
error_reporter->Report("Apollo3 Burst Mode is Available\n");
TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
} else {
error_reporter->Report("Apollo3 Burst Mode is Not Available\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 Burst Mode is Not Available\n");
}
} else {
error_reporter->Report("Failed to Initialize for Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Initialize for Burst Mode operation\n");
}
// Put the MCU into "Burst" mode.
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
if (AM_HAL_BURST_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Burst Mode (96MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Burst Mode (96MHz)\n");
}
} else {
error_reporter->Report("Failed to Enable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Enable Burst Mode operation\n");
}
}
@ -242,7 +246,7 @@ void pdm_start_dma(tflite::ErrorReporter* error_reporter) {
// Start the data transfer.
if (AM_HAL_STATUS_SUCCESS != am_hal_pdm_dma_start(g_pdm_handle, &sTransfer)) {
error_reporter->Report("Error - configuring PDM DMA failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Error - configuring PDM DMA failed.");
}
// Reset the PDM DMA flags.
@ -309,13 +313,15 @@ extern "C" void am_pdm0_isr(void) {
// Read the interrupt status.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_pdm_interrupt_status_get(g_pdm_handle, &ui32IntMask, false)) {
g_pdm_dma_error_reporter->Report("Error reading PDM0 interrupt status.");
TF_LITE_REPORT_ERROR(g_pdm_dma_error_reporter,
"Error reading PDM0 interrupt status.");
}
// Clear the PDM interrupt.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_pdm_interrupt_clear(g_pdm_handle, ui32IntMask)) {
g_pdm_dma_error_reporter->Report("Error clearing PDM interrupt status.");
TF_LITE_REPORT_ERROR(g_pdm_dma_error_reporter,
"Error clearing PDM interrupt status.");
}
#if USE_DEBUG_GPIO
@ -370,7 +376,8 @@ TfLiteStatus InitAudioRecording(tflite::ErrorReporter* error_reporter) {
// Set the clock frequency.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0)) {
error_reporter->Report("Error - configuring the system clock failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - configuring the system clock failed.");
return kTfLiteError;
}
@ -380,11 +387,13 @@ TfLiteStatus InitAudioRecording(tflite::ErrorReporter* error_reporter) {
// Set the default cache configuration and enable it.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_cachectrl_config(&am_hal_cachectrl_defaults)) {
error_reporter->Report("Error - configuring the system cache failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - configuring the system cache failed.");
return kTfLiteError;
}
if (AM_HAL_STATUS_SUCCESS != am_hal_cachectrl_enable()) {
error_reporter->Report("Error - enabling the system cache failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - enabling the system cache failed.");
return kTfLiteError;
}
@ -397,7 +406,8 @@ TfLiteStatus InitAudioRecording(tflite::ErrorReporter* error_reporter) {
uint32_t ui32LPMMode = CACHECTRL_FLASHCFG_LPMMODE_STANDBY;
if (am_hal_cachectrl_control(AM_HAL_CACHECTRL_CONTROL_LPMMODE_SET,
&ui32LPMMode)) {
error_reporter->Report("Error - enabling cache sleep state failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - enabling cache sleep state failed.");
}
// Enable Instruction & Data pre-fetching.
@ -409,7 +419,7 @@ TfLiteStatus InitAudioRecording(tflite::ErrorReporter* error_reporter) {
// Enable the floating point module, and configure the core for lazy stacking.
am_hal_sysctrl_fpu_enable();
am_hal_sysctrl_fpu_stacking_enable(true);
error_reporter->Report("FPU Enabled.");
TF_LITE_REPORT_ERROR(error_reporter, "FPU Enabled.");
// Configure the LEDs.
am_devices_led_array_init(am_bsp_psLEDs, AM_BSP_NUM_LEDS);
@ -466,7 +476,8 @@ TfLiteStatus InitAudioRecording(tflite::ErrorReporter* error_reporter) {
// Trigger the PDM DMA for the first time manually.
pdm_start_dma(g_pdm_dma_error_reporter);
error_reporter->Report("\nPDM DMA Threshold = %d", PDMn(0)->FIFOTHR);
TF_LITE_REPORT_ERROR(error_reporter, "\nPDM DMA Threshold = %d",
PDMn(0)->FIFOTHR);
// Turn on LED 0 to indicate PDM initialized
am_devices_led_on(am_bsp_psLEDs, 0);

View File

@ -54,8 +54,8 @@ void RespondToCommand(tflite::ErrorReporter* error_reporter,
#endif
g_PreviousCommandTimestamp = current_time;
error_reporter->Report("\nHeard %s (%d) @%dms", found_command, score,
current_time);
TF_LITE_REPORT_ERROR(error_reporter, "\nHeard %s (%d) @%dms", found_command,
score, current_time);
#if USE_MAYA
uint32_t delay = 60;

View File

@ -42,8 +42,8 @@ void RespondToCommand(tflite::ErrorReporter* error_reporter,
static int certainty = 220;
if (is_new_command) {
error_reporter->Report("Heard %s (%d) @%dms", found_command, score,
current_time);
TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command,
score, current_time);
// If we hear a command, light up the appropriate LED
if (found_command[0] == 'y') {
last_command_time = current_time;

View File

@ -22,7 +22,7 @@ void RespondToCommand(tflite::ErrorReporter* error_reporter,
int32_t current_time, const char* found_command,
uint8_t score, bool is_new_command) {
if (is_new_command) {
error_reporter->Report("Heard %s (%d) @%dms", found_command, score,
current_time);
TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command,
score, current_time);
}
}

View File

@ -25,8 +25,8 @@ void RespondToCommand(tflite::ErrorReporter *error_reporter,
int32_t current_time, const char *found_command,
uint8_t score, bool is_new_command) {
if (is_new_command) {
error_reporter->Report("Heard %s (%d) @%dms", found_command, score,
current_time);
TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command,
score, current_time);
if (*found_command == 'y') {
lcd.Clear(0xFF0F9D58);
lcd.DisplayStringAt(0, LINE(5), (uint8_t *)"Heard yes!", CENTER_MODE);

View File

@ -35,8 +35,9 @@ TfLiteStatus FeatureProvider::PopulateFeatureData(
tflite::ErrorReporter* error_reporter, int32_t last_time_in_ms,
int32_t time_in_ms, int* how_many_new_slices) {
if (feature_size_ != kFeatureElementCount) {
error_reporter->Report("Requested feature_data_ size %d doesn't match %d",
feature_size_, kFeatureElementCount);
TF_LITE_REPORT_ERROR(error_reporter,
"Requested feature_data_ size %d doesn't match %d",
feature_size_, kFeatureElementCount);
return kTfLiteError;
}
@ -100,8 +101,9 @@ TfLiteStatus FeatureProvider::PopulateFeatureData(
kFeatureSliceDurationMs, &audio_samples_size,
&audio_samples);
if (audio_samples_size < kMaxAudioSampleSize) {
error_reporter->Report("Audio data size %d too small, want %d",
audio_samples_size, kMaxAudioSampleSize);
TF_LITE_REPORT_ERROR(error_reporter,
"Audio data size %d too small, want %d",
audio_samples_size, kMaxAudioSampleSize);
return kTfLiteError;
}
uint8_t* new_slice_data = feature_data_ + (new_slice * kFeatureSliceSize);

View File

@ -59,10 +59,10 @@ void setup() {
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_tiny_conv_micro_features_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
@ -91,7 +91,7 @@ void setup() {
// Allocate memory from the tensor_arena for the model's tensors.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
error_reporter->Report("AllocateTensors() failed");
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
return;
}
@ -101,7 +101,8 @@ void setup() {
(model_input->dims->data[1] != kFeatureSliceCount) ||
(model_input->dims->data[2] != kFeatureSliceSize) ||
(model_input->type != kTfLiteUInt8)) {
error_reporter->Report("Bad input tensor parameters in model");
TF_LITE_REPORT_ERROR(error_reporter,
"Bad input tensor parameters in model");
return;
}
model_input_buffer = model_input->data.uint8;
@ -127,7 +128,7 @@ void loop() {
TfLiteStatus feature_status = feature_provider->PopulateFeatureData(
error_reporter, previous_time, current_time, &how_many_new_slices);
if (feature_status != kTfLiteOk) {
error_reporter->Report("Feature generation failed");
TF_LITE_REPORT_ERROR(error_reporter, "Feature generation failed");
return;
}
previous_time = current_time;
@ -145,7 +146,7 @@ void loop() {
// Run the model on the spectrogram input and make sure it succeeds.
TfLiteStatus invoke_status = interpreter->Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed");
return;
}
@ -158,7 +159,8 @@ void loop() {
TfLiteStatus process_status = recognizer->ProcessLatestResults(
output, current_time, &found_command, &score, &is_new_command);
if (process_status != kTfLiteOk) {
error_reporter->Report("RecognizeCommands::ProcessLatestResults() failed");
TF_LITE_REPORT_ERROR(error_reporter,
"RecognizeCommands::ProcessLatestResults() failed");
return;
}
// Do something based on the recognized command. The default implementation

View File

@ -52,7 +52,7 @@ TfLiteStatus InitializeMicroFeatures(tflite::ErrorReporter* error_reporter) {
config.log_scale.scale_shift = 6;
if (!FrontendPopulateState(&config, &g_micro_features_state,
kAudioSampleFrequency)) {
error_reporter->Report("FrontendPopulateState() failed");
TF_LITE_REPORT_ERROR(error_reporter, "FrontendPopulateState() failed");
return kTfLiteError;
}
g_is_first_time = true;

View File

@ -59,9 +59,8 @@ TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorYes) {
TF_LITE_MICRO_EXPECT_EQ(g_yes_feature_data_slice[i],
yes_calculated_data[i]);
if (g_yes_feature_data_slice[i] != yes_calculated_data[i]) {
error_reporter->Report("Expected value %d but found %d",
g_yes_feature_data_slice[i],
yes_calculated_data[i]);
TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d",
g_yes_feature_data_slice[i], yes_calculated_data[i]);
}
}
}
@ -92,8 +91,8 @@ TF_LITE_MICRO_TEST(TestMicroFeaturesGeneratorNo) {
for (int i = 0; i < g_no_feature_data_slice_size; ++i) {
TF_LITE_MICRO_EXPECT_EQ(g_no_feature_data_slice[i], no_calculated_data[i]);
if (g_no_feature_data_slice[i] != no_calculated_data[i]) {
error_reporter->Report("Expected value %d but found %d",
g_no_feature_data_slice[i], no_calculated_data[i]);
TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d",
g_no_feature_data_slice[i], no_calculated_data[i]);
}
}
}

View File

@ -20,13 +20,14 @@ limitations under the License.
// at least the number of bytes requested. This doesn't work with raw pointers
// since sizeof() doesn't know their actual length, so only use this to check
// statically-allocated arrays with known sizes.
#define STATIC_ALLOC_ENSURE_ARRAY_SIZE(A, N) \
do { \
if (sizeof(A) < (N)) { \
error_reporter->Report(#A " too small (%d bytes, wanted %d) at %s:%d", \
sizeof(A), (N), __FILE__, __LINE__); \
return 0; \
} \
#define STATIC_ALLOC_ENSURE_ARRAY_SIZE(A, N) \
do { \
if (sizeof(A) < (N)) { \
TF_LITE_REPORT_ERROR(error_reporter, \
#A " too small (%d bytes, wanted %d) at %s:%d", \
sizeof(A), (N), __FILE__, __LINE__); \
return 0; \
} \
} while (0)
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_STATIC_ALLOC_H_

View File

@ -36,10 +36,10 @@ TF_LITE_MICRO_TEST(TestInvoke) {
const tflite::Model* model =
::tflite::GetModel(g_tiny_conv_micro_features_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
}
// Pull in only the operation implementations we need.
@ -89,7 +89,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this input and make sure it succeeds.
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -125,7 +125,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this "No" input.
invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -146,7 +146,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
TF_LITE_MICRO_EXPECT_GT(no_score, unknown_score);
TF_LITE_MICRO_EXPECT_GT(no_score, yes_score);
error_reporter->Report("Ran successfully\n");
TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n");
}
TF_LITE_MICRO_TESTS_END

View File

@ -29,13 +29,13 @@ int16_t g_audio_output_buffer[kMaxAudioSampleSize];
int32_t g_latest_audio_timestamp = 0;
// Checks for MacOS errors, prints information and returns a TF Lite version.
#define RETURN_IF_OS_ERROR(error, error_reporter) \
do { \
if (error != noErr) { \
error_reporter->Report("Error: %s:%d (%d)\n", __FILE__, __LINE__, \
error); \
return kTfLiteError; \
} \
#define RETURN_IF_OS_ERROR(error, error_reporter) \
do { \
if (error != noErr) { \
TF_LITE_REPORT_ERROR(error_reporter, "Error: %s:%d (%d)\n", __FILE__, \
__LINE__, error); \
return kTfLiteError; \
} \
} while (0);
// Called when an audio input buffer has been filled.

View File

@ -46,13 +46,14 @@ TfLiteStatus GenerateSimpleFeatures(tflite::ErrorReporter* error_reporter,
const int16_t* input, int input_size,
int output_size, uint8_t* output) {
if (input_size > kInputSize) {
error_reporter->Report("Input size %d larger than %d", input_size,
kInputSize);
TF_LITE_REPORT_ERROR(error_reporter, "Input size %d larger than %d",
input_size, kInputSize);
return kTfLiteError;
}
if (output_size != kOutputSize) {
error_reporter->Report("Requested output size %d doesn't match %d",
output_size, kOutputSize);
TF_LITE_REPORT_ERROR(error_reporter,
"Requested output size %d doesn't match %d",
output_size, kOutputSize);
return kTfLiteError;
}

View File

@ -123,13 +123,14 @@ TfLiteStatus GenerateSimpleFeatures(tflite::ErrorReporter* error_reporter,
int output_size, uint8_t* output) {
// Ensure our input and output data arrays are valid.
if (input_size > kMaxAudioSampleSize) {
error_reporter->Report("Input size %d larger than %d", input_size,
kMaxAudioSampleSize);
TF_LITE_REPORT_ERROR(error_reporter, "Input size %d larger than %d",
input_size, kMaxAudioSampleSize);
return kTfLiteError;
}
if (output_size != kFeatureSliceSize) {
error_reporter->Report("Requested output size %d doesn't match %d",
output_size, kFeatureSliceSize);
TF_LITE_REPORT_ERROR(error_reporter,
"Requested output size %d doesn't match %d",
output_size, kFeatureSliceSize);
return kTfLiteError;
}

View File

@ -78,13 +78,14 @@ TfLiteStatus GenerateSimpleFeatures(tflite::ErrorReporter* error_reporter,
int output_size, uint8_t* output) {
// Ensure our input and output data arrays are valid.
if (input_size > kMaxAudioSampleSize) {
error_reporter->Report("Input size %d larger than %d", input_size,
kMaxAudioSampleSize);
TF_LITE_REPORT_ERROR(error_reporter, "Input size %d larger than %d",
input_size, kMaxAudioSampleSize);
return kTfLiteError;
}
if (output_size != kFeatureSliceSize) {
error_reporter->Report("Requested output size %d doesn't match %d",
output_size, kFeatureSliceSize);
TF_LITE_REPORT_ERROR(error_reporter,
"Requested output size %d doesn't match %d",
output_size, kFeatureSliceSize);
return kTfLiteError;
}

View File

@ -39,9 +39,9 @@ TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) {
TF_LITE_MICRO_EXPECT_EQ(g_yes_power_spectrum_data[i],
yes_calculated_data[i]);
if (g_yes_power_spectrum_data[i] != yes_calculated_data[i]) {
error_reporter->Report("Expected value %d but found %d",
g_yes_power_spectrum_data[i],
yes_calculated_data[i]);
TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d",
g_yes_power_spectrum_data[i],
yes_calculated_data[i]);
}
}
@ -54,9 +54,8 @@ TF_LITE_MICRO_TEST(TestSimpleFeaturesGenerator) {
for (int i = 0; i < g_no_power_spectrum_data_size; ++i) {
TF_LITE_MICRO_EXPECT_EQ(g_no_power_spectrum_data[i], no_calculated_data[i]);
if (g_no_power_spectrum_data[i] != no_calculated_data[i]) {
error_reporter->Report("Expected value %d but found %d",
g_no_power_spectrum_data[i],
no_calculated_data[i]);
TF_LITE_REPORT_ERROR(error_reporter, "Expected value %d but found %d",
g_no_power_spectrum_data[i], no_calculated_data[i]);
}
}
}

View File

@ -67,7 +67,7 @@ void adc_start_dma(tflite::ErrorReporter* error_reporter) {
}
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_configure_dma(g_adc_handle, &ADCDMAConfig)) {
error_reporter->Report("Error - configuring ADC DMA failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Error - configuring ADC DMA failed.");
}
// Reset the ADC DMA flags.
@ -82,13 +82,14 @@ void adc_config0(tflite::ErrorReporter* error_reporter) {
// Initialize the ADC and get the handle.
if (AM_HAL_STATUS_SUCCESS != am_hal_adc_initialize(0, &g_adc_handle)) {
error_reporter->Report("Error - reservation of the ADC0 instance failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - reservation of the ADC0 instance failed.");
}
// Power on the ADC.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_power_control(g_adc_handle, AM_HAL_SYSCTRL_WAKE, false)) {
error_reporter->Report("Error - ADC0 power on failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Error - ADC0 power on failed.");
}
// Set up the ADC configuration parameters. These settings are reasonable
@ -102,7 +103,7 @@ void adc_config0(tflite::ErrorReporter* error_reporter) {
ADCConfig.ePowerMode = AM_HAL_ADC_LPMODE0;
ADCConfig.eRepeat = AM_HAL_ADC_REPEATING_SCAN;
if (AM_HAL_STATUS_SUCCESS != am_hal_adc_configure(g_adc_handle, &ADCConfig)) {
error_reporter->Report("Error - configuring ADC0 failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Error - configuring ADC0 failed.");
}
// Set up an ADC slot (2)
@ -113,7 +114,8 @@ void adc_config0(tflite::ErrorReporter* error_reporter) {
ADCSlotConfig.bEnabled = true;
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_configure_slot(g_adc_handle, 2, &ADCSlotConfig)) {
error_reporter->Report("Error - configuring ADC Slot 2 failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - configuring ADC Slot 2 failed.");
}
// Set up an ADC slot (1)
@ -124,7 +126,8 @@ void adc_config0(tflite::ErrorReporter* error_reporter) {
ADCSlotConfig.bEnabled = true;
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_configure_slot(g_adc_handle, 1, &ADCSlotConfig)) {
error_reporter->Report("Error - configuring ADC Slot 1 failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - configuring ADC Slot 1 failed.");
}
// Configure the ADC to use DMA for the sample transfer.
@ -137,7 +140,7 @@ void adc_config0(tflite::ErrorReporter* error_reporter) {
// Enable the ADC.
if (AM_HAL_STATUS_SUCCESS != am_hal_adc_enable(g_adc_handle)) {
error_reporter->Report("Error - enabling ADC0 failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Error - enabling ADC0 failed.");
}
}
@ -170,21 +173,25 @@ void enable_burst_mode(tflite::ErrorReporter* error_reporter) {
if (AM_HAL_STATUS_SUCCESS ==
am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
error_reporter->Report("Apollo3 Burst Mode is Available\n");
TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
} else {
error_reporter->Report("Apollo3 Burst Mode is Not Available\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 Burst Mode is Not Available\n");
}
} else {
error_reporter->Report("Failed to Initialize for Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Initialize for Burst Mode operation\n");
}
// Put the MCU into "Burst" mode.
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
if (AM_HAL_BURST_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Burst Mode (96MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Burst Mode (96MHz)\n");
}
} else {
error_reporter->Report("Failed to Enable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Enable Burst Mode operation\n");
}
}
@ -197,13 +204,15 @@ extern "C" void am_adc_isr(void) {
// Read the interrupt status.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_interrupt_status(g_adc_handle, &ui32IntMask, false)) {
g_adc_dma_error_reporter->Report("Error reading ADC0 interrupt status.");
TF_LITE_REPORT_ERROR(g_adc_dma_error_reporter,
"Error reading ADC0 interrupt status.");
}
// Clear the ADC interrupt.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_adc_interrupt_clear(g_adc_handle, ui32IntMask)) {
g_adc_dma_error_reporter->Report("Error clearing ADC0 interrupt status.");
TF_LITE_REPORT_ERROR(g_adc_dma_error_reporter,
"Error clearing ADC0 interrupt status.");
}
// If we got a DMA complete, set the flag.
@ -249,18 +258,21 @@ TfLiteStatus InitAudioRecording(tflite::ErrorReporter* error_reporter) {
// Set the clock frequency.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0)) {
error_reporter->Report("Error - configuring the system clock failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - configuring the system clock failed.");
return kTfLiteError;
}
// Set the default cache configuration and enable it.
if (AM_HAL_STATUS_SUCCESS !=
am_hal_cachectrl_config(&am_hal_cachectrl_defaults)) {
error_reporter->Report("Error - configuring the system cache failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - configuring the system cache failed.");
return kTfLiteError;
}
if (AM_HAL_STATUS_SUCCESS != am_hal_cachectrl_enable()) {
error_reporter->Report("Error - enabling the system cache failed.");
TF_LITE_REPORT_ERROR(error_reporter,
"Error - enabling the system cache failed.");
return kTfLiteError;
}
@ -293,7 +305,7 @@ TfLiteStatus InitAudioRecording(tflite::ErrorReporter* error_reporter) {
// Trigger the ADC sampling for the first time manually.
if (AM_HAL_STATUS_SUCCESS != am_hal_adc_sw_trigger(g_adc_handle)) {
error_reporter->Report("Error - triggering the ADC0 failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Error - triggering the ADC0 failed.");
return kTfLiteError;
}

View File

@ -40,8 +40,8 @@ void RespondToCommand(tflite::ErrorReporter* error_reporter,
am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_YELLOW);
am_devices_led_off(am_bsp_psLEDs, AM_BSP_LED_GREEN);
if (is_new_command) {
error_reporter->Report("Heard %s (%d) @%dms", found_command, score,
current_time);
TF_LITE_REPORT_ERROR(error_reporter, "Heard %s (%d) @%dms", found_command,
score, current_time);
if (found_command[0] == 'y') {
am_devices_led_on(am_bsp_psLEDs, AM_BSP_LED_YELLOW);
}

View File

@ -59,10 +59,10 @@ TF_LITE_MICRO_TEST(TestInvoke) {
const tflite::Model* model = ::tflite::GetModel(network_model);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
return 1;
}
@ -77,7 +77,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -92,7 +92,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
TF_LITE_MICRO_EXPECT_EQ(output->data.uint8[i], expected_output_data[i]);
}
#endif
error_reporter->Report("Ran successfully\n");
TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n");
}
TF_LITE_MICRO_TESTS_END

View File

@ -87,32 +87,38 @@ void boost_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
if (AM_HAL_STATUS_SUCCESS ==
am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
error_reporter->Report("Apollo3 Burst Mode is Available\n");
TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
} else {
error_reporter->Report("Apollo3 Burst Mode is Not Available\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 Burst Mode is Not Available\n");
return;
}
} else {
error_reporter->Report("Failed to Initialize for Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Initialize for Burst Mode operation\n");
}
// Make sure we are in "Normal" mode.
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_disable(&eBurstMode)) {
if (AM_HAL_NORMAL_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Normal Mode (48MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Normal Mode (48MHz)\n");
}
} else {
error_reporter->Report("Failed to Disable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Disable Burst Mode operation\n");
}
// Put the MCU into "Burst" mode.
if (bEnable) {
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
if (AM_HAL_BURST_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Burst Mode (96MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Burst Mode (96MHz)\n");
}
} else {
error_reporter->Report("Failed to Enable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Enable Burst Mode operation\n");
}
}
}
@ -120,7 +126,7 @@ void boost_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
} // namespace
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Initializing HM01B0...\n");
TF_LITE_REPORT_ERROR(error_reporter, "Initializing HM01B0...\n");
am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);

View File

@ -51,6 +51,6 @@ void RespondToDetection(tflite::ErrorReporter* error_reporter,
digitalWrite(LEDR, LOW);
}
error_reporter->Report("Person score: %d No person score: %d", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
person_score, no_person_score);
}

View File

@ -64,7 +64,7 @@ uint32_t jpeg_length = 0;
// Get the camera module ready
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Attempting to start Arducam");
TF_LITE_REPORT_ERROR(error_reporter, "Attempting to start Arducam");
// Enable the Wire library
Wire.begin();
// Configure the CS pin
@ -82,7 +82,7 @@ TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
uint8_t test;
test = myCAM.read_reg(ARDUCHIP_TEST1);
if (test != 0x55) {
error_reporter->Report("Can't communicate with Arducam");
TF_LITE_REPORT_ERROR(error_reporter, "Can't communicate with Arducam");
delay(1000);
return kTfLiteError;
}
@ -98,7 +98,7 @@ TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
// Begin the capture and wait for it to finish
TfLiteStatus PerformCapture(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Starting capture");
TF_LITE_REPORT_ERROR(error_reporter, "Starting capture");
// Make sure the buffer is emptied before each capture
myCAM.flush_fifo();
myCAM.clear_fifo_flag();
@ -107,7 +107,7 @@ TfLiteStatus PerformCapture(tflite::ErrorReporter* error_reporter) {
// Wait for indication that it is done
while (!myCAM.get_bit(ARDUCHIP_TRIG, CAP_DONE_MASK)) {
}
error_reporter->Report("Image captured");
TF_LITE_REPORT_ERROR(error_reporter, "Image captured");
delay(50);
// Clear the capture done flag
myCAM.clear_fifo_flag();
@ -118,15 +118,16 @@ TfLiteStatus PerformCapture(tflite::ErrorReporter* error_reporter) {
TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
// This represents the total length of the JPEG data
jpeg_length = myCAM.read_fifo_length();
error_reporter->Report("Reading %d bytes from Arducam", jpeg_length);
TF_LITE_REPORT_ERROR(error_reporter, "Reading %d bytes from Arducam",
jpeg_length);
// Ensure there's not too much data for our buffer
if (jpeg_length > MAX_JPEG_BYTES) {
error_reporter->Report("Too many bytes in FIFO buffer (%d)",
MAX_JPEG_BYTES);
TF_LITE_REPORT_ERROR(error_reporter, "Too many bytes in FIFO buffer (%d)",
MAX_JPEG_BYTES);
return kTfLiteError;
}
if (jpeg_length == 0) {
error_reporter->Report("No data in Arducam FIFO buffer");
TF_LITE_REPORT_ERROR(error_reporter, "No data in Arducam FIFO buffer");
return kTfLiteError;
}
myCAM.CS_LOW();
@ -135,7 +136,7 @@ TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
jpeg_buffer[index] = SPI.transfer(0x00);
}
delayMicroseconds(15);
error_reporter->Report("Finished reading");
TF_LITE_REPORT_ERROR(error_reporter, "Finished reading");
myCAM.CS_HIGH();
return kTfLiteOk;
}
@ -144,7 +145,8 @@ TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
int image_width, int image_height,
uint8_t* image_data) {
error_reporter->Report("Decoding JPEG and converting to greyscale");
TF_LITE_REPORT_ERROR(error_reporter,
"Decoding JPEG and converting to greyscale");
// Parse the JPEG headers. The image will be decoded as a sequence of Minimum
// Coded Units (MCUs), which are 16x8 blocks of pixels.
JpegDec.decodeArray(jpeg_buffer, jpeg_length);
@ -221,7 +223,7 @@ TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
}
}
}
error_reporter->Report("Image decoded and processed");
TF_LITE_REPORT_ERROR(error_reporter, "Image decoded and processed");
return kTfLiteOk;
}
@ -232,7 +234,7 @@ TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
if (!g_is_camera_initialized) {
TfLiteStatus init_status = InitCamera(error_reporter);
if (init_status != kTfLiteOk) {
error_reporter->Report("InitCamera failed");
TF_LITE_REPORT_ERROR(error_reporter, "InitCamera failed");
return init_status;
}
g_is_camera_initialized = true;
@ -240,20 +242,20 @@ TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
TfLiteStatus capture_status = PerformCapture(error_reporter);
if (capture_status != kTfLiteOk) {
error_reporter->Report("PerformCapture failed");
TF_LITE_REPORT_ERROR(error_reporter, "PerformCapture failed");
return capture_status;
}
TfLiteStatus read_data_status = ReadData(error_reporter);
if (read_data_status != kTfLiteOk) {
error_reporter->Report("ReadData failed");
TF_LITE_REPORT_ERROR(error_reporter, "ReadData failed");
return read_data_status;
}
TfLiteStatus decode_status = DecodeAndProcessImage(
error_reporter, image_width, image_height, image_data);
if (decode_status != kTfLiteOk) {
error_reporter->Report("DecodeAndProcessImage failed");
TF_LITE_REPORT_ERROR(error_reporter, "DecodeAndProcessImage failed");
return decode_status;
}

View File

@ -20,6 +20,6 @@ limitations under the License.
// should implement their own versions of this function.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
uint8_t person_score, uint8_t no_person_score) {
error_reporter->Report("person score:%d no person score %d", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
person_score, no_person_score);
}

View File

@ -50,10 +50,10 @@ void setup() {
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
@ -82,7 +82,7 @@ void setup() {
// Allocate memory from the tensor_arena for the model's tensors.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
error_reporter->Report("AllocateTensors() failed");
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
return;
}
@ -95,12 +95,12 @@ void loop() {
// Get image from provider.
if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
input->data.uint8)) {
error_reporter->Report("Image capture failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
}
// Run the model on this input and make sure it succeeds.
if (kTfLiteOk != interpreter->Invoke()) {
error_reporter->Report("Invoke failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
}
TfLiteTensor* output = interpreter->output(0);

View File

@ -41,10 +41,10 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// copying or parsing, it's a very lightweight operation.
const tflite::Model* model = ::tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
}
// Pull in only the operation implementations we need.
@ -89,7 +89,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this input and make sure it succeeds.
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -106,9 +106,9 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Make sure that the expected "Person" score is higher than the other class.
uint8_t person_score = output->data.uint8[kPersonIndex];
uint8_t no_person_score = output->data.uint8[kNotAPersonIndex];
error_reporter->Report(
"person data. person score: %d, no person score: %d\n", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter,
"person data. person score: %d, no person score: %d\n",
person_score, no_person_score);
TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score);
// Now test with a different input, from an image without a person.
@ -120,7 +120,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this "No Person" input.
invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -137,12 +137,13 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Make sure that the expected "No Person" score is higher.
person_score = output->data.uint8[kPersonIndex];
no_person_score = output->data.uint8[kNotAPersonIndex];
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"no person data. person score: %d, no person score: %d\n", person_score,
no_person_score);
TF_LITE_MICRO_EXPECT_GT(no_person_score, person_score);
error_reporter->Report("Ran successfully\n");
TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n");
}
TF_LITE_MICRO_TESTS_END

View File

@ -44,6 +44,6 @@ void RespondToDetection(tflite::ErrorReporter* error_reporter,
am_devices_led_on(am_bsp_psLEDs, AM_BSP_LED_YELLOW);
}
error_reporter->Report("Person score: %d No person score: %d", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
person_score, no_person_score);
}

View File

@ -87,32 +87,38 @@ void burst_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
if (AM_HAL_STATUS_SUCCESS ==
am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
error_reporter->Report("Apollo3 Burst Mode is Available\n");
TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
} else {
error_reporter->Report("Apollo3 Burst Mode is Not Available\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 Burst Mode is Not Available\n");
return;
}
} else {
error_reporter->Report("Failed to Initialize for Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Initialize for Burst Mode operation\n");
}
// Make sure we are in "Normal" mode.
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_disable(&eBurstMode)) {
if (AM_HAL_NORMAL_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Normal Mode (48MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Normal Mode (48MHz)\n");
}
} else {
error_reporter->Report("Failed to Disable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Disable Burst Mode operation\n");
}
// Put the MCU into "Burst" mode.
if (bEnable) {
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
if (AM_HAL_BURST_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Burst Mode (96MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Burst Mode (96MHz)\n");
}
} else {
error_reporter->Report("Failed to Enable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Enable Burst Mode operation\n");
}
}
}
@ -120,7 +126,7 @@ void burst_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
} // namespace
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Initializing HM01B0...\n");
TF_LITE_REPORT_ERROR(error_reporter, "Initializing HM01B0...\n");
am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);

View File

@ -87,32 +87,38 @@ void boost_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
if (AM_HAL_STATUS_SUCCESS ==
am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
error_reporter->Report("Apollo3 Burst Mode is Available\n");
TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
} else {
error_reporter->Report("Apollo3 Burst Mode is Not Available\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 Burst Mode is Not Available\n");
return;
}
} else {
error_reporter->Report("Failed to Initialize for Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Initialize for Burst Mode operation\n");
}
// Make sure we are in "Normal" mode.
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_disable(&eBurstMode)) {
if (AM_HAL_NORMAL_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Normal Mode (48MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Normal Mode (48MHz)\n");
}
} else {
error_reporter->Report("Failed to Disable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Disable Burst Mode operation\n");
}
// Put the MCU into "Burst" mode.
if (bEnable) {
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
if (AM_HAL_BURST_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Burst Mode (96MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Burst Mode (96MHz)\n");
}
} else {
error_reporter->Report("Failed to Enable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Enable Burst Mode operation\n");
}
}
}
@ -120,7 +126,7 @@ void boost_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
} // namespace
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Initializing HM01B0...\n");
TF_LITE_REPORT_ERROR(error_reporter, "Initializing HM01B0...\n");
am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);

View File

@ -51,6 +51,6 @@ void RespondToDetection(tflite::ErrorReporter* error_reporter,
digitalWrite(LEDR, LOW);
}
error_reporter->Report("Person score: %d No person score: %d", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
person_score, no_person_score);
}

View File

@ -64,7 +64,7 @@ uint32_t jpeg_length = 0;
// Get the camera module ready
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Attempting to start Arducam");
TF_LITE_REPORT_ERROR(error_reporter, "Attempting to start Arducam");
// Enable the Wire library
Wire.begin();
// Configure the CS pin
@ -82,7 +82,7 @@ TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
uint8_t test;
test = myCAM.read_reg(ARDUCHIP_TEST1);
if (test != 0x55) {
error_reporter->Report("Can't communicate with Arducam");
TF_LITE_REPORT_ERROR(error_reporter, "Can't communicate with Arducam");
delay(1000);
return kTfLiteError;
}
@ -98,7 +98,7 @@ TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
// Begin the capture and wait for it to finish
TfLiteStatus PerformCapture(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Starting capture");
TF_LITE_REPORT_ERROR(error_reporter, "Starting capture");
// Make sure the buffer is emptied before each capture
myCAM.flush_fifo();
myCAM.clear_fifo_flag();
@ -107,7 +107,7 @@ TfLiteStatus PerformCapture(tflite::ErrorReporter* error_reporter) {
// Wait for indication that it is done
while (!myCAM.get_bit(ARDUCHIP_TRIG, CAP_DONE_MASK)) {
}
error_reporter->Report("Image captured");
TF_LITE_REPORT_ERROR(error_reporter, "Image captured");
delay(50);
// Clear the capture done flag
myCAM.clear_fifo_flag();
@ -118,15 +118,16 @@ TfLiteStatus PerformCapture(tflite::ErrorReporter* error_reporter) {
TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
// This represents the total length of the JPEG data
jpeg_length = myCAM.read_fifo_length();
error_reporter->Report("Reading %d bytes from Arducam", jpeg_length);
TF_LITE_REPORT_ERROR(error_reporter, "Reading %d bytes from Arducam",
jpeg_length);
// Ensure there's not too much data for our buffer
if (jpeg_length > MAX_JPEG_BYTES) {
error_reporter->Report("Too many bytes in FIFO buffer (%d)",
MAX_JPEG_BYTES);
TF_LITE_REPORT_ERROR(error_reporter, "Too many bytes in FIFO buffer (%d)",
MAX_JPEG_BYTES);
return kTfLiteError;
}
if (jpeg_length == 0) {
error_reporter->Report("No data in Arducam FIFO buffer");
TF_LITE_REPORT_ERROR(error_reporter, "No data in Arducam FIFO buffer");
return kTfLiteError;
}
myCAM.CS_LOW();
@ -135,7 +136,7 @@ TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
jpeg_buffer[index] = SPI.transfer(0x00);
}
delayMicroseconds(15);
error_reporter->Report("Finished reading");
TF_LITE_REPORT_ERROR(error_reporter, "Finished reading");
myCAM.CS_HIGH();
return kTfLiteOk;
}
@ -144,7 +145,8 @@ TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
int image_width, int image_height,
int8_t* image_data) {
error_reporter->Report("Decoding JPEG and converting to greyscale");
TF_LITE_REPORT_ERROR(error_reporter,
"Decoding JPEG and converting to greyscale");
// Parse the JPEG headers. The image will be decoded as a sequence of Minimum
// Coded Units (MCUs), which are 16x8 blocks of pixels.
JpegDec.decodeArray(jpeg_buffer, jpeg_length);
@ -224,7 +226,7 @@ TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
}
}
}
error_reporter->Report("Image decoded and processed");
TF_LITE_REPORT_ERROR(error_reporter, "Image decoded and processed");
return kTfLiteOk;
}
@ -235,7 +237,7 @@ TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
if (!g_is_camera_initialized) {
TfLiteStatus init_status = InitCamera(error_reporter);
if (init_status != kTfLiteOk) {
error_reporter->Report("InitCamera failed");
TF_LITE_REPORT_ERROR(error_reporter, "InitCamera failed");
return init_status;
}
g_is_camera_initialized = true;
@ -243,20 +245,20 @@ TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
TfLiteStatus capture_status = PerformCapture(error_reporter);
if (capture_status != kTfLiteOk) {
error_reporter->Report("PerformCapture failed");
TF_LITE_REPORT_ERROR(error_reporter, "PerformCapture failed");
return capture_status;
}
TfLiteStatus read_data_status = ReadData(error_reporter);
if (read_data_status != kTfLiteOk) {
error_reporter->Report("ReadData failed");
TF_LITE_REPORT_ERROR(error_reporter, "ReadData failed");
return read_data_status;
}
TfLiteStatus decode_status = DecodeAndProcessImage(
error_reporter, image_width, image_height, image_data);
if (decode_status != kTfLiteOk) {
error_reporter->Report("DecodeAndProcessImage failed");
TF_LITE_REPORT_ERROR(error_reporter, "DecodeAndProcessImage failed");
return decode_status;
}

View File

@ -20,6 +20,6 @@ limitations under the License.
// should implement their own versions of this function.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
int8_t person_score, int8_t no_person_score) {
error_reporter->Report("person score:%d no person score %d", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
person_score, no_person_score);
}

View File

@ -57,10 +57,10 @@ void setup() {
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
@ -95,7 +95,7 @@ void setup() {
// Allocate memory from the tensor_arena for the model's tensors.
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
error_reporter->Report("AllocateTensors() failed");
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
return;
}
@ -108,12 +108,12 @@ void loop() {
// Get image from provider.
if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
input->data.int8)) {
error_reporter->Report("Image capture failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
}
// Run the model on this input and make sure it succeeds.
if (kTfLiteOk != interpreter->Invoke()) {
error_reporter->Report("Invoke failed.");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
}
TfLiteTensor* output = interpreter->output(0);

View File

@ -41,10 +41,10 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// copying or parsing, it's a very lightweight operation.
const tflite::Model* model = ::tflite::GetModel(g_person_detect_model_data);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
}
// Pull in only the operation implementations we need.
@ -92,7 +92,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this input and make sure it succeeds.
TfLiteStatus invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -107,9 +107,9 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Make sure that the expected "Person" score is higher than the other class.
int8_t person_score = output->data.int8[kPersonIndex];
int8_t no_person_score = output->data.int8[kNotAPersonIndex];
error_reporter->Report(
"person data. person score: %d, no person score: %d\n", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter,
"person data. person score: %d, no person score: %d\n",
person_score, no_person_score);
TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score);
// Now test with a blank image.
@ -120,7 +120,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Run the model on this "No Person" input.
invoke_status = interpreter.Invoke();
if (invoke_status != kTfLiteOk) {
error_reporter->Report("Invoke failed\n");
TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
}
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
@ -135,12 +135,13 @@ TF_LITE_MICRO_TEST(TestInvoke) {
// Make sure that the expected "No Person" score is higher.
person_score = output->data.int8[kPersonIndex];
no_person_score = output->data.int8[kNotAPersonIndex];
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"no person data. person score: %d, no person score: %d\n", person_score,
no_person_score);
TF_LITE_MICRO_EXPECT_GT(no_person_score, person_score);
error_reporter->Report("Ran successfully\n");
TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n");
}
TF_LITE_MICRO_TESTS_END

View File

@ -49,6 +49,6 @@ void RespondToDetection(tflite::ErrorReporter* error_reporter,
am_hal_gpio_output_set(AM_BSP_GPIO_LED_YELLOW);
}
error_reporter->Report("Person score: %d No person score: %d", person_score,
no_person_score);
TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
person_score, no_person_score);
}

View File

@ -87,32 +87,38 @@ void burst_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
if (AM_HAL_STATUS_SUCCESS ==
am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
error_reporter->Report("Apollo3 Burst Mode is Available\n");
TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
} else {
error_reporter->Report("Apollo3 Burst Mode is Not Available\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 Burst Mode is Not Available\n");
return;
}
} else {
error_reporter->Report("Failed to Initialize for Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Initialize for Burst Mode operation\n");
}
// Make sure we are in "Normal" mode.
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_disable(&eBurstMode)) {
if (AM_HAL_NORMAL_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Normal Mode (48MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Normal Mode (48MHz)\n");
}
} else {
error_reporter->Report("Failed to Disable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Disable Burst Mode operation\n");
}
// Put the MCU into "Burst" mode.
if (bEnable) {
if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
if (AM_HAL_BURST_MODE == eBurstMode) {
error_reporter->Report("Apollo3 operating in Burst Mode (96MHz)\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Apollo3 operating in Burst Mode (96MHz)\n");
}
} else {
error_reporter->Report("Failed to Enable Burst Mode operation\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Failed to Enable Burst Mode operation\n");
}
}
}
@ -120,7 +126,7 @@ void burst_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
} // namespace
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
error_reporter->Report("Initializing HM01B0...\n");
TF_LITE_REPORT_ERROR(error_reporter, "Initializing HM01B0...\n");
am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);

View File

@ -73,7 +73,8 @@ TfLiteStatus GreedyMemoryPlanner::AddBuffer(
tflite::ErrorReporter* error_reporter, int size, int first_time_used,
int last_time_used) {
if (buffer_count_ >= max_buffer_count_) {
error_reporter->Report("Too many buffers (max is %d)", max_buffer_count_);
TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)",
max_buffer_count_);
return kTfLiteError;
}
BufferRequirements* current = &requirements_[buffer_count_];
@ -268,7 +269,8 @@ void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) {
CalculateOffsetsIfNeeded();
for (int i = 0; i < buffer_count_; ++i) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Planner buffer ID: %d, calculated offset: %d, size required: %d, "
"first_time_created: %d, "
"last_time_used: %d",
@ -329,7 +331,7 @@ void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) {
}
}
line[kLineWidth] = 0;
error_reporter->Report("%s", line);
TF_LITE_REPORT_ERROR(error_reporter, "%s", line);
}
}
@ -339,8 +341,9 @@ TfLiteStatus GreedyMemoryPlanner::GetOffsetForBuffer(
tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) {
CalculateOffsetsIfNeeded();
if ((buffer_index < 0) || (buffer_index >= buffer_count_)) {
error_reporter->Report("buffer index %d is outside range 0 to %d",
buffer_index, buffer_count_);
TF_LITE_REPORT_ERROR(error_reporter,
"buffer index %d is outside range 0 to %d",
buffer_index, buffer_count_);
return kTfLiteError;
}
*offset = buffer_offsets_[buffer_index];
@ -376,10 +379,10 @@ bool GreedyMemoryPlanner::DoAnyBuffersOverlap(ErrorReporter* error_reporter) {
continue;
}
were_overlaps_found = true;
error_reporter->Report(
"Overlap: %d (%d=>%d, %d->%d) vs %d (%d=>%d, %d->%d)", i,
a_first_time_used, a_last_time_used, a_start_offset, a_end_offset, j,
b_first_time_used, b_last_time_used, b_start_offset, b_end_offset);
TF_LITE_REPORT_ERROR(
error_reporter, "Overlap: %d (%d=>%d, %d->%d) vs %d (%d=>%d, %d->%d)",
i, a_first_time_used, a_last_time_used, a_start_offset, a_end_offset,
j, b_first_time_used, b_last_time_used, b_start_offset, b_end_offset);
}
}
return were_overlaps_found;

View File

@ -25,7 +25,8 @@ TfLiteStatus LinearMemoryPlanner::AddBuffer(
tflite::ErrorReporter* error_reporter, int size, int first_time_used,
int last_time_used) {
if (current_buffer_count_ >= kMaxBufferCount) {
error_reporter->Report("Too many buffers (max is %d)", kMaxBufferCount);
TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)",
kMaxBufferCount);
return kTfLiteError;
}
buffer_offsets_[current_buffer_count_] = next_free_offset_;
@ -41,8 +42,9 @@ int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; }
TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer(
tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) {
if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) {
error_reporter->Report("buffer index %d is outside range 0 to %d",
buffer_index, current_buffer_count_);
TF_LITE_REPORT_ERROR(error_reporter,
"buffer index %d is outside range 0 to %d",
buffer_index, current_buffer_count_);
return kTfLiteError;
}
*offset = buffer_offsets_[buffer_index];

View File

@ -103,7 +103,8 @@ AllocationInfo* AllocateAndCalculateAllocationInfo(
allocator->AllocateFromTail(sizeof(AllocationInfo) * allocation_info_size,
alignof(AllocationInfo)));
if (allocation_info == nullptr) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Failed to allocate memory for allocation_info, %d bytes required",
sizeof(TfLiteTensor) * allocation_info_size);
return nullptr;
@ -165,7 +166,8 @@ AllocationInfo* AllocateAndCalculateAllocationInfo(
!is_read_only &&
((current->first_created == -1) || (current->last_used == -1));
if (has_partial_lifetime && current->needs_allocating) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Logic error in memory planner, tensor %d has an invalid lifetime: "
"first_created: %d, last_used: %d",
i, current->first_created, current->last_used);

View File

@ -18,8 +18,9 @@ limitations under the License.
int main(int argc, char** argv) {
tflite::MicroErrorReporter micro_error_reporter;
tflite::ErrorReporter* error_reporter = &micro_error_reporter;
error_reporter->Report("Number: %d", 42);
error_reporter->Report("Badly-formed format string %");
error_reporter->Report("Another % badly-formed %% format string");
error_reporter->Report("~~~%s~~~", "ALL TESTS PASSED");
TF_LITE_REPORT_ERROR(error_reporter, "Number: %d", 42);
TF_LITE_REPORT_ERROR(error_reporter, "Badly-formed format string %");
TF_LITE_REPORT_ERROR(error_reporter,
"Another % badly-formed %% format string");
TF_LITE_REPORT_ERROR(error_reporter, "~~~%s~~~", "ALL TESTS PASSED");
}

View File

@ -57,7 +57,8 @@ MicroInterpreter::MicroInterpreter(const Model* model,
const flatbuffers::Vector<flatbuffers::Offset<SubGraph>>* subgraphs =
model->subgraphs();
if (subgraphs->size() != 1) {
error_reporter->Report("Only 1 subgraph is currently supported.\n");
TF_LITE_REPORT_ERROR(error_reporter,
"Only 1 subgraph is currently supported.\n");
initialization_status_ = kTfLiteError;
return;
}

View File

@ -399,7 +399,7 @@ void ReportOpError(struct TfLiteContext* context, const char* format, ...) {
ErrorReporter* error_reporter = static_cast<ErrorReporter*>(context->impl_);
va_list args;
va_start(args, format);
error_reporter->Report(format, args);
TF_LITE_REPORT_ERROR(error_reporter, format, args);
va_end(args);
}

View File

@ -100,7 +100,8 @@ std::unique_ptr<FlatBufferModel> FlatBufferModel::VerifyAndBuildFromFile(
reinterpret_cast<const uint8_t*>(allocation->base()),
allocation->bytes());
if (!VerifyModelBuffer(base_verifier)) {
error_reporter->Report("The model is not a valid Flatbuffer file");
TF_LITE_REPORT_ERROR(error_reporter,
"The model is not a valid Flatbuffer file");
return nullptr;
}
@ -136,7 +137,8 @@ std::unique_ptr<FlatBufferModel> FlatBufferModel::VerifyAndBuildFromBuffer(
flatbuffers::Verifier base_verifier(
reinterpret_cast<const uint8_t*>(caller_owned_buffer), buffer_size);
if (!VerifyModelBuffer(base_verifier)) {
error_reporter->Report("The model is not a valid Flatbuffer buffer");
TF_LITE_REPORT_ERROR(error_reporter,
"The model is not a valid Flatbuffer buffer");
return nullptr;
}

View File

@ -30,7 +30,8 @@ TfLiteStatus MinMax::Update(const float* values, size_t tensor_size,
// TODO(shashishekhar): Make it possible to use weighted/moving average.
for (size_t i = 0; i < tensor_size; ++i) {
if (std::isnan(values[i])) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Model resulted in Nan value during calibration. Please "
"make sure model results in all real-values during "
"inference with provided dataset.");

View File

@ -100,17 +100,20 @@ TfLiteStatus FillPerChannelMinMax(const float* const input,
QuantizationParametersT* quantization_params,
ErrorReporter* error_reporter) {
if (!quantization_params->min.empty() || !quantization_params->max.empty()) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Min or max already present in tensor quantization params.");
return kTfLiteError;
}
if (dimension.size() != 4) {
error_reporter->Report("Expected tensor with four dimensions, but got %d.",
dimension.size());
TF_LITE_REPORT_ERROR(error_reporter,
"Expected tensor with four dimensions, but got %d.",
dimension.size());
return kTfLiteError;
}
if (channel_dim_index > 3) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Expected channel_dim_index to be less than four, but got %d.",
channel_dim_index);
return kTfLiteError;
@ -155,15 +158,18 @@ TfLiteStatus GetSymmetricScalesFromMaxMin(QuantizationParametersT* quant_params,
ErrorReporter* error_reporter) {
// Check that max and min values are present and their sizes match.
if (quant_params->min.empty() || quant_params->max.empty()) {
error_reporter->Report("Max and min values are not populated.");
TF_LITE_REPORT_ERROR(error_reporter,
"Max and min values are not populated.");
return kTfLiteError;
}
if (quant_params->min.size() != quant_params->max.size()) {
error_reporter->Report("Dimensions of max and min values do not match.");
TF_LITE_REPORT_ERROR(error_reporter,
"Dimensions of max and min values do not match.");
return kTfLiteError;
}
if (scales->size() != quant_params->min.size()) {
error_reporter->Report("Provided scale vector has incorrect size.");
TF_LITE_REPORT_ERROR(error_reporter,
"Provided scale vector has incorrect size.");
return kTfLiteError;
}
@ -196,14 +202,16 @@ TfLiteStatus AdjustWeightsForBiasScale(QuantizationParametersT* quant_params,
// TODO(dmolitor) Test using a separate strategy for scales of 0.
const int32_t kScale = std::numeric_limits<int32_t>::max();
if (quant_params == nullptr) {
error_reporter->Report("Missing max and min values for weight tensor.");
TF_LITE_REPORT_ERROR(error_reporter,
"Missing max and min values for weight tensor.");
return kTfLiteError;
}
// channel_dim_size is calculated from min.size() to infer whether
// quantization is per axis
int channel_dim_size = quant_params->min.size();
if (channel_dim_size == 0) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Missing weight scales. Unable to check compatibility with bias "
"scale.");
return kTfLiteError;
@ -250,7 +258,7 @@ TfLiteStatus SymmetricPerChannelQuantization(TensorT* tensor,
std::vector<int8_t>* output_value,
ErrorReporter* error_reporter) {
if (tensor == nullptr) {
error_reporter->Report("Cannot quantize. Tensor is null.");
TF_LITE_REPORT_ERROR(error_reporter, "Cannot quantize. Tensor is null.");
return kTfLiteError;
}
const int32_t channel_dim_size = tensor->shape[channel_dim_index];
@ -348,23 +356,25 @@ void SymmetricPerChannelQuantizeValues(const float* const input,
TfLiteStatus SymmetricQuantizeTensorFromMinMax(ModelT* model, TensorT* tensor,
ErrorReporter* error_reporter) {
if (model == nullptr || tensor == nullptr) {
error_reporter->Report("No tensor to quantize.");
TF_LITE_REPORT_ERROR(error_reporter, "No tensor to quantize.");
return kTfLiteError;
}
BufferT* buffer = model->buffers[tensor->buffer].get();
if (buffer == nullptr) {
error_reporter->Report("Missing buffer.");
TF_LITE_REPORT_ERROR(error_reporter, "Missing buffer.");
return kTfLiteError;
}
if (!HasMinMax(tensor)) {
error_reporter->Report("Missing min or max values for quantization.");
TF_LITE_REPORT_ERROR(error_reporter,
"Missing min or max values for quantization.");
return kTfLiteError;
}
if (tensor->quantization->min.size() != 1 ||
tensor->quantization->max.size() != 1) {
error_reporter->Report("Expected single entry in max and min.");
TF_LITE_REPORT_ERROR(error_reporter,
"Expected single entry in max and min.");
return kTfLiteError;
}
@ -482,7 +492,8 @@ TfLiteStatus AddQuantizationParams(const std::vector<float>& scales,
}
tensor->quantization->scale.assign(scales.begin(), scales.end());
if (zero_point.size() != scales.size()) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Received zero_point of size %d and scales of size %d. "
"These sizes should match.",
zero_point.size(), scales.size());
@ -501,7 +512,8 @@ TfLiteStatus SymmetricQuantizeTensorPerChannel(ModelT* model, TensorT* tensor,
int32_t channel_dim_index,
ErrorReporter* error_reporter) {
if (tensor->shape.size() != 4) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"SymmetricQuantizeTensorPerChannel requires tensor with four "
"dimensions, but got %d dimension(s).",
tensor->shape.size());

View File

@ -66,7 +66,7 @@ TfLiteStatus QuantizeBias(ModelT* model, const TensorT* input_tensor,
bool is_per_channel, int channel_dim_index,
ErrorReporter* error_reporter) {
if (bias_tensor->shape.size() != 1) {
error_reporter->Report("Expected bias tensor shape to be 1.");
TF_LITE_REPORT_ERROR(error_reporter, "Expected bias tensor shape to be 1.");
return kTfLiteError;
}
@ -76,20 +76,23 @@ TfLiteStatus QuantizeBias(ModelT* model, const TensorT* input_tensor,
if (is_per_channel) {
if (bias_tensor->shape[0] != weight_tensor->shape[channel_dim_index]) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Channel mismatch between bias and weight tensors %d vs %d",
bias_tensor->shape[0], weight_tensor->shape[channel_dim_index]);
return kTfLiteError;
}
if (!input_tensor->quantization ||
input_tensor->quantization->scale.size() != 1) {
error_reporter->Report("Input tensor missing quantization information");
TF_LITE_REPORT_ERROR(error_reporter,
"Input tensor missing quantization information");
return kTfLiteError;
}
if (weight_scales.size() != channel_dim_size) {
error_reporter->Report("Mismatch weight scale dimension: %d",
weight_scales.size());
TF_LITE_REPORT_ERROR(error_reporter,
"Mismatch weight scale dimension: %d",
weight_scales.size());
return kTfLiteError;
}
return utils::SymmetricPerChannelBiasQuantize(
@ -97,7 +100,8 @@ TfLiteStatus QuantizeBias(ModelT* model, const TensorT* input_tensor,
weight_scales.data(), channel_dim_size, error_reporter);
} else {
if (weight_scales.size() != 1) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Expected per-layer weight scale dimension size 1, got %d",
weight_scales.size());
return kTfLiteError;
@ -247,7 +251,8 @@ TfLiteStatus SetInputAndOutputTypes(ModelT* model, const TensorType& input_type,
TensorT* tensor = subgraph->tensors[subgraph->inputs[i]].get();
// TODO(suharshs): Add support for this case if it ever comes up.
if (tensor->type == TensorType_FLOAT32 && input_type != tensor->type) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unsupported input type %s for input tensor %d of type %s.",
EnumNameTensorType(input_type), subgraph->inputs[i],
EnumNameTensorType(tensor->type));
@ -264,7 +269,8 @@ TfLiteStatus SetInputAndOutputTypes(ModelT* model, const TensorType& input_type,
TensorT* tensor = subgraph->tensors[subgraph->outputs[i]].get();
// TODO(suharshs): Add support for this case if it ever comes up.
if (tensor->type == TensorType_FLOAT32 && output_type != tensor->type) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unsupported output type %s for output tensor '%s' of type %s.",
EnumNameTensorType(output_type), tensor->name.c_str(),
EnumNameTensorType(tensor->type));
@ -308,7 +314,8 @@ TfLiteStatus ApplyConstraints(ModelT* model,
// of max, which means using the scale and zero point of output.
TensorT* output_tensor = subgraph->tensors[op->outputs[0]].get();
if (!utils::QuantizationParametersExist(output_tensor)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unable to get scale or zero point from the tensor at %d.",
op->outputs[0]);
return kTfLiteError;
@ -318,7 +325,8 @@ TfLiteStatus ApplyConstraints(ModelT* model,
for (size_t input_idx = 0; input_idx < op->inputs.size(); ++input_idx) {
TensorT* input_tensor = subgraph->tensors[op->inputs[input_idx]].get();
if (!utils::QuantizationParametersExist(input_tensor)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unable to get scale or zero point from tensor at %d.",
op->inputs[input_idx]);
return kTfLiteError;
@ -409,7 +417,8 @@ TfLiteStatus QuantizeOpInput(
const BuiltinOperator op_code =
model->operator_codes[op->opcode_index]->builtin_code;
if (input_idx >= op->inputs.size()) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Required input index %d is larger than the input length of op "
"%s at index %d in subgraph %d",
input_idx, op->inputs.size(), EnumNameBuiltinOperator(op_code), *op_idx,
@ -437,7 +446,8 @@ TfLiteStatus QuantizeOpInput(
if (utils::QuantizeWeight(model, tensor, tensor_property.per_axis,
tensor_property.per_axis_index,
error_reporter) == kTfLiteError) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unable to quantize buffer or min/max value for input %d "
"in op %s in subgraph %d, node: %d",
input_idx, EnumNameBuiltinOperator(op_code), subgraph_idx,
@ -500,7 +510,8 @@ TfLiteStatus QuantizeOpInput(
} else {
// Only 8, 16, 32, 10 are supported.
// TODO(jianlijianli): extend this to support arbitrary bits.
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unable to quantize buffer or min/max value for input %d "
"in op %s in subgraph %d, node: %d",
input_idx, EnumNameBuiltinOperator(op_code), subgraph_idx, *op_idx);
@ -550,7 +561,8 @@ TfLiteStatus QuantizeOpInput(
*op_idx += 1;
}
} else {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unable to find buffer or min/max value for input activation "
"%d in %s in subgraph %d, node: %d",
input_idx, EnumNameBuiltinOperator(op_code), subgraph_idx, *op_idx);
@ -593,7 +605,8 @@ TfLiteStatus QuantizeOpOutput(
const BuiltinOperator op_code =
model->operator_codes[op->opcode_index]->builtin_code;
if (output_idx >= op->outputs.size()) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Required output index %d is larger than the output length of "
"op %s at index %d in subgraph %d",
output_idx, op->outputs.size(), EnumNameBuiltinOperator(op_code),
@ -611,7 +624,8 @@ TfLiteStatus QuantizeOpOutput(
// min/max can be different but we want them to be the same.
// Get scale and zero point of input.
if (property.inputs[0].first >= op->inputs.size()) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Required input index %d is larger than the input length of "
"op %s at index %d in subgraph %d",
property.inputs[0].first, op->inputs.size(),
@ -622,10 +636,11 @@ TfLiteStatus QuantizeOpOutput(
TensorT* input_tensor = subgraph->tensors[input_tensor_idx].get();
if (input_tensor->quantization->scale.size() != 1 ||
input_tensor->quantization->zero_point.size() != 1) {
error_reporter->Report(
"Invalid quantization params for op %s at index %d "
"in subgraph %d",
EnumNameBuiltinOperator(op_code), op_idx, subgraph_idx);
TF_LITE_REPORT_ERROR(error_reporter,
"Invalid quantization params for op %s at index %d "
"in subgraph %d",
EnumNameBuiltinOperator(op_code), op_idx,
subgraph_idx);
return kTfLiteError;
}
@ -657,7 +672,8 @@ TfLiteStatus QuantizeOpOutput(
if (utils::HasMinMax(output_tensor)) {
utils::QuantizeActivation(output_tensor);
} else {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unable to find min/max value for output %d in %s in "
"subgraph %d, node: %d",
output_idx, EnumNameBuiltinOperator(op_code), subgraph_idx, op_idx);
@ -693,7 +709,8 @@ TfLiteStatus QuantizeIntemediateTensors(ModelT* model,
if (utils::HasMinMax(tensor)) {
utils::QuantizeActivation(tensor);
} else {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Unable to find min/max value for output %d in %s in "
"subgraph %d, node: %d",
tensor, EnumNameBuiltinOperator(op_code), subgraph_idx,
@ -806,8 +823,9 @@ TfLiteStatus QuantizeWeightsInputOutput(
subgraph->tensors[op->outputs[0]]->name);
if (!property.quantizable && !allow_float) {
error_reporter->Report("Quantization not yet supported for op: %s",
EnumNameBuiltinOperator(op_code));
TF_LITE_REPORT_ERROR(error_reporter,
"Quantization not yet supported for op: %s",
EnumNameBuiltinOperator(op_code));
return kTfLiteError;
}
@ -851,7 +869,8 @@ TfLiteStatus QuantizeBiases(ModelT* model,
continue;
}
if (bias_idx >= op->inputs.size()) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Required input index %d is larger than the input length of "
"op %s at index %d in subgraph %d",
bias_idx, op->inputs.size(), EnumNameBuiltinOperator(op_code),
@ -864,11 +883,12 @@ TfLiteStatus QuantizeBiases(ModelT* model,
if (!utils::QuantizationParametersExist(bias_tensor)) {
if (utils::HasBuffer(model, subgraph, op->inputs[bias_idx])) {
if (property.inputs.size() != 2) {
error_reporter->Report(
"Expect the input length of "
"op %s at index %d in subgraph %d to be 2",
bias_idx, op->inputs.size(), EnumNameBuiltinOperator(op_code),
op_idx, subgraph_idx);
TF_LITE_REPORT_ERROR(error_reporter,
"Expect the input length of "
"op %s at index %d in subgraph %d to be 2",
bias_idx, op->inputs.size(),
EnumNameBuiltinOperator(op_code), op_idx,
subgraph_idx);
return kTfLiteError;
}
TensorT* input_tensor =
@ -951,7 +971,8 @@ TfLiteStatus FillQuantizationParams(
float_input_data, tensor->shape, channel_dim_index,
tensor->quantization.get(), error_reporter));
} else {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Could not fill max min for tensor as the dimension is %d "
"and not 4 as expected.",
tensor->shape.size());
@ -968,7 +989,8 @@ TfLiteStatus FillQuantizationParams(
}
if (tensor->quantization->quantized_dimension !=
input.second.per_axis_index) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Quantized dimension for tensor property and quantization "
"parameters do not match. Got %d and %d respectively.",
input.second.per_axis_index,
@ -979,14 +1001,15 @@ TfLiteStatus FillQuantizationParams(
// Dynamic tensor.
} else if (!utils::HasMinMax(tensor) &&
!utils::HasBuffer(model, subgraph, tensor_idx)) {
error_reporter->Report(
"Max and min for dynamic tensors should be"
" recorded during calibration");
TF_LITE_REPORT_ERROR(error_reporter,
"Max and min for dynamic tensors should be"
" recorded during calibration");
return kTfLiteError;
}
if (utils::QuantizationParametersExist(tensor)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Scale and zero points should not be recorded before "
"quantization.");
return kTfLiteError;
@ -1018,12 +1041,14 @@ TfLiteStatus EnsureBiasScaleCompatibility(
TensorT* bias_tensor = subgraph->tensors[op->inputs[bias_idx]].get();
int32_t channel_dim_size = bias_tensor->shape[0];
if (bias_tensor->shape.size() != 1) {
error_reporter->Report("Expected bias tensor to be a vector.");
TF_LITE_REPORT_ERROR(error_reporter,
"Expected bias tensor to be a vector.");
return kTfLiteError;
}
if (property.inputs.size() != 2) { // Only works for two input tensors.
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Expect %d inputs for op %s at index %d in subgraph %d to be 2",
property.inputs.size(), op_idx, subgraph_idx);
return kTfLiteError;
@ -1041,7 +1066,8 @@ TfLiteStatus EnsureBiasScaleCompatibility(
// Check quantization parameters exist for input.
if (!utils::HasMinMax(input_tensor)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Input tensor missing quantization information. Should be "
"populated during calibration.");
return kTfLiteError;
@ -1055,14 +1081,16 @@ TfLiteStatus EnsureBiasScaleCompatibility(
std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max(), &temp_quant_params);
if (temp_quant_params.scale.size() != 1) {
error_reporter->Report("Unexpected input quantization scale size.");
TF_LITE_REPORT_ERROR(error_reporter,
"Unexpected input quantization scale size.");
return kTfLiteError;
}
float input_scale = temp_quant_params.scale[0];
// Check that max/min values have been filled for weights.
if (!utils::HasMinMax(weight_tensor)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Min and/or max values have not been recorded for weight "
"tensor. This should have happened in FillQuantizationParams.");
return kTfLiteError;
@ -1072,7 +1100,8 @@ TfLiteStatus EnsureBiasScaleCompatibility(
if (weight_property.per_axis) {
if (bias_tensor->shape[0] !=
weight_tensor->shape[weight_property.per_axis_index]) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Channel mismatch between bias and weight tensors %d vs %d",
bias_tensor->shape[0],
weight_tensor->shape[weight_property.per_axis_index]);
@ -1080,14 +1109,16 @@ TfLiteStatus EnsureBiasScaleCompatibility(
}
// Ensure that the number of max/mins matches the channel_dim_size.
if (weight_tensor->quantization->max.size() != channel_dim_size) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Mismatch between number of weight maxs and channels: %d vs "
"%d",
weight_tensor->quantization->max.size(), channel_dim_size);
return kTfLiteError;
}
if (weight_tensor->quantization->min.size() != channel_dim_size) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Mismatch between number of weight mins and channels: %d",
weight_tensor->quantization->min.size());
return kTfLiteError;
@ -1107,13 +1138,15 @@ TfLiteStatus EnsureBiasScaleCompatibility(
input_scale, error_reporter));
if (utils::QuantizationParametersExist(weight_tensor)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Scale and zero points should not be recorded for the weight "
"tensor before quantization.");
return kTfLiteError;
}
if (utils::QuantizationParametersExist(input_tensor)) {
error_reporter->Report(
TF_LITE_REPORT_ERROR(
error_reporter,
"Scale and zero points should not be recorded for the input "
"tensor before quantization.");
return kTfLiteError;

View File

@ -32,7 +32,7 @@ void ReportError(ErrorReporter* error_reporter, const char* format, ...) {
if (error_reporter) {
va_list args;
va_start(args, format);
error_reporter->Report(format, args);
TF_LITE_REPORT_ERROR(error_reporter, format, args);
va_end(args);
}
}