diff --git a/tensorflow/lite/micro/memory_helpers.cc b/tensorflow/lite/micro/memory_helpers.cc index d1e0392a3bc..c6180cb4951 100644 --- a/tensorflow/lite/micro/memory_helpers.cc +++ b/tensorflow/lite/micro/memory_helpers.cc @@ -131,7 +131,7 @@ TfLiteStatus AllocateOutputDimensionsFromInput(TfLiteContext* context, input = input1->dims->size > input2->dims->size ? input1 : input2; TF_LITE_ENSURE(context, output->type == input->type); - size_t size; + size_t size = 0; TfLiteTypeSizeOf(input->type, &size); const int dimensions_count = tflite::GetTensorShape(input).DimensionsCount(); for (int i = 0; i < dimensions_count; i++) { diff --git a/tensorflow/lite/micro/micro_allocator.cc b/tensorflow/lite/micro/micro_allocator.cc index 76cd617fe99..39358b33b16 100644 --- a/tensorflow/lite/micro/micro_allocator.cc +++ b/tensorflow/lite/micro/micro_allocator.cc @@ -97,7 +97,9 @@ TfLiteStatus CheckOfflinePlannedOffsets(const Model* model, int version = metadata_buffer[0]; int subgraph_idx = metadata_buffer[1]; const int nbr_offline_offsets = metadata_buffer[2]; +#ifndef TF_LITE_STRIP_ERROR_STRINGS int* offline_planner_offsets = (int*)&metadata_buffer[3]; +#endif TF_LITE_REPORT_ERROR(error_reporter, "==== Model metadata info: ====="); TF_LITE_REPORT_ERROR(error_reporter, diff --git a/tensorflow/lite/micro/micro_interpreter.cc b/tensorflow/lite/micro/micro_interpreter.cc index 41efe9ecf51..f9eb263f552 100644 --- a/tensorflow/lite/micro/micro_interpreter.cc +++ b/tensorflow/lite/micro/micro_interpreter.cc @@ -68,11 +68,13 @@ void* ContextHelper::GetScratchBuffer(TfLiteContext* ctx, int buffer_idx) { void ContextHelper::ReportOpError(struct TfLiteContext* context, const char* format, ...) 
{ +#ifndef TF_LITE_STRIP_ERROR_STRINGS ContextHelper* helper = static_cast<ContextHelper*>(context->impl_); va_list args; va_start(args, format); TF_LITE_REPORT_ERROR(helper->error_reporter_, format, args); va_end(args); +#endif } TfLiteTensor* ContextHelper::GetTensor(const struct TfLiteContext* context, diff --git a/tensorflow/lite/micro/micro_optional_debug_tools.cc b/tensorflow/lite/micro/micro_optional_debug_tools.cc index 516def3ebe4..4617b3d9825 100644 --- a/tensorflow/lite/micro/micro_optional_debug_tools.cc +++ b/tensorflow/lite/micro/micro_optional_debug_tools.cc @@ -117,6 +117,7 @@ const char* AllocTypeName(TfLiteAllocationType type) { // Helper function to print model flatbuffer data. This function is not called // by default. Hence it's not linked in to the final binary code. void PrintModelData(const Model* model, ErrorReporter* error_reporter) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS auto* subgraphs = model->subgraphs(); const SubGraph* subgraph = (*subgraphs)[0]; const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors = @@ -139,6 +140,7 @@ void PrintModelData(const Model* model, ErrorReporter* error_reporter) { error_reporter, "Tensor index: %d arena tensor %d size %d ", i, !array_size && !flatbuffer_tensor.is_variable(), tensor_size); } +#endif } // Prints a dump of what tensors and what nodes are in the interpreter. 
diff --git a/tensorflow/lite/micro/micro_profiler.cc b/tensorflow/lite/micro/micro_profiler.cc index a765b918108..83fb9f64713 100644 --- a/tensorflow/lite/micro/micro_profiler.cc +++ b/tensorflow/lite/micro/micro_profiler.cc @@ -33,9 +33,10 @@ uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, } void MicroProfiler::EndEvent(uint32_t event_handle) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS int32_t end_time = GetCurrentTimeTicks(); TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, end_time - start_time_); +#endif } - } // namespace tflite diff --git a/tensorflow/lite/micro/recording_micro_allocator.cc b/tensorflow/lite/micro/recording_micro_allocator.cc index 5e338a339ca..7e11523fea0 100644 --- a/tensorflow/lite/micro/recording_micro_allocator.cc +++ b/tensorflow/lite/micro/recording_micro_allocator.cc @@ -104,6 +104,7 @@ void RecordingMicroAllocator::PrintAllocations() const { void RecordingMicroAllocator::PrintRecordedAllocation( RecordedAllocationType allocation_type, const char* allocation_name, const char* allocation_description) const { +#ifndef TF_LITE_STRIP_ERROR_STRINGS RecordedAllocation allocation = GetRecordedAllocation(allocation_type); TF_LITE_REPORT_ERROR( error_reporter(), @@ -111,6 +112,7 @@ void RecordingMicroAllocator::PrintRecordedAllocation( "(requested %d bytes for %d %s)", allocation_name, allocation.used_bytes, allocation.requested_bytes, allocation.count, allocation_description); +#endif } TfLiteStatus RecordingMicroAllocator::AllocateNodeAndRegistrations( diff --git a/tensorflow/lite/micro/simple_memory_allocator.cc b/tensorflow/lite/micro/simple_memory_allocator.cc index 3abec015fe3..48cfdc02a34 100644 --- a/tensorflow/lite/micro/simple_memory_allocator.cc +++ b/tensorflow/lite/micro/simple_memory_allocator.cc @@ -78,11 +78,13 @@ uint8_t* SimpleMemoryAllocator::AllocateFromTail(size_t size, size_t alignment) { uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment); if (aligned_result < 
head_) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS const size_t missing_memory = head_ - aligned_result; TF_LITE_REPORT_ERROR( error_reporter_, "Failed to allocate memory. Requested: %u, available %u, missing: %u", size, size - missing_memory, missing_memory); +#endif return nullptr; } tail_ = aligned_result; diff --git a/tensorflow/lite/micro/test_helpers.cc b/tensorflow/lite/micro/test_helpers.cc index 2888a846e94..23c7ca96408 100644 --- a/tensorflow/lite/micro/test_helpers.cc +++ b/tensorflow/lite/micro/test_helpers.cc @@ -812,11 +812,13 @@ int TestStrcmp(const char* a, const char* b) { // Wrapper to forward kernel errors to the interpreter's error reporter. void ReportOpError(struct TfLiteContext* context, const char* format, ...) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS ErrorReporter* error_reporter = static_cast<ErrorReporter*>(context->impl_); va_list args; va_start(args, format); TF_LITE_REPORT_ERROR(error_reporter, format, args); va_end(args); +#endif } // Create a TfLiteIntArray from an array of ints. The first element in the