From 32e823339b297af8fd778fdda7483121116f69b0 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Fri, 6 Dec 2019 10:46:43 -0800 Subject: [PATCH] Fix minor warnings. When compiling with -Wall/-Werror, several warnings related to signed/unsigned comparison and an incorrect format string kill the build. Additionally, when compiling under GCC 4.8.x, `max_align_t` is not a member of `std`. This change fixes these minor errors. PiperOrigin-RevId: 284216526 Change-Id: I05b6e76a626dc01da9400772ba0fbda2bb6b5b63 --- .../lite/experimental/micro/micro_allocator.cc | 17 +++++++++++++++-- .../experimental/micro/micro_interpreter.cc | 4 ++-- .../micro/micro_optional_debug_tools.cc | 9 ++++++++- .../lite/experimental/micro/test_helpers.cc | 2 +- 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/tensorflow/lite/experimental/micro/micro_allocator.cc b/tensorflow/lite/experimental/micro/micro_allocator.cc index 82b3b350c23..73c2bda1d20 100644 --- a/tensorflow/lite/experimental/micro/micro_allocator.cc +++ b/tensorflow/lite/experimental/micro/micro_allocator.cc @@ -42,6 +42,19 @@ struct TensorInfo { // requirement for SIMD extensions. constexpr int kBufferAlignment = 16; +// If building with GNU clib from GCC 4.8.x or lower, `max_align_t` is not a +// member of `std`. If using a newer version of clib, we import `max_align_t` +// into the local anonymous namespace to be able to use it like the global +// `max_align_t` from the older clib. +#if defined(__GNUC__) && defined(__GNUC_PREREQ) +#if __GNUC_PREREQ(4, 9) +using std::max_align_t; +#endif +#else +// We assume other compiler/clib configurations don't have this issue. 
+using std::max_align_t; +#endif + class MicroBuiltinDataAllocator : public BuiltinDataAllocator { public: explicit MicroBuiltinDataAllocator(SimpleMemoryAllocator* memory_allocator) @@ -51,7 +64,7 @@ class MicroBuiltinDataAllocator : public BuiltinDataAllocator { // Align to an address that is proper for all primitive types, but no more // than the size. return memory_allocator_->AllocateFromTail( - size, std::min(size, alignof(std::max_align_t))); + size, std::min(size, alignof(max_align_t))); } void Deallocate(void* data) override { // Do not deallocate, builtin data needs to be available for the life time @@ -412,7 +425,7 @@ TfLiteStatus MicroAllocator::InitializeRuntimeTensor( // If we've found a buffer, does it have any data? if (auto* array = buffer->data()) { // If it has any data, is the data size larger than zero? - if (size_t array_size = array->size()) { + if (array->size()) { // We've found a buffer with valid data, so update the runtime tensor // data structure to point to it. result->data.raw = diff --git a/tensorflow/lite/experimental/micro/micro_interpreter.cc b/tensorflow/lite/experimental/micro/micro_interpreter.cc index ba46cbfd95a..7185d643514 100644 --- a/tensorflow/lite/experimental/micro/micro_interpreter.cc +++ b/tensorflow/lite/experimental/micro/micro_interpreter.cc @@ -21,7 +21,7 @@ limitations under the License. namespace tflite { namespace { -const int kStackDataAllocatorSize = 128; +const size_t kStackDataAllocatorSize = 128; class StackDataAllocator : public BuiltinDataAllocator { public: void* Allocate(size_t size) override { @@ -91,7 +91,7 @@ MicroInterpreter::MicroInterpreter(const Model* model, // NOTE: This requires that the flatbuffer is held in memory which can be // modified by this process. 
if (!FLATBUFFERS_LITTLEENDIAN) { - for (int t = 0; t < tensors_size(); ++t) { + for (size_t t = 0; t < tensors_size(); ++t) { TfLiteTensor* thisTensor = &context_.tensors[t]; if (thisTensor->allocation_type == kTfLiteMmapRo) CorrectTensorEndianness(thisTensor); diff --git a/tensorflow/lite/experimental/micro/micro_optional_debug_tools.cc b/tensorflow/lite/experimental/micro/micro_optional_debug_tools.cc index e27317a5443..1f6ce531f05 100644 --- a/tensorflow/lite/experimental/micro/micro_optional_debug_tools.cc +++ b/tensorflow/lite/experimental/micro/micro_optional_debug_tools.cc @@ -14,6 +14,13 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/lite/experimental/micro/micro_optional_debug_tools.h" +// `cinttypes` requires `__STDC_FORMAT_MACROS` to be defined to expose `PRId32`. +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include <cinttypes> + #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { namespace { @@ -122,7 +129,7 @@ void PrintInterpreterState(MicroInterpreter* interpreter) { printf("Node %3zu Operator Custom Name %s\n", node_index, reg->custom_name); } else { - printf("Node %3zu Operator Builtin Code %3d %s\n", node_index, + printf("Node %3zu Operator Builtin Code %3" PRId32 " %s\n", node_index, reg->builtin_code, EnumNamesBuiltinOperator()[reg->builtin_code]); } printf(" Inputs:"); diff --git a/tensorflow/lite/experimental/micro/test_helpers.cc b/tensorflow/lite/experimental/micro/test_helpers.cc index 03e1d91fce0..a1b9801ffc9 100644 --- a/tensorflow/lite/experimental/micro/test_helpers.cc +++ b/tensorflow/lite/experimental/micro/test_helpers.cc @@ -47,7 +47,7 @@ class StackAllocator : public flatbuffers::Allocator { return *inst; } - static constexpr int kStackAllocatorSize = 4096; + static constexpr size_t kStackAllocatorSize = 4096; private: uint8_t data_backing_[kStackAllocatorSize];