From e01ad771da72a9f1aa328b0edd928573f08a237b Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Mon, 6 Aug 2018 08:44:13 -0700
Subject: [PATCH] Add support for interpreting tflite flatbuffers in micro builds.

PiperOrigin-RevId: 207550881
---
 tensorflow/contrib/lite/Makefile      | 8 ++++++--
 tensorflow/contrib/lite/allocation.cc | 2 ++
 tensorflow/contrib/lite/model.cc      | 4 ++++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/tensorflow/contrib/lite/Makefile b/tensorflow/contrib/lite/Makefile
index 92635ff430e..9cc8f10b429 100644
--- a/tensorflow/contrib/lite/Makefile
+++ b/tensorflow/contrib/lite/Makefile
@@ -95,6 +95,7 @@ ARFLAGS := -r
 INCLUDES := \
 -I. \
 -I$(MAKEFILE_DIR)/../../../ \
+-I$(MAKEFILE_DIR)/../../../../ \
 -I$(MAKEFILE_DIR)/downloads/ \
 -I$(MAKEFILE_DIR)/downloads/eigen \
 -I$(MAKEFILE_DIR)/downloads/gemmlowp \
@@ -177,7 +178,6 @@ $(MINIMAL_SRCS)
 ifeq ($(BUILD_TYPE),micro)
 CORE_CC_EXCLUDE_SRCS += \
 tensorflow/contrib/lite/mmap_allocation.cc \
-tensorflow/contrib/lite/model.cc \
 tensorflow/contrib/lite/nnapi_delegate.cc
 else
 CORE_CC_EXCLUDE_SRCS += \
@@ -219,8 +219,12 @@ all: $(LIB_PATH) $(MINIMAL_PATH) $(BENCHMARK_BINARY)
 # The target that's compiled for micro-controllers
 micro: $(LIB_PATH)
 
+# Hack for generating schema file bypassing flatbuffer parsing
+tensorflow/contrib/lite/schema/schema_generated.h:
+	@cp -u tensorflow/contrib/lite/schema/schema_generated.h.OPENSOURCE tensorflow/contrib/lite/schema/schema_generated.h
+
 # Gathers together all the objects we've compiled into a single '.a' archive.
-$(LIB_PATH): $(LIB_OBJS)
+$(LIB_PATH): tensorflow/contrib/lite/schema/schema_generated.h $(LIB_OBJS)
 	@mkdir -p $(dir $@)
 	$(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)
 
diff --git a/tensorflow/contrib/lite/allocation.cc b/tensorflow/contrib/lite/allocation.cc
index 6d834f71c02..89462618148 100644
--- a/tensorflow/contrib/lite/allocation.cc
+++ b/tensorflow/contrib/lite/allocation.cc
@@ -28,6 +28,7 @@ limitations under the License.
 
 namespace tflite {
 
+#ifndef TFLITE_MCU
 FileCopyAllocation::FileCopyAllocation(const char* filename,
                                        ErrorReporter* error_reporter)
     : Allocation(error_reporter) {
@@ -71,6 +72,7 @@ const void* FileCopyAllocation::base() const { return copied_buffer_.get(); }
 size_t FileCopyAllocation::bytes() const { return buffer_size_bytes_; }
 
 bool FileCopyAllocation::valid() const { return copied_buffer_ != nullptr; }
+#endif
 
 MemoryAllocation::MemoryAllocation(const void* ptr, size_t num_bytes,
                                    ErrorReporter* error_reporter)
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index e50b262ab81..9edf5ba38f4 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -23,7 +23,9 @@ limitations under the License.
 #include "tensorflow/contrib/lite/builtin_op_data.h"
 #include "tensorflow/contrib/lite/error_reporter.h"
 #include "tensorflow/contrib/lite/model.h"
+#ifndef TFLITE_MCU
 #include "tensorflow/contrib/lite/nnapi_delegate.h"
+#endif
 #include "tensorflow/contrib/lite/version.h"
 
 namespace tflite {
@@ -72,6 +74,7 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
   return kTfLiteOk;
 }
 
+#ifndef TFLITE_MCU
 // Loads a model from `filename`. If `mmap_file` is true then use mmap,
 // otherwise make a copy of the model in a buffer.
 std::unique_ptr<Allocation> GetAllocationFromFile(const char* filename,
@@ -119,6 +122,7 @@ std::unique_ptr<FlatBufferModel> FlatBufferModel::VerifyAndBuildFromFile(
   if (!model->initialized()) model.reset();
   return model;
 }
+#endif
 
 std::unique_ptr<FlatBufferModel> FlatBufferModel::BuildFromBuffer(
     const char* buffer, size_t buffer_size, ErrorReporter* error_reporter) {
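
Note: with model.cc now compiled into BUILD_TYPE=micro builds, a micro target can create an interpreter straight from a flatbuffer held in memory via FlatBufferModel::BuildFromBuffer, while the file-copy and mmap loading paths stay compiled out behind TFLITE_MCU. The sketch below illustrates that buffer-based path; it is not part of the patch. The names g_model_data, g_model_len, and BuildInterpreterFromBuffer are hypothetical application-side placeholders, and it assumes the builtin op resolver from kernels/register.h is linked into the micro build.

#include <cstddef>
#include <memory>

#include "tensorflow/contrib/lite/error_reporter.h"
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"

// Hypothetical application-provided symbols holding the serialized
// .tflite flatbuffer (e.g. a const array linked into flash).
extern const char g_model_data[];
extern const size_t g_model_len;

std::unique_ptr<tflite::Interpreter> BuildInterpreterFromBuffer() {
  // BuildFromBuffer neither copies nor takes ownership of the buffer,
  // so g_model_data must outlive the model and the interpreter.
  auto model = tflite::FlatBufferModel::BuildFromBuffer(
      g_model_data, g_model_len, tflite::DefaultErrorReporter());
  if (!model) return nullptr;

  // Assumes the builtin kernels are available in this build; a trimmed
  // micro image could instead register only the ops the model needs.
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) {
    return nullptr;
  }
  if (interpreter->AllocateTensors() != kTfLiteOk) return nullptr;
  // Fill input tensors, then call interpreter->Invoke().
  return interpreter;
}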