Add support for interpreting tflite flatbuffers in micro builds.

PiperOrigin-RevId: 207550881
This commit is contained in:
A. Unique TensorFlower 2018-08-06 08:44:13 -07:00 committed by TensorFlower Gardener
parent 7304afd4f2
commit e01ad771da
3 changed files with 12 additions and 2 deletions

View File

@@ -95,6 +95,7 @@ ARFLAGS := -r
INCLUDES := \ INCLUDES := \
-I. \ -I. \
-I$(MAKEFILE_DIR)/../../../ \ -I$(MAKEFILE_DIR)/../../../ \
-I$(MAKEFILE_DIR)/../../../../ \
-I$(MAKEFILE_DIR)/downloads/ \ -I$(MAKEFILE_DIR)/downloads/ \
-I$(MAKEFILE_DIR)/downloads/eigen \ -I$(MAKEFILE_DIR)/downloads/eigen \
-I$(MAKEFILE_DIR)/downloads/gemmlowp \ -I$(MAKEFILE_DIR)/downloads/gemmlowp \
@@ -177,7 +178,6 @@ $(MINIMAL_SRCS)
ifeq ($(BUILD_TYPE),micro) ifeq ($(BUILD_TYPE),micro)
CORE_CC_EXCLUDE_SRCS += \ CORE_CC_EXCLUDE_SRCS += \
tensorflow/contrib/lite/mmap_allocation.cc \ tensorflow/contrib/lite/mmap_allocation.cc \
tensorflow/contrib/lite/model.cc \
tensorflow/contrib/lite/nnapi_delegate.cc tensorflow/contrib/lite/nnapi_delegate.cc
else else
CORE_CC_EXCLUDE_SRCS += \ CORE_CC_EXCLUDE_SRCS += \
@@ -219,8 +219,12 @@ all: $(LIB_PATH) $(MINIMAL_PATH) $(BENCHMARK_BINARY)
# The target that's compiled for micro-controllers # The target that's compiled for micro-controllers
micro: $(LIB_PATH) micro: $(LIB_PATH)
# Hack for generating schema file bypassing flatbuffer parsing
tensorflow/contrib/lite/schema/schema_generated.h:
@cp -u tensorflow/contrib/lite/schema/schema_generated.h.OPENSOURCE tensorflow/contrib/lite/schema/schema_generated.h
# Gathers together all the objects we've compiled into a single '.a' archive. # Gathers together all the objects we've compiled into a single '.a' archive.
$(LIB_PATH): $(LIB_OBJS) $(LIB_PATH): tensorflow/contrib/lite/schema/schema_generated.h $(LIB_OBJS)
@mkdir -p $(dir $@) @mkdir -p $(dir $@)
$(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS) $(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)

View File

@@ -28,6 +28,7 @@ limitations under the License.
namespace tflite { namespace tflite {
#ifndef TFLITE_MCU
FileCopyAllocation::FileCopyAllocation(const char* filename, FileCopyAllocation::FileCopyAllocation(const char* filename,
ErrorReporter* error_reporter) ErrorReporter* error_reporter)
: Allocation(error_reporter) { : Allocation(error_reporter) {
@@ -71,6 +72,7 @@ const void* FileCopyAllocation::base() const { return copied_buffer_.get(); }
size_t FileCopyAllocation::bytes() const { return buffer_size_bytes_; } size_t FileCopyAllocation::bytes() const { return buffer_size_bytes_; }
bool FileCopyAllocation::valid() const { return copied_buffer_ != nullptr; } bool FileCopyAllocation::valid() const { return copied_buffer_ != nullptr; }
#endif
MemoryAllocation::MemoryAllocation(const void* ptr, size_t num_bytes, MemoryAllocation::MemoryAllocation(const void* ptr, size_t num_bytes,
ErrorReporter* error_reporter) ErrorReporter* error_reporter)

View File

@@ -23,7 +23,9 @@ limitations under the License.
#include "tensorflow/contrib/lite/builtin_op_data.h" #include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/error_reporter.h" #include "tensorflow/contrib/lite/error_reporter.h"
#include "tensorflow/contrib/lite/model.h" #include "tensorflow/contrib/lite/model.h"
#ifndef TFLITE_MCU
#include "tensorflow/contrib/lite/nnapi_delegate.h" #include "tensorflow/contrib/lite/nnapi_delegate.h"
#endif
#include "tensorflow/contrib/lite/version.h" #include "tensorflow/contrib/lite/version.h"
namespace tflite { namespace tflite {
@@ -72,6 +74,7 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
return kTfLiteOk; return kTfLiteOk;
} }
#ifndef TFLITE_MCU
// Loads a model from `filename`. If `mmap_file` is true then use mmap, // Loads a model from `filename`. If `mmap_file` is true then use mmap,
// otherwise make a copy of the model in a buffer. // otherwise make a copy of the model in a buffer.
std::unique_ptr<Allocation> GetAllocationFromFile(const char* filename, std::unique_ptr<Allocation> GetAllocationFromFile(const char* filename,
@@ -119,6 +122,7 @@ std::unique_ptr<FlatBufferModel> FlatBufferModel::VerifyAndBuildFromFile(
if (!model->initialized()) model.reset(); if (!model->initialized()) model.reset();
return model; return model;
} }
#endif
std::unique_ptr<FlatBufferModel> FlatBufferModel::BuildFromBuffer( std::unique_ptr<FlatBufferModel> FlatBufferModel::BuildFromBuffer(
const char* buffer, size_t buffer_size, ErrorReporter* error_reporter) { const char* buffer, size_t buffer_size, ErrorReporter* error_reporter) {