Add support for interpreting tflite flatbuffers in micro builds.

PiperOrigin-RevId: 207550881

commit e01ad771da (parent 7304afd4f2)
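Context for reviewers: with TFLITE_MCU defined, the file- and mmap-based loaders in model.cc and FileCopyAllocation in allocation.cc are compiled out, so a micro build feeds FlatBufferModel a flatbuffer that is already in memory. A minimal sketch of that path, assuming the model bytes are linked into the binary; the g_model_data symbols below are hypothetical placeholders, not part of this change.

#include <cstddef>
#include <memory>

#include "tensorflow/contrib/lite/error_reporter.h"
#include "tensorflow/contrib/lite/model.h"

// Hypothetical symbols: a tflite flatbuffer baked into the image at build time.
extern const char g_model_data[];
extern const size_t g_model_data_len;

std::unique_ptr<tflite::FlatBufferModel> LoadMicroModel() {
  tflite::ErrorReporter* reporter = tflite::DefaultErrorReporter();
  // BuildFromBuffer is the loader that remains available under TFLITE_MCU;
  // the file/mmap variants are guarded out in model.cc by this change.
  auto model = tflite::FlatBufferModel::BuildFromBuffer(
      g_model_data, g_model_data_len, reporter);
  // The loaders reset the pointer when initialization fails, so callers
  // should null-check before building an interpreter on top of it.
  return model;
}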
tensorflow/contrib/lite/Makefile
@@ -95,6 +95,7 @@ ARFLAGS := -r
INCLUDES := \
-I. \
-I$(MAKEFILE_DIR)/../../../ \
-I$(MAKEFILE_DIR)/../../../../ \
-I$(MAKEFILE_DIR)/downloads/ \
-I$(MAKEFILE_DIR)/downloads/eigen \
-I$(MAKEFILE_DIR)/downloads/gemmlowp \
@@ -177,7 +178,6 @@ $(MINIMAL_SRCS)
ifeq ($(BUILD_TYPE),micro)
CORE_CC_EXCLUDE_SRCS += \
tensorflow/contrib/lite/mmap_allocation.cc \
tensorflow/contrib/lite/model.cc \
tensorflow/contrib/lite/nnapi_delegate.cc
else
CORE_CC_EXCLUDE_SRCS += \
@@ -219,8 +219,12 @@ all: $(LIB_PATH) $(MINIMAL_PATH) $(BENCHMARK_BINARY)
# The target that's compiled for micro-controllers
micro: $(LIB_PATH)

# Hack for generating schema file bypassing flatbuffer parsing
tensorflow/contrib/lite/schema/schema_generated.h:
	@cp -u tensorflow/contrib/lite/schema/schema_generated.h.OPENSOURCE tensorflow/contrib/lite/schema/schema_generated.h

# Gathers together all the objects we've compiled into a single '.a' archive.
$(LIB_PATH): $(LIB_OBJS)
$(LIB_PATH): tensorflow/contrib/lite/schema/schema_generated.h $(LIB_OBJS)
	@mkdir -p $(dir $@)
	$(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)
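A note on the new rules above: the archive now depends on schema_generated.h because micro builds compile model.cc, which needs the flatbuffer accessors that header declares; the cp hack appears to copy the pre-generated .OPENSOURCE header into place rather than generating it from the schema. A hedged sketch of the kind of access the generated schema provides; model_buffer is a hypothetical placeholder.

#include "tensorflow/contrib/lite/schema/schema_generated.h"
#include "tensorflow/contrib/lite/version.h"

// Sketch: read the schema version straight from a flatbuffer using the
// generated accessors, analogous to the check model.cc performs against
// TFLITE_SCHEMA_VERSION from version.h.
bool SchemaVersionMatches(const void* model_buffer) {
  const tflite::Model* fb_model = tflite::GetModel(model_buffer);
  return fb_model->version() == TFLITE_SCHEMA_VERSION;
}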
tensorflow/contrib/lite/allocation.cc
@@ -28,6 +28,7 @@ limitations under the License.

namespace tflite {

#ifndef TFLITE_MCU
FileCopyAllocation::FileCopyAllocation(const char* filename,
                                       ErrorReporter* error_reporter)
    : Allocation(error_reporter) {
@@ -71,6 +72,7 @@ const void* FileCopyAllocation::base() const { return copied_buffer_.get(); }
size_t FileCopyAllocation::bytes() const { return buffer_size_bytes_; }

bool FileCopyAllocation::valid() const { return copied_buffer_ != nullptr; }
#endif

MemoryAllocation::MemoryAllocation(const void* ptr, size_t num_bytes,
                                   ErrorReporter* error_reporter)
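On the allocation.cc side, only the buffer-backed MemoryAllocation remains when TFLITE_MCU is defined; FileCopyAllocation and its file I/O are compiled out. A minimal sketch of the surviving path, assuming the model bytes already sit in memory; data and size are placeholders.

#include <cstddef>

#include "tensorflow/contrib/lite/allocation.h"
#include "tensorflow/contrib/lite/error_reporter.h"

// Sketch: wrap in-memory model bytes in the allocation type that stays
// available under TFLITE_MCU.
bool WrapModelBytes(const void* data, size_t size) {
  tflite::ErrorReporter* reporter = tflite::DefaultErrorReporter();
  tflite::MemoryAllocation allocation(data, size, reporter);
  // base()/bytes()/valid() mirror the FileCopyAllocation accessors visible in
  // the hunk above.
  return allocation.valid() && allocation.bytes() == size;
}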
tensorflow/contrib/lite/model.cc
@@ -23,7 +23,9 @@ limitations under the License.
#include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/error_reporter.h"
#include "tensorflow/contrib/lite/model.h"
#ifndef TFLITE_MCU
#include "tensorflow/contrib/lite/nnapi_delegate.h"
#endif
#include "tensorflow/contrib/lite/version.h"

namespace tflite {
@@ -72,6 +74,7 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
  return kTfLiteOk;
}

#ifndef TFLITE_MCU
// Loads a model from `filename`. If `mmap_file` is true then use mmap,
// otherwise make a copy of the model in a buffer.
std::unique_ptr<Allocation> GetAllocationFromFile(const char* filename,
@@ -119,6 +122,7 @@ std::unique_ptr<FlatBufferModel> FlatBufferModel::VerifyAndBuildFromFile(
  if (!model->initialized()) model.reset();
  return model;
}
#endif

std::unique_ptr<FlatBufferModel> FlatBufferModel::BuildFromBuffer(
    const char* buffer, size_t buffer_size, ErrorReporter* error_reporter) {