Add flag that allows TFLite Micro Interpreter to re-use data in the prepare block of shared/forked operators.

PiperOrigin-RevId: 290785973
Change-Id: I41fa20dba09c387950404b1358202abec99ac7a1
This commit is contained in:
Nick Kreeger 2020-01-21 11:27:57 -08:00 committed by TensorFlower Gardener
parent 39f2beb018
commit 7076e27f8e
3 changed files with 21 additions and 12 deletions

View File

@@ -52,7 +52,8 @@ MicroInterpreter::MicroInterpreter(const Model* model,
       error_reporter_(error_reporter),
       allocator_(&context_, model_, tensor_arena, tensor_arena_size,
                  error_reporter_),
-      tensors_allocated_(false) {
+      tensors_allocated_(false),
+      tensors_prepared_(false) {
   const flatbuffers::Vector<flatbuffers::Offset<SubGraph>>* subgraphs =
       model->subgraphs();
   if (subgraphs->size() != 1) {
@@ -155,24 +156,30 @@ TfLiteStatus MicroInterpreter::Invoke() {
         init_data = reinterpret_cast<const char*>(node->builtin_data);
         init_data_size = 0;
       }
-      if (registration->init) {
+      if (!tensors_prepared_ && registration->init) {
         node->user_data =
             registration->init(&context_, init_data, init_data_size);
       }
     }
-    for (size_t i = 0; i < operators_->size(); ++i) {
-      auto* node = &(node_and_registrations_[i].node);
-      auto* registration = node_and_registrations_[i].registration;
-      if (registration->prepare) {
-        TfLiteStatus prepare_status = registration->prepare(&context_, node);
-        if (prepare_status != kTfLiteOk) {
-          error_reporter_->Report(
-              "Node %s (number %d) failed to prepare with status %d",
-              OpNameFromRegistration(registration), i, prepare_status);
-          return kTfLiteError;
+    if (!tensors_prepared_) {
+      for (size_t i = 0; i < operators_->size(); ++i) {
+        auto* node = &(node_and_registrations_[i].node);
+        auto* registration = node_and_registrations_[i].registration;
+        if (registration->prepare) {
+          TfLiteStatus prepare_status = registration->prepare(&context_, node);
+          if (prepare_status != kTfLiteOk) {
+            error_reporter_->Report(
+                "Node %s (number %d) failed to prepare with status %d",
+                OpNameFromRegistration(registration), i, prepare_status);
+            return kTfLiteError;
+          }
         }
       }
+#ifdef TF_LITE_MICRO_TENSORS_PREPARED
+      // TODO(b/148085107): Turn this value on by default.
+      tensors_prepared_ = true;
+#endif
     }
     for (size_t i = 0; i < operators_->size(); ++i) {

View File

@@ -117,6 +117,7 @@ class MicroInterpreter {
   TfLiteContext context_ = {};
   MicroAllocator allocator_;
   bool tensors_allocated_;
+  bool tensors_prepared_;
   TfLiteStatus initialization_status_;
   const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;

View File

@@ -7,6 +7,7 @@ ifeq ($(TARGET), xtensa-xpg)
   TARGET_ARCH := xtensa-xpg
   PLATFORM_ARGS = \
+    -DTF_LITE_MICRO_TENSORS_PREPARED \
     -DTF_LITE_STATIC_MEMORY \
     -DNDEBUG \
     -DTF_LITE_MCU_DEBUG_LOG \