Fix review comments, 24/4

parent 4703dd2ae4
commit e9b965a29c

Files changed:
  tensorflow/lite/micro/examples/person_detection
  tensorflow/lite/micro/memory_planner
  tensorflow/lite/micro/micro_allocator.cc
  tensorflow/lite/micro/micro_optional_debug_tools.cc
  tensorflow/lite/micro/micro_optional_debug_tools.h

examples/person_detection:

@@ -22,6 +22,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_error_reporter.h"
 #include "tensorflow/lite/micro/micro_interpreter.h"
 #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+#include "tensorflow/lite/micro/micro_optional_debug_tools.h"
 #include "tensorflow/lite/micro/testing/micro_test.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 #include "tensorflow/lite/version.h"
@@ -46,6 +47,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
                          "to supported version %d.\n",
                          model->version(), TFLITE_SCHEMA_VERSION);
   }
+  PrintModelData(model, error_reporter);

   // Pull in only the operation implementations we need.
   // This relies on a complete list of all the ops needed by this graph.
memory_planner:

@@ -144,8 +144,10 @@ class GreedyMemoryPlanner : public MemoryPlanner {
   int* buffer_sizes_sorted_;
   int* buffer_ids_sorted_;
   ListEntry* buffers_sorted_by_offset_;
-  int next_free_entry_;
-  int first_entry_index_;
+  int next_free_entry_;    // Index of the next free entry of
+                           // buffers_sorted_by_offset_
+  int first_entry_index_;  // Index of the first entry (smallest offset) of
+                           // buffers_sorted_by_offset_

   // Stores the outcome of the plan, the location of each buffer in the arena.
   int* buffer_offsets_;
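Note: the new comments pin down that buffers_sorted_by_offset_ is an index-linked list: entries chain to each other by array index, with first_entry_index_ naming the head (smallest offset) and next_free_entry_ naming the next unused slot. A minimal self-contained sketch of that bookkeeping, using a simplified stand-in for ListEntry (the real one in the planner header carries more fields):

    // Sketch only: simplified stand-in for the planner's index-linked list.
    #include <cstdio>

    struct ListEntry {
      int offset;            // arena offset of this buffer
      int next_entry_index;  // index of the next entry, or -1 at the tail
    };

    int main() {
      ListEntry entries[4] = {{0, 1}, {64, 2}, {128, -1}, {0, -1}};
      int first_entry_index = 0;  // head: entry with the smallest offset
      int next_free_entry = 3;    // next unused slot in the backing array
      // Walk in offset order by following indices instead of pointers.
      for (int i = first_entry_index; i != -1; i = entries[i].next_entry_index) {
        printf("buffer at offset %d\n", entries[i].offset);
      }
      printf("next free slot: %d\n", next_free_entry);
      return 0;
    }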
micro_allocator.cc:

@@ -45,6 +45,8 @@ struct AllocationInfo {
 // requirement for SIMD extensions.
 constexpr int kBufferAlignment = 16;

+constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation";
+
 class MicroBuiltinDataAllocator : public BuiltinDataAllocator {
  public:
  explicit MicroBuiltinDataAllocator(SimpleMemoryAllocator* memory_allocator)
@@ -81,33 +83,6 @@ TfLiteStatus AllocateVariables(
   return kTfLiteOk;
 }

-// Helper function to print model flatbuffer data. This function is not called
-// by default. Hence it's not linked in to the final binary code.
-void PrintModelData(const Model* model, ErrorReporter* error_reporter) {
-  auto* subgraphs = model->subgraphs();
-  const SubGraph* subgraph = (*subgraphs)[0];
-  const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors =
-      subgraph->tensors();
-  const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers =
-      model->buffers();
-  TF_LITE_REPORT_ERROR(error_reporter, "==== Model info: =====");
-  for (int i = 0; i < tensors->size(); ++i) {
-    const tflite::Tensor& flatbuffer_tensor = *tensors->Get(i);
-    auto* quantization = flatbuffer_tensor.quantization();
-    size_t type_size, tensor_size;
-    auto* buffer = (*buffers)[flatbuffer_tensor.buffer()];
-    auto* array = buffer->data();
-    int array_size = 0;
-    if (array) {
-      array_size = array->size();
-    }
-    BytesRequiredForTensor(flatbuffer_tensor, &tensor_size, &type_size,
-                           error_reporter);
-    TF_LITE_REPORT_ERROR(
-        error_reporter, "Tensor index: %d arena tensor %d size %d", i,
-        !array_size && !flatbuffer_tensor.is_variable(), tensor_size);
-  }
-}
-
 // Helper function to check flatbuffer metadata correctness. This function is
 // not called by default. Hence it's not linked in to the final binary code.
@@ -116,8 +91,8 @@ TfLiteStatus CheckOfflinePlannedOffsets(const Model* model,
   if (model->metadata()) {
     for (int i = 0; i < model->metadata()->size(); ++i) {
       auto metadata = model->metadata()->Get(i);
-      if (strncmp(metadata->name()->c_str(), "OfflineMemoryAllocation",
-                  strlen("OfflineMemoryAllocation")) == 0) {
+      if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata,
+                  strlen(kOfflineMemAllocMetadata)) == 0) {
         auto* subgraphs = model->subgraphs();
         const SubGraph* subgraph = (*subgraphs)[0];
         const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors =
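Note: hoisting the repeated literal into kOfflineMemAllocMetadata keeps the name and its strlen() bound in one place. strncmp bounded by strlen(kOfflineMemAllocMetadata) is a prefix test, not full string equality, as this standalone sketch shows (MatchesMetadataName is a hypothetical helper name, not part of the change):

    #include <cstdio>
    #include <cstring>

    constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation";

    bool MatchesMetadataName(const char* name) {
      return strncmp(name, kOfflineMemAllocMetadata,
                     strlen(kOfflineMemAllocMetadata)) == 0;
    }

    int main() {
      printf("%d\n", MatchesMetadataName("OfflineMemoryAllocation"));   // 1
      printf("%d\n", MatchesMetadataName("OfflineMemoryAllocationX"));  // 1: prefix still matches
      printf("%d\n", MatchesMetadataName("SomethingElse"));             // 0
      return 0;
    }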
@@ -311,8 +286,9 @@ TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph,
 // | name:string | "OfflineMemoryAllocation" |
 // | buffer:uint | Index of buffer containing memory allocation data |
 //
-// The buffer contents for the memory allocation is a list of 32-bit integers of
-// the following format:
+// The buffer contents for the memory allocation is a list of 32-bit integers.
+// The number of tensors, n, must be equal to the number of tensors defined in
+// the model. The following encoding applies:
 //
 // | Offset | Value |
 // | 0      | Offline allocation format version – set to 0 |
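Note: a worked example of the encoding described above. Only offset 0 (the version word, set to 0) appears in the visible rows; the remaining words shown here (a subgraph index, the count n, then n per-tensor arena offsets, with -1 assumed to mean "allocate at runtime") are illustrative assumptions consistent with the comment's "list of 32-bit integers" and its requirement that n equal the model's tensor count:

    #include <cstdint>

    // Hypothetical metadata buffer for a 3-tensor model; only the first
    // word (format version == 0) is documented in the excerpt above.
    const int32_t kOfflinePlan[] = {
        0,   // offset 0: offline allocation format version, set to 0
        0,   // assumed: subgraph index the plan applies to
        3,   // assumed: n, the number of tensor offsets that follow
        0,   // assumed: arena byte offset of tensor 0
        64,  // assumed: arena byte offset of tensor 1
        -1,  // assumed: tensor 2 left to the runtime planner
    };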
@@ -326,8 +302,8 @@ TfLiteStatus AllocationInfoBuilder::GetOfflinePlannedOffsets(
   if (model->metadata()) {
     for (int i = 0; i < model->metadata()->size(); ++i) {
       auto metadata = model->metadata()->Get(i);
-      if (strncmp(metadata->name()->c_str(), "OfflineMemoryAllocation",
-                  strlen("OfflineMemoryAllocation")) == 0) {
+      if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata,
+                  strlen(kOfflineMemAllocMetadata)) == 0) {
         const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers =
             model->buffers();
         auto* buffer = (*buffers)[metadata->buffer()];
@@ -365,7 +341,8 @@ TfLiteStatus AllocationInfoBuilder::AddScratchBuffers(
   return kTfLiteOk;
 }

-TfLiteStatus CreatePlan(ErrorReporter* error_reporter, MemoryPlanner* planner,
+TfLiteStatus CreatePlan(ErrorReporter* error_reporter,
+                        GreedyMemoryPlanner* planner,
                         const AllocationInfo* allocation_info,
                         size_t allocation_info_size) {
   // Add the tensors to our allocation plan.
@@ -380,10 +357,9 @@ TfLiteStatus CreatePlan(ErrorReporter* error_reporter, MemoryPlanner* planner,
                              current->first_created, current->last_used));
     } else {
       TF_LITE_ENSURE_STATUS(
-          (static_cast<GreedyMemoryPlanner*>(planner))
-              ->AddBuffer(error_reporter, aligned_bytes_required,
-                          current->first_created, current->last_used,
-                          current->offline_offset));
+          planner->AddBuffer(error_reporter, aligned_bytes_required,
+                             current->first_created, current->last_used,
+                             current->offline_offset));
     }
   }
 }
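Note: the cast was needed because the five-argument AddBuffer taking an offline offset is specific to GreedyMemoryPlanner; once CreatePlan accepts a GreedyMemoryPlanner* directly, the overload is reachable without a downcast. A compilable sketch with stubbed types (the signatures are inferred from the calls in this diff, not copied from the real headers):

    // Stubs for illustration; not the real tensorflow/lite types.
    enum TfLiteStatus { kTfLiteOk, kTfLiteError };
    class ErrorReporter;

    class MemoryPlanner {
     public:
      virtual ~MemoryPlanner() {}
      // Interface form: no offline offset parameter.
      virtual TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size,
                                     int first_time_used,
                                     int last_time_used) = 0;
    };

    class GreedyMemoryPlanner : public MemoryPlanner {
     public:
      TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size,
                             int first_time_used, int last_time_used) override {
        return kTfLiteOk;
      }
      // Concrete-only overload with an offline offset; reaching it through a
      // MemoryPlanner* is what forced the static_cast this change removes.
      TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size,
                             int first_time_used, int last_time_used,
                             int offline_offset) {
        return kTfLiteOk;
      }
    };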
micro_optional_debug_tools.cc:

@@ -22,6 +22,8 @@ limitations under the License.
 #include <cinttypes>

+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/micro/memory_helpers.h"

 namespace tflite {
 namespace {
@@ -100,6 +102,35 @@ const char* AllocTypeName(TfLiteAllocationType type) {
 }
 }  // namespace

+// Helper function to print model flatbuffer data. This function is not called
+// by default. Hence it's not linked in to the final binary code.
+void PrintModelData(const Model* model, ErrorReporter* error_reporter) {
+  auto* subgraphs = model->subgraphs();
+  const SubGraph* subgraph = (*subgraphs)[0];
+  const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors =
+      subgraph->tensors();
+  const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers =
+      model->buffers();
+  TF_LITE_REPORT_ERROR(error_reporter, "==== Model info: =====");
+  for (int i = 0; i < tensors->size(); ++i) {
+    const tflite::Tensor& flatbuffer_tensor = *tensors->Get(i);
+    auto* quantization = flatbuffer_tensor.quantization();
+    size_t type_size, tensor_size;
+    auto* buffer = (*buffers)[flatbuffer_tensor.buffer()];
+    auto* array = buffer->data();
+    int array_size = 0;
+    if (array) {
+      array_size = array->size();
+    }
+    BytesRequiredForTensor(flatbuffer_tensor, &tensor_size, &type_size,
+                           error_reporter);
+    TF_LITE_REPORT_ERROR(
+        error_reporter,
+        "Tensor index: %d arena tensor %d size %d ",
+        i, !array_size && !flatbuffer_tensor.is_variable(), tensor_size);
+  }
+}
+
 // Prints a dump of what tensors and what nodes are in the interpreter.
 void PrintInterpreterState(MicroInterpreter* interpreter) {
   printf("Interpreter has %zu tensors and %zu nodes\n",
micro_optional_debug_tools.h:

@@ -20,6 +20,9 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_interpreter.h"

 namespace tflite {
+// Helper function to print model flatbuffer data. This function is not called
+// by default. Hence it's not linked in to the final binary code.
+void PrintModelData(const Model* model, ErrorReporter* error_reporter);
 // Prints a dump of what tensors and what nodes are in the interpreter.
 void PrintInterpreterState(MicroInterpreter* interpreter);
 }  // namespace tflite
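Note: with the declaration now in micro_optional_debug_tools.h, a caller only needs that header plus an error reporter, as in the person_detection test above. A minimal usage sketch (DumpModel is a hypothetical wrapper, not part of this change):

    #include "tensorflow/lite/micro/micro_error_reporter.h"
    #include "tensorflow/lite/micro/micro_optional_debug_tools.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    void DumpModel(const void* model_data) {
      tflite::MicroErrorReporter micro_error_reporter;
      const tflite::Model* model = tflite::GetModel(model_data);
      // Only linked into the binary because it is referenced here.
      tflite::PrintModelData(model, &micro_error_reporter);
    }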