Clean up bug references to the TfLiteEvalTensor API and point to the bug for adding this functionality to the MicroInterpreter.

All kernels have been ported aside from some optimized versions. The only open issues involve adding buffer/TfLiteEvalTensor API functionality to the MicroInterpreter. This change simply cleans up references in the allocation and interpreter code.

PiperOrigin-RevId: 336274016
Change-Id: I0707739fa51b40a1621410639779e576b63bfcb7
parent a7bdaeba61
commit b024551db4
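For context on the port the message describes: kernels on the new API read tensor data through the lightweight TfLiteEvalTensor struct instead of the full TfLiteTensor. A minimal sketch of the kernel-side pattern, assuming the helpers in tensorflow/lite/micro/kernels/kernel_util.h (the Eval body itself is illustrative, not taken from this change):

    #include "tensorflow/lite/micro/kernels/kernel_util.h"

    // Illustrative kernel Eval() using the TfLiteEvalTensor API. Eval tensors
    // carry only type, dims, and a data buffer, so no per-invoke TfLiteTensor
    // structs need to be materialized.
    TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      const TfLiteEvalTensor* input =
          tflite::micro::GetEvalInput(context, node, /*index=*/0);
      TfLiteEvalTensor* output =
          tflite::micro::GetEvalOutput(context, node, /*index=*/0);

      // Element count computed directly from dims; helper names for this
      // vary across versions, so it is done by hand here.
      int count = 1;
      for (int i = 0; i < input->dims->size; ++i) count *= input->dims->data[i];

      const float* in = tflite::micro::GetTensorData<float>(input);
      float* out = tflite::micro::GetTensorData<float>(output);
      for (int i = 0; i < count; ++i) out[i] = in[i];  // e.g. an identity op
      return kTfLiteOk;
    }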
@@ -1,4 +1,4 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -450,7 +450,7 @@ void* GetFlatbufferTensorBuffer(
   // and if there is update the runtime structure to point to its location in
   // memory.
   // First see if there's any buffer information in the serialized tensor.
-  // TODO(b/160894903): Add better unit tests that validate flatbuffer values.
+  // TODO(b/170379532): Add better unit tests to validate flatbuffer values.
   void* out_buffer = nullptr;
   if (auto* buffer = (*buffers)[flatbuffer_tensor.buffer()]) {
     // If we've found a buffer, does it have any data?
@@ -1019,9 +1019,9 @@ TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensorInternal(
 TfLiteStatus MicroAllocator::PopulateTfLiteTensorFromFlatbuffer(
     const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor,
     int tensor_index, bool allocate_temp) {
-  // TODO(b/160894903): This method serves as a stub to ensure quantized
-  // allocations in the tail can be recorded. Once all kernels have been ported
-  // to the new API this can be dropped.
+  // TODO(b/162311891): This method serves as a stub to ensure quantized
+  // allocations in the tail can be recorded. Once the interpreter has APIs for
+  // accessing buffers on TfLiteEvalTensor this method can be dropped.
   return internal::InitializeTfLiteTensorFromFlatbuffer(
       memory_allocator_, allocate_temp, *subgraph->tensors()->Get(tensor_index),
       model->buffers(), error_reporter_, tensor);
@@ -1,5 +1,5 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -32,10 +32,8 @@ namespace internal {
 
 // Sets up all of the data structure members for a TfLiteTensor based on the
 // contents of a serialized tensor in the flatbuffer.
-// TODO(b/160894903): Once all kernels have been updated to the new
-// TfLiteEvalTensor API - drop the allocate_temp flag. This enables internal
-// flatbuffer quantization or dimension allocations to take place in either the
-// temp or tail section of the arena.
+// TODO(b/162311891): Drop this method when the interpreter has an API for
+// returning buffers on TfLiteEvalTensor.
 TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
     SimpleMemoryAllocator* allocator, bool allocate_temp,
     const tflite::Tensor& flatbuffer_tensor,
@@ -215,17 +213,15 @@ class MicroAllocator {
   virtual TfLiteStatus AllocateVariables(const SubGraph* subgraph,
                                          TfLiteEvalTensor* eval_tensors);
 
-  // TODO(b/160894903): Once all kernels have been updated to the new API drop
-  // this method. It is only used to record TfLiteTensor persistent allocations.
+  // Allocate and return a persistent TfLiteTensor.
+  // TODO(b/162311891): Drop this method when the interpreter has an API for
+  // accessing TfLiteEvalTensor structs.
   virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal(
       const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index);
 
   // Populates a TfLiteTensor struct with data from the model flatbuffer. Any
   // quantization data is allocated from either the tail (persistent) or temp
   // sections of the arena based on the allocation flag.
-  // TODO(b/160894903): Once all kernels have been updated to the new API drop
-  // this function since all allocations for quantized data will take place in
-  // the temp section.
   virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(
       const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor,
       int tensor_index, bool allocate_temp);
@@ -1,4 +1,4 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -142,8 +142,9 @@ TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) {
   simple_allocator->~SimpleMemoryAllocator();
 }
 
-// TODO(b/160894903): Drop this test when InitializeTfLiteTensorFromFlatbuffer()
-// always allocates from temp (kernels are using the new TfLiteEvalTensor API):
+// TODO(b/162311891): Drop this test when InitializeTfLiteTensorFromFlatbuffer()
+// always allocates from temp (interpreter returns buffers from
+// TfLiteEvalTensor):
 TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) {
   constexpr size_t arena_size = 1024;
   uint8_t arena[arena_size];
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -408,8 +408,8 @@ TfLiteTensor* MicroInterpreter::output(size_t index) {
                                              outputs().Get(index));
   }
   if (output_tensor_ == nullptr) {
-    // TODO(b/160894903): This API will allocate TfLiteTensor structs from
-    // persistent (tail) memory and cache on this pointer.
+    // TODO(b/162311891): Drop these allocations when the interpreter supports
+    // handling buffers from TfLiteEvalTensor.
    output_tensor_ = allocator_.AllocatePersistentTfLiteTensor(
        model_, eval_tensors_, outputs().Get(index));
   }
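The caller-visible effect of the hunk above: MicroInterpreter::output() lazily allocates a single persistent TfLiteTensor from the arena tail and caches it on output_tensor_. A usage sketch, assuming the five-argument MicroInterpreter constructor of this era; setup of model, resolver, and error_reporter is elided and those names are placeholders:

    constexpr size_t kArenaSize = 10 * 1024;
    uint8_t arena[kArenaSize];
    tflite::MicroInterpreter interpreter(model, resolver, arena, kArenaSize,
                                         &error_reporter);
    if (interpreter.AllocateTensors() != kTfLiteOk) return;
    if (interpreter.Invoke() != kTfLiteOk) return;

    // The first call allocates the persistent TfLiteTensor; later calls
    // return the cached pointer instead of allocating again.
    TfLiteTensor* out = interpreter.output(0);
    TfLiteTensor* same = interpreter.output(0);  // == out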
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -200,8 +200,8 @@ class MicroInterpreter {
   // TODO(b/16157777): Drop this reference:
   internal::ContextHelper context_helper_;
 
-  // TODO(b/160894903): Clean these pointers up when all APIs are updated to new
-  // TfLiteEvalTensor buffers.
+  // TODO(b/162311891): Clean these pointers up when this class supports buffers
+  // from TfLiteEvalTensor.
   TfLiteTensor* input_tensor_;
   TfLiteTensor* output_tensor_;
 };
@@ -78,12 +78,12 @@ class RecordingMicroAllocator : public MicroAllocator {
       const Model* model, TfLiteEvalTensor** eval_tensors) override;
   TfLiteStatus AllocateVariables(const SubGraph* subgraph,
                                  TfLiteEvalTensor* eval_tensors) override;
-  // TODO(b/160894903): Once all kernels have been updated to the new API drop
+  // TODO(b/162311891): Once all kernels have been updated to the new API drop
   // this method. It is only used to record TfLiteTensor persistent allocations.
   TfLiteTensor* AllocatePersistentTfLiteTensorInternal(
       const Model* model, TfLiteEvalTensor* eval_tensors,
       int tensor_index) override;
-  // TODO(b/160894903): Once all kernels have been updated to the new API drop
+  // TODO(b/162311891): Once all kernels have been updated to the new API drop
   // this function since all allocations for quantized data will take place in
   // the temp section.
   TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model,
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.