Fix for Micro crash when quantization scale and zero point parameters aren't specified for inputs

PiperOrigin-RevId: 252880624
Pete Warden 2019-06-12 12:51:17 -07:00 committed by TensorFlower Gardener
parent 03195f1345
commit 7689e20507
2 changed files with 46 additions and 5 deletions
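Background, with an illustrative sketch that is not part of the commit: in the TFLite flatbuffers schema, a tensor's quantization table and its scale and zero_point vectors are all optional, and the generated accessors return nullptr when a field is absent. The old code below only checked quantization() itself, so quantization()->scale()->Get(0) dereferenced a null vector pointer for models that omit those parameters. A minimal sketch of the accessor behavior, assuming the generated schema types; the helper name is hypothetical:

// Hypothetical helper showing why each level needs its own check:
// any optional flatbuffers field can come back as nullptr, and a
// vector that is present can still be empty.
bool HasFirstScale(const tflite::QuantizationParameters* q) {
  if (q == nullptr) return false;           // no quantization table at all
  if (q->scale() == nullptr) return false;  // table present, scale omitted
  return q->scale()->size() > 0;            // scale present but empty
}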


@@ -129,10 +129,13 @@ TfLiteStatus SimpleTensorAllocator::AllocateTensor(
   for (int n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) {
     result->dims->data[n] = flatbuffer_tensor.shape()->Get(n);
   }
-  if (flatbuffer_tensor.quantization()) {
-    result->params.scale = flatbuffer_tensor.quantization()->scale()->Get(0);
-    result->params.zero_point =
-        flatbuffer_tensor.quantization()->zero_point()->Get(0);
+  const auto* src_quantization = flatbuffer_tensor.quantization();
+  if (src_quantization && src_quantization->scale() &&
+      (src_quantization->scale()->size() > 0) &&
+      src_quantization->zero_point() &&
+      (src_quantization->zero_point()->size() > 0)) {
+    result->params.scale = src_quantization->scale()->Get(0);
+    result->params.zero_point = src_quantization->zero_point()->Get(0);
   }
   result->allocation = nullptr;
   if (flatbuffer_tensor.name()) {
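Design note on the change above: the repeated quantization() lookups are hoisted into src_quantization, and params are assigned only when every level of the optional chain is present. When anything is missing, the branch is skipped entirely, so result->params keeps whatever value the output struct already held. An illustrative caller-side precaution (not from the commit), assuming a C++ caller:

// Value-initialize the output so params.scale and params.zero_point
// read as zero when the model omits quantization and the guarded
// branch is skipped.
TfLiteTensor allocated_tensor = {};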


@@ -14,7 +14,6 @@ limitations under the License.
 ==============================================================================*/
 
 #include "tensorflow/lite/experimental/micro/micro_interpreter.h"
 #include "tensorflow/lite/experimental/micro/testing/micro_test.h"
 
 namespace tflite {
@@ -73,6 +72,24 @@ const Tensor* Create1dTensor(int size) {
   return tensor;
 }
+
+const Tensor* CreateMissingQuantizationTensor(int size) {
+  using flatbuffers::Offset;
+  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
+  const Offset<QuantizationParameters> quant_params =
+      CreateQuantizationParameters(*builder, 0, 0, 0, 0,
+                                   QuantizationDetails_NONE, 0, 0);
+  constexpr size_t tensor_shape_size = 1;
+  const int32_t tensor_shape[tensor_shape_size] = {size};
+  const Offset<Tensor> tensor_offset = CreateTensor(
+      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
+      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
+      false);
+  builder->Finish(tensor_offset);
+  void* tensor_pointer = builder->GetBufferPointer();
+  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
+  return tensor;
+}
 
 const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* CreateBuffers() {
   using flatbuffers::Offset;
   flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
@@ -166,4 +183,25 @@ TF_LITE_MICRO_TEST(TestMultipleTooLarge) {
   TF_LITE_MICRO_EXPECT_EQ(nullptr, result);
 }
+
+TF_LITE_MICRO_TEST(TestAllocateTensor) {
+  constexpr size_t arena_size = 1024;
+  uint8_t arena[arena_size];
+  tflite::SimpleTensorAllocator allocator(arena, arena_size);
+
+  const tflite::Tensor* tensor = tflite::CreateMissingQuantizationTensor(100);
+  const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
+      tflite::CreateBuffers();
+
+  TfLiteTensor allocated_tensor;
+  TF_LITE_MICRO_EXPECT_EQ(
+      kTfLiteOk,
+      allocator.AllocateTensor(*tensor, 0, 1, buffers, micro_test::reporter,
+                               &allocated_tensor));
+  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type);
+  TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size);
+  TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]);
+  TF_LITE_MICRO_EXPECT_EQ(400, allocated_tensor.bytes);
+  TF_LITE_MICRO_EXPECT_NE(nullptr, allocated_tensor.data.i32);
+}
 
 TF_LITE_MICRO_TESTS_END
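A closing note on the new test helper: CreateMissingQuantizationTensor builds a QuantizationParameters table with every field defaulted, so quantization() is non-null while scale() and zero_point() are null, exercising the inner clauses of the new guard. A hypothetical variant (not in the commit) could omit the table entirely to cover the first clause, src_quantization == nullptr:

// Hypothetical variant: pass offset 0 for the quantization table so
// flatbuffer_tensor.quantization() itself returns nullptr.
const Offset<Tensor> tensor_offset = CreateTensor(
    *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
    TensorType_INT32, 0, builder->CreateString("test_tensor"),
    /*quantization=*/0, false);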