Fix tensor initialization in unit test.

PiperOrigin-RevId: 281649411
Change-Id: I5e909fc1b19ab3e098dfc81ac74c476e0b728f6e
This commit is contained in:
Yunlu Li 2019-11-20 18:22:50 -08:00 committed by TensorFlower Gardener
parent 3333280ab2
commit 8f14765913
2 changed files with 21 additions and 27 deletions

View File

@@ -34,7 +34,7 @@ using UniqueTfLiteTensor =
template <typename T>
UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
const std::vector<T>& data) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor, [](TfLiteTensor* t) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
@@ -42,9 +42,6 @@ UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<T>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
tensor->data.raw = nullptr;
tensor->is_variable = false;
memset(&tensor->quantization, 0, sizeof(TfLiteQuantization));
TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
return tensor;
@@ -53,7 +50,7 @@ UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
template <>
UniqueTfLiteTensor MakeLiteTensor<string>(const std::vector<int>& shape,
const std::vector<string>& data) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor, [](TfLiteTensor* t) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
@@ -61,9 +58,6 @@ UniqueTfLiteTensor MakeLiteTensor<string>(const std::vector<int>& shape,
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<string>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
tensor->data.raw = nullptr;
tensor->is_variable = false;
memset(&tensor->quantization, 0, sizeof(TfLiteQuantization));
TfLiteTensorRealloc(data.size() * sizeof(string), tensor.get());
DynamicBuffer b;

View File

@@ -146,7 +146,7 @@ TEST_F(KernelUtilTest, BroadcastShapeDifferentSizes) {
TEST_F(KernelUtilTest, CheckAndPopulate) {
// Create input.
TfLiteTensor input;
TfLiteTensor input = {};
input.type = kTfLiteInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -163,7 +163,7 @@ TEST_F(KernelUtilTest, CheckAndPopulate) {
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
TfLiteTensor filter;
TfLiteTensor filter = {};
filter.type = kTfLiteInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -188,7 +188,7 @@ TEST_F(KernelUtilTest, CheckAndPopulate) {
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias.
TfLiteTensor bias;
TfLiteTensor bias = {};
bias.type = kTfLiteInt32;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -208,7 +208,7 @@ TEST_F(KernelUtilTest, CheckAndPopulate) {
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output.
TfLiteTensor output;
TfLiteTensor output = {};
output.type = kTfLiteInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -252,7 +252,7 @@ TEST_F(KernelUtilTest, CheckAndPopulate) {
TEST_F(KernelUtilTest, CheckAndPopulateShift) {
// Create input of type kTfLiteUInt8.
TfLiteTensor input;
TfLiteTensor input = {};
input.type = kTfLiteUInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -269,7 +269,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateShift) {
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter of type kTfLiteUInt8.
TfLiteTensor filter;
TfLiteTensor filter = {};
filter.type = kTfLiteUInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -291,7 +291,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateShift) {
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias for kTfLiteUInt8.
TfLiteTensor bias;
TfLiteTensor bias = {};
bias.type = kTfLiteUInt8;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -311,7 +311,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateShift) {
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output for kTfLiteUInt8.
TfLiteTensor output;
TfLiteTensor output = {};
output.type = kTfLiteUInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -359,7 +359,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateShift) {
#ifndef __APPLE__ // Some Apple toolchains don't support std::ldexp
TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
// Create input.
TfLiteTensor input;
TfLiteTensor input = {};
input.type = kTfLiteInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -376,7 +376,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
TfLiteTensor filter;
TfLiteTensor filter = {};
filter.type = kTfLiteInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -401,7 +401,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias.
TfLiteTensor bias;
TfLiteTensor bias = {};
bias.type = kTfLiteInt32;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -421,7 +421,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output.
TfLiteTensor output;
TfLiteTensor output = {};
output.type = kTfLiteInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -466,7 +466,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
// Create input.
TfLiteTensor input;
TfLiteTensor input = {};
input.type = kTfLiteUInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -483,7 +483,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
TfLiteTensor filter;
TfLiteTensor filter = {};
filter.type = kTfLiteUInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -505,7 +505,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias.
TfLiteTensor bias;
TfLiteTensor bias = {};
bias.type = kTfLiteInt32;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -521,7 +521,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output.
TfLiteTensor output;
TfLiteTensor output = {};
output.type = kTfLiteUInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -564,7 +564,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
TEST_F(KernelUtilTest, CheckAndPopulateWithoutBias) {
// Create input.
TfLiteTensor input;
TfLiteTensor input = {};
input.type = kTfLiteUInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -581,7 +581,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateWithoutBias) {
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
TfLiteTensor filter;
TfLiteTensor filter = {};
filter.type = kTfLiteUInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -603,7 +603,7 @@ TEST_F(KernelUtilTest, CheckAndPopulateWithoutBias) {
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create output.
TfLiteTensor output;
TfLiteTensor output = {};
output.type = kTfLiteUInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;