/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/interpreter.h"

#include <stdint.h>

#include <memory>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "third_party/eigen3/Eigen/Core"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/version.h"

namespace tflite {

// InterpreterTest is a friend of Interpreter, so it can access context_.
class InterpreterTest : public ::testing::Test {
 public:
  template <typename Delegate>
  static TfLiteStatus ModifyGraphWithDelegate(
      Interpreter* interpreter, std::unique_ptr<Delegate> delegate) {
    Interpreter::TfLiteDelegatePtr tflite_delegate(
        delegate.release(), [](TfLiteDelegate* delegate) {
          delete reinterpret_cast<Delegate*>(delegate);
        });
    return interpreter->ModifyGraphWithDelegate(std::move(tflite_delegate));
  }

 protected:
  TfLiteContext* GetInterpreterContext() { return interpreter_.context_; }

  Interpreter interpreter_;
};

namespace ops {
namespace builtin {
TfLiteRegistration* Register_PADV2();
TfLiteRegistration* Register_NEG();
} // namespace builtin
} // namespace ops
namespace {

using ::testing::IsEmpty;

// Make an interpreter that has no tensors and no nodes
TEST(BasicInterpreter, ZeroInterpreter) {
  testing::internal::CaptureStderr();

  Interpreter interpreter;

#ifndef NDEBUG
  const char* kExpectedLog = "INFO: Initialized TensorFlow Lite runtime";
#else
  const char* kExpectedLog = "";
#endif
  EXPECT_THAT(testing::internal::GetCapturedStderr(),
              testing::HasSubstr(kExpectedLog));

  interpreter.SetInputs({});
  interpreter.SetOutputs({});
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Creating a new interpreter should not redundantly log runtime init.
  testing::internal::CaptureStderr();
  Interpreter interpreter2;
  EXPECT_THAT(testing::internal::GetCapturedStderr(), IsEmpty());
}

// Test various error conditions.
TEST(BasicInterpreter, InvokeInvalidModel) {
  Interpreter interpreter;
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

TEST(BasicInterpreter, TestAllocateTensorsResetVariableTensorsFloatAndHybrid) {
  Interpreter interpreter;
  int tensor_index;
  ASSERT_EQ(interpreter.AddTensors(1, &tensor_index), kTfLiteOk);
  constexpr int kTensorSize = 16;
  TfLiteQuantizationParams quant;
  interpreter.SetTensorParametersReadWrite(tensor_index, kTfLiteFloat32, "",
                                           {kTensorSize}, quant,
                                           /*is_variable=*/true);
  interpreter.SetVariables({tensor_index});
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  TfLiteTensor* tensor = interpreter.tensor(tensor_index);
  // Ensure that variable tensors are reset to zero.
  for (int i = 0; i < kTensorSize; ++i) {
    ASSERT_EQ(tensor->data.f[i], 0.0f);
  }
}

TEST(BasicInterpreter, TestAllocateTensorsResetVariableTensorsInt8) {
  Interpreter interpreter;
  int tensor_index;
  ASSERT_EQ(interpreter.AddTensors(1, &tensor_index), kTfLiteOk);
  constexpr int kTensorSize = 16;
  TfLiteQuantizationParams quant;
  quant.scale = 0.15;
  quant.zero_point = -3;
  interpreter.SetTensorParametersReadWrite(tensor_index, kTfLiteInt8, "",
                                           {kTensorSize}, quant,
                                           /*is_variable=*/true);
  interpreter.SetVariables({tensor_index});
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  TfLiteTensor* tensor = interpreter.tensor(tensor_index);
  // Ensure that variable tensors are reset to zero point.
  for (int i = 0; i < kTensorSize; ++i) {
    ASSERT_EQ(tensor->data.int8[i], -3);
  }
}

// Test size accessor functions.
TEST(BasicInterpreter, TestSizeFunctions) {
  Interpreter interpreter;
  int base_index;
  ASSERT_EQ(interpreter.nodes_size(), 0);
  ASSERT_EQ(interpreter.tensors_size(), 0);
  ASSERT_EQ(interpreter.AddTensors(2, &base_index), kTfLiteOk);
  ASSERT_EQ(interpreter.tensors_size(), 2);
  ASSERT_EQ(base_index, 0);
  ASSERT_EQ(interpreter.AddTensors(3, &base_index), kTfLiteOk);
  ASSERT_EQ(interpreter.tensors_size(), 5);
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.tensors_size(), 6);
  ASSERT_EQ(base_index, 2);
}

// Test if invalid indices make a model inconsistent (and conversely if
// valid indices keep a model consistent).
TEST(BasicInterpreter, InconsistentModel) {
  // Invalid inputs
  {
    Interpreter interpreter;
    ASSERT_NE(interpreter.SetInputs({5}), kTfLiteOk);
    ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
    ASSERT_EQ(interpreter.inputs(), std::vector<int>());
  }
  // Invalid outputs
  {
    Interpreter interpreter;
    ASSERT_NE(interpreter.SetOutputs({5}), kTfLiteOk);
    ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
    ASSERT_EQ(interpreter.outputs(), std::vector<int>());
  }
  // Invalid node inputs
  {
    Interpreter interpreter;
    TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
    ASSERT_NE(interpreter.AddNodeWithParameters({3}, {0}, nullptr, 0, nullptr,
                                                &registration),
              kTfLiteOk);
    ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  }
  // Valid inputs and outputs and a node with valid inputs and outputs
  {
    Interpreter interpreter;
    ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
    TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
    ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
    ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
    ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                                &registration),
              kTfLiteOk);
  }
}

// Make an interpreter that has one tensor but no ops
TEST(BasicInterpreter, CheckAllocate) {
  struct {
    TfLiteType type;
    size_t size;
  } cases[] = {
      {kTfLiteFloat32, sizeof(float)}, {kTfLiteInt32, sizeof(int32_t)},
      {kTfLiteUInt8, sizeof(uint8_t)}, {kTfLiteInt64, sizeof(int64_t)},
      {kTfLiteInt16, sizeof(int16_t)}, {kTfLiteFloat16, sizeof(TfLiteFloat16)},
  };

  for (auto test : cases) {
    Interpreter interpreter;
    ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
    interpreter.SetInputs({0, 1});
    interpreter.SetOutputs({});
    TfLiteQuantizationParams quant;

    interpreter.SetTensorParametersReadWrite(0, test.type, "", {3}, quant);
    interpreter.SetTensorParametersReadWrite(1, test.type, "", {4}, quant);
    ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_EQ(interpreter.tensor(0)->bytes, 3 * test.size);
    ASSERT_NE(interpreter.tensor(0)->data.raw, nullptr);
    ASSERT_EQ(interpreter.tensor(1)->bytes, 4 * test.size);
    ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
  }
}

TEST(BasicInterpreter, CheckQuantization) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({});
  TfLiteType tensor_type = kTfLiteInt8;
  const uint8_t int8s[] = {3, 4};
  float scale = 0.5f;
  int32_t zero_point = 12;

  TfLiteQuantization rw_quantization;
  rw_quantization.type = kTfLiteAffineQuantization;
  auto* rw_affine_quantization = static_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  rw_affine_quantization->scale = TfLiteFloatArrayCreate(1);
  rw_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
  rw_affine_quantization->scale->data[0] = scale;
  rw_affine_quantization->zero_point->data[0] = zero_point;
  rw_quantization.params = rw_affine_quantization;

  TfLiteQuantization ro_quantization;
  ro_quantization.type = kTfLiteAffineQuantization;
  auto* ro_affine_quantization = static_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  ro_affine_quantization->scale = TfLiteFloatArrayCreate(1);
  ro_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
  ro_affine_quantization->scale->data[0] = scale;
  ro_affine_quantization->zero_point->data[0] = zero_point;
  ro_quantization.params = ro_affine_quantization;

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, tensor_type, "", {3},
                                                     rw_quantization),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadOnly(
                1, tensor_type, "", {2}, ro_quantization,
                reinterpret_cast<const char*>(int8s), 2),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  // Check that the legacy scale and zero_point are set correctly.
  ASSERT_EQ(interpreter.tensor(0)->params.scale, scale);
  ASSERT_EQ(interpreter.tensor(0)->params.zero_point, zero_point);
  ASSERT_EQ(interpreter.tensor(0)->quantization.type, rw_quantization.type);
  ASSERT_EQ(interpreter.tensor(1)->params.scale, scale);
  ASSERT_EQ(interpreter.tensor(1)->params.zero_point, zero_point);
  ASSERT_EQ(interpreter.tensor(1)->quantization.type, ro_quantization.type);
}

TEST(BasicInterpreter, CheckResize) {
  const float floats[] = {-3., -4.};
  const int32_t int32s[] = {-3, -4};
  const uint8_t uint8s[] = {3, 4};
  const int64_t int64s[] = {6, -7};
  const int16_t int16s[] = {8, -9};
  const Eigen::half float16s[] = {Eigen::half_impl::float_to_half_rtne(-3.f),
                                  Eigen::half_impl::float_to_half_rtne(-4.f)};

  struct {
    TfLiteType type;
    size_t size;
    const char* array;
  } cases[] = {
      {kTfLiteFloat32, sizeof(float), reinterpret_cast<const char*>(floats)},
      {kTfLiteInt32, sizeof(int32_t), reinterpret_cast<const char*>(int32s)},
      {kTfLiteUInt8, sizeof(uint8_t), reinterpret_cast<const char*>(uint8s)},
      {kTfLiteInt64, sizeof(int64_t), reinterpret_cast<const char*>(int64s)},
      {kTfLiteInt16, sizeof(int16_t), reinterpret_cast<const char*>(int16s)},
      {kTfLiteFloat16, sizeof(TfLiteFloat16),
       reinterpret_cast<const char*>(float16s)},
  };

  for (auto test : cases) {
    Interpreter interpreter;

    ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
    interpreter.SetInputs({0, 1});
    interpreter.SetOutputs({});
    TfLiteQuantizationParams quant;

    ASSERT_EQ(
        interpreter.SetTensorParametersReadWrite(0, test.type, "", {3}, quant),
        kTfLiteOk);
    ASSERT_EQ(interpreter.SetTensorParametersReadOnly(
                  1, test.type, "", {2}, quant, test.array, 2 * test.size),
              kTfLiteOk);
    ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_EQ(interpreter.ResizeInputTensor(0, {1, 2}), kTfLiteOk);
    // Resizing an mmapped tensor is not allowed and should produce an error.
    ASSERT_NE(interpreter.ResizeInputTensor(1, {3}), kTfLiteOk);
    // Set the tensor to be mmapped but with a buffer size that is insufficient
    // to match the dimensionality.
    ASSERT_NE(interpreter.SetTensorParametersReadOnly(
                  1, test.type, "", {2}, quant, test.array, 1 * test.size),
              kTfLiteOk);
    // Allocating should work since we should have our last correct array
    // values in place.
    ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  }
}

TEST(BasicInterpreter, CheckAlignment) {
  struct {
    TfLiteType type;
  } cases[] = {{kTfLiteFloat32}, {kTfLiteInt32}, {kTfLiteUInt8},
               {kTfLiteInt64}, {kTfLiteInt16}, {kTfLiteFloat16}};

  for (auto test : cases) {
    Interpreter interpreter;

    ASSERT_EQ(interpreter.AddTensors(4), kTfLiteOk);

    for (int i = 0; i < 4; i++) {
      TfLiteQuantizationParams quant;
      interpreter.SetTensorParametersReadWrite(i, test.type, "", {2 * i + 1},
                                               quant);
    }
    interpreter.AllocateTensors();
    for (int i = 0; i < 4; i++) {
      const TfLiteTensor& tensor = *interpreter.tensor(i);
      ASSERT_EQ(reinterpret_cast<intptr_t>(tensor.data.raw) % 4, 0);
    }
  }
}

TEST(BasicInterpreter, CheckArenaAllocation) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(10), kTfLiteOk);

  TfLiteQuantizationParams quant;
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};

  std::vector<int> sizes{2048, 4096, 1023, 2047, 1021,
                         2047, 1023, 2046, 0, 2048};
  for (size_t i = 0; i < sizes.size(); ++i) {
    interpreter.SetTensorParametersReadWrite(static_cast<int>(i), kTfLiteUInt8,
                                             "", {sizes[i]}, quant);
  }
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({9, 4});
  interpreter.AddNodeWithParameters({0, 1}, {2, 3}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({2, 1}, {4, 5}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({4, 3}, {6, 7}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({6, 5}, {8}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({8, 7}, {9}, nullptr, 0, nullptr, &reg);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_LT(interpreter.tensor(0)->data.raw, interpreter.tensor(1)->data.raw);
  ASSERT_LT(interpreter.tensor(1)->data.raw, interpreter.tensor(3)->data.raw);
  ASSERT_EQ(interpreter.tensor(3)->data.raw, interpreter.tensor(9)->data.raw);
  ASSERT_LT(interpreter.tensor(3)->data.raw, interpreter.tensor(5)->data.raw);
  ASSERT_LT(interpreter.tensor(5)->data.raw, interpreter.tensor(2)->data.raw);
  ASSERT_EQ(interpreter.tensor(2)->data.raw, interpreter.tensor(7)->data.raw);
  ASSERT_LT(interpreter.tensor(2)->data.raw, interpreter.tensor(4)->data.raw);
  // #4 is the one with the largest pointer.
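  // Tensor 8 was declared with zero elements (sizes[8] == 0 above), so the
  // arena planner never assigns it a buffer and its data pointer stays null.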
  ASSERT_EQ(interpreter.tensor(8)->data.raw, nullptr);
}

TEST(BasicInterpreter, BufferAccess) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  // Verify we get a valid pointer.
  ASSERT_NE(interpreter.typed_tensor<float>(0), nullptr);
  // Verify incorrect pointer is not returned.
  ASSERT_EQ(interpreter.typed_tensor<int>(0), nullptr);
  // Verify that raw c interface ptr matches safe interface.
  ASSERT_EQ(interpreter.typed_tensor<float>(0), interpreter.tensor(0)->data.f);
}

TEST(BasicInterpreter, NoOpInterpreter) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

TEST(BasicInterpreter, RedundantAllocateTensors) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  const auto data_raw = interpreter.tensor(0)->data.raw;
  ASSERT_NE(data_raw, nullptr);

  // A redundant allocation request should have no impact.
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.tensor(0)->data.raw, data_raw);
}

TEST(BasicInterpreter, RedundantAllocateTensorsWithDynamicInputs) {
  Interpreter interpreter;
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  interpreter.SetInputs({0});
  interpreter.SetOutputs({1});
  interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                1, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  // Configure the input tensor as dynamic.
  interpreter.tensor(0)->data.raw = nullptr;
  interpreter.tensor(0)->allocation_type = kTfLiteDynamic;

  ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);

  // Reset the output tensor's buffer.
  interpreter.tensor(1)->data.raw = nullptr;

  // A redundant allocation request should be honored, as the input tensor
  // was marked dynamic.
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
}

TEST(BasicInterpreter, ResizingTensors) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  tensor->data.f[5] = 0.123f;

  // Changing from kTfLiteArenaRw to kTfLiteDynamic is quite complicated: we
  // need to unset data.raw, otherwise Realloc will try to free that memory.
  tensor->data.raw = nullptr;
  tensor->allocation_type = kTfLiteDynamic;

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 4}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 1 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {0}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 0);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 0}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 0);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // TODO(ahentz): We shouldn't have to force reallocation, but
  // ResizeInputTensor doesn't realloc dynamic tensors. Also note that
  // TfLiteTensorRealloc(tensor->bytes, tensor) is a no-op.
  TfLiteTensorRealloc(9 * sizeof(float), tensor);
  tensor->data.f[7] = 0.123f;

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {2, 2, 4}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 16 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // TODO(ahentz): We shouldn't have to force reallocation, but
  // ResizeInputTensor doesn't realloc dynamic tensors. Also note that
  // TfLiteTensorRealloc(tensor->bytes, tensor) is a no-op.
  TfLiteTensorRealloc(17 * sizeof(float), tensor);
  tensor->data.f[15] = 0.123f;
}

TEST(BasicInterpreter, NoopResizingTensors) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  tensor->data.f[5] = 0.123f;

  // Resizing to the same size should not trigger re-allocation.
  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_NE(tensor->data.raw, nullptr);
  ASSERT_EQ(tensor->data.f[5], 0.123f);

  // Explicitly allocating should be a no-op, as no resize was performed.
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_NE(tensor->data.raw, nullptr);
  ASSERT_EQ(tensor->data.f[5], 0.123f);
}

TEST(BasicInterpreter, ResizingTensorsStrictInvalid) {
  // Tests ResizeInputTensorStrict where `dims_signature` is not specified.
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {1, 1, 3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 1, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 3 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Invalid because `dims_signature` is not specified.
  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 3}), kTfLiteError);
  EXPECT_EQ(tensor->bytes, 3 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Assert that ResizeInputTensor works for this value.
  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

TEST(BasicInterpreter, ResizingTensorsStrict) {
  // Tests ResizeInputTensorStrict where `dims_signature` is specified.
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  std::vector<int> dims_signature = {-1, -1, 3};
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {1, 1, 3}, TfLiteQuantizationParams(),
                false, &dims_signature),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 4}), kTfLiteError);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Assert that ResizeInputTensor works for this value.
  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 4}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

// Simple op that copies its input to its output.
TfLiteRegistration GetPassthroughOpRegistration() {
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  reg.init = [](TfLiteContext* context, const char*, size_t) -> void* {
    auto* first_new_tensor = new int;
    context->AddTensors(context, 2, first_new_tensor);
    return first_new_tensor;
  };
  reg.free = [](TfLiteContext* context, void* buffer) {
    delete static_cast<int*>(buffer);
  };
  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    auto* first_new_tensor = static_cast<int*>(node->user_data);

    const TfLiteTensor* tensor0 = GetInput(context, node, 0);
    TfLiteTensor* tensor1 = GetOutput(context, node, 0);

    TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
    TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, tensor1, newSize));
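
    // Register the two tensors created in init() as this node's scratch
    // temporaries; the interpreter's memory planner then allocates arena
    // space for them just like regular outputs.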
    TfLiteIntArrayFree(node->temporaries);
    node->temporaries = TfLiteIntArrayCreate(2);
    for (int i = 0; i < 2; ++i) {
      node->temporaries->data[i] = *(first_new_tensor) + i;
    }

    auto setup_temporary = [&](int id) {
      TfLiteTensor* tmp = &context->tensors[id];
      tmp->type = kTfLiteFloat32;
      tmp->allocation_type = kTfLiteArenaRw;
      return context->ResizeTensor(context, tmp,
                                   TfLiteIntArrayCopy(tensor0->dims));
    };
    TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[0]));
    TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[1]));

    return kTfLiteOk;
  };
  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* a0 = GetInput(context, node, 0);

    auto populate = [&](int id) {
      TfLiteTensor* t = &context->tensors[id];
      int num = a0->dims->data[0];
      for (int i = 0; i < num; i++) {
        t->data.f[i] = a0->data.f[i];
      }
    };

    populate(node->outputs->data[0]);
    populate(node->temporaries->data[0]);
    populate(node->temporaries->data[1]);
    return kTfLiteOk;
  };

  return reg;
}

TEST(BasicInterpreter, OneOpInterpreter) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);

  TfLiteQuantizationParams quantized;
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "in1",
                                                     {3}, quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "out0",
                                                     {3}, quantized),
            kTfLiteOk);

  ASSERT_EQ(interpreter.GetInputName(0), "in1");
  ASSERT_EQ(interpreter.GetOutputName(0), "out0");

  TfLiteRegistration reg = GetPassthroughOpRegistration();

  ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
      kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

TEST(BasicInterpreter, ReleaseNonPersistentMemory) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);

  TfLiteQuantizationParams quantized;
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "in1",
                                                     {3}, quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "out0",
                                                     {3}, quantized),
            kTfLiteOk);

  TfLiteRegistration reg = GetPassthroughOpRegistration();

  ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
      kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);

  // AllocateTensors() hasn't been called yet, so this should be a no-op.
  ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);
  // Invoke() now fails because non-persistent arenas have been released.
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Calling ResizeInputTensor just after ReleaseNonPersistentMemory should
  // also require a fresh AllocateTensors, without causing any unexpected
  // crashes.
  ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {4}), kTfLiteOk);
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

// Forcefully divides tensor allocation in three steps: one before invocation
// and two more at invocation time. This happens because we use string tensors
// and their sizes can't be determined until invocation time.
TEST(BasicInterpreter, ThreeStepAllocate) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(5), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({4}), kTfLiteOk);

  TfLiteQuantizationParams quantized;

  // String tensor with one string of length 3
  union {
    char raw_bytes[15];
    struct {
      int32_t num_strs;
      int32_t offsets[2];
      char str_data[3];
    } tensor_data;
  } data;
  data.tensor_data = {1, {12, 15}, {'A', 'B', 'C'}};
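  // TFLite serializes a string tensor as an int32 string count, followed by
  // (count + 1) int32 offsets into the buffer, followed by the raw bytes.
  // Here 12 = 4 (count) + 8 (two offsets) is where "ABC" begins, and 15 is
  // the total buffer size.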

  // Read only string tensor.
  ASSERT_EQ(interpreter.SetTensorParametersReadOnly(0, kTfLiteString, "", {1},
                                                    quantized, data.raw_bytes,
                                                    sizeof(data.raw_bytes)),
            kTfLiteOk);
  // Read-write string tensor.
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteString, "", {1},
                                                     quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(2, kTfLiteInt32, "", {1},
                                                     quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(3, kTfLiteString, "", {1},
                                                     quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(4, kTfLiteInt32, "", {1},
                                                     quantized),
            kTfLiteOk);

  // String-in String-out node.
  TfLiteRegistration reg_copy = {nullptr, nullptr, nullptr, nullptr};
  reg_copy.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* input = GetInput(context, node, 0);
    TfLiteTensor* output = GetOutput(context, node, 0);
    DynamicBuffer buf;
    StringRef str_ref = GetString(input, 0);
    buf.AddString(str_ref);
    buf.WriteToTensorAsVector(output);
    return kTfLiteOk;
  };

  // String-in Int-out node.
  TfLiteRegistration reg_len = {nullptr, nullptr, nullptr, nullptr};
  reg_len.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    TfLiteTensor* output = GetOutput(context, node, 0);
    TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
    outputSize->data[0] = 1;
    return context->ResizeTensor(context, output, outputSize);
  };
  reg_len.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* a0 = GetInput(context, node, 0);
    TfLiteTensor* a1 = GetOutput(context, node, 0);
    a1->data.i32[0] = a0->bytes;
    return kTfLiteOk;
  };

  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &reg_copy),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({1}, {2}, nullptr, 0, nullptr,
                                              &reg_len),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {3}, nullptr, 0, nullptr,
                                              &reg_copy),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({3}, {4}, nullptr, 0, nullptr,
                                              &reg_len),
            kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  ASSERT_EQ(interpreter.tensor(0)->bytes, 15);
  ASSERT_NE(interpreter.tensor(0)->data.raw, nullptr);
  ASSERT_EQ(interpreter.tensor(1)->bytes, 15);
  ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
  ASSERT_EQ(interpreter.tensor(3)->bytes, 15);
  ASSERT_NE(interpreter.tensor(4)->data.raw, nullptr);
  ASSERT_EQ(interpreter.tensor(2)->bytes, 4);
  ASSERT_EQ(interpreter.tensor(2)->data.i32[0], 15);
  ASSERT_EQ(interpreter.tensor(4)->bytes, 4);
  ASSERT_EQ(interpreter.tensor(4)->data.i32[0], 15);
}

TEST(BasicInterpreter, AllocateTwice) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);

  TfLiteQuantizationParams quantized;
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "",
                                                     {3}, quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "",
                                                     {3}, quantized),
            kTfLiteOk);

  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* tensor0 = GetInput(context, node, 0);
    TfLiteTensor* tensor1 = GetOutput(context, node, 0);
    TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
    return context->ResizeTensor(context, tensor1, newSize);
  };
  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* a0 = GetInput(context, node, 0);
    TfLiteTensor* a1 = GetOutput(context, node, 0);
    int num = a0->dims->data[0];
    for (int i = 0; i < num; i++) {
      a1->data.f[i] = a0->data.f[i];
    }
    return kTfLiteOk;
  };
  ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
      kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
  char* old_tensor0_ptr = interpreter.tensor(0)->data.raw;
  char* old_tensor1_ptr = interpreter.tensor(1)->data.raw;

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(old_tensor0_ptr, interpreter.tensor(0)->data.raw);
  ASSERT_EQ(old_tensor1_ptr, interpreter.tensor(1)->data.raw);
}

TEST(BasicInterpreter, TestNullErrorReporter) {
  TestErrorReporter reporter;
  Interpreter interpreter;
}

TEST(BasicInterpreter, TestCustomErrorReporter) {
  TestErrorReporter reporter;
  Interpreter interpreter(&reporter);
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(reporter.error_messages(),
            "Invoke called on model that is not ready.");
  ASSERT_EQ(reporter.num_calls(), 1);
}

TEST(BasicInterpreter, TestOverflow) {
  TestErrorReporter reporter;
  Interpreter interpreter(&reporter);
  TfLiteQuantizationParams quantized;

  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
  // Overflow testing is pointer word size dependent.
  if (sizeof(size_t) == 8) {
    // #bits for byte count = 30 + 30 + 2 = 62 < 64
    ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                  0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30}, quantized),
              kTfLiteOk);
    // #bits for element count = 30 + 30 + 2 = 62 < 64 (no overflow)
    // #bits for byte count = 30 + 30 + 2 + 2 = 64 == 64 (overflow)
    ASSERT_NE(
        interpreter.SetTensorParametersReadWrite(
            0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30, 1 << 2}, quantized),
        kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of bytes overflowed.\n"));
    // #bits for element count = 30 + 30 + 2 + 4 = 66 > 64 (overflow).
    // #bits for byte count = 30 + 30 + 2 + 4 + 2 = 68 > 64 (overflow).
    reporter.Reset();
    ASSERT_NE(interpreter.SetTensorParametersReadWrite(
                  0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30, 1 << 2, 1 << 4},
                  quantized),
              kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of elements overflowed.\n"));

  } else if (sizeof(size_t) == 4) {
    // #bits for byte count = 14 + 14 + 2 = 30 < 32
    ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                  0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14}, quantized),
              kTfLiteOk);
    // #bits for element count = 14 + 14 + 3 = 31 < 32 (no overflow).
    // #bits for byte count = 14 + 14 + 3 + 2 = 33 > 32 (overflow).
    ASSERT_NE(
        interpreter.SetTensorParametersReadWrite(
            0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14, 1 << 3}, quantized),
        kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of bytes overflowed.\n"));
    // #bits for element count = 14 + 14 + 4 = 32 == 32 (overflow).
    // The byte count also overflows, but we don't get to that check.
    reporter.Reset();
    ASSERT_NE(
        interpreter.SetTensorParametersReadWrite(
            0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14, 1 << 4}, quantized),
        kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of elements overflowed.\n"));
  } else {
    // This test failing means that we are running on an architecture that is
    // neither 32-bit nor 64-bit.
    ASSERT_TRUE(false);
  }
}

TEST(BasicInterpreter, TestUseNNAPI) {
  TestErrorReporter reporter;
  Interpreter interpreter(&reporter);
  interpreter.UseNNAPI(true);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
  interpreter.UseNNAPI(false);
  ASSERT_EQ(reporter.error_messages(),
            "Attempting to disable NNAPI delegate after it's applied.");
}

TEST(BasicInterpreter, TestUnsupportedDelegateFunctions) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
  // These functions are only supported inside a delegate's Prepare function.
  // The test verifies that these functions return `kTfLiteError` rather than
  // returning `kTfLiteOk` or crashing.
  registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    {
      TfLiteIntArray* execution_plan;
      EXPECT_EQ(context->GetExecutionPlan(context, &execution_plan),
                kTfLiteError);
    }
    {
      TfLiteNode* node;
      TfLiteRegistration* registration;
      EXPECT_EQ(
          context->GetNodeAndRegistration(context, 0, &node, &registration),
          kTfLiteError);
    }
    {
      TfLiteRegistration delegate_registration = {nullptr, nullptr, nullptr,
                                                  nullptr};
      TfLiteIntArray nodes_to_replace;
      nodes_to_replace.size = 0;
      EXPECT_EQ(context->ReplaceNodeSubsetsWithDelegateKernels(
                    context, delegate_registration, &nodes_to_replace,
                    nullptr),
                kTfLiteError);
    }
    return kTfLiteError;
  };
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &registration),
            kTfLiteOk);
  EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError);
}

TEST(BasicInterpreter, DynamicTensorsResizeDescendants) {
  // Assemble a graph with a node that has dynamically sized output (via the
  // pad op), followed by a node with a standard element-wise op (negate).
  Interpreter interpreter;
  interpreter.AddTensors(4);
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({3});
  TfLiteQuantizationParams quant;
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {2, 2, 1, 1},
                                           quant);
  interpreter.SetTensorParametersReadWrite(1, kTfLiteInt32, "", {4, 2}, quant);
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {}, quant);
  interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {}, quant);

  TfLiteRegistration* pad_op = tflite::ops::builtin::Register_PADV2();
  TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
  interpreter.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, nullptr, pad_op);
  interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, neg_op);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Configure [[2,2],[2,2]] padding on the first two dims and execute the
  // graph.
  interpreter.typed_tensor<int>(1)[0] = 2;
  interpreter.typed_tensor<int>(1)[1] = 2;
  interpreter.typed_tensor<int>(1)[2] = 2;
  interpreter.typed_tensor<int>(1)[3] = 2;
  interpreter.typed_tensor<int>(1)[4] = 0;
  interpreter.typed_tensor<int>(1)[5] = 0;
  interpreter.typed_tensor<int>(1)[6] = 0;
  interpreter.typed_tensor<int>(1)[7] = 0;
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Both the output and intermediate tensor sizes should reflect the output
  // from the dynamic pad operation.
  ASSERT_EQ(interpreter.tensor(2)->bytes, sizeof(float) * 6 * 6);
  ASSERT_EQ(interpreter.tensor(3)->bytes, sizeof(float) * 6 * 6);

  // Now configure [[4,4],[6,6]] padding and execute the graph.
  interpreter.typed_tensor<int>(1)[0] = 4;
  interpreter.typed_tensor<int>(1)[1] = 4;
  interpreter.typed_tensor<int>(1)[2] = 6;
  interpreter.typed_tensor<int>(1)[3] = 6;
  interpreter.typed_tensor<int>(1)[4] = 0;
  interpreter.typed_tensor<int>(1)[5] = 0;
  interpreter.typed_tensor<int>(1)[6] = 0;
  interpreter.typed_tensor<int>(1)[7] = 0;
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Again, the output and intermediate tensor sizes should reflect the *new*
  // resize from the latest pad operation.
  ASSERT_EQ(interpreter.tensor(2)->bytes, sizeof(float) * 10 * 14);
  ASSERT_EQ(interpreter.tensor(3)->bytes, sizeof(float) * 10 * 14);
}

TEST(InterpreterTensorsCapacityTest, TestWithinHeadroom) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(Interpreter::kTensorsReservedCapacity),
            kTfLiteOk);
  TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
  registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    TfLiteTensor* first_tensor = context->tensors;

    int new_tensor_index;
    context->AddTensors(context, Interpreter::kTensorsCapacityHeadroom,
                        &new_tensor_index);
    EXPECT_EQ(first_tensor, context->tensors);
    return kTfLiteOk;
  };
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &registration),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

TEST(InterpreterTensorsCapacityTest, TestExceedHeadroom) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(Interpreter::kTensorsReservedCapacity),
            kTfLiteOk);
  TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
  registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    TfLiteTensor* first_tensor = context->tensors;

    int new_tensor_index;
    // Add enough tensors to trigger buffer re-allocation.
    context->AddTensors(
        context,
        (context->tensors_size + Interpreter::kTensorsCapacityHeadroom + 1) *
            2,
        &new_tensor_index);
    EXPECT_NE(first_tensor, context->tensors);
    return kTfLiteOk;
  };
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &registration),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

struct TestExternalContext : public TfLiteExternalContext {
  static constexpr TfLiteExternalContextType kType = kTfLiteGemmLowpContext;

  static TestExternalContext* Get(TfLiteContext* context) {
    return reinterpret_cast<TestExternalContext*>(
        context->GetExternalContext(context, kType));
  }

  static void Set(TfLiteContext* context, TestExternalContext* value) {
    context->SetExternalContext(context, kType, value);
  }

  int num_refreshes = 0;
};
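
// Note: Interpreter::SetNumThreads() triggers Refresh() on any registered
// external contexts so they can rebuild thread-dependent state; the test
// below counts those refreshes.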

TEST_F(InterpreterTest, GetSetResetExternalContexts) {
  auto* context = GetInterpreterContext();

  TestExternalContext external_context;
  external_context.Refresh = [](TfLiteContext* context) {
    auto* ptr = TestExternalContext::Get(context);
    if (ptr != nullptr) {
      ++ptr->num_refreshes;
    }
    return kTfLiteOk;
  };

  EXPECT_EQ(TestExternalContext::Get(context), nullptr);
  interpreter_.SetNumThreads(4);

  TestExternalContext::Set(context, &external_context);
  EXPECT_EQ(TestExternalContext::Get(context), &external_context);
  interpreter_.SetNumThreads(4);
  interpreter_.SetNumThreads(5);
  EXPECT_EQ(external_context.num_refreshes, 2);

  // Reset the refresh count to 0.
  external_context.num_refreshes = 0;
  // An invalid thread count (anything below -1) is rejected and should not
  // trigger an external context refresh.
  interpreter_.SetNumThreads(-2);
  EXPECT_EQ(external_context.num_refreshes, 0);

  interpreter_.SetNumThreads(-1);
  EXPECT_EQ(external_context.num_refreshes, 1);

  TestExternalContext::Set(context, nullptr);
  EXPECT_EQ(TestExternalContext::Get(context), nullptr);
  interpreter_.SetNumThreads(4);
}

struct TestCpuBackendContext : public TfLiteInternalBackendContext {
  // Count the number of calls to ClearCaches for the backend context.
  void ClearCaches() override { ++num_calls; }
  void SetMaxNumThreads(int num_threads) override {}
  int num_calls = 0;
};

TEST_F(InterpreterTest, ExternalBackendContextClearsCachesOnDelete) {
  ExternalCpuBackendContext external_cpu_context;
  TestCpuBackendContext* cpu_backend_context = new TestCpuBackendContext();
  external_cpu_context.set_internal_backend_context(
      std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context));

  {
    // Create an interpreter with an external CPU backend context and ensure
    // it goes out of scope.
    Interpreter interpreter;
    interpreter.SetExternalContext(kTfLiteCpuBackendContext,
                                   &external_cpu_context);
    EXPECT_EQ(cpu_backend_context->num_calls, 0);
  }
  EXPECT_EQ(cpu_backend_context->num_calls, 1);
}

// Test fixture that allows playing with execution plans. It creates a two
// node graph that can be executed in either [0,1] order or [1,0] order.
// The CopyOp records when it is invoked in the class member run_order_
// so we can test whether the execution plan was honored.
class TestExecutionPlan : public ::testing::Test {
  // Encapsulates the node ids and provides them through a C primitive data
  // type. Allocatable with placement new, but never destructed, so make sure
  // this doesn't own any heap allocated data. This is then used as op local
  // data to allow access to the test fixture data.
  class CallReporting {
   public:
    CallReporting(int node_id, std::vector<int>* run_order)
        : node_id_(node_id), run_order_(run_order) {}

    void Record() { run_order_->push_back(node_id_); }

   private:
    // The node id for this particular node.
    int node_id_;
    // A pointer to the global run-order.
    std::vector<int>* run_order_;
  };

  // Build a kernel registration for an op that copies its one input
  // to an output
  TfLiteRegistration CopyOpRegistration() {
    TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};

    reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
      // Set output size to input size
      const TfLiteTensor* tensor0 = GetInput(context, node, 0);
      TfLiteTensor* tensor1 = GetOutput(context, node, 0);
      TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
      return context->ResizeTensor(context, tensor1, newSize);
    };

    reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
      CallReporting* call_reporting =
          static_cast<CallReporting*>(node->builtin_data);
      // Copy input data to output data.
      const TfLiteTensor* a0 = GetInput(context, node, 0);
      TfLiteTensor* a1 = GetOutput(context, node, 0);
      int num = a0->dims->data[0];
      for (int i = 0; i < num; i++) {
        a1->data.f[i] = a0->data.f[i];
      }
      call_reporting->Record();
      return kTfLiteOk;
    };
    return reg;
  }

  // Adds a copy node going from tensor `input` to output tensor `output`.
  // Note, input is used as the node_id. Inject run_order as op accessible
  // data. Note: this is a slightly unusual way to do this, but it uses op
  // functionality to avoid static global variables.
  void MakeCopyNode(int input, int output) {
    // Ownership of call_reporting is taken by the interpreter (malloc is used
    // because nodes are a C99 interface, so free() is used to release it).
    TfLiteRegistration copy_op = CopyOpRegistration();
    CallReporting* call_reporting_1 =
        static_cast<CallReporting*>(malloc(sizeof(CallReporting)));
    new (call_reporting_1) CallReporting(input, &run_order_);
    ASSERT_EQ(interpreter_.AddNodeWithParameters(
                  {input}, {output}, nullptr, 0,
                  static_cast<void*>(call_reporting_1), &copy_op),
              kTfLiteOk);
    ASSERT_EQ(interpreter_.ResizeInputTensor(input, {3}), kTfLiteOk);
  }

  void SetUp() final {
    // Add two inputs and two outputs that don't depend on each other
    ASSERT_EQ(interpreter_.AddTensors(4), kTfLiteOk);
    interpreter_.SetInputs({0, 1});
    interpreter_.SetOutputs({2, 3});
    TfLiteQuantizationParams quantized;
    for (int tensor_index = 0; tensor_index < 4; tensor_index++) {
      ASSERT_EQ(interpreter_.SetTensorParametersReadWrite(
                    tensor_index, kTfLiteFloat32, "", {3}, quantized),
                kTfLiteOk);
    }

    // Define two copy functions that also use the user_data to report that
    // they were called.
    // i.e. tensor[2] = copy(tensor[0]); tensor[3] = copy(tensor[1]);
    // Thus we can reorder the two nodes arbitrarily and still satisfy
    // dependency order.
    MakeCopyNode(0, 2);
    MakeCopyNode(1, 3);

    ASSERT_EQ(interpreter_.AllocateTensors(), kTfLiteOk);
  }

 protected:
  Interpreter interpreter_;

  // List of node_ids that were run.
  std::vector<int> run_order_;
};

TEST_F(TestExecutionPlan, DefaultExecutionPlan) {
  // Check default order
  ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
  ASSERT_EQ(run_order_, std::vector<int>({0, 1}));
}

TEST_F(TestExecutionPlan, ReversedExecutionPlan) {
  // Check reversed order
  interpreter_.SetExecutionPlan({1, 0});
  ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
  ASSERT_EQ(run_order_, std::vector<int>({1, 0}));
}

TEST_F(TestExecutionPlan, SubsetExecutionPlan) {
  // Check running only node index 1
  interpreter_.SetExecutionPlan({1});
  ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
  ASSERT_EQ(run_order_, std::vector<int>({1}));
}

TEST_F(TestExecutionPlan, NullExecutionPlan) {
  // Check nothing executed.
  interpreter_.SetExecutionPlan({});
  ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
  ASSERT_EQ(run_order_, std::vector<int>());
}

TEST(TestDelegateOwnership, ProperlyDisposed) {
  struct TfLiteInterpreterOwnedDelegate : public TfLiteDelegate {
    TfLiteInterpreterOwnedDelegate(bool* destroyed, bool* prepared)
        : destroyed(destroyed), prepared(prepared) {
      flags = kTfLiteDelegateFlagsNone;
      Prepare = [](TfLiteContext*, TfLiteDelegate* delegate) -> TfLiteStatus {
        *static_cast<TfLiteInterpreterOwnedDelegate*>(delegate)->prepared =
            true;
        return kTfLiteOk;
      };
    }
    ~TfLiteInterpreterOwnedDelegate() { *destroyed = true; }

    bool* destroyed;
    bool* prepared;
  };

  // Construct a delegate with flags for indicating preparation/destruction.
  bool destroyed = false;
  bool prepared = false;
  std::unique_ptr<TfLiteInterpreterOwnedDelegate> delegate(
      new TfLiteInterpreterOwnedDelegate(&destroyed, &prepared));
  {
    // Create an interpreter and assemble a simple graph.
    Interpreter interpreter;
    TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
    ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
    ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
    ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
    ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                                &registration),
              kTfLiteOk);

    // Pass delegate ownership to that interpreter.
    ASSERT_EQ(InterpreterTest::ModifyGraphWithDelegate(&interpreter,
                                                       std::move(delegate)),
              kTfLiteOk);

    // The delegate should be prepared as normal, and should be preserved.
    EXPECT_TRUE(prepared);
    EXPECT_FALSE(destroyed);

    // Interpreter interaction should not impact the delegate's validity.
    interpreter.AllocateTensors();
    interpreter.Invoke();
    EXPECT_FALSE(destroyed);
  }

  // Only after the interpreter is destroyed should the delegate be destroyed.
  EXPECT_TRUE(destroyed);
}

// CancellationData contains the data required to cancel a call to Invoke().
struct CancellationData {
  bool is_cancelled = false;
};

// Indicates whether Invoke() has been cancelled based on the value of the
// CancellationData object passed in.
bool CheckCancellation(void* data) {
  CancellationData* cancellation_data =
      static_cast<struct CancellationData*>(data);
  return cancellation_data->is_cancelled;
}
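
// Note: Invoke() consults the registered cancellation callback between ops,
// so cancellation takes effect at op granularity rather than interrupting a
// running kernel.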

static struct CancellationData cancellation_data_;

// Test fixture to test cancellation within the Interpreter.
class CancellationTest : public ::testing::Test {
 public:
  TfLiteStatus Invoke() { return interpreter_.Invoke(); }
  void Cancel() { cancellation_data_.is_cancelled = true; }

  // Adds a CancelOp with input tensor `input` and output tensor `output`.
  void MakeCancelNode(int input, int output) {
    TfLiteRegistration op = CancelOpRegistration();
    ASSERT_EQ(interpreter_.AddNodeWithParameters({input}, {output}, nullptr, 0,
                                                 nullptr, &op),
              kTfLiteOk);
    ASSERT_EQ(interpreter_.ResizeInputTensor(input, {3}), kTfLiteOk);
  }

  // Adds an OkOp with input tensor `input` and output tensor `output`.
  void MakeOkNode(int input, int output) {
    TfLiteRegistration op = OkOpRegistration();
    ASSERT_EQ(interpreter_.AddNodeWithParameters({input}, {output}, nullptr, 0,
                                                 nullptr, &op),
              kTfLiteOk);
    ASSERT_EQ(interpreter_.ResizeInputTensor(input, {3}), kTfLiteOk);
  }

  Interpreter interpreter_;

 private:
  // Build the kernel registration for an op that cancels the operation.
  TfLiteRegistration CancelOpRegistration() {
    TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};

    // Set output size to the input size in CancelOp::Prepare(). The body
    // exists only to give Prepare something realistic to do; the input and
    // output tensors are not otherwise used.
    reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
      const TfLiteTensor* in_tensor = GetInput(context, node, 0);
      TfLiteTensor* out_tensor = GetOutput(context, node, 0);
      TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
      return context->ResizeTensor(context, out_tensor, new_size);
    };

    reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
      cancellation_data_.is_cancelled = true;
      return kTfLiteOk;
    };
    return reg;
  }

  // Build the kernel registration for an op that returns kTfLiteOk.
  TfLiteRegistration OkOpRegistration() {
    TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};

    // Set output size to the input size in OkOp::Prepare(). The body exists
    // only to give Prepare something realistic to do; the input and output
    // tensors are not otherwise used.
    reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
      const TfLiteTensor* in_tensor = GetInput(context, node, 0);
      TfLiteTensor* out_tensor = GetOutput(context, node, 0);
      TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
      return context->ResizeTensor(context, out_tensor, new_size);
    };

    reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
      return kTfLiteOk;
    };
    return reg;
  }

  void SetUp() final {
    cancellation_data_.is_cancelled = false;

    // Set up the interpreter. Create the input and output tensors.
    int num_tensors = 3;
    ASSERT_EQ(interpreter_.AddTensors(num_tensors), kTfLiteOk);
    interpreter_.SetInputs({0});
    interpreter_.SetOutputs({2});
    TfLiteQuantizationParams quantized;
    for (int tensor_index = 0; tensor_index < num_tensors; tensor_index++) {
      ASSERT_EQ(interpreter_.SetTensorParametersReadWrite(
                    tensor_index, kTfLiteFloat32, "", {3}, quantized),
                kTfLiteOk);
    }
    interpreter_.SetCancellationFunction(&cancellation_data_,
                                         &CheckCancellation);
  }
};

TEST_F(CancellationTest, CancelBeforeInvoke) {
  // Cancel prior to calling Invoke.
  CancellationTest::MakeOkNode(1, 2);
  ASSERT_EQ(interpreter_.AllocateTensors(), kTfLiteOk);

  CancellationTest::Cancel();
  TfLiteStatus invoke_error_code = CancellationTest::Invoke();
  ASSERT_EQ(invoke_error_code, kTfLiteError);
}

TEST_F(CancellationTest, CancelDuringInvoke) {
  // Tests a model that sets the cancellation flag during Invoke() to verify
  // that cancellation works between ops.
  //
  // The first op will set the cancellation bit to true. The second op returns
  // `kTfLiteOk` if executed.
  CancellationTest::MakeCancelNode(0, 1);
  CancellationTest::MakeOkNode(1, 2);
  ASSERT_EQ(interpreter_.AllocateTensors(), kTfLiteOk);

  TfLiteStatus invoke_error_code = CancellationTest::Invoke();
  ASSERT_EQ(invoke_error_code, kTfLiteError);
}

} // namespace
} // namespace tflite

int main(int argc, char** argv) {
  ::tflite::LogToStderr();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}