Merge pull request #44939 from mansnils:multiple_inputs_test
PiperOrigin-RevId: 343252603
Change-Id: I5b9b995e0ffceae70f4e7ac4c2d34c13ffbce416
commit bc2845045f
@@ -497,4 +497,68 @@ TF_LITE_MICRO_TEST(TestInterpreterDoesNotAllocateUntilInvoke) {
                          static_cast<size_t>(0));
}

TF_LITE_MICRO_TEST(TestInterpreterMultipleInputs) {
  const tflite::Model* model = tflite::testing::GetSimpleMultipleInputsModel();
  TF_LITE_MICRO_EXPECT_NE(nullptr, model);

  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t allocator_buffer_size = 2000;
  uint8_t allocator_buffer[allocator_buffer_size];

  // Create a new scope so that we can test the destructor.
  {
    tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
                                         allocator_buffer_size,
                                         micro_test::reporter);

    TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100);

    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(3), interpreter.inputs_size());
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size());
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), interpreter.tensors_size());

    TfLiteTensor* input = interpreter.input(0);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input->type);
    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), input->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input->data.i32);
    input->data.i32[0] = 21;

    TfLiteTensor* input1 = interpreter.input(1);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input1);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input1->type);
    TF_LITE_MICRO_EXPECT_EQ(1, input1->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input1->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), input1->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input1->data.i32);
    input1->data.i32[0] = 21;

    TfLiteTensor* input2 = interpreter.input(2);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input2);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, input2->type);
    TF_LITE_MICRO_EXPECT_EQ(1, input2->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input2->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), input2->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input2->data.i32);
    input2->data.i32[0] = 24;

    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke());

    TfLiteTensor* output = interpreter.output(0);
    TF_LITE_MICRO_EXPECT_NE(nullptr, output);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, output->type);
    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), output->bytes);
    TF_LITE_MICRO_EXPECT_NE(nullptr, output->data.i32);
    TF_LITE_MICRO_EXPECT_EQ(66, output->data.i32[0]);
  }

  TF_LITE_MICRO_EXPECT_EQ(tflite::testing::MultipleInputs::freed_, true);
}

TF_LITE_MICRO_TESTS_END
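The 66 checked against output->data.i32[0] follows from the kernel this change
registers for "multiple_inputs_op" (MultipleInputs::Invoke, further down in the
diff): it sums its three inputs, read as int32 values. A minimal sketch of that
expectation, assuming the input values written in the test above:

    // Values written to the three input tensors above: 21, 21, 24.
    int32_t expected = 21 + 21 + 24;  // == 66, the value the test asserts.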
@@ -570,6 +570,74 @@ const Model* BuildComplexMockModel() {
  return model;
}

const Model* BuildSimpleMultipleInputsModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor1"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_input_tensor2"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor3"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
  };
  constexpr size_t inputs_size = 3;
  const int32_t inputs[inputs_size] = {0, 1, 2};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {3};
  constexpr size_t operator_inputs_size = 3;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {3};
  constexpr size_t operators_size = 1;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "multiple_inputs_op",
                               /*version=*/0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}
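// The builder above yields a single-subgraph model: tensors 0..2 are the graph
// inputs (INT32, INT8, INT32) and tensor 3 is the output, all wired through
// one custom operator. A minimal sketch, using the generated flatbuffer
// accessors, of how that layout can be inspected (illustrative only):
//
//   const Model* m = BuildSimpleMultipleInputsModel();
//   const SubGraph* sg = m->subgraphs()->Get(0);
//   // sg->inputs()->size() == 3, sg->outputs()->size() == 1,
//   // sg->operators()->size() == 1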

}  // namespace

const TfLiteRegistration* SimpleStatefulOp::getRegistration() {
@@ -704,12 +772,66 @@ TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) {

bool MockCustom::freed_ = false;

const TfLiteRegistration* MultipleInputs::getRegistration() {
  return GetMutableRegistration();
}

TfLiteRegistration* MultipleInputs::GetMutableRegistration() {
  static TfLiteRegistration r;
  r.init = Init;
  r.prepare = Prepare;
  r.invoke = Invoke;
  r.free = Free;
  return &r;
}

void* MultipleInputs::Init(TfLiteContext* context, const char* buffer,
                           size_t length) {
  // We don't support delegates in TFL Micro. This is a weak check to verify
  // that the context struct is zero-initialized.
  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
  freed_ = false;
  // Do nothing.
  return nullptr;
}

void MultipleInputs::Free(TfLiteContext* context, void* buffer) {
  freed_ = true;
}

TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const int32_t* input_data = input->data.i32;
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1));
  const int32_t* input_data1 = input1->data.i32;
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2));
  const int32_t* input_data2 = input2->data.i32;

  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  int32_t* output_data = output->data.i32;
  output_data[0] =
      0;  // Catch output tensor sharing memory with an input tensor.
  output_data[0] = input_data[0] + input_data1[0] + input_data2[0];
  return kTfLiteOk;
}

bool MultipleInputs::freed_ = false;

AllOpsResolver GetOpResolver() {
  AllOpsResolver op_resolver;
  op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration());
  op_resolver.AddCustom("simple_stateful_op",
                        SimpleStatefulOp::GetMutableRegistration());

  op_resolver.AddCustom("multiple_inputs_op",
                        MultipleInputs::GetMutableRegistration());
  return op_resolver;
}
@@ -721,6 +843,14 @@ const Model* GetSimpleMockModel() {
  return model;
}

const Model* GetSimpleMultipleInputsModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleMultipleInputsModel());
  }
  return model;
}

const Model* GetComplexMockModel() {
  static Model* model = nullptr;
  if (!model) {
@@ -76,6 +76,20 @@ class MockCustom {
  static bool freed_;
};

// A simple operator with the purpose of testing multiple inputs. It returns
// the sum of the inputs.
class MultipleInputs {
 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  static bool freed_;
};
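// Usage sketch (assuming the registration performed by GetOpResolver() below):
// the kernel is exposed under the custom op name "multiple_inputs_op", so a
// resolver can also be wired up directly:
//
//   tflite::AllOpsResolver resolver;
//   resolver.AddCustom("multiple_inputs_op",
//                      tflite::testing::MultipleInputs::GetMutableRegistration());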

// Returns an Op Resolver that can be used in the testing code.
AllOpsResolver GetOpResolver();
@@ -90,6 +104,10 @@ const Model* GetComplexMockModel();
// Returns a simple flatbuffer model with two branches.
const Model* GetSimpleModelWithBranch();

// Returns a simple example flatbuffer TensorFlow Lite model. Contains 3 input
// tensors, 1 output tensor, and 1 operator.
const Model* GetSimpleMultipleInputsModel();
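// Usage sketch, mirroring the interpreter test in this change: the model is
// paired with the resolver from GetOpResolver() to drive a MicroInterpreter.
//
//   const Model* model = GetSimpleMultipleInputsModel();
//   AllOpsResolver resolver = GetOpResolver();
//   // ...then construct a tflite::MicroInterpreter with a small arena and
//   // call AllocateTensors() and Invoke(), as in that test.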

// Returns a simple flatbuffer model with offline planned tensors
// @param[in] num_tensors Number of tensors in the model.
// @param[in] metadata_buffer Metadata for offline planner.