Marat Dukhan 5dbc34f565 Support Pad with static paddings in XNNPACK delegate
PiperOrigin-RevId: 313107760
Change-Id: I7b04b9977081e760e9604f72d9da5f499ada88f3
2020-05-25 15:53:01 -07:00

/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/xnnpack/pad_tester.h"
#include <array>
#include <cstdint>
#include <functional>
#include <numeric>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace xnnpack {
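
// Computes the expected output shape: each padded dimension grows by its
// pre- and post-padding; dimensions beyond the padded rank pass through.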
std::vector<int32_t> PadTester::OutputShape() const {
std::vector<int32_t> output_shape;
output_shape.reserve(InputShape().size());
for (size_t i = 0; i < InputShape().size(); i++) {
int32_t output_dim = InputShape()[i];
if (i < InputPrePaddings().size()) {
output_dim += InputPrePaddings()[i];
}
if (i < InputPostPaddings().size()) {
output_dim += InputPostPaddings()[i];
}
output_shape.push_back(output_dim);
}
return output_shape;
}
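
// Builds a PAD model with static paddings, runs it through both the default
// TFLite kernels and the delegate under test, and checks that the two
// produce identical outputs. A typical call site (a sketch, assuming the
// fluent setters declared in pad_tester.h):
//
//   PadTester()
//       .InputShape({1, 4, 4, 3})
//       .InputPrePaddings({0, 1, 1, 0})
//       .InputPostPaddings({0, 1, 1, 0})
//       .Test(delegate);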
void PadTester::Test(TfLiteDelegate* delegate) const {
ASSERT_EQ(InputPrePaddings().size(), InputPostPaddings().size());
ASSERT_LE(InputPrePaddings().size(), InputShape().size());
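// Draw input values from the default uniform [0, 1) float distribution.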
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto input_rng =
std::bind(std::uniform_real_distribution<float>(), std::ref(rng));
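// Serialize the model once and build two interpreters over the same buffer:
// one to run with the delegate, one to produce reference results.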
std::vector<char> buffer = CreateTfLiteModel();
const Model* model = GetModel(buffer.data());
std::unique_ptr<Interpreter> delegate_interpreter;
ASSERT_EQ(
InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
&delegate_interpreter),
kTfLiteOk);
std::unique_ptr<Interpreter> default_interpreter;
ASSERT_EQ(
InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
&default_interpreter),
kTfLiteOk);
ASSERT_TRUE(delegate_interpreter);
ASSERT_TRUE(default_interpreter);
ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
ASSERT_EQ(default_interpreter->inputs().size(), 1);
ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
ASSERT_EQ(default_interpreter->outputs().size(), 1);
ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
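// Apply the delegate to the first interpreter only; the second keeps the
// built-in reference kernels.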
ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
float* default_input_data = default_interpreter->typed_tensor<float>(
default_interpreter->inputs()[0]);
std::generate(default_input_data,
default_input_data + ComputeSize(InputShape()),
std::ref(input_rng));
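// Feed the delegate interpreter a copy of the same random inputs.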
float* delegate_input_data = delegate_interpreter->typed_tensor<float>(
delegate_interpreter->inputs()[0]);
std::copy(default_input_data, default_input_data + ComputeSize(InputShape()),
delegate_input_data);
ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
float* default_output_data = default_interpreter->typed_tensor<float>(
default_interpreter->outputs()[0]);
float* delegate_output_data = delegate_interpreter->typed_tensor<float>(
delegate_interpreter->outputs()[0]);
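// PAD only copies values and writes zeros, so the delegate output must match
// the reference output exactly; no floating-point tolerance is needed.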
for (size_t i = 0; i < ComputeSize(OutputShape()); i++) {
ASSERT_EQ(default_output_data[i], delegate_output_data[i]);
}
}
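
// Serializes a single-operator PAD model with the paddings baked in as a
// static int32 tensor.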
std::vector<char> PadTester::CreateTfLiteModel() const {
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> operator_code =
CreateOperatorCode(builder, BuiltinOperator_PAD);
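// TFLite PAD takes paddings as an Nx2 tensor: row i holds the (pre, post)
// padding for dimension i, so the flat vector interleaves the two lists.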
std::vector<int32_t> paddings(InputPrePaddings().size() +
InputPostPaddings().size());
for (size_t i = 0; i < InputPrePaddings().size(); i++) {
paddings[i * 2] = InputPrePaddings()[i];
paddings[i * 2 + 1] = InputPostPaddings()[i];
}
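// Buffer 0 is the empty sentinel required by the schema; buffer 1 holds the
// static paddings data.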
const std::array<flatbuffers::Offset<Buffer>, 2> buffers{{
CreateBuffer(builder, builder.CreateVector({})),
CreateBuffer(builder,
builder.CreateVector(
reinterpret_cast<const uint8_t*>(paddings.data()),
sizeof(int32_t) * paddings.size())),
}};
const std::vector<int32_t> output_shape = OutputShape();
const std::array<int32_t, 2> paddings_shape{
{static_cast<int32_t>(InputPrePaddings().size()), 2}};
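// Tensor 0: float32 input; tensor 1: int32 paddings backed by buffer 1;
// tensor 2: float32 output.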
const std::array<flatbuffers::Offset<Tensor>, 3> tensors{{
CreateTensor(builder,
builder.CreateVector<int32_t>(InputShape().data(),
InputShape().size()),
TensorType_FLOAT32),
CreateTensor(builder,
builder.CreateVector<int32_t>(paddings_shape.data(),
paddings_shape.size()),
TensorType_INT32, /*buffer=*/1),
CreateTensor(builder,
builder.CreateVector<int32_t>(output_shape.data(),
output_shape.size()),
TensorType_FLOAT32),
}};
const std::array<int32_t, 2> op_inputs{{0, 1}};
const std::array<int32_t, 1> op_outputs{{2}};
flatbuffers::Offset<Operator> op = CreateOperator(
builder, /*opcode_index=*/0,
builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()));
const std::array<int32_t, 1> subgraph_inputs{{0}};
const std::array<int32_t, 1> subgraph_outputs{{2}};
flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
builder, builder.CreateVector(tensors.data(), tensors.size()),
builder.CreateVector<int32_t>(subgraph_inputs.data(),
subgraph_inputs.size()),
builder.CreateVector<int32_t>(subgraph_outputs.data(),
subgraph_outputs.size()),
builder.CreateVector(&op, 1));
flatbuffers::Offset<flatbuffers::String> description =
builder.CreateString("Pad model");
flatbuffers::Offset<Model> model_buffer = CreateModel(
builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
builder.CreateVector(&subgraph, 1), description,
builder.CreateVector(buffers.data(), buffers.size()));
builder.Finish(model_buffer);
return std::vector<char>(builder.GetBufferPointer(),
builder.GetBufferPointer() + builder.GetSize());
}
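
// Returns the number of elements in a tensor of the given shape.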
int32_t PadTester::ComputeSize(const std::vector<int32_t>& shape) {
return std::accumulate(shape.cbegin(), shape.cend(), 1,
std::multiplies<int32_t>());
}
} // namespace xnnpack
} // namespace tflite