Support RESIZE_BILINEAR operator in XNNPACK delegate

PiperOrigin-RevId: 322749575
Change-Id: Ifa2ec971fc18f4877976f2d9970ac45203a43029
This commit is contained in:
Marat Dukhan 2020-07-23 02:41:23 -07:00 committed by TensorFlower Gardener
parent 60c6a58339
commit 1dccb05678
7 changed files with 542 additions and 4 deletions

View File

@ -229,6 +229,22 @@ cc_library(
],
)
# Test-only helper library: builds single-op RESIZE_BILINEAR FlatBuffer models
# and runs them through TFLite with and without a delegate for comparison.
cc_library(
name = "resize_bilinear_tester",
testonly = 1,
srcs = ["resize_bilinear_tester.cc"],
hdrs = ["resize_bilinear_tester.h"],
deps = [
"//tensorflow/lite:framework",
"//tensorflow/lite:schema_fbs_version",
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels:builtin_ops",
"//tensorflow/lite/schema:schema_fbs",
"@com_google_googletest//:gtest",
"@flatbuffers",
],
)
cc_library(
name = "softmax_tester",
testonly = 1,
@ -635,6 +651,21 @@ cc_test(
],
)
# Unit tests for RESIZE_BILINEAR delegation to XNNPACK; uses the test-mode
# delegate so unsupported graphs fall back instead of failing.
cc_test(
name = "resize_bilinear_test",
srcs = ["resize_bilinear_test.cc"],
linkopts = select({
"//tensorflow:emscripten": EMSCRIPTEN_LINKOPTS,
"//conditions:default": [],
}),
deps = [
":resize_bilinear_tester",
":test_main",
":xnnpack_delegate_test_mode",
"@com_google_googletest//:gtest",
],
)
cc_test(
name = "round_test",
srcs = ["round_test.cc"],

View File

@ -260,6 +260,13 @@ Below is the list of current operators and limitations:
static (use `kTfLiteMmapRo` allocation type), or absent (with the new shape
specified via `ReshapeOptions` table).
### `RESIZE_BILINEAR`
* The first input and the output must be 4D tensors in 32-bit floating-point
format.
* The second input (the input with the new shape specification) must be
static (use `kTfLiteMmapRo` allocation type).
### `ROUND`
* Inputs and outputs must be in 32-bit floating-point format.

View File

@ -0,0 +1,119 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
// Verifies RESIZE_BILINEAR with half-pixel centers (TF2.x-style resizing)
// on a randomly sized 4D tensor.
TEST(ResizeBilinear, AlignCenters) {
  // Delegate handle is destroyed automatically when the test exits.
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device entropy;
  std::mt19937 prng(entropy());
  std::uniform_int_distribution<int32_t> spatial_dist(2, 10);
  std::uniform_int_distribution<int32_t> channel_dist(2, 16);

  ResizeBilinearTester()
      .HalfPixelCenters(true)
      .InputHeight(spatial_dist(prng))
      .InputWidth(spatial_dist(prng))
      .OutputHeight(spatial_dist(prng))
      .OutputWidth(spatial_dist(prng))
      .Channels(channel_dist(prng))
      .Test(xnnpack_delegate.get());
}
// Verifies RESIZE_BILINEAR in the default (TF1.x legacy) mode: neither
// align-corners nor half-pixel centers is enabled.
TEST(ResizeBilinear, AlignCentersTF1X) {
  // Delegate handle is destroyed automatically when the test exits.
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device entropy;
  std::mt19937 prng(entropy());
  std::uniform_int_distribution<int32_t> spatial_dist(2, 10);
  std::uniform_int_distribution<int32_t> channel_dist(2, 16);

  ResizeBilinearTester()
      .InputHeight(spatial_dist(prng))
      .InputWidth(spatial_dist(prng))
      .OutputHeight(spatial_dist(prng))
      .OutputWidth(spatial_dist(prng))
      .Channels(channel_dist(prng))
      .Test(xnnpack_delegate.get());
}
// Verifies RESIZE_BILINEAR with the align-corners sampling mode.
TEST(ResizeBilinear, AlignCorners) {
  // Delegate handle is destroyed automatically when the test exits.
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device entropy;
  std::mt19937 prng(entropy());
  std::uniform_int_distribution<int32_t> spatial_dist(2, 10);
  std::uniform_int_distribution<int32_t> channel_dist(2, 16);

  ResizeBilinearTester()
      .AlignCorners(true)
      .InputHeight(spatial_dist(prng))
      .InputWidth(spatial_dist(prng))
      .OutputHeight(spatial_dist(prng))
      .OutputWidth(spatial_dist(prng))
      .Channels(channel_dist(prng))
      .Test(xnnpack_delegate.get());
}
// Verifies RESIZE_BILINEAR when the delegate runs with a two-thread pool,
// exercising XNNPACK's threaded execution path.
TEST(ResizeBilinear, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  // Delegate handle is destroyed automatically when the test exits.
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);

  std::random_device entropy;
  std::mt19937 prng(entropy());
  std::uniform_int_distribution<int32_t> spatial_dist(2, 10);
  std::uniform_int_distribution<int32_t> channel_dist(2, 16);

  ResizeBilinearTester()
      .InputHeight(spatial_dist(prng))
      .InputWidth(spatial_dist(prng))
      .OutputHeight(spatial_dist(prng))
      .OutputWidth(spatial_dist(prng))
      .Channels(channel_dist(prng))
      .Test(xnnpack_delegate.get());
}
} // namespace xnnpack
} // namespace tflite

View File

@ -0,0 +1,183 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <numeric>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace xnnpack {
// Runs the same RESIZE_BILINEAR model through two interpreters — one with
// `delegate` applied and one without — and asserts that both produce
// numerically matching outputs for identical random input.
void ResizeBilinearTester::Test(TfLiteDelegate* delegate) const {
std::random_device random_device;
auto rng = std::mt19937(random_device());
// Uniform floats in [0, 1) used to fill the input tensor.
auto input_rng =
std::bind(std::uniform_real_distribution<float>(), std::ref(rng));
// Serialize the model once; both interpreters share the same buffer.
std::vector<char> buffer = CreateTfLiteModel();
const Model* model = GetModel(buffer.data());
std::unique_ptr<Interpreter> delegate_interpreter;
ASSERT_EQ(
InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
&delegate_interpreter),
kTfLiteOk);
std::unique_ptr<Interpreter> default_interpreter;
ASSERT_EQ(
InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
&default_interpreter),
kTfLiteOk);
ASSERT_TRUE(delegate_interpreter);
ASSERT_TRUE(default_interpreter);
// Single-op model: exactly one input and one output per interpreter.
ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
ASSERT_EQ(default_interpreter->inputs().size(), 1);
ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
ASSERT_EQ(default_interpreter->outputs().size(), 1);
ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
// Apply the delegate only to the first interpreter; the second stays on the
// reference TFLite kernels.
ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
// Fill the reference interpreter's input with random data...
float* default_input_data = default_interpreter->typed_tensor<float>(
default_interpreter->inputs()[0]);
std::generate(default_input_data,
default_input_data +
BatchSize() * InputHeight() * InputWidth() * Channels(),
std::ref(input_rng));
// ...and copy the exact same values into the delegate interpreter's input.
float* delegate_input_data = delegate_interpreter->typed_tensor<float>(
delegate_interpreter->inputs()[0]);
std::copy(default_input_data,
default_input_data +
BatchSize() * InputHeight() * InputWidth() * Channels(),
delegate_input_data);
ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
float* default_output_data = default_interpreter->typed_tensor<float>(
default_interpreter->outputs()[0]);
float* delegate_output_data = delegate_interpreter->typed_tensor<float>(
delegate_interpreter->outputs()[0]);
// Compare element-wise in NHWC order with a mixed tolerance: relative
// 1.0e-4 for large magnitudes, 10 ulp-scale epsilon for values near zero.
for (int i = 0; i < BatchSize(); i++) {
for (int y = 0; y < OutputHeight(); y++) {
for (int x = 0; x < OutputWidth(); x++) {
for (int c = 0; c < Channels(); c++) {
const int index =
((i * OutputHeight() + y) * OutputWidth() + x) * Channels() + c;
ASSERT_NEAR(default_output_data[index], delegate_output_data[index],
std::max(std::abs(default_output_data[index]) * 1.0e-4f,
10.0f * std::numeric_limits<float>::epsilon()))
<< "batch " << i << " / " << BatchSize() << ", y position " << y
<< " / " << OutputHeight() << ", x position " << x << " / "
<< OutputWidth() << ", channel " << c << " / " << Channels();
}
}
}
}
}
// Serializes a single-subgraph TFLite model containing one RESIZE_BILINEAR
// operator: tensor 0 = FLOAT32 input, tensor 1 = static INT32 size
// {output_height, output_width}, tensor 2 = FLOAT32 output.
std::vector<char> ResizeBilinearTester::CreateTfLiteModel() const {
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> operator_code =
CreateOperatorCode(builder, BuiltinOperator_RESIZE_BILINEAR);
flatbuffers::Offset<tflite::ResizeBilinearOptions> resize_bilinear_options =
CreateResizeBilinearOptions(builder, AlignCorners(), HalfPixelCenters());
// New output spatial size, stored as static data so the delegate can read
// it at graph-construction time.
const std::array<int32_t, 2> size_data{{OutputHeight(), OutputWidth()}};
// Buffer 0 is the schema-mandated empty sentinel; buffer 1 backs the static
// size tensor.
const std::array<flatbuffers::Offset<Buffer>, 2> buffers{{
CreateBuffer(builder, builder.CreateVector({})),
CreateBuffer(builder,
builder.CreateVector(
reinterpret_cast<const uint8_t*>(size_data.data()),
size_data.size() * sizeof(int32_t))),
}};
// NHWC shapes for the 4D input/output tensors.
const std::array<int32_t, 4> input_shape{
{BatchSize(), InputHeight(), InputWidth(), Channels()}};
const std::array<int32_t, 4> output_shape{
{BatchSize(), OutputHeight(), OutputWidth(), Channels()}};
const std::array<int32_t, 1> size_shape{
{static_cast<int32_t>(size_data.size())}};
const std::array<flatbuffers::Offset<Tensor>, 3> tensors{{
CreateTensor(
builder,
builder.CreateVector<int32_t>(input_shape.data(), input_shape.size()),
TensorType_FLOAT32),
CreateTensor(
builder,
builder.CreateVector<int32_t>(size_shape.data(), size_shape.size()),
TensorType_INT32, /*buffer=*/1),
CreateTensor(builder,
builder.CreateVector<int32_t>(output_shape.data(),
output_shape.size()),
TensorType_FLOAT32),
}};
// Operator consumes the input and the size tensor, produces the output.
const std::array<int32_t, 2> op_inputs{{0, 1}};
const std::array<int32_t, 1> op_outputs{{2}};
flatbuffers::Offset<Operator> op = CreateOperator(
builder, /*opcode_index=*/0,
builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
BuiltinOptions_ResizeBilinearOptions, resize_bilinear_options.Union());
// Only the float input/output are exposed at the subgraph boundary; the
// size tensor is internal static data.
const std::array<int32_t, 1> subgraph_inputs{{0}};
const std::array<int32_t, 1> subgraph_outputs{{2}};
flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
builder, builder.CreateVector(tensors.data(), tensors.size()),
builder.CreateVector<int32_t>(subgraph_inputs.data(),
subgraph_inputs.size()),
builder.CreateVector<int32_t>(subgraph_outputs.data(),
subgraph_outputs.size()),
builder.CreateVector(&op, 1));
flatbuffers::Offset<flatbuffers::String> description =
builder.CreateString("Resize Bilinear model");
flatbuffers::Offset<Model> model_buffer = CreateModel(
builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
builder.CreateVector(&subgraph, 1), description,
builder.CreateVector(buffers.data(), buffers.size()));
builder.Finish(model_buffer);
// Copy the finished FlatBuffer out of the builder's internal storage.
return std::vector<char>(builder.GetBufferPointer(),
builder.GetBufferPointer() + builder.GetSize());
}
} // namespace xnnpack
} // namespace tflite

View File

@ -0,0 +1,115 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_RESIZE_BILINEAR_TESTER_H_
#define TENSORFLOW_LITE_DELEGATES_XNNPACK_RESIZE_BILINEAR_TESTER_H_
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
// Fluent builder for single-op RESIZE_BILINEAR test models. Configure the
// tensor geometry and resize mode via chained setters, then call Test() to
// run the model with and without the delegate and compare outputs.
class ResizeBilinearTester {
public:
ResizeBilinearTester() = default;
// Non-copyable: configured in place and used once per test.
ResizeBilinearTester(const ResizeBilinearTester&) = delete;
ResizeBilinearTester& operator=(const ResizeBilinearTester&) = delete;
// Sets the batch dimension (N in NHWC). Must be positive.
inline ResizeBilinearTester& BatchSize(int32_t batch_size) {
EXPECT_GT(batch_size, 0);
batch_size_ = batch_size;
return *this;
}
inline int32_t BatchSize() const { return batch_size_; }
// Sets the channel dimension (C in NHWC). Must be positive.
inline ResizeBilinearTester& Channels(int32_t channels) {
EXPECT_GT(channels, 0);
channels_ = channels;
return *this;
}
inline int32_t Channels() const { return channels_; }
// Sets the input height (H in NHWC). Must be positive.
inline ResizeBilinearTester& InputHeight(int32_t input_height) {
EXPECT_GT(input_height, 0);
input_height_ = input_height;
return *this;
}
inline int32_t InputHeight() const { return input_height_; }
// Sets the input width (W in NHWC). Must be positive.
inline ResizeBilinearTester& InputWidth(int32_t input_width) {
EXPECT_GT(input_width, 0);
input_width_ = input_width;
return *this;
}
inline int32_t InputWidth() const { return input_width_; }
// Sets the resized output height. Must be positive.
inline ResizeBilinearTester& OutputHeight(int32_t output_height) {
EXPECT_GT(output_height, 0);
output_height_ = output_height;
return *this;
}
inline int32_t OutputHeight() const { return output_height_; }
// Sets the resized output width. Must be positive.
inline ResizeBilinearTester& OutputWidth(int32_t output_width) {
EXPECT_GT(output_width, 0);
output_width_ = output_width;
return *this;
}
inline int32_t OutputWidth() const { return output_width_; }
// Enables align-corners sampling (corner pixels map exactly).
ResizeBilinearTester& AlignCorners(bool align_corners) {
align_corners_ = align_corners;
return *this;
}
bool AlignCorners() const { return align_corners_; }
// Enables half-pixel-centers sampling (TF2.x default resize semantics).
ResizeBilinearTester& HalfPixelCenters(bool half_pixel_centers) {
half_pixel_centers_ = half_pixel_centers;
return *this;
}
bool HalfPixelCenters() const { return half_pixel_centers_; }
// Builds the model, runs it with and without `delegate`, and asserts that
// the outputs match within tolerance.
void Test(TfLiteDelegate* delegate) const;
private:
std::vector<char> CreateTfLiteModel() const;
// Defaults produce a minimal 1x1x1x1 -> 1x1 resize in legacy TF1.x mode.
int32_t batch_size_ = 1;
int32_t channels_ = 1;
int32_t input_height_ = 1;
int32_t input_width_ = 1;
int32_t output_height_ = 1;
int32_t output_width_ = 1;
bool align_corners_ = false;
bool half_pixel_centers_ = false;
};
} // namespace xnnpack
} // namespace tflite
#endif // TENSORFLOW_LITE_DELEGATES_XNNPACK_RESIZE_BILINEAR_TESTER_H_

View File

@ -157,6 +157,7 @@ class Subgraph {
case kTfLiteBuiltinMean:
case kTfLiteBuiltinPad:
case kTfLiteBuiltinReshape:
case kTfLiteBuiltinResizeBilinear:
// Ignore the second input (axes, static padding, or new shape),
// because it is represented as parameters of the XNNPACK operator
// rather than extra input.
@ -930,6 +931,14 @@ class Subgraph {
context->tensors, reshape_params,
xnnpack_tensors);
}
case kTfLiteBuiltinResizeBilinear: {
const TfLiteResizeBilinearParams* resize_params =
static_cast<const TfLiteResizeBilinearParams*>(node->builtin_data);
return VisitResizeBilinearNode(subgraph, logging_context, node_index,
node, context->tensors, resize_params,
xnnpack_tensors);
}
case kTfLiteBuiltinRound:
return VisitRoundNode(subgraph, logging_context, node_index, node,
context->tensors, xnnpack_tensors);
@ -2460,6 +2469,80 @@ class Subgraph {
return kTfLiteOk;
}
static TfLiteStatus VisitResizeBilinearNode(
xnn_subgraph_t subgraph, TfLiteContext* logging_context, int node_index,
TfLiteNode* node, const TfLiteTensor* tensors,
const TfLiteResizeBilinearParams* resize_params,
const std::vector<uint32_t>& xnnpack_tensors) {
TF_LITE_ENSURE_STATUS(
CheckNumInputsAndOutputs(logging_context, node, 2, 1, node_index));
const TfLiteTensor& input_tensor = tensors[node->inputs->data[0]];
TF_LITE_ENSURE_STATUS(CheckTensorFloatType(
logging_context, input_tensor, node->inputs->data[0], node_index));
TF_LITE_ENSURE_STATUS(CheckTensorShape(logging_context, input_tensor, 4,
node->inputs->data[0]));
TF_LITE_ENSURE_STATUS(CheckTensorNonDynamicAllocation(
logging_context, input_tensor, node->inputs->data[0], node_index));
const TfLiteTensor& shape_tensor = tensors[node->inputs->data[1]];
TF_LITE_ENSURE_STATUS(CheckTensorType(logging_context, shape_tensor,
kTfLiteInt32, node->inputs->data[1],
node_index));
TF_LITE_ENSURE_STATUS(CheckShapeTensorShape(
logging_context, shape_tensor, node->inputs->data[1], node_index));
if (shape_tensor.dims->data[0] != 2) {
TF_LITE_MAYBE_KERNEL_LOG(
logging_context,
"unexpected number of dimensions %d in the output shape in node %d",
shape_tensor.dims->data[0], node_index);
}
TF_LITE_ENSURE_STATUS(CheckTensorStaticAllocation(
logging_context, shape_tensor, node->inputs->data[1], node_index));
const TfLiteTensor& output_tensor = tensors[node->outputs->data[0]];
TF_LITE_ENSURE_STATUS(CheckTensorFloatType(
logging_context, output_tensor, node->outputs->data[0], node_index));
TF_LITE_ENSURE_STATUS(CheckTensorShape(logging_context, output_tensor, 4,
node->outputs->data[0]));
TF_LITE_ENSURE_STATUS(CheckTensorNonDynamicAllocation(
logging_context, output_tensor, node->outputs->data[0], node_index));
const int32_t* shape_data =
reinterpret_cast<const int32_t*>(shape_tensor.data.data);
for (int i = 0; i < shape_tensor.dims->size; i++) {
const int32_t dim = shape_data[i];
if (dim <= 0) {
TF_LITE_MAYBE_KERNEL_LOG(
logging_context, "invalid output dimension #%d value %d in node %d",
i, dim, node_index);
return kTfLiteError;
}
}
if (subgraph != nullptr) {
uint32_t flags = 0;
if (resize_params->align_corners) {
flags |= XNN_FLAG_ALIGN_CORNERS;
} else if (!resize_params->half_pixel_centers) {
flags |= XNN_FLAG_TENSORFLOW_LEGACY_MODE;
}
const xnn_status status = xnn_define_static_resize_bilinear_2d(
subgraph, static_cast<size_t>(shape_data[0]),
static_cast<size_t>(shape_data[1]),
/*input_id=*/xnnpack_tensors[node->inputs->data[0]],
/*output_id=*/xnnpack_tensors[node->outputs->data[0]], flags);
if (status != xnn_status_success) {
TF_LITE_KERNEL_LOG(logging_context,
"failed to delegate RESIZE_BILINEAR node #%d",
node_index);
return kTfLiteError;
}
}
return kTfLiteOk;
}
static TfLiteStatus VisitRoundNode(
xnn_subgraph_t subgraph, TfLiteContext* logging_context, int node_index,
TfLiteNode* node, const TfLiteTensor* tensors,

View File

@ -164,11 +164,11 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
tf_http_archive(
name = "XNNPACK",
sha256 = "bd4278ebbe3f6b104f46548717b00bdba95acaab3cbac3de4015c65d868259f8",
strip_prefix = "XNNPACK-d27202dfeaa8d3a96670ba47f3dce2f19305a092",
sha256 = "c6eae589a4af7785da467162acd339bae359842e14c93bddc8fbe84ffd361c70",
strip_prefix = "XNNPACK-aff24e26a760552ee98a036f2a6e95b123e1bc6d",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/d27202dfeaa8d3a96670ba47f3dce2f19305a092.zip",
"https://github.com/google/XNNPACK/archive/d27202dfeaa8d3a96670ba47f3dce2f19305a092.zip",
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/aff24e26a760552ee98a036f2a6e95b123e1bc6d.zip",
"https://github.com/google/XNNPACK/archive/aff24e26a760552ee98a036f2a6e95b123e1bc6d.zip",
],
)