Support RESHAPE operator in XNNPACK delegate

PiperOrigin-RevId: 320678814
Change-Id: I5229605df654b35c84a968db3eef8afd498487e9
Marat Dukhan 2020-07-10 14:40:00 -07:00 committed by TensorFlower Gardener
parent 9060253512
commit 37d20f87f3
6 changed files with 628 additions and 7 deletions
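In short: the XNNPACK delegate now claims RESHAPE nodes whose output shape is static (supplied either through ReshapeOptions or as a second, statically allocated INT32 shape tensor) and lowers them to XNNPACK's static reshape operator; a ReshapeTester plus unit tests are added, and the pinned XNNPACK revision is bumped to one that provides the operator. As a minimal sketch of how the new path is exercised (not part of this commit; RunWithXnnpack and model_buffer are illustrative names, with model_buffer standing in for a real serialized TFLite model containing a RESHAPE node):

// Minimal sketch, mirroring the delegate usage in the tests below.
#include <memory>
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
TfLiteStatus RunWithXnnpack(const void* model_buffer) {
  const tflite::Model* model = tflite::GetModel(model_buffer);
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(
          model, tflite::ops::builtin::BuiltinOpResolver())(&interpreter) !=
          kTfLiteOk ||
      interpreter == nullptr) {
    return kTfLiteError;
  }
  if (interpreter->AllocateTensors() != kTfLiteOk) return kTfLiteError;
  // RESHAPE nodes with a static output shape are now handled by the delegate.
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(/*options=*/nullptr),
                       TfLiteXNNPackDelegateDelete);
  if (interpreter->ModifyGraphWithDelegate(xnnpack_delegate.get()) !=
      kTfLiteOk) {
    return kTfLiteError;
  }
  return interpreter->Invoke();  // fill input tensors first in real use
}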

tensorflow/lite/delegates/xnnpack/BUILD

@@ -213,6 +213,22 @@ cc_library(
],
)
cc_library(
name = "reshape_tester",
testonly = 1,
srcs = ["reshape_tester.cc"],
hdrs = ["reshape_tester.h"],
deps = [
"//tensorflow/lite:framework",
"//tensorflow/lite:schema_fbs_version",
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels:builtin_ops",
"//tensorflow/lite/schema:schema_fbs",
"@com_google_googletest//:gtest",
"@flatbuffers",
],
)
cc_library(
name = "softmax_tester",
testonly = 1,
@@ -604,6 +620,21 @@ cc_test(
],
)
cc_test(
name = "reshape_test",
srcs = ["reshape_test.cc"],
linkopts = select({
"//tensorflow:emscripten": EMSCRIPTEN_LINKOPTS,
"//conditions:default": [],
}),
deps = [
":reshape_tester",
":test_main",
":xnnpack_delegate_test_mode",
"@com_google_googletest//:gtest",
],
)
cc_test(
name = "round_test",
srcs = ["round_test.cc"],

tensorflow/lite/delegates/xnnpack/reshape_test.cc

@@ -0,0 +1,225 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/reshape_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Reshape, 4DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 4DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(false)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 3DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 3DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(false)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 2DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{{shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 2DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{{shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(false)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 1DShapeAsInput) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape({shape_rng()});
ReshapeTester()
.InputShape(shape)
.OutputShape(shape)
.OutputShapeAsInput(true)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 1DShapeAsParam) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape({shape_rng()});
ReshapeTester()
.InputShape(shape)
.OutputShape(shape)
.OutputShapeAsInput(false)
.Test(xnnpack_delegate.get());
}
TEST(Reshape, 0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
ReshapeTester()
.InputShape(std::vector<int32_t>())
.OutputShape(std::vector<int32_t>())
.Test(xnnpack_delegate.get());
}
TEST(Reshape, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> input_shape{
{shape_rng(), shape_rng(), shape_rng(), shape_rng()}};
std::vector<int32_t> output_shape(input_shape.cbegin(), input_shape.cend());
std::shuffle(output_shape.begin(), output_shape.end(), rng);
ReshapeTester()
.InputShape(input_shape)
.OutputShape(output_shape)
.OutputShapeAsInput(true)
.Test(xnnpack_delegate.get());
}
} // namespace xnnpack
} // namespace tflite

tensorflow/lite/delegates/xnnpack/reshape_tester.cc

@@ -0,0 +1,181 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/xnnpack/reshape_tester.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <numeric>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace xnnpack {
void ReshapeTester::Test(TfLiteDelegate* delegate) const {
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto f32rng =
std::bind(std::uniform_real_distribution<float>(), std::ref(rng));
ASSERT_EQ(InputSize(), OutputSize());
std::vector<char> buffer = CreateTfLiteModel();
const Model* model = GetModel(buffer.data());
std::unique_ptr<Interpreter> delegate_interpreter;
ASSERT_EQ(
InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
&delegate_interpreter),
kTfLiteOk);
std::unique_ptr<Interpreter> default_interpreter;
ASSERT_EQ(
InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
&default_interpreter),
kTfLiteOk);
ASSERT_TRUE(delegate_interpreter);
ASSERT_TRUE(default_interpreter);
ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
ASSERT_EQ(default_interpreter->inputs().size(), 1);
ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
ASSERT_EQ(default_interpreter->outputs().size(), 1);
ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
float* default_input_data = default_interpreter->typed_tensor<float>(
default_interpreter->inputs()[0]);
std::generate(default_input_data, default_input_data + InputSize(),
std::ref(f32rng));
float* delegate_input_data = delegate_interpreter->typed_tensor<float>(
delegate_interpreter->inputs()[0]);
std::copy(default_input_data, default_input_data + InputSize(),
delegate_input_data);
ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
float* default_output_data = default_interpreter->typed_tensor<float>(
default_interpreter->outputs()[0]);
float* delegate_output_data = delegate_interpreter->typed_tensor<float>(
delegate_interpreter->outputs()[0]);
for (size_t i = 0; i < OutputSize(); i++) {
ASSERT_EQ(delegate_output_data[i], default_output_data[i]);
}
}
std::vector<char> ReshapeTester::CreateTfLiteModel() const {
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> operator_code =
CreateOperatorCode(builder, BuiltinOperator_RESHAPE, 0);
std::vector<flatbuffers::Offset<Buffer>> buffers{{
CreateBuffer(builder, builder.CreateVector({})),
}};
if (OutputShapeAsInput()) {
buffers.emplace_back(CreateBuffer(
builder, builder.CreateVector(
reinterpret_cast<const uint8_t*>(OutputShape().data()),
OutputShape().size() * sizeof(int32_t))));
}
std::vector<flatbuffers::Offset<Tensor>> tensors{{
CreateTensor(builder,
builder.CreateVector<int32_t>(InputShape().data(),
InputShape().size()),
TensorType_FLOAT32),
CreateTensor(builder,
builder.CreateVector<int32_t>(OutputShape().data(),
OutputShape().size()),
TensorType_FLOAT32),
}};
if (OutputShapeAsInput()) {
const std::array<int32_t, 1> reshape_shape{
{static_cast<int32_t>(InputShape().size())}};
tensors.insert(tensors.begin() + 1,
CreateTensor(builder,
builder.CreateVector<int32_t>(
reshape_shape.data(), reshape_shape.size()),
TensorType_INT32, /*buffer=*/1));
}
std::vector<int32_t> op_inputs({0});
if (OutputShapeAsInput()) {
op_inputs.push_back(1);
}
const std::array<int32_t, 1> op_outputs{{OutputShapeAsInput() ? 2 : 1}};
BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE;
flatbuffers::Offset<void> builtin_options = 0;
if (!OutputShapeAsInput()) {
builtin_options_type = tflite::BuiltinOptions_ReshapeOptions;
builtin_options =
CreateReshapeOptions(
builder, builder.CreateVector<int32_t>(OutputShape().data(),
OutputShape().size()))
.Union();
}
const flatbuffers::Offset<Operator> op = CreateOperator(
builder, /*opcode_index=*/0,
builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
builtin_options_type, builtin_options);
const std::array<int32_t, 1> subgraph_inputs{{op_inputs.front()}};
const std::array<int32_t, 1> subgraph_outputs{{op_outputs.front()}};
flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
builder, builder.CreateVector(tensors.data(), tensors.size()),
builder.CreateVector<int32_t>(subgraph_inputs.data(),
subgraph_inputs.size()),
builder.CreateVector<int32_t>(subgraph_outputs.data(),
subgraph_outputs.size()),
builder.CreateVector(&op, 1));
const flatbuffers::Offset<Model> model_buffer = CreateModel(
builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
builder.CreateVector(&subgraph, 1), builder.CreateString("Reshape model"),
builder.CreateVector(buffers.data(), buffers.size()));
builder.Finish(model_buffer);
return std::vector<char>(builder.GetBufferPointer(),
builder.GetBufferPointer() + builder.GetSize());
}
int32_t ReshapeTester::ComputeSize(const std::vector<int32_t>& shape) {
return std::accumulate(shape.cbegin(), shape.cend(), 1,
std::multiplies<int32_t>());
}
} // namespace xnnpack
} // namespace tflite

tensorflow/lite/delegates/xnnpack/reshape_tester.h

@@ -0,0 +1,87 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_RESHAPE_TESTER_H_
#define TENSORFLOW_LITE_DELEGATES_XNNPACK_RESHAPE_TESTER_H_
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace xnnpack {
class ReshapeTester {
public:
ReshapeTester() = default;
ReshapeTester(const ReshapeTester&) = delete;
ReshapeTester& operator=(const ReshapeTester&) = delete;
inline ReshapeTester& InputShape(const std::vector<int32_t>& input_shape) {
for (int32_t input_dim : input_shape) {
EXPECT_GT(input_dim, 0);
}
input_shape_ = std::vector<int32_t>(input_shape.begin(), input_shape.end());
input_size_ = ReshapeTester::ComputeSize(input_shape);
return *this;
}
inline const std::vector<int32_t>& InputShape() const { return input_shape_; }
inline ReshapeTester& OutputShape(const std::vector<int32_t>& output_shape) {
for (int32_t output_dim : output_shape) {
EXPECT_GT(output_dim, 0);
}
output_shape_ =
std::vector<int32_t>(output_shape.begin(), output_shape.end());
output_size_ = ReshapeTester::ComputeSize(output_shape);
return *this;
}
inline const std::vector<int32_t>& OutputShape() const {
return output_shape_;
}
inline int32_t InputSize() const { return input_size_; }
inline int32_t OutputSize() const { return output_size_; }
inline ReshapeTester& OutputShapeAsInput(bool shape_as_input) {
shape_as_input_ = shape_as_input;
return *this;
}
inline bool OutputShapeAsInput() const { return shape_as_input_; }
void Test(TfLiteDelegate* delegate) const;
private:
std::vector<char> CreateTfLiteModel() const;
static int32_t ComputeSize(const std::vector<int32_t>& shape);
std::vector<int32_t> input_shape_;
std::vector<int32_t> output_shape_;
int32_t input_size_ = 1;
int32_t output_size_ = 1;
bool shape_as_input_ = false;
};
} // namespace xnnpack
} // namespace tflite
#endif // TENSORFLOW_LITE_DELEGATES_XNNPACK_RESHAPE_TESTER_H_

tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc

@@ -156,9 +156,10 @@ class Subgraph {
     switch (registration->builtin_code) {
       case kTfLiteBuiltinMean:
       case kTfLiteBuiltinPad:
-        // Ignore the second input (static padding, or axes), because it is
-        // represented as parameters of the XNNPACK operator rather than
-        // extra input.
+      case kTfLiteBuiltinReshape:
+        // Ignore the second input (axes, static padding, or new shape),
+        // because it is represented as parameters of the XNNPACK operator
+        // rather than extra input.
       {
         const int t = node->inputs->data[0];
         tensors[t] = t;
@@ -742,6 +743,20 @@ class Subgraph {
return kTfLiteOk;
}
static TfLiteStatus CheckShapeTensorShape(TfLiteContext* context,
const TfLiteTensor& tensor,
int tensor_index, int node_index) {
if (tensor.dims->size != 1) {
TF_LITE_MAYBE_KERNEL_LOG(context,
"unexpected number of shape dimensions (%d) in "
"shape tensor #%d in node #%d: "
"expected a 1D tensor",
tensor.dims->size, tensor_index, node_index);
return kTfLiteError;
}
return kTfLiteOk;
}
static TfLiteStatus CheckTensorNonDynamicAllocation(
TfLiteContext* context, const TfLiteTensor& tensor, int tensor_index,
int node_index) {
@@ -902,6 +917,14 @@ class Subgraph {
case kTfLiteBuiltinRelu6:
return VisitReluNode(subgraph, logging_context, node_index, node,
context->tensors, 0.0f, 6.0f, xnnpack_tensors);
case kTfLiteBuiltinReshape: {
const TfLiteReshapeParams* reshape_params =
static_cast<const TfLiteReshapeParams*>(node->builtin_data);
return VisitReshapeNode(subgraph, logging_context, node_index, node,
context->tensors, reshape_params,
xnnpack_tensors);
}
case kTfLiteBuiltinRound:
return VisitRoundNode(subgraph, logging_context, node_index, node,
context->tensors, xnnpack_tensors);
@@ -2343,6 +2366,80 @@ class Subgraph {
return kTfLiteOk;
}
static TfLiteStatus VisitReshapeNode(
xnn_subgraph_t subgraph, TfLiteContext* logging_context, int node_index,
TfLiteNode* node, const TfLiteTensor* tensors,
const TfLiteReshapeParams* reshape_params,
const std::vector<uint32_t>& xnnpack_tensors) {
switch (node->inputs->size) {
case 1:
case 2:
break;
default:
TF_LITE_MAYBE_KERNEL_LOG(
logging_context,
"unexpected number of inputs (%d) in node #%d: "
"either one or two inputs expected",
node->inputs->size, node_index);
return kTfLiteError;
}
if (node->outputs->size != 1) {
TF_LITE_MAYBE_KERNEL_LOG(
logging_context,
"unexpected number of outputs (%d) in node #%d: one output expected",
node->outputs->size, node_index);
return kTfLiteError;
}
const TfLiteTensor& input_tensor = tensors[node->inputs->data[0]];
TF_LITE_ENSURE_STATUS(CheckTensorFloatType(
logging_context, input_tensor, node->inputs->data[0], node_index));
TF_LITE_ENSURE_STATUS(CheckTensorShape(logging_context, input_tensor, 0,
XNN_MAX_TENSOR_DIMS,
node->inputs->data[0]));
TF_LITE_ENSURE_STATUS(CheckTensorNonDynamicAllocation(
logging_context, input_tensor, node->inputs->data[0], node_index));
if (node->inputs->size == 2) {
const TfLiteTensor& shape_tensor = tensors[node->inputs->data[1]];
TF_LITE_ENSURE_STATUS(CheckTensorType(logging_context, shape_tensor,
kTfLiteInt32, node->inputs->data[1],
node_index));
TF_LITE_ENSURE_STATUS(CheckShapeTensorShape(
logging_context, shape_tensor, node->inputs->data[1], node_index));
TF_LITE_ENSURE_STATUS(CheckTensorStaticAllocation(
logging_context, shape_tensor, node->inputs->data[1], node_index));
}
const TfLiteTensor& output_tensor = tensors[node->outputs->data[0]];
TF_LITE_ENSURE_STATUS(CheckTensorFloatType(
logging_context, output_tensor, node->outputs->data[0], node_index));
TF_LITE_ENSURE_STATUS(CheckTensorShape(logging_context, output_tensor, 0,
XNN_MAX_TENSOR_DIMS,
node->outputs->data[0]));
TF_LITE_ENSURE_STATUS(CheckTensorNonDynamicAllocation(
logging_context, output_tensor, node->outputs->data[0], node_index));
if (subgraph != nullptr) {
std::array<size_t, XNN_MAX_TENSOR_DIMS> new_shape;
std::copy(&output_tensor.dims->data[0],
&output_tensor.dims->data[output_tensor.dims->size],
new_shape.begin());
const xnn_status status = xnn_define_static_reshape(
subgraph, static_cast<size_t>(output_tensor.dims->size),
new_shape.data(),
/*input_id=*/xnnpack_tensors[node->inputs->data[0]],
/*output_id=*/xnnpack_tensors[node->outputs->data[0]], /*flags=*/0);
if (status != xnn_status_success) {
TF_LITE_KERNEL_LOG(logging_context,
"failed to delegate RESHAPE node #%d", node_index);
return kTfLiteError;
}
}
return kTfLiteOk;
}
static TfLiteStatus VisitRoundNode(
xnn_subgraph_t subgraph, TfLiteContext* logging_context, int node_index,
TfLiteNode* node, const TfLiteTensor* tensors,
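For intuition, the lowering above boils down to one xnn_define_static_reshape call built from the output tensor's static dims. A sketch with assumed values, reshaping a float tensor from {2, 3, 4} to {4, 3, 2} (the 24 elements are untouched; only the dims metadata changes):

// Sketch with assumed values; subgraph, input_id and output_id stand for the
// xnn_subgraph_t and the XNNPACK value IDs previously assigned to the TFLite
// input/output tensors in VisitReshapeNode above.
const std::array<size_t, 3> new_shape{{4, 3, 2}};
const xnn_status status = xnn_define_static_reshape(
    subgraph, /*num_dims=*/new_shape.size(), new_shape.data(),
    /*input_id=*/input_id, /*output_id=*/output_id, /*flags=*/0);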

tensorflow/workspace.bzl

@@ -164,11 +164,11 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
     tf_http_archive(
         name = "XNNPACK",
-        sha256 = "e37a92154c2ff72c3ebf97247617ce2e159ccc23e648fd62ded44a71c3d68c6a",
-        strip_prefix = "XNNPACK-51a01c66c78334c3d5abf4034e9a8a550a8ad4ad",
+        sha256 = "bd4278ebbe3f6b104f46548717b00bdba95acaab3cbac3de4015c65d868259f8",
+        strip_prefix = "XNNPACK-d27202dfeaa8d3a96670ba47f3dce2f19305a092",
         urls = [
-            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/51a01c66c78334c3d5abf4034e9a8a550a8ad4ad.zip",
-            "https://github.com/google/XNNPACK/archive/51a01c66c78334c3d5abf4034e9a8a550a8ad4ad.zip",
+            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/d27202dfeaa8d3a96670ba47f3dce2f19305a092.zip",
+            "https://github.com/google/XNNPACK/archive/d27202dfeaa8d3a96670ba47f3dce2f19305a092.zip",
         ],
     )