From 1dccb05678ccefb5f32f1a9b37b6c5e4cb93d8fd Mon Sep 17 00:00:00 2001 From: Marat Dukhan Date: Thu, 23 Jul 2020 02:41:23 -0700 Subject: [PATCH] Support RESIZE_BILINEAR operator in XNNPACK delegate PiperOrigin-RevId: 322749575 Change-Id: Ifa2ec971fc18f4877976f2d9970ac45203a43029 --- tensorflow/lite/delegates/xnnpack/BUILD | 31 +++ tensorflow/lite/delegates/xnnpack/README.md | 7 + .../delegates/xnnpack/resize_bilinear_test.cc | 119 ++++++++++++ .../xnnpack/resize_bilinear_tester.cc | 183 ++++++++++++++++++ .../xnnpack/resize_bilinear_tester.h | 115 +++++++++++ .../delegates/xnnpack/xnnpack_delegate.cc | 83 ++++++++ tensorflow/workspace.bzl | 8 +- 7 files changed, 542 insertions(+), 4 deletions(-) create mode 100644 tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc create mode 100644 tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.cc create mode 100644 tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h diff --git a/tensorflow/lite/delegates/xnnpack/BUILD b/tensorflow/lite/delegates/xnnpack/BUILD index e2efac24243..3c580edae10 100644 --- a/tensorflow/lite/delegates/xnnpack/BUILD +++ b/tensorflow/lite/delegates/xnnpack/BUILD @@ -229,6 +229,22 @@ cc_library( ], ) +cc_library( + name = "resize_bilinear_tester", + testonly = 1, + srcs = ["resize_bilinear_tester.cc"], + hdrs = ["resize_bilinear_tester.h"], + deps = [ + "//tensorflow/lite:framework", + "//tensorflow/lite:schema_fbs_version", + "//tensorflow/lite/c:common", + "//tensorflow/lite/kernels:builtin_ops", + "//tensorflow/lite/schema:schema_fbs", + "@com_google_googletest//:gtest", + "@flatbuffers", + ], +) + cc_library( name = "softmax_tester", testonly = 1, @@ -635,6 +651,21 @@ cc_test( ], ) +cc_test( + name = "resize_bilinear_test", + srcs = ["resize_bilinear_test.cc"], + linkopts = select({ + "//tensorflow:emscripten": EMSCRIPTEN_LINKOPTS, + "//conditions:default": [], + }), + deps = [ + ":resize_bilinear_tester", + ":test_main", + ":xnnpack_delegate_test_mode", + 
"@com_google_googletest//:gtest", + ], +) + cc_test( name = "round_test", srcs = ["round_test.cc"], diff --git a/tensorflow/lite/delegates/xnnpack/README.md b/tensorflow/lite/delegates/xnnpack/README.md index 47ed79033cf..6f597006c1b 100644 --- a/tensorflow/lite/delegates/xnnpack/README.md +++ b/tensorflow/lite/delegates/xnnpack/README.md @@ -260,6 +260,13 @@ Below is the list of current operators and limitations: static (use `kTfLiteMmapRo` allocation type), or absent (with the new shape specified via `ReshapeOptions` table). +### `RESIZE_BILINEAR` + +* The first input and the output must be 4D tensors in 32-bit floating-point + format. +* The second input (the input with the new shape specification) must be + static (use `kTfLiteMmapRo` allocation type). + ### `ROUND` * Inputs and outputs must be in 32-bit floating-point format. diff --git a/tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc b/tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc new file mode 100644 index 00000000000..e4ff3e63388 --- /dev/null +++ b/tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc @@ -0,0 +1,119 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <random>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include "tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h"
+#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
+
+namespace tflite {
+namespace xnnpack {
+
+TEST(ResizeBilinear, AlignCenters) {
+  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
+      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
+                       TfLiteXNNPackDelegateDelete);
+
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto size_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
+  auto channel_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
+
+  ResizeBilinearTester()
+      .HalfPixelCenters(true)
+      .InputHeight(size_rng())
+      .InputWidth(size_rng())
+      .OutputHeight(size_rng())
+      .OutputWidth(size_rng())
+      .Channels(channel_rng())
+      .Test(xnnpack_delegate.get());
+}
+
+TEST(ResizeBilinear, AlignCentersTF1X) {
+  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
+      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
+                       TfLiteXNNPackDelegateDelete);
+
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto size_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
+  auto channel_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
+
+  ResizeBilinearTester()
+      .InputHeight(size_rng())
+      .InputWidth(size_rng())
+      .OutputHeight(size_rng())
+      .OutputWidth(size_rng())
+      .Channels(channel_rng())
+      .Test(xnnpack_delegate.get());
+}
+
+TEST(ResizeBilinear, AlignCorners) {
+  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
+      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
+                       TfLiteXNNPackDelegateDelete);
+
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto size_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
+  auto channel_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
+
+  ResizeBilinearTester()
+      .AlignCorners(true)
+      .InputHeight(size_rng())
+      .InputWidth(size_rng())
+      .OutputHeight(size_rng())
+      .OutputWidth(size_rng())
+      .Channels(channel_rng())
+      .Test(xnnpack_delegate.get());
+}
+
+TEST(ResizeBilinear, MultiThreading) {
+  TfLiteXNNPackDelegateOptions delegate_options =
+      TfLiteXNNPackDelegateOptionsDefault();
+  delegate_options.num_threads = 2;
+  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
+      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
+                       TfLiteXNNPackDelegateDelete);
+
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto size_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
+  auto channel_rng =
+      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
+
+  ResizeBilinearTester()
+      .InputHeight(size_rng())
+      .InputWidth(size_rng())
+      .OutputHeight(size_rng())
+      .OutputWidth(size_rng())
+      .Channels(channel_rng())
+      .Test(xnnpack_delegate.get());
+}
+
+}  // namespace xnnpack
+}  // namespace tflite
diff --git a/tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.cc b/tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.cc
new file mode 100644
index 00000000000..34730c05719
--- /dev/null
+++ b/tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.cc
@@ -0,0 +1,183 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h"
+
+#include <array>
+#include <cstdint>
+#include <functional>
+#include <numeric>
+#include <random>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+
+namespace tflite {
+namespace xnnpack {
+
+void ResizeBilinearTester::Test(TfLiteDelegate* delegate) const {
+  std::random_device random_device;
+  auto rng = std::mt19937(random_device());
+  auto input_rng =
+      std::bind(std::uniform_real_distribution<float>(), std::ref(rng));
+
+  std::vector<char> buffer = CreateTfLiteModel();
+  const Model* model = GetModel(buffer.data());
+
+  std::unique_ptr<Interpreter> delegate_interpreter;
+  ASSERT_EQ(
+      InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
+          &delegate_interpreter),
+      kTfLiteOk);
+  std::unique_ptr<Interpreter> default_interpreter;
+  ASSERT_EQ(
+      InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
+          &default_interpreter),
+      kTfLiteOk);
+
+  ASSERT_TRUE(delegate_interpreter);
+  ASSERT_TRUE(default_interpreter);
+
+  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
+  ASSERT_EQ(default_interpreter->inputs().size(), 1);
+
+  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
+  ASSERT_EQ(default_interpreter->outputs().size(), 1);
+
+  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
+  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
+
+  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
+
+  float* default_input_data = default_interpreter->typed_tensor<float>(
+      default_interpreter->inputs()[0]);
+  std::generate(default_input_data,
+                default_input_data +
+                    BatchSize() * InputHeight() * InputWidth() * Channels(),
+                std::ref(input_rng));
+
+  float* delegate_input_data = delegate_interpreter->typed_tensor<float>(
+      delegate_interpreter->inputs()[0]);
+  std::copy(default_input_data,
+            default_input_data +
+                BatchSize() * InputHeight() * InputWidth() * Channels(),
+            delegate_input_data);
+
+  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
+  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
+
+  float* default_output_data = default_interpreter->typed_tensor<float>(
+      default_interpreter->outputs()[0]);
+  float* delegate_output_data = delegate_interpreter->typed_tensor<float>(
+      delegate_interpreter->outputs()[0]);
+
+  for (int i = 0; i < BatchSize(); i++) {
+    for (int y = 0; y < OutputHeight(); y++) {
+      for (int x = 0; x < OutputWidth(); x++) {
+        for (int c = 0; c < Channels(); c++) {
+          const int index =
+              ((i * OutputHeight() + y) * OutputWidth() + x) * Channels() + c;
+          ASSERT_NEAR(default_output_data[index], delegate_output_data[index],
+                      std::max(std::abs(default_output_data[index]) * 1.0e-4f,
+                               10.0f * std::numeric_limits<float>::epsilon()))
+              << "batch " << i << " / " << BatchSize() << ", y position " << y
+              << " / " << OutputHeight() << ", x position " << x << " / "
+              << OutputWidth() << ", channel " << c << " / " << Channels();
+        }
+      }
+    }
+  }
+}
+
+std::vector<char> ResizeBilinearTester::CreateTfLiteModel() const {
+  flatbuffers::FlatBufferBuilder builder;
+  flatbuffers::Offset<OperatorCode> operator_code =
+      CreateOperatorCode(builder, BuiltinOperator_RESIZE_BILINEAR);
+
+  flatbuffers::Offset<ResizeBilinearOptions> resize_bilinear_options =
+      CreateResizeBilinearOptions(builder, AlignCorners(), HalfPixelCenters());
+
+  const std::array<int32_t, 2> size_data{{OutputHeight(), OutputWidth()}};
+
+  const std::array<flatbuffers::Offset<Buffer>, 2> buffers{{
+      CreateBuffer(builder, builder.CreateVector({})),
+      CreateBuffer(builder,
+                   builder.CreateVector(
+                       reinterpret_cast<const uint8_t*>(size_data.data()),
+                       size_data.size() * sizeof(int32_t))),
+  }};
+
+  const std::array<int32_t, 4> input_shape{
+      {BatchSize(), InputHeight(), InputWidth(), Channels()}};
+  const std::array<int32_t, 4> output_shape{
+      {BatchSize(), OutputHeight(), OutputWidth(), Channels()}};
+  const std::array<int32_t, 1> size_shape{
+      {static_cast<int32_t>(size_data.size())}};
+
+  const std::array<flatbuffers::Offset<Tensor>, 3> tensors{{
+      CreateTensor(
+          builder,
+          builder.CreateVector<int32_t>(input_shape.data(), input_shape.size()),
+          TensorType_FLOAT32),
+      CreateTensor(
+          builder,
+          builder.CreateVector<int32_t>(size_shape.data(), size_shape.size()),
+          TensorType_INT32, /*buffer=*/1),
+      CreateTensor(builder,
+                   builder.CreateVector<int32_t>(output_shape.data(),
+                                                 output_shape.size()),
+                   TensorType_FLOAT32),
+  }};
+
+  const std::array<int32_t, 2> op_inputs{{0, 1}};
+  const std::array<int32_t, 1> op_outputs{{2}};
+  flatbuffers::Offset<Operator> op = CreateOperator(
+      builder, /*opcode_index=*/0,
+      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
+      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
+      BuiltinOptions_ResizeBilinearOptions, resize_bilinear_options.Union());
+
+  const std::array<int32_t, 1> subgraph_inputs{{0}};
+  const std::array<int32_t, 1> subgraph_outputs{{2}};
+  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
+      builder, builder.CreateVector(tensors.data(), tensors.size()),
+      builder.CreateVector<int32_t>(subgraph_inputs.data(),
+                                    subgraph_inputs.size()),
+      builder.CreateVector<int32_t>(subgraph_outputs.data(),
+                                    subgraph_outputs.size()),
+      builder.CreateVector(&op, 1));
+
+  flatbuffers::Offset<flatbuffers::String> description =
+      builder.CreateString("Resize Bilinear model");
+
+  flatbuffers::Offset<Model> model_buffer = CreateModel(
+      builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
+      builder.CreateVector(&subgraph, 1), description,
+      builder.CreateVector(buffers.data(), buffers.size()));
+
+  builder.Finish(model_buffer);
+
+  return std::vector<char>(builder.GetBufferPointer(),
+                           builder.GetBufferPointer() + builder.GetSize());
+}
+
+}  // namespace xnnpack
+}  // namespace tflite
diff --git a/tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h b/tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h
new file mode 100644
index 00000000000..6885fcf9033
--- /dev/null
+++ b/tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h
@@ -0,0 +1,115 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_RESIZE_BILINEAR_TESTER_H_
+#define TENSORFLOW_LITE_DELEGATES_XNNPACK_RESIZE_BILINEAR_TESTER_H_
+
+#include <cstdint>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+namespace xnnpack {
+
+class ResizeBilinearTester {
+ public:
+  ResizeBilinearTester() = default;
+  ResizeBilinearTester(const ResizeBilinearTester&) = delete;
+  ResizeBilinearTester& operator=(const ResizeBilinearTester&) = delete;
+
+  inline ResizeBilinearTester& BatchSize(int32_t batch_size) {
+    EXPECT_GT(batch_size, 0);
+    batch_size_ = batch_size;
+    return *this;
+  }
+
+  inline int32_t BatchSize() const { return batch_size_; }
+
+  inline ResizeBilinearTester& Channels(int32_t channels) {
+    EXPECT_GT(channels, 0);
+    channels_ = channels;
+    return *this;
+  }
+
+  inline int32_t Channels() const { return channels_; }
+
+  inline ResizeBilinearTester& InputHeight(int32_t input_height) {
+    EXPECT_GT(input_height, 0);
+    input_height_ = input_height;
+    return *this;
+  }
+
+  inline int32_t InputHeight() const { return input_height_; }
+
+  inline ResizeBilinearTester& InputWidth(int32_t input_width) {
+    EXPECT_GT(input_width, 0);
+    input_width_ = input_width;
+    return *this;
+  }
+
+  inline int32_t InputWidth() const { return input_width_; }
+
+  inline ResizeBilinearTester& OutputHeight(int32_t output_height) {
+    EXPECT_GT(output_height, 0);
+    output_height_ = output_height;
+    return *this;
+  }
+
+  inline int32_t OutputHeight() const { return output_height_; }
+
+  inline ResizeBilinearTester& OutputWidth(int32_t output_width) {
+    EXPECT_GT(output_width, 0);
+    output_width_ = output_width;
+    return *this;
+  }
+
+  inline int32_t OutputWidth() const { return output_width_; }
+
+  ResizeBilinearTester& AlignCorners(bool align_corners) {
+    align_corners_ = align_corners;
+    return *this;
+  }
+
+  bool AlignCorners() const { return align_corners_; }
+
+  ResizeBilinearTester& HalfPixelCenters(bool half_pixel_centers) {
+    half_pixel_centers_ = half_pixel_centers;
+    return *this;
+  }
+
+  bool HalfPixelCenters() const { return half_pixel_centers_; }
+
+  void Test(TfLiteDelegate* delegate) const;
+
+ private:
+  std::vector<char> CreateTfLiteModel() const;
+
+  int32_t batch_size_ = 1;
+  int32_t channels_ = 1;
+  int32_t input_height_ = 1;
+  int32_t input_width_ = 1;
+  int32_t output_height_ = 1;
+  int32_t output_width_ = 1;
+  bool align_corners_ = false;
+  bool half_pixel_centers_ = false;
+};
+
+}  // namespace xnnpack
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_DELEGATES_XNNPACK_RESIZE_BILINEAR_TESTER_H_
diff --git a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc
index c7aea59b231..eec223597cb 100644
--- a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc
+++ b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc
@@ -157,6 +157,7 @@ class Subgraph {
       case kTfLiteBuiltinMean:
       case kTfLiteBuiltinPad:
       case kTfLiteBuiltinReshape:
+      case kTfLiteBuiltinResizeBilinear:
         // Ignore the second input (axes, static padding, or new shape),
         // because it is represented as parameters of the XNNPACK operator
        // rather than extra input.
@@ -930,6 +931,14 @@ class Subgraph {
                                 context->tensors, reshape_params,
                                 xnnpack_tensors);
     }
+    case kTfLiteBuiltinResizeBilinear: {
+      const TfLiteResizeBilinearParams* resize_params =
+          static_cast<const TfLiteResizeBilinearParams*>(node->builtin_data);
+
+      return VisitResizeBilinearNode(subgraph, logging_context, node_index,
+                                     node, context->tensors, resize_params,
+                                     xnnpack_tensors);
+    }
     case kTfLiteBuiltinRound:
       return VisitRoundNode(subgraph, logging_context, node_index, node,
                             context->tensors, xnnpack_tensors);
@@ -2460,6 +2469,80 @@ class Subgraph {
     return kTfLiteOk;
   }
 
+  static TfLiteStatus VisitResizeBilinearNode(
+      xnn_subgraph_t subgraph, TfLiteContext* logging_context, int node_index,
+      TfLiteNode* node, const TfLiteTensor* tensors,
+      const TfLiteResizeBilinearParams* resize_params,
+      const std::vector<uint32_t>& xnnpack_tensors) {
+    TF_LITE_ENSURE_STATUS(
+        CheckNumInputsAndOutputs(logging_context, node, 2, 1, node_index));
+
+    const TfLiteTensor& input_tensor = tensors[node->inputs->data[0]];
+    TF_LITE_ENSURE_STATUS(CheckTensorFloatType(
+        logging_context, input_tensor, node->inputs->data[0], node_index));
+    TF_LITE_ENSURE_STATUS(CheckTensorShape(logging_context, input_tensor, 4,
+                                           node->inputs->data[0]));
+    TF_LITE_ENSURE_STATUS(CheckTensorNonDynamicAllocation(
+        logging_context, input_tensor, node->inputs->data[0], node_index));
+
+    const TfLiteTensor& shape_tensor = tensors[node->inputs->data[1]];
+    TF_LITE_ENSURE_STATUS(CheckTensorType(logging_context, shape_tensor,
+                                          kTfLiteInt32, node->inputs->data[1],
+                                          node_index));
+    TF_LITE_ENSURE_STATUS(CheckShapeTensorShape(
+        logging_context, shape_tensor, node->inputs->data[1], node_index));
+    if (shape_tensor.dims->data[0] != 2) {
+      TF_LITE_MAYBE_KERNEL_LOG(
+          logging_context,
+          "unexpected number of dimensions %d in the output shape in node %d",
+          shape_tensor.dims->data[0], node_index);
+    }
+    TF_LITE_ENSURE_STATUS(CheckTensorStaticAllocation(
+        logging_context, shape_tensor, node->inputs->data[1], node_index));
+
+    const TfLiteTensor& output_tensor = tensors[node->outputs->data[0]];
+    TF_LITE_ENSURE_STATUS(CheckTensorFloatType(
+        logging_context, output_tensor, node->outputs->data[0], node_index));
+    TF_LITE_ENSURE_STATUS(CheckTensorShape(logging_context, output_tensor, 4,
+                                           node->outputs->data[0]));
+    TF_LITE_ENSURE_STATUS(CheckTensorNonDynamicAllocation(
+        logging_context, output_tensor, node->outputs->data[0], node_index));
+
+    const int32_t* shape_data =
+        reinterpret_cast<const int32_t*>(shape_tensor.data.data);
+    for (int i = 0; i < shape_tensor.dims->size; i++) {
+      const int32_t dim = shape_data[i];
+      if (dim <= 0) {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            logging_context, "invalid output dimension #%d value %d in node %d",
+            i, dim, node_index);
+        return kTfLiteError;
+      }
+    }
+
+    if (subgraph != nullptr) {
+      uint32_t flags = 0;
+      if (resize_params->align_corners) {
+        flags |= XNN_FLAG_ALIGN_CORNERS;
+      } else if (!resize_params->half_pixel_centers) {
+        flags |= XNN_FLAG_TENSORFLOW_LEGACY_MODE;
+      }
+      const xnn_status status = xnn_define_static_resize_bilinear_2d(
+          subgraph, static_cast<size_t>(shape_data[0]),
+          static_cast<size_t>(shape_data[1]),
+          /*input_id=*/xnnpack_tensors[node->inputs->data[0]],
+          /*output_id=*/xnnpack_tensors[node->outputs->data[0]], flags);
+      if (status != xnn_status_success) {
+        TF_LITE_KERNEL_LOG(logging_context,
+                           "failed to delegate RESIZE_BILINEAR node #%d",
+                           node_index);
+        return kTfLiteError;
+      }
+    }
+
+    return kTfLiteOk;
+  }
+
   static TfLiteStatus VisitRoundNode(
       xnn_subgraph_t subgraph, TfLiteContext* logging_context, int node_index,
       TfLiteNode* node, const TfLiteTensor* tensors,
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 586bda6a3a2..0d4f64cf0c2 100755
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -164,11 +164,11 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
 
     tf_http_archive(
         name = "XNNPACK",
-        sha256 = "bd4278ebbe3f6b104f46548717b00bdba95acaab3cbac3de4015c65d868259f8",
-        strip_prefix = "XNNPACK-d27202dfeaa8d3a96670ba47f3dce2f19305a092",
+        sha256 = 
"c6eae589a4af7785da467162acd339bae359842e14c93bddc8fbe84ffd361c70", + strip_prefix = "XNNPACK-aff24e26a760552ee98a036f2a6e95b123e1bc6d", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/d27202dfeaa8d3a96670ba47f3dce2f19305a092.zip", - "https://github.com/google/XNNPACK/archive/d27202dfeaa8d3a96670ba47f3dce2f19305a092.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/aff24e26a760552ee98a036f2a6e95b123e1bc6d.zip", + "https://github.com/google/XNNPACK/archive/aff24e26a760552ee98a036f2a6e95b123e1bc6d.zip", ], )