SOFTMAX tests for XNNPACK delegate

PiperOrigin-RevId: 307631610
Change-Id: I7615d500c4b707fd2bfb343d08c801ae57fe2f74
Marat Dukhan 2020-04-21 10:27:22 -07:00 committed by TensorFlower Gardener
parent 98a5b3b6d1
commit 203c417a20
4 changed files with 403 additions and 0 deletions

tensorflow/lite/delegates/xnnpack/BUILD

@@ -90,6 +90,21 @@ cc_library(
    ],
)

cc_library(
    name = "softmax_tester",
    testonly = 1,
    srcs = ["softmax_tester.cc"],
    hdrs = ["softmax_tester.h"],
    deps = [
        "//tensorflow/lite:framework",
        "//tensorflow/lite:schema_fbs_version",
        "//tensorflow/lite/kernels:builtin_ops",
        "//tensorflow/lite/schema:schema_fbs",
        "@com_google_googletest//:gtest",
        "@flatbuffers",
    ],
)

cc_library(
    name = "unary_elementwise_tester",
    testonly = 1,
@@ -289,4 +304,19 @@ cc_test(
    ],
)

cc_test(
    name = "softmax_test",
    srcs = ["softmax_test.cc"],
    linkopts = select({
        "//tensorflow:emscripten": EMSCRIPTEN_LINKOPTS,
        "//conditions:default": [],
    }),
    deps = [
        ":softmax_tester",
        ":test_main",
        ":xnnpack_delegate_test_mode",
        "@com_google_googletest//:gtest",
    ],
)

tflite_portable_test_suite_combined(combine_conditions = {"deps": [":test_main"]})
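
With these targets in place, the new test runs like the other XNNPACK delegate tests. A plausible invocation from the root of a TensorFlow checkout (the package path is inferred from the include paths in this commit, not stated here explicitly):

bazel test //tensorflow/lite/delegates/xnnpack:softmax_test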

tensorflow/lite/delegates/xnnpack/softmax_test.cc

@@ -0,0 +1,140 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <cstdint>
#include <functional>
#include <memory>
#include <random>

#include <gtest/gtest.h>

#include "tensorflow/lite/delegates/xnnpack/softmax_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"

namespace tflite {
namespace xnnpack {
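
// Each test below follows the same pattern: build an XNNPACK delegate, draw
// random tensor dimensions, and let SoftmaxTester compare the delegated
// output against TFLite's reference SOFTMAX kernel.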
TEST(Softmax, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();

  SoftmaxTester()
      .Shape({batch, height, width, channels})
      .Test(xnnpack_delegate.get());
}

TEST(Softmax, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();

  SoftmaxTester().Shape({batch, width, channels}).Test(xnnpack_delegate.get());
}

TEST(Softmax, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();

  SoftmaxTester().Shape({batch, channels}).Test(xnnpack_delegate.get());
}

TEST(Softmax, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();

  SoftmaxTester().Shape({batch}).Test(xnnpack_delegate.get());
}
TEST(Softmax, DISABLED_Beta) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();

  SoftmaxTester()
      .Shape({batch, height, width, channels})
      .Beta(0.1f)
      .Test(xnnpack_delegate.get());

  SoftmaxTester()
      .Shape({batch, height, width, channels})
      .Beta(10.0f)
      .Test(xnnpack_delegate.get());
}
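
// num_threads = 2 makes the delegate run its XNNPACK kernels on an internal
// two-thread pool; the correctness check itself is unchanged.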
TEST(Softmax, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);

  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();

  SoftmaxTester()
      .Shape({batch, height, width, channels})
      .Test(xnnpack_delegate.get());
}

}  // namespace xnnpack
}  // namespace tflite

tensorflow/lite/delegates/xnnpack/softmax_tester.cc

@@ -0,0 +1,156 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/xnnpack/softmax_tester.h"
#include <array>
#include <cstdint>
#include <functional>
#include <numeric>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace xnnpack {
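
// Serializes the single-op model, runs it through two interpreters (one with
// the delegate applied, one without), feeds both the same random input, and
// verifies that the outputs agree elementwise.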
void SoftmaxTester::Test(TfLiteDelegate* delegate) const {
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto input_rng = std::bind(
      std::uniform_real_distribution<float>(-15.0f, 15.0f), std::ref(rng));

  std::vector<char> buffer = CreateTfLiteModel();
  const Model* model = GetModel(buffer.data());

  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(model, ::tflite::ops::builtin::BuiltinOpResolver())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
  ASSERT_EQ(default_interpreter->inputs().size(), 1);
  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

  float* default_input_data = default_interpreter->typed_tensor<float>(
      default_interpreter->inputs()[0]);
  std::generate(default_input_data, default_input_data + Size(),
                std::ref(input_rng));

  float* delegate_input_data = delegate_interpreter->typed_tensor<float>(
      delegate_interpreter->inputs()[0]);
  std::copy(default_input_data, default_input_data + Size(),
            delegate_input_data);

  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

  float* default_output_data = default_interpreter->typed_tensor<float>(
      default_interpreter->outputs()[0]);
  float* delegate_output_data = delegate_interpreter->typed_tensor<float>(
      delegate_interpreter->outputs()[0]);
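
  // Tolerate a relative error of roughly 10 ULP, with a floor of one machine
  // epsilon so that outputs near zero are not required to match exactly.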
  for (size_t i = 0; i < Size(); i++) {
    ASSERT_NEAR(default_output_data[i], delegate_output_data[i],
                std::numeric_limits<float>::epsilon() *
                    std::max(std::abs(default_output_data[i]) * 10.0f, 1.0f));
  }
}
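
// Builds a minimal TFLite FlatBuffer model: one FLOAT32 input tensor, one
// FLOAT32 output tensor, and a single SOFTMAX operator carrying the
// configured beta.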
std::vector<char> SoftmaxTester::CreateTfLiteModel() const {
  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> operator_code =
      CreateOperatorCode(builder, BuiltinOperator_SOFTMAX);

  const std::array<flatbuffers::Offset<Buffer>, 1> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
  }};

  const std::array<flatbuffers::Offset<Tensor>, 2> tensors{{
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          TensorType_FLOAT32),
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          TensorType_FLOAT32),
  }};

  flatbuffers::Offset<SoftmaxOptions> softmax_options =
      CreateSoftmaxOptions(builder, Beta());

  const std::array<int32_t, 1> op_inputs{{0}};
  const std::array<int32_t, 1> op_outputs{{1}};
  flatbuffers::Offset<Operator> op = CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
      BuiltinOptions_SoftmaxOptions, softmax_options.Union());

  const std::array<int32_t, 1> subgraph_inputs{{0}};
  const std::array<int32_t, 1> subgraph_outputs{{1}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(&op, 1));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("Softmax model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}
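
// The number of elements in a tensor is the product of its dimensions.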
int32_t SoftmaxTester::ComputeSize(const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}

}  // namespace xnnpack
}  // namespace tflite

tensorflow/lite/delegates/xnnpack/softmax_tester.h

@@ -0,0 +1,77 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_SOFTMAX_TESTER_H_
#define TENSORFLOW_LITE_DELEGATES_XNNPACK_SOFTMAX_TESTER_H_

#include <cstdint>
#include <functional>
#include <random>
#include <vector>

#include <gtest/gtest.h>

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {
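
// Builder-style test helper: configure a shape (and optionally beta), then
// call Test() to check that the XNNPACK delegate and the reference SOFTMAX
// kernel produce matching outputs for the same random input.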
class SoftmaxTester {
 public:
  SoftmaxTester() = default;
  SoftmaxTester(const SoftmaxTester&) = delete;
  SoftmaxTester& operator=(const SoftmaxTester&) = delete;

  inline SoftmaxTester& Shape(std::initializer_list<int32_t> shape) {
    EXPECT_GT(shape.size(), 0);
    for (auto it = shape.begin(); it != shape.end(); ++it) {
      EXPECT_GT(*it, 0);
    }
    shape_ = std::vector<int32_t>(shape.begin(), shape.end());
    size_ = SoftmaxTester::ComputeSize(shape_);
    return *this;
  }

  const std::vector<int32_t>& Shape() const { return shape_; }

  int32_t Size() const { return size_; }

  inline SoftmaxTester& Beta(float beta) {
    beta_ = beta;
    return *this;
  }

  float Beta() const { return beta_; }

  void Test(TfLiteDelegate* delegate) const;

 private:
  std::vector<char> CreateTfLiteModel() const;

  static int32_t ComputeSize(const std::vector<int32_t>& shape);

  std::vector<int32_t> shape_;
  int32_t size_;
  float beta_ = 1.0f;
};

}  // namespace xnnpack
}  // namespace tflite

#endif  // TENSORFLOW_LITE_DELEGATES_XNNPACK_SOFTMAX_TESTER_H_