Add unit tests for the hexagon delegate

PiperOrigin-RevId: 292012389
Change-Id: I4e3b1062820361db9d8a4c566bafbc4990f59bf4
This commit is contained in:
Karim Nosir 2020-01-28 14:20:14 -08:00 committed by TensorFlower Gardener
parent acb0623342
commit 4152ed672e
20 changed files with 1597 additions and 2 deletions

View File

@ -75,6 +75,4 @@ cc_library(
"manual",
"nobuilder",
],
deps = [
],
)

View File

@ -14,6 +14,8 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/neg_op_builder.h"
#include <limits>
namespace tflite {
namespace delegates {
namespace hexagon {

View File

@ -0,0 +1,45 @@
# Macro that expands each src into a separate hexagon delegate op test.
load(":tests.bzl", "hexagon_op_tests")
package(
    default_visibility = [
        "//visibility:public",
    ],
    licenses = ["notice"],  # Apache 2.0
)
# Shared test harness: single-op model that routes execution through the
# Hexagon delegate (see hexagon_delegate_op_model.h).
cc_library(
    name = "hexagon_delegate_op_model",
    testonly = 1,
    hdrs = ["hexagon_delegate_op_model.h"],
    deps = [
        "//tensorflow/lite:framework",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate",
        "//tensorflow/lite/kernels:builtin_ops",
        "//tensorflow/lite/kernels:test_util",
        "//tensorflow/lite/schema:schema_fbs",
        "@com_google_googletest//:gtest",
    ],
)
# One test target per op builder; all share the harness above.
hexagon_op_tests(
    srcs = [
        "activations_test.cc",
        "arg_min_max_test.cc",
        "concat_test.cc",
        "depthwise_conv_test.cc",
        "neg_test.cc",
        "pad_test.cc",
        "pool_test.cc",
        "reduce_test.cc",
        "resize_bilinear_test.cc",
        "space_to_depth_test.cc",
        "split_test.cc",
        "transpose_conv_test.cc",
        "transpose_test.cc",
    ],
    deps = [
        ":hexagon_delegate_op_model",
        "@com_google_googletest//:gtest_main",
    ],
)

View File

@ -0,0 +1,19 @@
# Hexagon Delegate Testing
This directory contains unit-tests for Op Builders for the hexagon delegate.
To run all the tests, use the run_tests.sh script in this directory and pass
the path to the directory containing the libhexagon_nn_skel*.so files.
The script will copy all files to the device and build all tests and execute
them.
The script stops if any of the tests fails.
Example:
Follow the [Instructions](https://www.tensorflow.org/lite/performance/hexagon_delegate)
and download the hexagon_nn_skel and extract the files.
For example, if the files are extracted in /tmp/hexagon_skel, use the following sample command:
`
bash tensorflow/lite/experimental/delegates/hexagon/builders/tests/run_tests.sh /tmp/hexagon_skel
`

View File

@ -0,0 +1,113 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Wraps a single-input/single-output activation op (RELU, RELU6, TANH,
// LOGISTIC, ...) so it can be run through the Hexagon delegate in tests.
class ActivationOpModel : public SingleOpModelWithHexagon {
 public:
  // Builds a graph containing only `type` with the given quantized
  // input/output tensor specs.
  explicit ActivationOpModel(BuiltinOperator type, const TensorData& input,
                             const TensorData& output) {
    input_ = AddInput(input);
    output_ = AddOutput(output);
    // Activation ops carry no builtin options.
    SetBuiltinOp(type, BuiltinOptions_NONE, 0);
    BuildInterpreter({GetShape(input_)});
  }

  // Quantizes `data` with the input tensor's scale/zero-point and fills it in.
  template <typename T>
  void SetInput(const std::vector<float>& data) {
    QuantizeAndPopulate<T>(input_, data);
  }

  // Returns the output dequantized back to float.
  template <typename T>
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
                         GetZeroPoint(output_));
  }

  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 protected:
  // NOTE: removed unused member `op_code_` (was never assigned or read).
  int input_;
  int output_;
};
TEST(ActivationOpModel, ReluOutput) {
  // Quantized range is [-6, 6]; the input value 7 is clamped by the
  // tensor's max, so the expected last output is 6.
  constexpr float kRangeMin = -6;
  constexpr float kRangeMax = 6;
  ActivationOpModel m(
      BuiltinOperator_RELU,
      /*input=*/{TensorType_UINT8, {1, 3}, kRangeMin, kRangeMax},
      /*output=*/{TensorType_UINT8, {1, 3}, kRangeMin, kRangeMax});
  m.SetInput<uint8_t>({1, 5, 7});
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(
      m.GetDequantizedOutput<uint8_t>(),
      ElementsAreArray(ArrayFloatNear({1.0, 5.0, 6.0}, /*tolerance=*/0.03)));
}
TEST(ActivationOpModel, Relu6Output) {
  // RELU6 clamps to [0, 6]: -1 -> 0 and 8 -> 6.
  constexpr float kRangeMin = -8;
  constexpr float kRangeMax = 8;
  ActivationOpModel m(
      BuiltinOperator_RELU6,
      /*input=*/{TensorType_UINT8, {1, 3}, kRangeMin, kRangeMax},
      /*output=*/{TensorType_UINT8, {1, 3}, kRangeMin, kRangeMax});
  m.SetInput<uint8_t>({4, -1.0, 8});
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(
      m.GetDequantizedOutput<uint8_t>(),
      ElementsAreArray(ArrayFloatNear({4.0, 0.0, 6.0}, /*tolerance=*/0.03)));
}
TEST(ActivationOpModel, TanhOutput) {
  constexpr float kRangeMin = -8;
  constexpr float kRangeMax = 8;
  ActivationOpModel m(
      BuiltinOperator_TANH,
      /*input=*/{TensorType_UINT8, {1, 3}, kRangeMin, kRangeMax},
      /*output=*/{TensorType_UINT8, {1, 3}, kRangeMin, kRangeMax});
  m.SetInput<uint8_t>({4, -1.0, 8});
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
              ElementsAreArray(
                  ArrayFloatNear({7.96, -6.09, 7.97}, /*tolerance=*/0.03)));
}
TEST(ActivationOpModel, SigmoidOutput) {
  constexpr float kRangeMin = -8;
  constexpr float kRangeMax = 8;
  // Sigmoid requires output min/max to be set to these numbers
  // (zero point 0, scale 1/256).
  ActivationOpModel m(
      BuiltinOperator_LOGISTIC,
      /*input=*/{TensorType_UINT8, {1, 3}, kRangeMin, kRangeMax},
      /*output=*/{TensorType_UINT8, {1, 3}, 0, 0, 1. / 256});
  m.SetInput<uint8_t>({4, -1.0, 8});
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
              ElementsAreArray(
                  ArrayFloatNear({0.977, 0.266, 0.996}, /*tolerance=*/0.03)));
}
} // namespace tflite

View File

@ -0,0 +1,107 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Common base for the ArgMin/ArgMax test models: a single quantized uint8
// input and an output of the given type (int32 indices in these tests).
class ArgBaseOpModel : public SingleOpModelWithHexagon {
 public:
  explicit ArgBaseOpModel(TensorType output_type) {
    input_ = AddInput(TensorType_UINT8);
    output_ = AddOutput(output_type);
  }
  // Index of the input tensor, for populating data.
  int input() const { return input_; }
  std::vector<int32_t> GetInt32Output() const {
    return ExtractVector<int32_t>(output_);
  }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
 protected:
  // Expose the flatbuffer builder so subclasses can create op options.
  using SingleOpModelWithHexagon::builder_;
  int input_;
  int output_;
};
// Test model for ARG_MIN. Usage: construct with the input shape, add the
// axis as a const input, then call Build().
class ArgMinOpModel : public ArgBaseOpModel {
 public:
  // `explicit` added: single-argument constructor should not allow
  // implicit conversions (callers use direct initialization).
  explicit ArgMinOpModel(std::initializer_list<int> input_shape)
      : ArgBaseOpModel(TensorType_INT32 /*output_type*/),
        input_shape_(input_shape) {}

  // Finalizes the graph; must be called after the axis const input is added.
  void Build() {
    SetBuiltinOp(BuiltinOperator_ARG_MIN, BuiltinOptions_ArgMinOptions,
                 CreateArgMinOptions(builder_, TensorType_INT32 /*output_type*/)
                     .Union());
    BuildInterpreter({input_shape_, {1}});
  }

 private:
  std::vector<int> input_shape_;
};
// Test model for ARG_MAX. Usage mirrors ArgMinOpModel: construct with the
// input shape, add the axis as a const input, then call Build().
class ArgMaxOpModel : public ArgBaseOpModel {
 public:
  // `explicit` added: single-argument constructor should not allow
  // implicit conversions (callers use direct initialization).
  explicit ArgMaxOpModel(std::initializer_list<int> input_shape)
      : ArgBaseOpModel(TensorType_INT32 /*output_type*/),
        input_shape_(input_shape) {}

  // Finalizes the graph; must be called after the axis const input is added.
  void Build() {
    SetBuiltinOp(BuiltinOperator_ARG_MAX, BuiltinOptions_ArgMaxOptions,
                 CreateArgMaxOptions(builder_, TensorType_INT32 /*output_type*/)
                     .Union());
    BuildInterpreter({input_shape_, {1}});
  }

 private:
  std::vector<int> input_shape_;
};
TEST(ArgMinTest, GetArgMin) {
  ArgMinOpModel m({1, 1, 1, 4});
  // Axis tensor holds the value 3 (reduce over the last dimension).
  m.AddConstInput(TensorType_INT32, {3}, {1});
  m.Build();
  m.SymmetricQuantizeAndPopulate(m.input(), {1, 5, 0, 7});
  m.ApplyDelegateAndInvoke();
  // The minimum value (0) sits at index 2.
  EXPECT_THAT(m.GetInt32Output(), ElementsAreArray({2}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1}));
}
TEST(ArgMinTest, GetArgMinNegative) {
  ArgMinOpModel m({1, 1, 2, 4});
  // Negative axis -2 counts from the end (i.e. dimension 2).
  m.AddConstInput(TensorType_INT32, {-2}, {1});
  m.Build();
  m.SymmetricQuantizeAndPopulate(m.input(), {1, 2, 7, 8, 1, 9, 7, 3});
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetInt32Output(), ElementsAreArray({0, 0, 0, 1}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 4}));
}
TEST(ArgMaxTest, GetArgMax) {
  ArgMaxOpModel model({1, 1, 1, 4});
  // Axis tensor holds the value 3 (reduce over the last dimension).
  model.AddConstInput(TensorType_INT32, {3}, {1});
  model.Build();
  model.SymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
  model.ApplyDelegateAndInvoke();
  // The maximum value (7) sits at index 3.
  EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({3}));
  // Added for consistency with the ArgMin tests: verify the reduced shape.
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1}));
}
} // namespace tflite

View File

@ -0,0 +1,98 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
class QuantizedConcatenationOpModel : public SingleOpModelWithHexagon {
public:
QuantizedConcatenationOpModel(const std::vector<TensorData>& input_template,
int axis, const TensorData& output_template) {
std::vector<std::vector<int>> all_input_shapes;
for (int i = 0; i < input_template.size(); ++i) {
all_input_shapes.push_back(input_template[i].shape);
AddInput(input_template[i]);
}
output_ = AddOutput({output_template.type, /*shape=*/{},
output_template.min, output_template.max});
SetBuiltinOp(
BuiltinOperator_CONCATENATION, BuiltinOptions_ConcatenationOptions,
CreateConcatenationOptions(builder_, axis, ActivationFunctionType_NONE)
.Union());
BuildInterpreter(all_input_shapes);
}
template <typename T>
void SetInput(int index, std::initializer_list<float> data) {
QuantizeAndPopulate<T>(index, data);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
private:
int output_;
};
TEST(QuantizedConcatenationOpModel, FourInputsQuantizedSameRange) {
  // All four inputs share the output's quantization range.
  QuantizedConcatenationOpModel model(
      {{TensorType_UINT8, {2, 1, 1, 2}, -12.7, 12.8},
       {TensorType_UINT8, {2, 1, 1, 2}, -12.7, 12.8},
       {TensorType_UINT8, {2, 1, 1, 2}, -12.7, 12.8},
       {TensorType_UINT8, {2, 1, 1, 2}, -12.7, 12.8}},
      /*axis=*/3, {TensorType_UINT8, {}, -12.7, 12.8});
  model.SetInput<uint8_t>(0, {1.0f, 3.0f, 4.0f, 7.0f});
  model.SetInput<uint8_t>(1, {1.1f, 3.1f, 4.1f, 7.1f});
  model.SetInput<uint8_t>(2, {1.2f, 3.2f, 4.2f, 7.2f});
  model.SetInput<uint8_t>(3, {1.3f, 3.3f, 4.3f, 7.3f});
  model.ApplyDelegateAndInvoke();
  // Concatenation on the channel axis interleaves the inputs element-wise.
  EXPECT_THAT(model.GetDequantizedOutput<uint8_t>(),
              ElementsAreArray(ArrayFloatNear(
                  {
                      1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,  //
                      4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,  //
                  },
                  /*max_abs_error=*/0.2)));
}
TEST(QuantizedConcatenationOpModel, FourInputsQuantizedMixedRange) {
  // Each input carries a different quantization range, so the op must
  // requantize into the output's range.
  QuantizedConcatenationOpModel model(
      {{TensorType_UINT8, {2, 1, 1, 2}, -10.7, 10.8},
       {TensorType_UINT8, {2, 1, 1, 2}, 0, 12.8},
       {TensorType_UINT8, {2, 1, 1, 2}, -11, 11.8},
       {TensorType_UINT8, {2, 1, 1, 2}, 0, 7.4}},
      /*axis=*/3, {TensorType_UINT8, {}, -12.7, 12.8});
  model.SetInput<uint8_t>(0, {1.0f, 3.0f, 4.0f, 7.0f});
  model.SetInput<uint8_t>(1, {1.1f, 3.1f, 4.1f, 7.1f});
  model.SetInput<uint8_t>(2, {1.2f, 3.2f, 4.2f, 7.2f});
  model.SetInput<uint8_t>(3, {1.3f, 3.3f, 4.3f, 7.3f});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetDequantizedOutput<uint8_t>(),
              ElementsAreArray(ArrayFloatNear(
                  {
                      1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,  //
                      4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,  //
                  },
                  /*max_abs_error=*/0.2)));
}
} // namespace tflite

View File

@ -0,0 +1,138 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Test model for a quantized DEPTHWISE_CONV_2D with a constant filter and a
// per-tensor-quantized int32 bias input.
class QuantizedDepthwiseConvolutionOpModel : public SingleOpModelWithHexagon {
 public:
  QuantizedDepthwiseConvolutionOpModel(const TensorData& input,
                                       const TensorData& filter,
                                       const TensorData& output,
                                       Padding padding_type,
                                       int dilation_factor = 1) {
    input_ = AddInput(input);
    // The filter is registered as a const input of zeros; tests overwrite
    // the real values afterwards via SetFilter().
    // NOTE(review): the placeholder is hard-coded to 9 elements, which
    // assumes a 3x3 single-channel filter — confirm before reusing with
    // other filter shapes.
    uint8_t zero = static_cast<uint8_t>(0);
    filter_ = AddConstInput(
        filter, {zero, zero, zero, zero, zero, zero, zero, zero, zero});
    int bias_size = GetShape(filter_)[3];
    // per tensor quantization.
    // Bias scale is input_scale * filter_scale, as required for quantized
    // convolution accumulation.
    auto bias_scale = GetScale(input_) * GetScale(filter_);
    TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
    bias_ = AddInput(bias);
    output_ = AddOutput(output);
    // Depth multiplier derived from input/filter channel dims; stride is
    // fixed at 1 in both dimensions.
    int input_depth = GetShape(input_)[3];
    int output_depth = GetShape(filter_)[3];
    int depth_mul = output_depth / input_depth;
    SetBuiltinOp(
        BuiltinOperator_DEPTHWISE_CONV_2D,
        BuiltinOptions_DepthwiseConv2DOptions,
        CreateDepthwiseConv2DOptions(builder_, padding_type, 1, 1, depth_mul,
                                     ActivationFunctionType_NONE,
                                     dilation_factor, dilation_factor)
            .Union());
    BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
  }
  // Quantizes float `data` into the input tensor.
  void SetInput(std::initializer_list<float> data) {
    QuantizeAndPopulate<uint8_t>(input_, data);
  }
  // Replaces the zero placeholder filter values.
  void SetFilter(std::initializer_list<float> data) {
    QuantizeAndPopulate<uint8_t>(filter_, data);
  }
  // Quantizes float `data` into the int32 bias tensor.
  void SetBias(std::initializer_list<float> data) {
    QuantizeAndPopulate<int32_t>(bias_, data);
  }
  // Returns the output dequantized back to float.
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
                               GetScale(output_), GetZeroPoint(output_));
  }
 protected:
  int input_;
  int filter_;
  int bias_;
  int output_;
};
// Dilated depthwise conv: a 3x3 filter with dilation 3 has an effective
// receptive field of 7x7, so a 9x9 input with VALID padding gives 3x3 output.
TEST(QuantizedDepthwiseConvolutionOpTest, SimpleDilatedTestPaddingValid) {
  const int depth = 1;
  const int image_width = 9;
  const int image_height = 9;
  const int image_batch_count = 1;
  const int filter_size = 3;
  const int filter_count = 1;
  const int dilation_factor = 3;
  QuantizedDepthwiseConvolutionOpModel m(
      {TensorType_UINT8,
       {image_batch_count, image_height, image_width, depth},
       0,
       255},
      {TensorType_UINT8,
       {depth, filter_size, filter_size, filter_count},
       0,
       255},
      {TensorType_UINT8, {}, 0, 255}, Padding_VALID, dilation_factor);
  // The image matrix is:
  // | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
  // | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
  // clang-format off
  m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 1, 1, 1, 0, 0, 0,
              0, 0, 0, 1, 1, 1, 0, 0, 0,
              0, 0, 0, 1, 1, 1, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0});
  // clang-format on
  // The filter matrix is:
  // | 1 | 2 | 3 |
  // | 4 | 5 | 6 |
  // | 7 | 8 | 9 |
  m.SetFilter({1, 2, 3, 4, 5, 6, 7, 8, 9});
  // No bias for this test.
  m.SetBias({0});
  m.ApplyDelegateAndInvoke();
  // Since the dilation rate is 3 this will reduce the size of the output
  // from 9x9 to 3x3 of all 5s. Specifically:
  // | 5 | 5 | 5 |
  // | 5 | 5 | 5 |
  // | 5 | 5 | 5 |
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
}
} // namespace tflite

View File

@ -0,0 +1,83 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TESTS_HEXAGON_DELEGATE_OP_MODEL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TESTS_HEXAGON_DELEGATE_OP_MODEL_H_
#include <algorithm>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Test harness that runs a single-op model through the Hexagon delegate.
// Subclasses build the op graph; ApplyDelegateAndInvoke() creates and
// attaches the delegate, asserts the whole graph was delegated, and invokes.
class SingleOpModelWithHexagon : public SingleOpModel {
 public:
  // Starts with a null delegate and a no-op deleter; the real delegate (and
  // its deleter) is installed by ApplyDelegateAndInvoke().
  SingleOpModelWithHexagon() : delegate_(nullptr, [](TfLiteDelegate*) {}) {}

  void ApplyDelegateAndInvoke() {
    static const char kDelegateName[] = "TfLiteHexagonDelegate";
    // Make sure we set the environment.
    // ADSP_LIBRARY_PATH tells the DSP loader where to find the
    // libhexagon_nn_skel*.so files pushed by run_tests.sh.
    setenv(
        "ADSP_LIBRARY_PATH",
        "/data/local/tmp/hexagon_delegate_test;/system/lib/rfsa/adsp;/system/"
        "vendor/lib/rfsa/adsp;/dsp",
        1 /*overwrite*/);
    auto* delegate_ptr = TfLiteHexagonDelegateCreate(&params_);
    ASSERT_TRUE(delegate_ptr != nullptr);
    // The deleter both frees the delegate and tears down the DSP session.
    delegate_ = Interpreter::TfLiteDelegatePtr(
        delegate_ptr, [](TfLiteDelegate* delegate) {
          TfLiteHexagonDelegateDelete(delegate);
          // Turn off the fast rpc and cleanup.
          // Any communication with the DSP will fail unless new
          // HexagonDelegateInit called.
          TfLiteHexagonTearDown();
        });
    TfLiteHexagonInit();
    // Make sure we have valid interpreter.
    ASSERT_TRUE(interpreter_ != nullptr);
    // Add delegate.
    EXPECT_TRUE(interpreter_->ModifyGraphWithDelegate(delegate_.get()) !=
                kTfLiteError);
    // Make sure graph has one Op which is the delegate node.
    ASSERT_EQ(1, interpreter_->execution_plan().size());
    const int node = interpreter_->execution_plan()[0];
    const auto* node_and_reg = interpreter_->node_and_registration(node);
    ASSERT_TRUE(node_and_reg != nullptr);
    ASSERT_TRUE(node_and_reg->second.custom_name != nullptr);
    ASSERT_STREQ(kDelegateName, node_and_reg->second.custom_name);
    Invoke();
  }

 protected:
  // Expose the flatbuffer builder so subclasses can create op options.
  using SingleOpModel::builder_;

 private:
  Interpreter::TfLiteDelegatePtr delegate_;
  // Zero-initialized delegate options.
  TfLiteHexagonDelegateOptions params_ = {0};
};
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TESTS_HEXAGON_DELEGATE_OP_MODEL_H_

View File

@ -0,0 +1,56 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
class NegOpModel : public SingleOpModelWithHexagon {
public:
NegOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_NEG, BuiltinOptions_NegOptions,
CreateNegOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(input_, data);
}
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
protected:
int input_;
int output_;
};
TEST(NegOpModel, NegTest) {
  NegOpModel model({TensorType_UINT8, {2, 3}, -10, 10},
                   {TensorType_UINT8, {2, 3}, -10, 10});
  model.SetQuantizedInput({-2.0f, -1.0f, 0.f, 1.0f, 2.0f, 3.0f});
  model.ApplyDelegateAndInvoke();
  // Every element comes back negated.
  EXPECT_THAT(
      model.GetDequantizedOutput(),
      ElementsAreArray(ArrayFloatNear({2.0f, 1.0f, 0.f, -1.0f, -2.0f, -3.0f},
                                      /*max_abs_error=*/0.1)));
}
} // namespace tflite

View File

@ -0,0 +1,88 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Model wrapping a PAD op whose paddings tensor is a constant input.
class PadOpConstModel : public SingleOpModelWithHexagon {
 public:
  // `paddings` is a flattened [rank, 2] list of (before, after) pads,
  // with shape `paddings_shape`.
  PadOpConstModel(const TensorData& input,
                  std::initializer_list<int> paddings_shape,
                  std::initializer_list<int> paddings,
                  const TensorData& output) {
    // Dropped the stray `this->` for consistency with the rest of the file.
    input_ = AddInput(input);
    paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_shape);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,
                 CreatePadOptions(builder_).Union());
    BuildInterpreter({input.shape});
  }

  // Quantizes `data` into the input tensor.
  void SetQuantizedInput(std::initializer_list<float> data) {
    QuantizeAndPopulate<uint8_t>(input_, data);
  }

  // Overwrites the paddings tensor contents.
  void SetPaddings(std::initializer_list<int> paddings) {
    PopulateTensor<int>(paddings_, paddings);
  }

  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

  // Returns the output dequantized back to float.
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
                               GetScale(output_), GetZeroPoint(output_));
  }

 protected:
  int input_;
  int output_;
  int paddings_;
};
TEST(PadOpConstModel, UInt8SimpleConstTest) {
  // Two quantization steps of slack on a [-1, 1] uint8 range.
  const float kTolerance = 2 / 255.0;
  // Padding is represented as four 2-D lists representing above padding and
  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
  PadOpConstModel model({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, {4, 2},
                        {0, 0, 1, 1, 1, 1, 0, 0},
                        {TensorType_UINT8, {}, -1.0, 1.0});
  model.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
                  kTolerance)));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadOpConstModel, UInt8AdvancedConstTest) {
  const float kTolerance = 2 / 255.0;
  // Asymmetric padding: 2 rows below, 1 column before and 3 after.
  PadOpConstModel model({TensorType_UINT8, {1, 2, 3, 1}, -1.0, 1.0}, {4, 2},
                        {0, 0, 0, 2, 1, 3, 0, 0},
                        {TensorType_UINT8, {}, -1.0, 1.0});
  model.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
                   0, 0,    0,   0,   0, 0, 0, 0, 0,   0,   0,    0, 0, 0},
                  kTolerance)));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
} // namespace tflite

View File

@ -0,0 +1,81 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
class AveragePoolingOpModel : public SingleOpModelWithHexagon {
public:
explicit AveragePoolingOpModel(const TensorData& input, int filter_width,
int filter_height, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_AVERAGE_POOL_2D, BuiltinOptions_Pool2DOptions,
CreatePool2DOptions(builder_, Padding_VALID, /*stride_w=*/2,
/*stride_h=*/2, filter_width,
filter_height, ActivationFunctionType_NONE)
.Union());
BuildInterpreter({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(input_, data);
}
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
private:
int input_;
int output_;
};
// 8x8 window with stride 2 over a 16x8 input (VALID padding) produces a
// 5x1 output: five window averages down the image.
TEST(QuantizedPoolingOpTest, AveragePool) {
  AveragePoolingOpModel m(
      /*input=*/{TensorType_UINT8, {1, 16, 8, 1}, 0, 10},
      /*filter_width=*/8, /*filter_height=*/8,
      /*output=*/{TensorType_UINT8, {}, 0, 10});
  m.SetInput({
      0, 6, 2, 4, 0, 6, 2, 4,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      0, 6, 2, 4, 0, 6, 2, 4,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      0, 6, 2, 4, 0, 6, 2, 4,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      0, 6, 2, 4, 0, 6, 2, 4,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
      0, 6, 2, 4, 0, 6, 2, 4,  //
      0, 6, 2, 4, 0, 6, 2, 4,  //
      0, 6, 2, 4, 0, 6, 2, 4,  //
      3, 2, 10, 7, 3, 2, 10, 7,  //
  });
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {4.58824, 4.58824, 4.90196, 4.58824, 4.27451})));
}
} // namespace tflite

View File

@ -0,0 +1,108 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// TODO(b/148390890): All tests are disabled, enable after fix is available
// and op is enabled.
// Test model for reducer ops (MEAN, SUM, ...): quantized input, constant
// axis input, and a ReducerOptions-controlled keep_dims flag.
class ReduceOpModel : public SingleOpModelWithHexagon {
 public:
  ReduceOpModel(BuiltinOperator type, const TensorData& input,
                const TensorData& output, std::initializer_list<int> axis_shape,
                std::initializer_list<int> axis, bool keep_dims) {
    input_ = AddInput(input);
    // Reduction axes are baked in as a const tensor.
    axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
    output_ = AddOutput(output);
    SetBuiltinOp(type, BuiltinOptions_ReducerOptions,
                 CreateReducerOptions(builder_, keep_dims).Union());
    BuildInterpreter({GetShape(input_)});
  }
  // Index of the input tensor, for populating data.
  int Input() { return input_; }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
  // Returns the output dequantized back to float.
  template <typename T>
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
                         GetZeroPoint(output_));
  }
 private:
  int input_;
  int axis_;
  int output_;
};
TEST(ReduceOpModel, DISABLED_MeanNotKeepDims) {
  const float kTolerance = 2.0 / 255;
  const std::vector<float> input_values = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
  // Mean over axis 2, dropping the reduced dimension.
  ReduceOpModel model(BuiltinOperator_MEAN,
                      {TensorType_UINT8, {1, 1, 3, 2}, -1.0, 1.0},
                      {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {2}, false);
  model.QuantizeAndPopulate<uint8_t>(model.Input(), input_values);
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 2}));
  EXPECT_THAT(model.GetDequantizedOutput<uint8_t>(),
              ElementsAreArray(ArrayFloatNear({0.4, 0.4}, kTolerance)));
}
TEST(ReduceOpModel, DISABLED_MeanKeepDims) {
  const float kTolerance = 2.0 / 255;
  const std::vector<float> input_values = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
  // Mean over axis 3, keeping the reduced dimension as size 1.
  ReduceOpModel model(BuiltinOperator_MEAN,
                      {TensorType_UINT8, {1, 1, 3, 2}, -1.0, 1.0},
                      {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {3}, true);
  model.QuantizeAndPopulate<uint8_t>(model.Input(), input_values);
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 3, 1}));
  EXPECT_THAT(model.GetDequantizedOutput<uint8_t>(),
              ElementsAreArray(ArrayFloatNear({0.3, 0.35, 0.55}, kTolerance)));
}
TEST(ReduceOpModel, DISABLED_SumNotKeepDims) {
  const float kTolerance = 2.0 / 255;
  const std::vector<float> input_values = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
  // Sum over axis 2, dropping the reduced dimension. Expected values are
  // saturated/wrapped into the output's [-1, 1] quantized range.
  ReduceOpModel model(BuiltinOperator_SUM,
                      {TensorType_UINT8, {1, 1, 3, 2}, -1.0, 1.0},
                      {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {2}, false);
  model.QuantizeAndPopulate<uint8_t>(model.Input(), input_values);
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 2}));
  EXPECT_THAT(
      model.GetDequantizedOutput<uint8_t>(),
      ElementsAreArray(ArrayFloatNear({-0.823529, -0.815686}, kTolerance)));
}
TEST(ReduceOpModel, DISABLED_SumKeepDims) {
  const float kTolerance = 2.0 / 255;
  const std::vector<float> input_values = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
  // Sum over axis 3, keeping the reduced dimension as size 1.
  ReduceOpModel model(BuiltinOperator_SUM,
                      {TensorType_UINT8, {1, 1, 3, 2}, -1.0, 1.0},
                      {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {3}, true);
  model.QuantizeAndPopulate<uint8_t>(model.Input(), input_values);
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 3, 1}));
  EXPECT_THAT(model.GetDequantizedOutput<uint8_t>(),
              ElementsAreArray(ArrayFloatNear(
                  {-0.407843, -0.313726, 0.0941177}, kTolerance)));
}
} // namespace tflite

View File

@ -0,0 +1,119 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Wraps a single RESIZE_BILINEAR op so it can be run through the Hexagon
// delegate (via SingleOpModelWithHexagon) and its output compared against
// expectations.
class ResizeBilinearOpModel : public SingleOpModelWithHexagon {
 public:
  // `size_data` holds the target {height, width}; the op consumes it as a
  // constant int32 tensor of shape {2}.
  explicit ResizeBilinearOpModel(const TensorData& input,
                                 std::initializer_list<int> size_data,
                                 const TensorData& output) {
    input_ = AddInput(input);
    size_ = AddConstInput(TensorType_INT32, size_data, {2});
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_RESIZE_BILINEAR,
                 BuiltinOptions_ResizeBilinearOptions,
                 CreateResizeBilinearOptions(builder_).Union());
    BuildInterpreter({GetShape(input_)});
  }

  // Populates the input tensor with raw (non-quantized) values.
  template <typename T>
  void SetInput(std::initializer_list<T> data) {
    PopulateTensor(input_, data);
  }

  // Extracts the raw output tensor.
  template <typename T>
  std::vector<T> GetOutput() {
    return ExtractVector<T>(output_);
  }

  // Quantizes `data` into the uint8 input tensor using its scale/zero-point.
  void SetQuantizedInput(std::initializer_list<float> data) {
    QuantizeAndPopulate<uint8_t>(input_, data);
  }

  // Dequantizes the uint8 output tensor back to floats for comparison.
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
                               GetScale(output_), GetZeroPoint(output_));
  }

  int input() { return input_; }

 private:
  int input_;   // tensor index of the input
  int size_;    // tensor index of the const {height, width} input
  int output_;  // tensor index of the output
};
// Upscales a single 2-pixel row to 3 columns; the delegate's output must
// match bilinear interpolation within one quantization step.
TEST(ResizeBilinearOpTest, HorizontalResizeUInt8) {
  ResizeBilinearOpModel model({TensorType_UINT8, {1, 1, 2, 1}, -2.0, 10},
                              {1, 3}, {TensorType_UINT8, {}, -2.0, 10});
  model.SetQuantizedInput({3, 6});
  model.ApplyDelegateAndInvoke();
  const std::vector<float> expected = {3, 5, 6};
  EXPECT_THAT(model.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(expected, /*max_abs_error=*/1)));
}
// Upscales a single 2-pixel column to 3 rows; the delegate's output must
// match bilinear interpolation within one quantization step.
TEST(ResizeBilinearOpTest, VerticalResizeUInt8) {
  ResizeBilinearOpModel model({TensorType_UINT8, {1, 2, 1, 1}, -2.0, 20},
                              {3, 1}, {TensorType_UINT8, {}, -2.0, 20});
  model.SetQuantizedInput({3, 9});
  model.ApplyDelegateAndInvoke();
  const std::vector<float> expected = {3, 7, 9};
  EXPECT_THAT(model.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(expected, /*max_abs_error=*/1)));
}
// Resizes a 2x2 image with 2 channels up to 3x3; expected values follow
// per-channel bilinear interpolation, allowing error of one quantum.
TEST(ResizeBilinearOpTest, ThreeDimensionalResizeUInt8) {
  ResizeBilinearOpModel m({TensorType_UINT8, {1, 2, 2, 2}, -2, 30}, {3, 3},
                          {TensorType_UINT8, {}, -2.0, 30.0});
  m.SetQuantizedInput({
      3, 4, 6, 10,    //
      10, 12, 14, 16,  //
  });
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetDequantizedOutput(), ElementsAreArray(ArrayFloatNear(
                                            {
                                                3, 4, 5, 8, 6, 10,     //
                                                7, 9, 10, 12, 11, 14,  //
                                                10, 12, 12, 14, 14, 16,  //
                                            },
                                            /*max_abs_error=*/1)));
}
// Resizes two independent 2x2 single-channel images (batch of 2) to 3x3;
// each batch must be interpolated independently.
TEST(ResizeBilinearOpTest, TwoDimensionalResizeWithTwoBatchesUInt8) {
  ResizeBilinearOpModel m({TensorType_UINT8, {2, 2, 2, 1}, -2, 30}, {3, 3},
                          {TensorType_UINT8, {}, -2.0, 30.0});
  m.SetQuantizedInput({
      3, 6,   //
      9, 12,  //
      4, 10,  //
      12, 16  //
  });
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(m.GetDequantizedOutput(), ElementsAreArray(ArrayFloatNear(
                                            {
                                                3, 5, 6,     //
                                                7, 9, 10,    //
                                                9, 11, 12,   //
                                                4, 8, 10,    //
                                                9, 12, 14,   //
                                                12, 14, 16,  //
                                            },
                                            /*max_abs_error=*/1)));
}
} // namespace tflite

View File

@ -0,0 +1,59 @@
#!/bin/bash
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Builds all hexagon-delegate unit tests, pushes them (plus the hexagon
# interface and skeleton libraries) to a connected device, and runs each one.
# Usage: run_tests.sh <path containing libhexagon_nn_skel*.so>
# Exits non-zero as soon as one test binary reports a failure.
set -e

on_device_dir="/data/local/tmp/hexagon_delegate_test/"

if [[ "$1" == "" ]]; then
  echo "Usage: $0 <path containing libhexagon_nn_skel*.so>"
  exit 1
fi
hexagon_libs_path="$1"
# Intentionally unquoted below so the glob expands to all skeleton libs.
hexagon_libs_pattern="${hexagon_libs_path}/libhexagon_nn_skel*"

adb shell rm -rf "${on_device_dir}"
adb shell mkdir -p "${on_device_dir}"

bazel --bazelrc=/dev/null build -c opt --config=android_arm64 //tensorflow/lite/experimental/delegates/hexagon/builders/tests:all
bazel --bazelrc=/dev/null build -c opt --config=android_arm64 //tensorflow/lite/experimental/delegates/hexagon/hexagon_nn:libhexagon_interface.so

adb push bazel-bin/tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/libhexagon_interface.so "${on_device_dir}"
adb push ${hexagon_libs_pattern} "${on_device_dir}"

for test_binary in bazel-bin/tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_*_test; do
  echo "Copying ${test_binary}"
  adb push "${test_binary}" "${on_device_dir}"
  binary_name=$(basename "${test_binary}")
  run_command="${on_device_dir}${binary_name}"
  echo "Running ${run_command}"
  result=$(adb shell 'LD_LIBRARY_PATH=/data/local/tmp/hexagon_delegate_test:${LD_LIBRARY_PATH} '"${run_command}")
  echo 'Output: '
  echo "${result}"
  # gtest prints its verdict on the last line; look for "FAILED" there.
  last_line=$(printf '%s\n' "${result}" | tail -n 1)
  echo "${last_line}"
  if [[ "${last_line}" == *"FAILED"* ]]; then
    echo "TEST FAILED"
    # Bug fix: a bare `exit` here returned status 0, so callers/CI saw a
    # failed run as success. Exit non-zero explicitly.
    exit 1
  fi
done
echo 'ALL TESTS PASSED -- Yay!!'

View File

@ -0,0 +1,69 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Wraps a single SPACE_TO_DEPTH or DEPTH_TO_SPACE op (selected by `type`)
// for running through the Hexagon delegate.
class SpaceToDepthOpModel : public SingleOpModelWithHexagon {
 public:
  // NOTE: the same TensorData is used for input and output, so both share
  // quantization params; the output's true shape is presumably recomputed by
  // the op at Invoke time -- confirm against SingleOpModel behavior.
  SpaceToDepthOpModel(const TensorData& tensor_data, int block_size,
                      BuiltinOperator type) {
    input_ = AddInput(tensor_data);
    output_ = AddOutput(tensor_data);
    if (type == BuiltinOperator_SPACE_TO_DEPTH) {
      SetBuiltinOp(BuiltinOperator_SPACE_TO_DEPTH,
                   BuiltinOptions_SpaceToDepthOptions,
                   CreateSpaceToDepthOptions(builder_, block_size).Union());
    } else {
      // Any other value selects DEPTH_TO_SPACE.
      SetBuiltinOp(BuiltinOperator_DEPTH_TO_SPACE,
                   BuiltinOptions_DepthToSpaceOptions,
                   CreateDepthToSpaceOptions(builder_, block_size).Union());
    }
    BuildInterpreter({GetShape(input_)});
  }

  // Populates the uint8 input tensor with raw values.
  void SetInput(const std::vector<uint8_t>& data) {
    PopulateTensor<uint8_t>(input_, data);
  }

  // Raw uint8 output values.
  std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }

  // Shape of the output tensor after Invoke.
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 private:
  int input_;   // tensor index of the input
  int output_;  // tensor index of the output
};
// A 2x2 spatial block with one channel collapses into a single position with
// depth 4; values pass through unchanged.
TEST(SpaceToDepthOpModel, SpaceToDepth) {
  SpaceToDepthOpModel model({TensorType_UINT8, {1, 2, 2, 1}, -5, 5},
                            /*block_size=*/2, BuiltinOperator_SPACE_TO_DEPTH);
  model.SetInput({1, 2, 3, 4});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
  EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 4}));
}
// Depth 4 expands into a 2x2 spatial block per position; with block size 2
// the {1,1,2,4} input becomes {1,2,4,1} with values interleaved accordingly.
TEST(SpaceToDepthOpModel, DepthToSpace) {
  SpaceToDepthOpModel model({TensorType_UINT8, {1, 1, 2, 4}, -8, 8},
                            /*block_size=*/2, BuiltinOperator_DEPTH_TO_SPACE);
  model.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4, 1}));
  EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 5, 6, 3, 4, 7, 8}));
}
} // namespace tflite

View File

@ -0,0 +1,130 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Wraps a single SPLIT op with `num_splits` outputs, all sharing `output`'s
// TensorData. Note the split axis is the op's *first* input (a const int32
// tensor of shape {1}); the data tensor comes second.
class SplitOpModel : public SingleOpModelWithHexagon {
 public:
  explicit SplitOpModel(const TensorData& input, const TensorData& output,
                        int num_splits, int axis) {
    axis_ = AddConstInput(TensorType_INT32, {axis}, {1});
    input_ = AddInput(input);
    for (int i = 0; i < num_splits; ++i) {
      outputs_.push_back(AddOutput(output));
    }
    SetBuiltinOp(BuiltinOperator_SPLIT, BuiltinOptions_SplitOptions,
                 CreateSplitOptions(builder_, num_splits).Union());
    // First entry is empty because the axis input is constant.
    BuildInterpreter({{}, GetShape(input_)});
  }

  // Quantizes `data` into the input tensor.
  template <typename T>
  void SetInput(const std::vector<float>& data) {
    QuantizeAndPopulate<T>(input_, data);
  }

  // Dequantized values of the idx-th output tensor.
  template <typename T>
  std::vector<float> GetDequantizedOutput(int idx) {
    return Dequantize<T>(ExtractVector<T>(outputs_[idx]),
                         GetScale(outputs_[idx]), GetZeroPoint(outputs_[idx]));
  }

  // Shape of the i-th output tensor after Invoke.
  std::vector<int> GetOutputShape(int i) { return GetTensorShape(outputs_[i]); }

 private:
  int input_;                 // tensor index of the data input
  int axis_;                  // tensor index of the const axis input
  std::vector<int> outputs_;  // tensor indices of the split outputs
};
void CheckSplitBehavior(
int axis, int num_splits, std::initializer_list<int> input_shape,
std::initializer_list<int> output_shape,
const std::initializer_list<float>& input_data,
const std::vector<std::initializer_list<float>>& output_data) {
auto debug = [&](int i) {
std::stringstream ss;
ss << "for output tensor " << i << " axis=" << axis
<< " and num_splits=" << num_splits;
return ss.str();
};
const float kMin = std::min({0.0f, std::min(input_data)});
const float kMax = std::max(input_data);
SplitOpModel const_m({TensorType_UINT8, input_shape, kMin, kMax},
{TensorType_UINT8, output_shape, kMin, kMax}, num_splits,
axis);
const_m.SetInput<uint8_t>(input_data);
const_m.ApplyDelegateAndInvoke();
for (int i = 0; i < num_splits; ++i) {
EXPECT_THAT(
const_m.GetDequantizedOutput<uint8_t>(i),
ElementsAreArray(ArrayFloatNear(output_data[i], /*tolerance=*/0.1)))
<< debug(i);
EXPECT_THAT(const_m.GetOutputShape(i), ElementsAreArray(output_shape))
<< debug(i);
}
}
// Splits a {2,2,2,2} tensor in half along each of the four axes in turn and
// checks the resulting partitions.
TEST(SplitOpModel, CheckFourDimSplits) {
  CheckSplitBehavior(
      /*axis=*/0, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
      {
          {1, 2, 3, 4, 5, 6, 7, 8},
          {9, 10, 11, 12, 13, 14, 15, 16},
      });
  CheckSplitBehavior(
      /*axis=*/1, /*num_splits=*/2, {2, 2, 2, 2}, {2, 1, 2, 2},
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
      {
          {1, 2, 3, 4, 9, 10, 11, 12},
          {5, 6, 7, 8, 13, 14, 15, 16},
      });
  CheckSplitBehavior(
      /*axis=*/2, /*num_splits=*/2, {2, 2, 2, 2}, {2, 2, 1, 2},
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
      {
          {1, 2, 5, 6, 9, 10, 13, 14},
          {3, 4, 7, 8, 11, 12, 15, 16},
      });
  CheckSplitBehavior(
      /*axis=*/3, /*num_splits=*/2, {2, 2, 2, 2}, {2, 2, 2, 1},
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
      {
          {1, 3, 5, 7, 9, 11, 13, 15},
          {2, 4, 6, 8, 10, 12, 14, 16},
      });
}
// Splits a 1-D tensor of 8 elements into 8 single-element outputs.
TEST(SplitOpModel, CheckOneDimensionalSplit) {
  CheckSplitBehavior(/*axis=*/0, /*num_splits=*/8, {8}, {1},
                     {1, 2, 3, 4, 5, 6, 7, 8},
                     {{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}});
}
// A negative axis counts from the end: -4 on a 4-D tensor is axis 0, so this
// must match the axis-0 split above.
TEST(SplitOpModel, CheckNegativeAxisSplit) {
  CheckSplitBehavior(
      /*axis=*/-4, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
      {
          {1, 2, 3, 4, 5, 6, 7, 8},
          {9, 10, 11, 12, 13, 14, 15, 16},
      });
}
} // namespace tflite

View File

@ -0,0 +1,25 @@
"""Rules for generating unit-tests using hexagon delegates."""

def hexagon_op_tests(
        srcs = [],
        deps = []):
    """Create separate unit test targets for each test file in 'srcs'.

    Each source "foo_test.cc" produces a cc_test target named
    "hexagon_foo_test".

    Args:
      srcs: list of test files, separate target will be created for each item in the list.
      deps: Dependencies will be added to all test targets.
    """
    for src in srcs:
        if not src.endswith(".cc"):
            fail("hexagon_op_tests: expected a '.cc' source file, got '%s'" % src)

        # Strip only the trailing ".cc". (The previous src.split(".cc")[0]
        # would also truncate at any interior ".cc" occurrence.)
        base_name = src[:-len(".cc")]
        native.cc_test(
            name = "hexagon_" + base_name,
            srcs = [src],
            deps = deps,
            linkstatic = 1,
            tags = [
                "no_oss",
                "nobuilder",
                "notap",
            ],
        )

View File

@ -0,0 +1,133 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Wraps a single TRANSPOSE_CONV op with a constant output-shape input and a
// constant uint8 filter, for running through the Hexagon delegate.
class QuantizedTransposeConvOpModel : public SingleOpModelWithHexagon {
 public:
  QuantizedTransposeConvOpModel(std::initializer_list<int> output_shape_data,
                                const TensorData& filter,
                                std::initializer_list<uint8_t> filter_data,
                                const TensorData& input,
                                const TensorData& output, Padding padding,
                                int stride_w, int stride_h) {
    // Just to be confusing, transpose_conv has an _input_ named "output_shape"
    // that sets the shape of the output tensor of the op :). It must always be
    // an int32 1D four element tensor.
    output_shape_ = AddConstInput(TensorType_INT32, output_shape_data, {4});
    // `filter_data` holds already-quantized uint8 weights.
    filter_ = AddConstInput(filter, filter_data);
    input_ = AddInput(input);
    output_ = AddOutput(output);
    SetBuiltinOp(
        BuiltinOperator_TRANSPOSE_CONV, BuiltinOptions_TransposeConvOptions,
        CreateTransposeConvOptions(builder_, padding, stride_w, stride_h)
            .Union());
    BuildInterpreter(
        {GetShape(output_shape_), GetShape(filter_), GetShape(input_)});
  }

  // Quantizes `data` into the uint8 input tensor.
  void SetInput(std::initializer_list<float> data) {
    QuantizeAndPopulate<uint8_t>(input_, data);
  }

  // Dequantizes the uint8 output tensor back to floats for comparison.
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
                               GetScale(output_), GetZeroPoint(output_));
  }

  // Shape of the output tensor after Invoke.
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 protected:
  int output_shape_;  // tensor index of the const "output_shape" input
  int filter_;        // tensor index of the const filter
  int input_;         // tensor index of the data input
  int output_;        // tensor index of the output
};
// 3x3 single-channel filter over a 4x4 input with SAME padding and stride 1;
// output shape is pinned to {1, 4, 4, 1} by the const output-shape input.
TEST(QuantizedTransposeConvOpModel, SimpleTestQuantized) {
  // Float would be {1, 2, 3, 4, 5, 6, 7, 8, 9}
  std::initializer_list<uint8_t> filter_data = {129, 131, 133, 135, 137,
                                                139, 141, 143, 145};
  QuantizedTransposeConvOpModel model(
      {1, 4, 4, 1}, {TensorType_UINT8, {1, 3, 3, 1}, -63.5, 64}, filter_data,
      {TensorType_UINT8, {1, 4, 4, 1}, -63.5, 64},
      {TensorType_UINT8, {}, -508, 512}, Padding_SAME, 1, 1);
  model.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(
      model.GetDequantizedOutput(),
      ElementsAreArray(ArrayFloatNear({28, 64, 84, 76, 100, 192, 236, 200, 208,
                                       372, 416, 332, 264, 448, 484, 364},
                                      1e-5)));
  // GetOutputShape() should always be same as model.SetOutputShape(...);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
// 3x3 filter with 2 input channels, VALID padding: a 4x4 input expands to the
// full 6x6 output (no cropping).
TEST(QuantizedTransposeConvOpModel, PaddingValidTestQuantized) {
  // Float would be {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
  // 18}
  std::initializer_list<uint8_t> filter_data = {129, 131, 133, 135, 137, 139,
                                                141, 143, 145, 147, 149, 151,
                                                153, 155, 157, 159, 161, 163};
  QuantizedTransposeConvOpModel model(
      {1, 6, 6, 1}, {TensorType_UINT8, {1, 3, 3, 2}, -63.5, 64}, filter_data,
      {TensorType_UINT8, {1, 4, 4, 2}, -63.5, 64},
      {TensorType_UINT8, {}, -4064, 4096}, Padding_VALID, 1, 1);
  model.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                  12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                  23, 24, 25, 26, 27, 28, 29, 30, 31, 32});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {0, 32, 64, 96, 128, 96, 64, 192, 416,
                   576, 544, 352, 224, 672, 1344, 1696, 1440, 864,
                   608, 1504, 2720, 3072, 2432, 1440, 864, 1984, 3360,
                   3648, 2752, 1536, 704, 1536, 2528, 2720, 2016, 1088},
                  1e-5)));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 6, 6, 1}));
}
// Same two-channel filter as the VALID test, but with SAME padding so the
// output stays at the input's 4x4 spatial size.
TEST(QuantizedTransposeConvOpModel, TwoFiltersTestQuantized) {
  // Float would be {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
  // 18}
  std::initializer_list<uint8_t> filter_data = {129, 131, 133, 135, 137, 139,
                                                141, 143, 145, 147, 149, 151,
                                                153, 155, 157, 159, 161, 163};
  QuantizedTransposeConvOpModel model(
      {1, 4, 4, 1}, {TensorType_UINT8, {1, 3, 3, 2}, -63.5, 64}, filter_data,
      {TensorType_UINT8, {1, 4, 4, 2}, -63.5, 64},
      {TensorType_UINT8, {}, -4064, 4096}, Padding_SAME, 1, 1);
  model.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                  12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                  23, 24, 25, 26, 27, 28, 29, 30, 31, 32});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {192, 416, 576, 544, 672, 1344, 1696, 1440, 1504, 2720, 3072,
                   2432, 1984, 3360, 3648, 2752},
                  1e-5)));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
} // namespace tflite

View File

@ -0,0 +1,124 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
// Wraps a single TRANSPOSE op. The permutation can be supplied either as a
// constant tensor (const_perm=true) or as a regular input populated after
// interpreter construction.
class TransposeOpModel : public SingleOpModelWithHexagon {
 public:
  TransposeOpModel(const TensorData& input,
                   std::initializer_list<int> perm_shape,
                   std::initializer_list<int> perm, bool const_perm,
                   const TensorData& output) {
    input_ = AddInput(input);
    if (const_perm) {
      perm_ = AddConstInput(TensorType_INT32, perm, perm_shape);
    } else {
      perm_ = AddInput({TensorType_INT32, perm_shape});
    }
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_TRANSPOSE, BuiltinOptions_TransposeOptions,
                 CreateTransposeOptions(builder_).Union());
    // Only the data input's shape is given here; a non-const perm tensor is
    // filled in below once the interpreter exists.
    BuildInterpreter({GetShape(input_)});
    if (!const_perm) {
      PopulateTensor<int32_t>(perm_, perm);
    }
  }

  // Populates the uint8 input tensor with raw values.
  void SetInput(const std::vector<uint8_t>& data) {
    PopulateTensor<uint8_t>(input_, data);
  }

  // Raw uint8 output values.
  std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }

 protected:
  int input_;   // tensor index of the data input
  int perm_;    // tensor index of the permutation input
  int output_;  // tensor index of the output
};
// Fills *input with a deterministic ramp (0, 1, 2, ... truncated to uint8)
// sized for `shape`, then computes the reference (CPU) transpose of it under
// `perms` into *input_transposed. Both vectors are resized as needed; the
// result serves as ground truth for the delegate's output.
void ComputeExpectedTransposeResult(const std::vector<int>& shape,
                                    const std::vector<int>& perms,
                                    std::vector<uint8_t>* input,
                                    std::vector<uint8_t>* input_transposed) {
  // Count elements and allocate input/output buffers.
  int count = 1;
  for (int dim : shape) count *= dim;
  input_transposed->resize(count);

  // Create the dummy data. Cast made explicit: values wrap modulo 256 once
  // count exceeds the uint8 range.
  input->resize(count);
  for (int i = 0; i < count; ++i) {
    (*input)[i] = static_cast<uint8_t>(i);
  }

  // Make input and output shapes, and the transpose parameters. The two
  // original per-perm loops were independent and are merged here; casts fix
  // the signed/unsigned comparisons against perms.size().
  const RuntimeShape input_shape = ::tflite::GetTensorShape(shape);
  const int num_perms = static_cast<int>(perms.size());
  RuntimeShape output_shape(perms.size());
  TransposeParams params;
  params.perm_count = num_perms;
  for (int i = 0; i < num_perms; ++i) {
    output_shape.SetDim(i, input_shape.Dims(perms[i]));
    params.perm[i] = perms[i];
  }

  // Portable reference kernel provides the expected values.
  reference_ops::Transpose<uint8_t>(params, input_shape, input->data(),
                                    output_shape, input_transposed->data());
}
TEST(TransposeOpTest, Test1D) {
  // Basic 1D identity: permutation {0} leaves the vector unchanged.
  std::vector<uint8_t> expected_output, input;
  std::vector<int> input_shape = {3};
  ComputeExpectedTransposeResult(input_shape, {0}, &input, &expected_output);
  TransposeOpModel model({TensorType_UINT8, input_shape, -10, 10}, {1}, {0},
                         true, {TensorType_UINT8, {}, -10, 10});
  model.SetInput(input);
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_output));
}
// 2-D matrix transpose ({3,2} -> {2,3}) checked against the reference kernel.
TEST(TransposeOpTest, Test2D) {
  std::vector<uint8_t> expected_output, input;
  std::vector<int> input_shape = {3, 2};
  std::vector<int> perm = {1, 0};
  ComputeExpectedTransposeResult(input_shape, perm, &input, &expected_output);
  TransposeOpModel model({TensorType_UINT8, input_shape, -10, 10}, {2}, {1, 0},
                         true, {TensorType_UINT8, {}, -10, 10});
  model.SetInput(input);
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_output));
}
// 4-D permutation {3,0,1,2} (channels-last to channels-first style) checked
// against the reference kernel.
TEST(TransposeOpTest, Test4D) {
  std::vector<uint8_t> expected_output, input;
  std::vector<int> input_shape = {2, 2, 3, 1};
  std::vector<int> perm = {3, 0, 1, 2};
  ComputeExpectedTransposeResult(input_shape, perm, &input, &expected_output);
  TransposeOpModel model({TensorType_UINT8, input_shape, -10, 10}, {4},
                         {3, 0, 1, 2}, true, {TensorType_UINT8, {}, -10, 10});
  model.SetInput(input);
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_output));
}
} // namespace tflite