Hexagon Delegate:

Add Pack op support for int8/uint8 tensors to the Hexagon delegate.

PiperOrigin-RevId: 312542489
Change-Id: I26c101a6e888a2ad918093761b0eb055d3aae7f8
This commit is contained in:
Karim Nosir 2020-05-20 13:20:25 -07:00 committed by TensorFlower Gardener
parent 236b503131
commit e510776645
9 changed files with 322 additions and 0 deletions

View File

@ -86,6 +86,7 @@ are verified in `IsNodeSupportedByHexagon`:
* MirrorPad
* Mul (without any activation) (b/129276536)
* Neg
* Pack
* Pad: Only supports 0 padding (b/139277813)
* Quantize (8-bit inputs & outputs only)
* Relu

View File

@ -23,6 +23,7 @@ cc_library(
"mirror_pad_builder.cc",
"neg_op_builder.cc",
"op_builder.cc",
"pack_builder.cc",
"pad_builder.cc",
"pool_2d_builder.cc",
"quantize_builder.cc",
@ -52,6 +53,7 @@ cc_library(
"mirror_pad_builder.h",
"neg_op_builder.h",
"op_builder.h",
"pack_builder.h",
"pad_builder.h",
"pool_2d_builder.h",
"quantize_builder.h",

View File

@ -99,6 +99,8 @@ OpBuilder* GraphBuilder::CreateOpBuilderFromTfLiteOp(int op_type) {
return CreateMinMaxBuilder(this, OP_QuantizedMaximum_8);
case kTfLiteBuiltinSlice:
return CreateSliceOpBuilder(this, OP_QuantizedSlice_8);
case kTfLiteBuiltinPack:
return CreatePackBuilder(this, OP_QuantizedPack_8);
default:
context_->ReportError(context_, "Op not supported: %d", op_type);
return nullptr;

View File

@ -56,6 +56,7 @@ OpBuilder* CreateHardSwishBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateCastBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateMinMaxBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateSliceOpBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreatePackBuilder(GraphBuilder* graph_builder, int op_type);
} // namespace hexagon
} // namespace delegates

View File

@ -0,0 +1,134 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/pack_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
namespace {
// Maps a TFLite Pack `axis` onto the axis used in Hexagon's fixed rank-4
// tensor representation. Negative axis values index from the end of the
// (input_rank + 1)-dimensional packed output.
int GetAxis(int axis, const TfLiteIntArray* inputs, TfLiteContext* context) {
  const auto& first_input = context->tensors[inputs->data[0]];
  const int input_rank = first_input.dims->size;
  // Resolve a negative axis against the output rank (input_rank + 1).
  const int positive_axis = (axis < 0) ? axis + input_rank + 1 : axis;
  // Hexagon tensors are always rank 4, so shift the axis accordingly.
  return (4 - input_rank) + positive_axis - 1;
}
} // namespace
// Builds the Hexagon subgraph for TFLite Pack: a quantized pack node fed by
// (axis, all input tensors, per-input minima, per-input maxima), followed by
// an OP_Requantize_8to8 node that rescales the packed result into the output
// tensor's quantization range. The AddInput ordering below is the Hexagon
// op's input contract and must not be changed.
TfLiteStatus PackOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
                                             const TfLiteIntArray* outputs,
                                             TfLiteContext* context) {
  // Rank-4 shape shared by every scalar const node (axis, mins, maxes).
  static int scalar_shape[] = {1, 1, 1, 1};
  auto* params = reinterpret_cast<TfLitePackParams*>(builtin_data_);
  // Convert the (possibly negative) TFLite axis to the rank-4 Hexagon axis.
  int axis = GetAxis(params->axis, inputs, context);
  // Add axis
  // NOTE(review): this passes the address of the stack-local `axis`; assumes
  // AddConstNodeWithData copies the bytes immediately — confirm.
  auto* axis_node = graph_builder_->AddConstNodeWithData(
      scalar_shape, reinterpret_cast<char*>(&axis), sizeof(axis));
  AddInput(TensorID(axis_node->GetID(), 0));
  // Add all input tensors.
  minima_.reserve(inputs->size);
  maxima_.reserve(inputs->size);
  int tensor_id = -1;
  float data_min, data_max;
  for (int i = 0; i < inputs->size; ++i) {
    tensor_id = inputs->data[i];
    auto& input_tensor = context->tensors[tensor_id];
    AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
    TF_LITE_ENSURE_STATUS(
        ComputeMinAndMaxQuantValues(input_tensor, &data_min, &data_max));
    // Kept in member vectors so the values stay alive: the const nodes
    // created below take the address of these vector elements.
    minima_.push_back(data_min);
    maxima_.push_back(data_max);
  }
  // Minima tensors.
  for (int i = 0; i < minima_.size(); ++i) {
    auto* data_min_const = graph_builder_->AddConstNodeWithData(
        scalar_shape, reinterpret_cast<char*>(&minima_[i]), sizeof(minima_[i]));
    AddInput(TensorID(data_min_const->GetID(), 0));
  }
  // Maxima tensors.
  for (int i = 0; i < maxima_.size(); ++i) {
    auto* data_max_const = graph_builder_->AddConstNodeWithData(
        scalar_shape, reinterpret_cast<char*>(&maxima_[i]), sizeof(maxima_[i]));
    AddInput(TensorID(data_max_const->GetID(), 0));
  }
  // Hexagon outputs for this node.
  int output_batch_size, output_height_size, output_width_size,
      output_depth_size;
  GetDims(&output_batch_size, &output_height_size, &output_width_size,
          &output_depth_size, context->tensors[outputs->data[0]].dims);
  // Raw packed output (8-bit element size) plus the min/max the pack op
  // reports for it; these feed the requantize node below.
  TensorID pack_out = AddOutput(sizeof(uint8_t), 4,
                                {output_batch_size, output_height_size,
                                 output_width_size, output_depth_size});
  // Output min/max for requantization.
  float output_min, output_max;
  TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
      context->tensors[outputs->data[0]], &output_min, &output_max));
  auto* output_min_const = graph_builder_->AddConstNodeWithData(
      scalar_shape, reinterpret_cast<char*>(&output_min), sizeof(output_min));
  auto* output_max_const = graph_builder_->AddConstNodeWithData(
      scalar_shape, reinterpret_cast<char*>(&output_max), sizeof(output_max));
  const auto& pack_out_min = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
  const auto& pack_out_max = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
  // Requantize output to the expected min/max.
  auto* requantize_op = graph_builder_->AddNode(GetTFLiteNodeID());
  requantize_op->SetOpType(OP_Requantize_8to8);
  requantize_op->AddInput(pack_out);
  requantize_op->AddInput(pack_out_min);
  requantize_op->AddInput(pack_out_max);
  requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
  requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
  // The requantized tensor is the value RegisterOutputs exposes to TFLite.
  node_output_ =
      requantize_op->AddOutput(sizeof(uint8_t), 4,
                               {output_batch_size, output_height_size,
                                output_width_size, output_depth_size});
  // Requantize also emits its own min/max outputs; they are unused here.
  requantize_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
  requantize_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
  return kTfLiteOk;
}
TfLiteStatus PackOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
                                            TfLiteContext* context) {
  // Pack produces exactly one output tensor; map it to the requantized
  // Hexagon output recorded by PopulateSubGraph.
  const int output_tensor_id = outputs->data[0];
  graph_builder_->AddTensorWithID(output_tensor_id, node_output_.first,
                                  node_output_.second);
  return kTfLiteOk;
}
// Factory hook used by GraphBuilder::CreateOpBuilderFromTfLiteOp; the caller
// takes ownership of the returned builder.
OpBuilder* CreatePackBuilder(GraphBuilder* graph_builder, int op_type) {
  auto* pack_builder = new PackOpBuilder(graph_builder, op_type);
  return pack_builder;
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,46 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_PACK_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_PACK_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
// Builds a Hexagon subgraph for the TFLite Pack op (8-bit inputs only, per
// the delegate's supported-op check). Pack is lowered to a quantized pack
// node followed by a requantize node so the result matches the TFLite output
// tensor's quantization parameters.
class PackOpBuilder : public OpBuilder {
 public:
  explicit PackOpBuilder(GraphBuilder* graph_builder, int op_type)
      : OpBuilder(graph_builder, op_type) {}

  // Adds the axis const node, all input tensors, their per-input min/max
  // const nodes, and the trailing requantize node.
  TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
                                const TfLiteIntArray* outputs,
                                TfLiteContext* context) override;

  // Maps the single TFLite output tensor to the requantized Hexagon output.
  TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
                               TfLiteContext* context) override;

 private:
  // Hexagon tensor holding the requantized packed result.
  TensorID node_output_;
  // Min/max for all inputs.
  std::vector<float> minima_, maxima_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_PACK_BUILDER_H_

View File

@ -34,6 +34,7 @@ hexagon_op_tests(
"mirror_pad_test.cc",
"mul_test.cc",
"neg_test.cc",
"pack_test.cc",
"pad_test.cc",
"pool_test.cc",
"quantize_test.cc",

View File

@ -0,0 +1,125 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
namespace tflite {
using testing::ElementsAreArray;
class PackOpModel : public SingleOpModelWithHexagon {
public:
PackOpModel(const TensorData& input_template, int axis, int values_count) {
std::vector<std::vector<int>> all_input_shapes;
for (int i = 0; i < values_count; ++i) {
all_input_shapes.push_back(input_template.shape);
AddInput(input_template);
}
output_ = AddOutput({input_template.type, /*shape=*/{}, input_template.min,
input_template.max});
SetBuiltinOp(BuiltinOperator_PACK, BuiltinOptions_PackOptions,
CreatePackOptions(builder_, values_count, axis).Union());
BuildInterpreter(all_input_shapes);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename integer_type>
void SetInput(int index, std::initializer_list<float> data) {
QuantizeAndPopulate<integer_type>(index, data);
}
template <typename integer_type>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_type>(ExtractVector<integer_type>(output_),
GetScale(output_), GetZeroPoint(output_));
}
private:
int output_;
};
// Typed test fixture: InputType selects the quantized storage type under
// test and TENSOR_TYPE is the matching TFLite tensor type.
template <typename InputType>
struct PackOpTest : public ::testing::Test {
  using TypeToTest = InputType;
  // The int16 branch is present for completeness but is not instantiated by
  // TestTypes below.
  TensorType TENSOR_TYPE =
      (std::is_same<InputType, int16_t>::value
           ? TensorType_INT16
           : (std::is_same<InputType, uint8_t>::value ? TensorType_UINT8
                                                      : TensorType_INT8));
};

// The Hexagon delegate's Pack support covers only 8-bit tensors.
using TestTypes = testing::Types<int8_t, uint8_t>;
TYPED_TEST_CASE(PackOpTest, TestTypes);
// Packs three 1-D tensors along axis 0 and checks the delegate output
// against the reference (CPU) output within quantization tolerance.
TYPED_TEST(PackOpTest, ThreeInputs) {
  PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, 0, 3);
  model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
  model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
  model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
  model.Invoke();
  // Capture reference results computed on CPU before applying the delegate.
  auto ref_output_shape = model.GetOutputShape();
  auto ref_output =
      model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
  EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
              ElementsAreArray(ArrayFloatNear(ref_output)));
}
// Same as ThreeInputs, but packs along axis 1 instead of axis 0.
TYPED_TEST(PackOpTest, ThreeInputsDifferentAxis) {
  PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, 1, 3);
  model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
  model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
  model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
  model.Invoke();
  // Capture reference results computed on CPU before applying the delegate.
  auto ref_output_shape = model.GetOutputShape();
  auto ref_output =
      model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
  EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
              ElementsAreArray(ArrayFloatNear(ref_output)));
}
// Exercises negative-axis handling: axis -1 packs along the last dimension
// of the output.
TYPED_TEST(PackOpTest, ThreeInputsNegativeAxis) {
  PackOpModel model({TestFixture::TENSOR_TYPE, {2}, -10, 10}, -1, 3);
  model.SetInput<typename TestFixture::TypeToTest>(0, {1, 4});
  model.SetInput<typename TestFixture::TypeToTest>(1, {2, 5});
  model.SetInput<typename TestFixture::TypeToTest>(2, {3, 6});
  model.Invoke();
  // Capture reference results computed on CPU before applying the delegate.
  auto ref_output_shape = model.GetOutputShape();
  auto ref_output =
      model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
  EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
              ElementsAreArray(ArrayFloatNear(ref_output)));
}
// Packs two 2x3 tensors along axis 1 and checks the delegate output against
// the reference (CPU) output within quantization tolerance.
// Fixed misspelled test name: "MultilDimensions" -> "MultiDimensions".
TYPED_TEST(PackOpTest, MultiDimensions) {
  PackOpModel model({TestFixture::TENSOR_TYPE, {2, 3}, -10, 20}, 1, 2);
  model.SetInput<typename TestFixture::TypeToTest>(0, {1, 2, 3, 4, 5, 6});
  model.SetInput<typename TestFixture::TypeToTest>(1, {7, 8, 9, 10, 11, 12});
  model.Invoke();
  // Capture reference results computed on CPU before applying the delegate.
  auto ref_output_shape = model.GetOutputShape();
  auto ref_output =
      model.GetDequantizedOutput<typename TestFixture::TypeToTest>();
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(ref_output_shape));
  EXPECT_THAT(model.GetDequantizedOutput<typename TestFixture::TypeToTest>(),
              ElementsAreArray(ArrayFloatNear(ref_output)));
}
} // namespace tflite

View File

@ -87,6 +87,7 @@ bool CheckOpVersion(const TfLiteRegistration* registration) {
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinMirrorPad:
case kTfLiteBuiltinMul:
case kTfLiteBuiltinPack:
case kTfLiteBuiltinPad:
case kTfLiteBuiltinQuantize:
case kTfLiteBuiltinRelu6:
@ -398,6 +399,15 @@ bool IsNodeSupportedByHexagon(const TfLiteRegistration* registration,
{kTfLiteInt32, kTfLiteInt64},
{kTfLiteInt32, kTfLiteInt64}});
}
case kTfLiteBuiltinPack: {
// All tensors must be 8-bit.
for (int i = 0; i < node->inputs->size; ++i) {
if (!TensorTypeMatch(node->inputs->data[i], context, kTfLiteUInt8) &&
!TensorTypeMatch(node->inputs->data[i], context, kTfLiteInt8))
return false;
}
return true;
}
default:
return false;
}