Add topK_v2 support to NN API delegate

PiperOrigin-RevId: 252994468
Author: A. Unique TensorFlower (2019-06-13 03:01:22 -07:00)
Committed by: TensorFlower Gardener
Parent: 16b680525b
Commit: a2c10678f6
5 changed files with 104 additions and 90 deletions
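
In short: the delegate's Map function now lowers TopKV2 nodes (TFLite op version 1, on Android API 29 / NNAPI 1.2 or newer, and only when the K parameter is a constant int32 tensor) to ANEURALNETWORKS_TOPK_V2. A minimal sketch of exercising the new path, assuming the standard TFLite C++ API of this era; the model path is a placeholder, not a file from the commit:

    #include <memory>

    #include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"

    // Loads a model containing a TOPK_V2 node and lets the NNAPI delegate
    // claim whatever it supports; unsupported nodes keep running on CPU.
    std::unique_ptr<tflite::Interpreter> BuildInterpreterWithNnApi() {
      auto model = tflite::FlatBufferModel::BuildFromFile("topk_model.tflite");
      tflite::ops::builtin::BuiltinOpResolver resolver;
      std::unique_ptr<tflite::Interpreter> interpreter;
      tflite::InterpreterBuilder(*model, resolver)(&interpreter);
      // NnApiDelegate() returns a process-wide delegate instance.
      interpreter->ModifyGraphWithDelegate(tflite::NnApiDelegate());
      interpreter->AllocateTensors();
      return interpreter;
    }

TopKV2 nodes that fail the version, SDK, or constant-K checks simply stay on the CPU kernels, so existing models keep working unchanged.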

File 1 of 5: NNAPI delegate implementation (NNAPIOpBuilder / NNAPIDelegateKernel)

@@ -613,18 +613,18 @@ class NNAPIOpBuilder {
       nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
       break;
     case kTfLiteUInt8:
-      nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
+    case kTfLiteInt8:
+      nn_type = (tensor_type == kTfLiteUInt8)
+                    ? ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
+                    : ANEURALNETWORKS_TENSOR_QUANT8_SYMM;
       scale = tensor->params.scale;
       zeroPoint = tensor->params.zero_point;
       if (scale == 0) {
-        // TENSOR_QUANT8_ASYMM with zero scale is not valid in NNAPI.
+        // TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
+        // with zero scale are not valid in NNAPI.
         scale = 1;
       }
       break;
-    case kTfLiteInt8:
-      nn_type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM;
-      scale = tensor->params.scale;
-      break;
     case kTfLiteInt32:
       nn_type = ANEURALNETWORKS_TENSOR_INT32;
       scale = tensor->params.scale;
@@ -1518,6 +1518,27 @@ class NNAPIDelegateKernel {
           return BasicMappingFn<ANEURALNETWORKS_NOT_EQUAL>;
         }
       } break;
+      case kTfLiteBuiltinTopkV2: {
+        if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12) {
+          const auto& input = context->tensors[node->inputs->data[0]];
+          const auto& k_param = context->tensors[node->inputs->data[1]];
+          if ((input.type == kTfLiteFloat32 || input.type == kTfLiteInt32 ||
+               input.type == kTfLiteInt8) &&
+              (k_param.type == kTfLiteInt32 &&
+               k_param.allocation_type == kTfLiteMmapRo)) {
+            return [](const NNAPIOpMappingArgs& mapping_args)
+                       -> ANeuralNetworksOperationType {
+              const TfLiteTensor& k_param =
+                  mapping_args.context
+                      ->tensors[mapping_args.node->inputs->data[1]];
+              mapping_args.builder->AddScalarInt32Operand(*k_param.data.i32);
+              return ANEURALNETWORKS_TOPK_V2;
+            };
+          } else {
+            return nullptr;
+          }
+        }
+      } break;
       default:
         // All other operators are not mapped.
         return nullptr;
@@ -1938,6 +1959,12 @@ class NNAPIDelegateKernel {
           TF_LITE_ENSURE_STATUS(
               builder.AddTensorInput(input_index, hybrid_op));
         }
+      } else if (reg->builtin_code == kTfLiteBuiltinTopkV2 &&
+                 num_added_inputs > 0) {
+        // The K parameter tensor is not handled here but by the functor
+        // returned by Map; the input tensor is instead added in the else
+        // clause below.
+        continue;
       } else {
         TF_LITE_ENSURE_STATUS(
             builder.AddTensorInput(input_index, hybrid_op, scalar_as_tensor));

File 2 of 5: Bazel build rules for the kernel tests

@@ -996,6 +996,7 @@ cc_test(
     name = "topk_v2_test",
     size = "small",
     srcs = ["topk_v2_test.cc"],
+    tags = ["tflite_nnapi"],
    deps = [
        ":builtin_ops",
        ":test_main",

File 3 of 5: test utilities (GetTensorType)

@@ -577,7 +577,9 @@ TensorType GetTensorType() {
   if (std::is_same<T, float>::value) return TensorType_FLOAT32;
   if (std::is_same<T, TfLiteFloat16>::value) return TensorType_FLOAT16;
   if (std::is_same<T, int32_t>::value) return TensorType_INT32;
+  if (std::is_same<T, int64_t>::value) return TensorType_INT64;
   if (std::is_same<T, uint8_t>::value) return TensorType_UINT8;
+  if (std::is_same<T, int8_t>::value) return TensorType_INT8;
   if (std::is_same<T, string>::value) return TensorType_STRING;
   return TensorType_MIN;  // default value
 }

File 4 of 5: topk_v2_test.cc

@@ -25,61 +25,42 @@ namespace {
 using ::testing::ElementsAreArray;
 
+enum class TestType {
+  CONST = 0,
+  DYNAMIC = 1,
+};
+
+template <typename InputType>
 class TopKV2OpModel : public SingleOpModel {
  public:
-  TopKV2OpModel(std::initializer_list<int> input_shape, TensorType input_type,
-                int top_k) {
-    input_ = AddInput(input_type);
-    top_k_ = AddInput(TensorType_INT32);
-    output_values_ = AddOutput(input_type);
+  TopKV2OpModel(int top_k, std::initializer_list<int> input_shape,
+                std::initializer_list<InputType> input_data,
+                TestType input_tensor_types) {
+    if (input_tensor_types == TestType::DYNAMIC) {
+      input_ = AddInput(GetTensorType<InputType>());
+      top_k_ = AddInput(TensorType_INT32);
+    } else {
+      input_ =
+          AddConstInput(GetTensorType<InputType>(), input_data, input_shape);
+      top_k_ = AddConstInput(TensorType_INT32, {top_k}, {1});
+    }
+    output_values_ = AddOutput(GetTensorType<InputType>());
     output_indexes_ = AddOutput(TensorType_INT32);
     SetBuiltinOp(BuiltinOperator_TOPK_V2, BuiltinOptions_TopKV2Options, 0);
     BuildInterpreter({input_shape, {1}});
-    PopulateTensor<int32_t>(top_k_, {top_k});
-  }
-
-  void SetInputFloat(std::initializer_list<float> data) {
-    PopulateTensor<float>(input_, data);
-  }
-
-  void SetInputUInt8(std::initializer_list<uint8_t> data) {
-    PopulateTensor<uint8_t>(input_, data);
-  }
-
-  void SetInputInt8(std::initializer_list<int8_t> data) {
-    PopulateTensor<int8_t>(input_, data);
-  }
-
-  void SetInputInt32(std::initializer_list<int32_t> data) {
-    PopulateTensor<int32_t>(input_, data);
-  }
-
-  void SetInputInt64(std::initializer_list<int64_t> data) {
-    PopulateTensor<int64_t>(input_, data);
+    if (input_tensor_types == TestType::DYNAMIC) {
+      PopulateTensor<InputType>(input_, input_data);
+      PopulateTensor<int32_t>(top_k_, {top_k});
+    }
   }
 
   std::vector<int32_t> GetIndexes() {
     return ExtractVector<int32_t>(output_indexes_);
   }
 
-  std::vector<float> GetValuesFloat() {
-    return ExtractVector<float>(output_values_);
-  }
-
-  std::vector<uint8_t> GetValuesUInt8() {
-    return ExtractVector<uint8_t>(output_values_);
-  }
-
-  std::vector<int8_t> GetValuesInt8() {
-    return ExtractVector<int8_t>(output_values_);
-  }
-
-  std::vector<int32_t> GetValuesInt32() {
-    return ExtractVector<int32_t>(output_values_);
-  }
-
-  std::vector<int64_t> GetValuesInt64() {
-    return ExtractVector<int64_t>(output_values_);
+  std::vector<InputType> GetValues() {
+    return ExtractVector<InputType>(output_values_);
   }
 
  protected:
@@ -89,77 +70,79 @@ class TopKV2OpModel : public SingleOpModel {
   int output_values_;
 };
 
+class TopKV2OpTest : public ::testing::TestWithParam<TestType> {};
+
 // The test where the tensor dimension is equal to top.
-TEST(TopKV2OpTest, EqualFloat) {
-  TopKV2OpModel m({2, 2}, TensorType_FLOAT32, 2);
-  m.SetInputFloat({-2.0, 0.2, 0.8, 0.1});
+TEST_P(TopKV2OpTest, EqualFloat) {
+  TopKV2OpModel<float> m(2, {2, 2}, {-2.0, 0.2, 0.8, 0.1}, GetParam());
   m.Invoke();
   EXPECT_THAT(m.GetIndexes(), ElementsAreArray({1, 0, 0, 1}));
-  EXPECT_THAT(m.GetValuesFloat(),
+  EXPECT_THAT(m.GetValues(),
               ElementsAreArray(ArrayFloatNear({0.2, -2.0, 0.8, 0.1})));
 }
 
 // Test when internal dimension is k+1.
-TEST(TopKV2OpTest, BorderFloat) {
-  TopKV2OpModel m({2, 3}, TensorType_FLOAT32, 2);
-  m.SetInputFloat({-2.0, -3.0, 0.2, 0.8, 0.1, -0.1});
+TEST_P(TopKV2OpTest, BorderFloat) {
+  TopKV2OpModel<float> m(2, {2, 3}, {-2.0, -3.0, 0.2, 0.8, 0.1, -0.1},
+                         GetParam());
   m.Invoke();
   EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 0, 0, 1}));
-  EXPECT_THAT(m.GetValuesFloat(),
+  EXPECT_THAT(m.GetValues(),
               ElementsAreArray(ArrayFloatNear({0.2, -2.0, 0.8, 0.1})));
 }
 
 // Test when internal dimension is higher than k.
-TEST(TopKV2OpTest, LargeFloat) {
-  TopKV2OpModel m({2, 4}, TensorType_FLOAT32, 2);
-  m.SetInputFloat({-2.0, -3.0, -4.0, 0.2, 0.8, 0.1, -0.1, -0.8});
+TEST_P(TopKV2OpTest, LargeFloat) {
+  TopKV2OpModel<float> m(
+      2, {2, 4}, {-2.0, -3.0, -4.0, 0.2, 0.8, 0.1, -0.1, -0.8}, GetParam());
   m.Invoke();
   EXPECT_THAT(m.GetIndexes(), ElementsAreArray({3, 0, 0, 1}));
-  EXPECT_THAT(m.GetValuesFloat(),
+  EXPECT_THAT(m.GetValues(),
              ElementsAreArray(ArrayFloatNear({0.2, -2.0, 0.8, 0.1})));
 }
 
 // Test 1D case.
-TEST(TopKV2OpTest, VectorFloat) {
-  TopKV2OpModel m({8}, TensorType_FLOAT32, 2);
-  m.SetInputFloat({-2.0, -3.0, -4.0, 0.2, 0.8, 0.1, -0.1, -0.8});
+TEST_P(TopKV2OpTest, VectorFloat) {
+  TopKV2OpModel<float> m(2, {8}, {-2.0, -3.0, -4.0, 0.2, 0.8, 0.1, -0.1, -0.8},
+                         GetParam());
   m.Invoke();
   EXPECT_THAT(m.GetIndexes(), ElementsAreArray({4, 3}));
-  EXPECT_THAT(m.GetValuesFloat(), ElementsAreArray(ArrayFloatNear({0.8, 0.2})));
-}
-
-// Check that uint8_t works.
-TEST(TopKV2OpTest, TypeUint8) {
-  TopKV2OpModel m({2, 3}, TensorType_UINT8, 2);
-  m.SetInputUInt8({1, 2, 3, 251, 250, 249});
-  m.Invoke();
-  EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 0, 1}));
-  EXPECT_THAT(m.GetValuesUInt8(), ElementsAreArray({3, 2, 251, 250}));
-}
-
-TEST(TopKV2OpTest, TypeInt8) {
-  TopKV2OpModel m({2, 3}, TensorType_INT8, 2);
-  m.SetInputInt8({1, 2, 3, -126, 125, -24});
-  m.Invoke();
-  EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 1, 2}));
-  EXPECT_THAT(m.GetValuesInt8(), ElementsAreArray({3, 2, 125, -24}));
+  EXPECT_THAT(m.GetValues(), ElementsAreArray(ArrayFloatNear({0.8, 0.2})));
 }
 
 // Check that int32_t works.
-TEST(TopKV2OpTest, TypeInt32) {
-  TopKV2OpModel m({2, 3}, TensorType_INT32, 2);
-  m.SetInputInt32({1, 2, 3, 10251, 10250, 10249});
+TEST_P(TopKV2OpTest, TypeInt32) {
+  TopKV2OpModel<int32_t> m(2, {2, 3}, {1, 2, 3, 10251, 10250, 10249},
+                           GetParam());
   m.Invoke();
   EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 0, 1}));
-  EXPECT_THAT(m.GetValuesInt32(), ElementsAreArray({3, 2, 10251, 10250}));
+  EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, 10251, 10250}));
 }
 
+INSTANTIATE_TEST_SUITE_P(TopKV2OpTest, TopKV2OpTest,
+                         ::testing::Values(TestType::CONST, TestType::DYNAMIC));
+
+// Check that uint8_t works.
+TEST_P(TopKV2OpTest, TypeUint8) {
+  TopKV2OpModel<uint8_t> m(2, {2, 3}, {1, 2, 3, 251, 250, 249}, GetParam());
+  m.Invoke();
+  EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 0, 1}));
+  EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, 251, 250}));
+}
+
+TEST_P(TopKV2OpTest, TypeInt8) {
+  TopKV2OpModel<int8_t> m(2, {2, 3}, {1, 2, 3, -126, 125, -24}, GetParam());
+  m.Invoke();
+  EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 1, 2}));
+  EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, 125, -24}));
+}
+
 // Check that int64 works.
-TEST(TopKV2OpTest, TypeInt64) {
-  TopKV2OpModel m({2, 3}, TensorType_INT64, 2);
-  m.SetInputInt64({1, 2, 3, -1, -2, -3});
+TEST_P(TopKV2OpTest, TypeInt64) {
+  TopKV2OpModel<int64_t> m(2, {2, 3}, {1, 2, 3, -1, -2, -3}, GetParam());
  m.Invoke();
   EXPECT_THAT(m.GetIndexes(), ElementsAreArray({2, 1, 0, 1}));
-  EXPECT_THAT(m.GetValuesInt64(), ElementsAreArray({3, 2, -1, -2}));
+  EXPECT_THAT(m.GetValues(), ElementsAreArray({3, 2, -1, -2}));
 }
 
 }  // namespace
 }  // namespace tflite

File 5 of 5: NNAPI operation type constants

@@ -108,6 +108,7 @@ enum {
   ANEURALNETWORKS_SIN = 85,
   ANEURALNETWORKS_SQRT = 88,
   ANEURALNETWORKS_TILE = 89,
+  ANEURALNETWORKS_TOPK_V2 = 90,
 };
 
 /**
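
For reference, the graph the delegate builds for a delegated TopKV2 node corresponds to the following standalone use of the NNAPI C API (API level 29+). This is a rough sketch, not code from the commit: the shapes and the k value are illustrative, and error handling is omitted.

    #include <android/NeuralNetworks.h>

    // TOPK_V2 takes (input tensor, scalar int32 k) and produces
    // (values, indices). Operand indices: 0=input, 1=k, 2=values, 3=indices.
    void AddTopKV2(ANeuralNetworksModel* model) {
      uint32_t in_dims[2] = {1, 4};
      uint32_t out_dims[2] = {1, 2};  // k == 2
      ANeuralNetworksOperandType input = {
          ANEURALNETWORKS_TENSOR_FLOAT32, 2, in_dims, 0.0f, 0};
      ANeuralNetworksOperandType k_scalar = {
          ANEURALNETWORKS_INT32, 0, nullptr, 0.0f, 0};
      ANeuralNetworksOperandType values = {
          ANEURALNETWORKS_TENSOR_FLOAT32, 2, out_dims, 0.0f, 0};
      ANeuralNetworksOperandType indices = {
          ANEURALNETWORKS_TENSOR_INT32, 2, out_dims, 0.0f, 0};
      ANeuralNetworksModel_addOperand(model, &input);     // operand 0
      ANeuralNetworksModel_addOperand(model, &k_scalar);  // operand 1
      ANeuralNetworksModel_addOperand(model, &values);    // operand 2
      ANeuralNetworksModel_addOperand(model, &indices);   // operand 3
      // Like the delegate, make k a compile-time constant.
      int32_t k = 2;
      ANeuralNetworksModel_setOperandValue(model, 1, &k, sizeof(k));
      uint32_t op_inputs[2] = {0, 1};
      uint32_t op_outputs[2] = {2, 3};
      ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_TOPK_V2, 2,
                                        op_inputs, 2, op_outputs);
      // Only the data tensor is a model input; k is baked into the model.
      uint32_t model_inputs[1] = {0};
      ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, model_inputs, 2,
                                                    op_outputs);
      ANeuralNetworksModel_finish(model);
    }

This mirrors the delegate code above: the K parameter must be a constant (kTfLiteMmapRo) tensor on the TFLite side precisely so it can be added here as a scalar operand with a baked-in value via AddScalarInt32Operand.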