Add NNAPI delegate support for Fill

PiperOrigin-RevId: 314116176
Change-Id: I8216b776ec823ff9c0696410091291f6b2d3a385
Author: Lev Proleev
Date: 2020-06-01 05:49:15 -07:00
Committed by: TensorFlower Gardener
Parent: de32c75f2f
Commit: a26381f3dc
5 changed files with 179 additions and 50 deletions


@@ -173,6 +173,11 @@ ExpOpTest/FloatTest,29
 # Only constant tensors models
 ExpandDimsOpTest/.+/1,29
+# fill_test
+FillOpTest/FillOpTest/FillInt32/0,30
+FillOpTest/FillOpTest/FillFloat/0,30
+FillOpTest/FillOpTest/FillFloatInt32Dims/0,30
 # floor_test
 FloorOpTest/.+
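
(Reading the allowlist format: each entry is a "<gtest filter>,<minimum Android API level>" pair, so the three new FillOpTest entries declare that Fill is expected to be NNAPI-accelerated only from API level 30, i.e. NNAPI 1.3, matching the kMinSdkVersionForNNAPI13 check added in the delegate below.)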


@@ -2343,6 +2343,50 @@ bool NNAPIDelegateKernel::Validate(
       ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
                                  &val_ctx);
     } break;
+    case kTfLiteBuiltinFill: {
+      ExpectOpVersion(version, 1, &val_ctx);
+      ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
+                                 &val_ctx);
+      const auto& dims_tensor = context->tensors[node->inputs->data[0]];
+      Expect(IsConstantTensor(&dims_tensor),
+             NNAPIValidationFailureType::kUnsupportedInputType,
+             "NNAPI doesn't support dynamic dimensions tensor.", &val_ctx);
+      EXPECT_INPUT_TYPE_IN(dims_tensor.type, kTfLiteInt32, kTfLiteInt64);
+      if (IsConstantTensor(&dims_tensor)) {
+        Expect(dims_tensor.dims->data[0] != 0,
+               NNAPIValidationFailureType::kUnsupportedOperandValue,
+               "NNAPI doesn't support generating scalars from FILL", &val_ctx);
+        if (dims_tensor.type == kTfLiteInt64) {
+          bool fit_in_int32 =
+              std::all_of(dims_tensor.data.i64,
+                          dims_tensor.data.i64 + dims_tensor.dims->data[0],
+                          [](int64_t dim) {
+                            return std::numeric_limits<int32_t>::min() <= dim &&
+                                   dim <= std::numeric_limits<int32_t>::max();
+                          });
+          Expect(fit_in_int32,
+                 NNAPIValidationFailureType::kUnsupportedOperandValue,
+                 "NNAPI only supports int32 dimensions tensor. If the "
+                 "dimensions type is int64 and they are constant we can "
+                 "convert them to int32 if the value isn't too large.",
+                 &val_ctx);
+        }
+      }
+      const auto& value_tensor = context->tensors[node->inputs->data[1]];
+      EXPECT_INPUT_TYPE_IN(value_tensor.type, kTfLiteFloat32, kTfLiteInt32,
+                           kTfLiteInt64);
+      if (value_tensor.type == kTfLiteInt64) {
+        Expect(
+            IsConstantTensor(&value_tensor) &&
+                *value_tensor.data.i64 <= std::numeric_limits<int32_t>::max() &&
+                *value_tensor.data.i64 >= std::numeric_limits<int32_t>::min(),
+            NNAPIValidationFailureType::kUnsupportedInputType,
+            "NNAPI only supports int32 input. If the input type is int64 and "
+            "constant we can convert it to int32 if the value isn't too "
+            "large.",
+            &val_ctx);
+      }
+    } break;
     default:
       // All other operators are not mapped.
       AddValidationFailure(NNAPIValidationFailureType::kUnsupportedOperator,
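
The crux of the new validation is the int64-to-int32 fit check. As a self-contained sketch of the same pattern (DimsFitInt32 is a hypothetical helper name, not part of this change):

#include <algorithm>
#include <cstdint>
#include <limits>

// Returns true iff every int64 dimension can be losslessly cast to int32,
// which is what lets the delegate re-emit a constant int64 dims tensor as
// the int32 tensor NNAPI expects.
bool DimsFitInt32(const int64_t* dims, int count) {
  return std::all_of(dims, dims + count, [](int64_t dim) {
    return std::numeric_limits<int32_t>::min() <= dim &&
           dim <= std::numeric_limits<int32_t>::max();
  });
}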
@@ -3127,6 +3171,9 @@ TfLiteStatus NNAPIDelegateKernel::Map(
       mapping_args.builder->AddScalarFloat32Operand(1.0);
       *nn_op_type = ANEURALNETWORKS_ELU;
     } break;
+    case kTfLiteBuiltinFill: {
+      *nn_op_type = ANEURALNETWORKS_FILL;
+    } break;
     default:
       // All other operators are not mapped.
       return kTfLiteError;
@@ -3882,6 +3929,52 @@ TfLiteStatus NNAPIDelegateKernel::AddOpsAndTensors(TfLiteContext* context,
         TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
                                                      input_tensor_flags));
       }
+    } else if (reg->builtin_code == kTfLiteBuiltinFill) {
+      if (input_pos == 0) {
+        const int dims_id = node->inputs->data[0];
+        const TfLiteTensor& dims_tensor = context->tensors[dims_id];
+        switch (dims_tensor.type) {
+          case kTfLiteInt32:
+            TF_LITE_ENSURE_STATUS(
+                builder.AddTensorInput(input_index, hybrid_op));
+            break;
+          case kTfLiteInt64: {
+            // Validate() made sure that dimensions are constant and fit into
+            // int32, so we can safely create a new tensor with casted values.
+            const int dims_size = dims_tensor.dims->data[0];
+            std::vector<int32_t> dims_int32(dims_size);
+            std::copy(dims_tensor.data.i64, dims_tensor.data.i64 + dims_size,
+                      dims_int32.begin());
+            int new_tensor_index = -1;
+            builder.AddNewInputConstantTensor(
+                ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, dims_tensor.dims,
+                dims_int32, dims_tensor.params, &new_tensor_index);
+          } break;
+          default:
+            return kTfLiteError;
+        }
+      } else {
+        const int value_id = node->inputs->data[1];
+        const TfLiteTensor& value_tensor = context->tensors[value_id];
+        switch (value_tensor.type) {
+          case kTfLiteFloat32:
+            TF_LITE_ENSURE_STATUS(
+                builder.AddScalarFloat32Operand(*value_tensor.data.f));
+            break;
+          case kTfLiteInt32:
+            TF_LITE_ENSURE_STATUS(
+                builder.AddScalarInt32Operand(*value_tensor.data.i32));
+            break;
+          case kTfLiteInt64:
+            // Validate() already made sure the int64 input is constant and
+            // fits into int32.
+            TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
+                static_cast<int32_t>(*value_tensor.data.i64)));
+            break;
+          default:
+            return kTfLiteError;
+        }
+      }
     } else {
       TF_LITE_ENSURE_STATUS(
           builder.AddTensorInput(input_index, hybrid_op, input_tensor_flags));
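
(Net effect of this branch, walked through for a Fill node with constant int64 dims {2, 3} and int64 value 7, as an illustration rather than code from the commit: input 0 is re-emitted as a new constant ANEURALNETWORKS_TENSOR_INT32 operand holding {2, 3}, and input 1 becomes a scalar int32 operand with value 7, which is the dims-tensor-plus-scalar operand layout ANEURALNETWORKS_FILL consumes.)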


@@ -2014,6 +2014,7 @@ cc_test(
     name = "fill_test",
     size = "small",
     srcs = ["fill_test.cc"],
+    tags = ["tflite_nnapi"],
    deps = [
        ":builtin_ops",
        ":test_main",
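
(The new tflite_nnapi tag is, as I read it, what opts fill_test into the test runs that apply the NNAPI delegate and consult the acceleration allowlist updated above.)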


@@ -24,87 +24,115 @@ namespace {
 using ::testing::ElementsAreArray;
 using ::testing::IsEmpty;
+enum class TestType {
+  kConst = 0,
+  kDynamic = 1,
+};
+template <typename dims_type, typename value_type>
 class FillOpModel : public SingleOpModel {
  public:
-  explicit FillOpModel(const TensorData& input1, const TensorData& input2) {
-    input1_ = AddInput(input1);
-    input2_ = AddInput(input2);
-    output_ = AddOutput(input1);
+  explicit FillOpModel(TensorType dims_tensor_type,
+                       std::initializer_list<int> dims_shape,
+                       std::initializer_list<dims_type> dims_data,
+                       value_type value, TestType input_tensor_types) {
+    if (input_tensor_types == TestType::kDynamic) {
+      dims_ = AddInput(dims_tensor_type);
+      value_ = AddInput(GetTensorType<value_type>());
+    } else {
+      dims_ = AddConstInput(dims_tensor_type, dims_data, dims_shape);
+      value_ = AddConstInput(GetTensorType<value_type>(), {value}, {});
+    }
+    output_ = AddOutput(GetTensorType<value_type>());
     SetBuiltinOp(BuiltinOperator_FILL, BuiltinOptions_FillOptions,
                  CreateFillOptions(builder_).Union());
-    BuildInterpreter({GetShape(input1_), GetShape(input2_)});
+    BuildInterpreter({dims_shape, {}});
+    if (input_tensor_types == TestType::kDynamic) {
+      if (dims_data.size() > 0) {
+        PopulateTensor<dims_type>(dims_, dims_data);
+      }
+      PopulateTensor<value_type>(value_, {value});
+    }
   }
-  int input1() { return input1_; }
-  int input2() { return input2_; }
-  int output() { return output_; }
+  std::vector<value_type> GetOutput() {
+    return ExtractVector<value_type>(output_);
+  }
+  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
  protected:
-  int input1_;
-  int input2_;
+  int dims_;
+  int value_;
   int output_;
 };
-TEST(FillOpModel, FillInt32) {
-  FillOpModel m({TensorType_INT32, {2}}, {TensorType_INT32});
-  m.PopulateTensor<int32_t>(m.input1(), {2, 3});
-  m.PopulateTensor<int32_t>(m.input2(), {-11});
+class FillOpTest : public ::testing::TestWithParam<TestType> {};
+TEST_P(FillOpTest, FillInt32) {
+  FillOpModel<int32_t, int32_t> m(TensorType_INT32, {2}, {2, 3}, -11,
+                                  GetParam());
   m.Invoke();
-  EXPECT_THAT(m.ExtractVector<int32_t>(m.output()),
-              ElementsAreArray({-11, -11, -11, -11, -11, -11}));
-  EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({2, 3}));
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-11, -11, -11, -11, -11, -11}));
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
 }
-TEST(FillOpModel, FillInt64) {
-  FillOpModel m({TensorType_INT32, {2}}, {TensorType_INT64});
-  m.PopulateTensor<int32_t>(m.input1(), {2, 4});
-  m.PopulateTensor<int64_t>(m.input2(), {1LL << 45});
+TEST_P(FillOpTest, FillInt64) {
+  FillOpModel<int64_t, int64_t> m(TensorType_INT64, {2}, {2, 4}, 1LL << 45,
+                                  GetParam());
   m.Invoke();
-  EXPECT_THAT(m.ExtractVector<int64_t>(m.output()),
+  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray({1LL << 45, 1LL << 45, 1LL << 45, 1LL << 45,
                                1LL << 45, 1LL << 45, 1LL << 45, 1LL << 45}));
-  EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({2, 4}));
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4}));
 }
-TEST(FillOpModel, FillFloat) {
-  FillOpModel m({TensorType_INT64, {3}}, {TensorType_FLOAT32});
-  m.PopulateTensor<int64_t>(m.input1(), {2, 2, 2});
-  m.PopulateTensor<float>(m.input2(), {4.0});
+TEST_P(FillOpTest, FillFloat) {
+  FillOpModel<int64_t, float> m(TensorType_INT64, {3}, {2, 2, 2}, 4.0,
+                                GetParam());
   m.Invoke();
-  EXPECT_THAT(m.ExtractVector<float>(m.output()),
+  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray({4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
-  EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({2, 2, 2}));
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
 }
-TEST(FillOpModel, FillOutputScalar) {
-  FillOpModel m({TensorType_INT64, {0}}, {TensorType_FLOAT32});
-  m.PopulateTensor<float>(m.input2(), {4.0});
+TEST_P(FillOpTest, FillFloatInt32Dims) {
+  FillOpModel<int32_t, float> m(TensorType_INT32, {3}, {2, 2, 2}, 4.0,
+                                GetParam());
   m.Invoke();
-  EXPECT_THAT(m.ExtractVector<float>(m.output()), ElementsAreArray({4.0}));
-  EXPECT_THAT(m.GetTensorShape(m.output()), IsEmpty());
+  EXPECT_THAT(m.GetOutput(),
+              ElementsAreArray({4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0}));
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
 }
-TEST(FillOpModel, FillBool) {
-  FillOpModel m({TensorType_INT64, {3}}, {TensorType_BOOL});
-  m.PopulateTensor<int64_t>(m.input1(), {2, 2, 2});
-  m.PopulateTensor<bool>(m.input2(), {true});
+TEST_P(FillOpTest, FillOutputScalar) {
+  FillOpModel<int64_t, float> m(TensorType_INT64, {0}, {}, 4.0, GetParam());
   m.Invoke();
-  EXPECT_THAT(
-      m.ExtractVector<bool>(m.output()),
-      ElementsAreArray({true, true, true, true, true, true, true, true}));
-  EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({2, 2, 2}));
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({4.0}));
+  EXPECT_THAT(m.GetOutputShape(), IsEmpty());
 }
-TEST(FillOpModel, FillString) {
-  FillOpModel m({TensorType_INT64, {3}}, {TensorType_STRING});
-  m.PopulateTensor<int64_t>(m.input1(), {2, 2, 2});
-  m.PopulateTensor<std::string>(m.input2(), {"AB"});
+TEST_P(FillOpTest, FillBool) {
+  FillOpModel<int64_t, bool> m(TensorType_INT64, {3}, {2, 2, 2}, true,
+                               GetParam());
   m.Invoke();
-  EXPECT_THAT(
-      m.ExtractVector<std::string>(m.output()),
-      ElementsAreArray({"AB", "AB", "AB", "AB", "AB", "AB", "AB", "AB"}));
-  EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({2, 2, 2}));
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({true, true, true, true, true,
+                                               true, true, true}));
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
 }
+TEST(FillOpTest, FillString) {
+  FillOpModel<int64_t, std::string> m(TensorType_INT64, {3}, {2, 2, 2}, "AB",
+                                      TestType::kDynamic);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({"AB", "AB", "AB", "AB", "AB",
+                                               "AB", "AB", "AB"}));
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
+}
+INSTANTIATE_TEST_SUITE_P(FillOpTest, FillOpTest,
+                         ::testing::Values(TestType::kConst,
+                                           TestType::kDynamic));
 }  // namespace
 }  // namespace tflite
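
(Two design points in this rewrite, as I read the diff: each TEST_P runs once with TestType::kConst and once with TestType::kDynamic, and only the kConst variant can be taken over by the NNAPI delegate, since Validate() above insists on a constant dimensions tensor. FillString stays a plain TEST pinned to TestType::kDynamic because string is not among the value types the delegate accepts (kTfLiteFloat32, kTfLiteInt32, kTfLiteInt64), so there is no constant-input NNAPI path to exercise.)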


@@ -17,6 +17,7 @@ limitations under the License.
 #include <cmath>
 #include <complex>
 #include <type_traits>
+#include <vector>
 #include <gmock/gmock.h>
@@ -812,6 +813,7 @@ TensorType GetTensorType() {
   if (std::is_same<T, int64_t>::value) return TensorType_INT64;
   if (std::is_same<T, uint8_t>::value) return TensorType_UINT8;
   if (std::is_same<T, string>::value) return TensorType_STRING;
+  if (std::is_same<T, bool>::value) return TensorType_BOOL;
   return TensorType_MIN;  // default value
 }
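
(Without that one-line mapping, GetTensorType<bool>() fell through to the TensorType_MIN default, so the templated FillOpModel<int64_t, bool> used by the new FillBool test could not have declared a correctly typed value tensor; this addition is what makes the bool variant of the suite buildable.)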