Merge pull request #45296 from freedomtan:map_leaky_relu_to_prelu_in_nnapi_delegate

PiperOrigin-RevId: 346995198
Change-Id: I482037667057b69780f5fccf20fc8648dcc53fd3
commit a95d4c82a5
Author: TensorFlower Gardener
Date:   2020-12-11 07:52:48 -08:00
2 changed files with 98 additions and 0 deletions

tensorflow/lite/delegates/nnapi/nnapi_delegate.cc

@@ -221,6 +221,8 @@ bool IsScalarInputSupported(int builtin_code) {
case kTfLiteBuiltinPow:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinPrelu:
case kTfLiteBuiltinLeakyRelu:
return true;
default:
return false;
@@ -2448,6 +2450,7 @@ bool NNAPIDelegateKernel::Validate(
&val_ctx);
}
} break;
case kTfLiteBuiltinLeakyRelu:
case kTfLiteBuiltinPrelu: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
@@ -3357,6 +3360,38 @@ TfLiteStatus NNAPIDelegateKernel::Map(
case kTfLiteBuiltinCast: {
*nn_op_type = ANEURALNETWORKS_CAST;
} break;
case kTfLiteBuiltinLeakyRelu: {
const auto input_type =
mapping_args.context->tensors[mapping_args.node->inputs->data[0]]
.type;
auto builtin = reinterpret_cast<TfLiteLeakyReluParams*>(
mapping_args.node->builtin_data);
TfLiteTensor alpha_tensor;
alpha_tensor.type = input_type;
alpha_tensor.allocation_type = kTfLiteDynamic;
alpha_tensor.dims = TfLiteIntArrayCreate(1);
alpha_tensor.dims->data[0] = 1;
alpha_tensor.params.zero_point = 0;
int new_tensor_index = -1;
if (input_type == kTfLiteFloat32) {
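// Float input: add a one-element float32 constant tensor holding alpha directly.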
alpha_tensor.params.scale = 0;
std::vector<float> alpha_value = {builtin->alpha};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32, alpha_tensor.dims,
alpha_value, alpha_tensor.params, &new_tensor_index);
} else {
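// Quantized input: store the raw value 1 with scale = alpha and
// zero_point = 0, so the dequantized alpha equals builtin->alpha.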
alpha_tensor.params.scale = builtin->alpha;
std::vector<uint8_t> alpha_value = {1};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
alpha_tensor.dims, alpha_value, alpha_tensor.params,
&new_tensor_index);
}
*nn_op_type = ANEURALNETWORKS_PRELU;
} break;
case kTfLiteBuiltinPrelu: {
*nn_op_type = ANEURALNETWORKS_PRELU;
} break;
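
The LeakyRelu case above rewrites the op as NNAPI's PRELU with a broadcast one-element alpha input. Below is a minimal standalone sketch of the identity being relied on, plus the quantized alpha encoding; it is plain C++, independent of the TFLite/NNAPI headers, and all names in it are illustrative only.

#include <cassert>
#include <cstdint>
#include <vector>

// Reference LeakyRelu: f(x) = x for x >= 0, alpha * x otherwise.
float LeakyRelu(float x, float alpha) { return x >= 0.f ? x : alpha * x; }

// PRELU with a single broadcast alpha value behaves identically, which is why
// the delegate can emit ANEURALNETWORKS_PRELU with a one-element alpha input.
float PreluBroadcastAlpha(float x, float alpha) {
  return x >= 0.f ? x : alpha * x;
}

// The quantized alpha tensor built above stores the raw byte 1 with
// scale = alpha and zero_point = 0, so dequantization recovers alpha.
float DequantizeAlpha(uint8_t raw, float scale, int zero_point) {
  return scale * static_cast<float>(raw - zero_point);
}

int main() {
  const float alpha = 0.5f;
  for (float x : std::vector<float>{0.f, 1.f, 3.f, -1.f, -2.f}) {
    assert(LeakyRelu(x, alpha) == PreluBroadcastAlpha(x, alpha));
  }
  assert(DequantizeAlpha(/*raw=*/1, /*scale=*/alpha, /*zero_point=*/0) == alpha);
  return 0;
}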

tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc

@@ -5334,6 +5334,69 @@ TEST(QuantizedPadV2OpTest, Int8AdvancedDynamicValuedTest) {
AdvancedDynamicValuedTest<int8_t, TensorType_INT8>();
}

// Model wrapper for the Leaky ReLU op, shared by the float and quantized
// NNAPI delegate tests below.
class LeakyReluOpModel : public SingleOpModelWithNNAPI {
public:
LeakyReluOpModel(const TensorData& input, const float& alpha)
: input_type_(input.type) {
input_ = AddInput(input);
output_ = AddOutput({input.type, input.shape, input.min, input.max});
SetBuiltinOp(BuiltinOperator_LEAKY_RELU, BuiltinOptions_LeakyReluOptions,
CreateLeakyReluOptions(builder_, alpha).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
std::vector<float> GetOutput() {
std::vector<float> output;
GetData(output_, input_type_, &output);
return output;
}
protected:
int input_;
int output_;
const TensorType input_type_;
};
TEST(NNAPIDelegate, LeakyReluFloat) {
LeakyReluOpModel m({TensorType_FLOAT32, {2, 3}}, 0.5f);
m.SetInput({
0.0f, 1.0f, 3.0f, // Row 1
1.0f, -1.0f, -2.0f, // Row 2
});
m.Invoke();
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0.0f, 1.0f, 3.0f, // Row 1
1.0f, -0.5f, -1.0f, // Row 2
}));
}
TEST(NNAPIDelegate, LeakyReluQuantized) {
const float kMin = -1;
const float kMax = 127.f / 128.f;
LeakyReluOpModel m({TensorType_UINT8, {2, 3}, 8 * kMin, 8 * kMax}, 0.5f);
m.SetInput({
0.0f, 1.0f, 3.0f, // Row 1
1.0f, -1.0f, -2.0f, // Row 2
});
m.Invoke();
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
0.0f, 1.0f, 3.0f, // Row 1
1.0f, -0.5f, -1.0f, // Row 2
},
kQuantizedTolerance)));
}
} // namespace
} // namespace tflite
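
For reference on the quantized test: the range [8 * kMin, 8 * kMax] = [-8, 7.9375] gives a uint8 scale of exactly 1/16 with zero point 128, so every expected output above is representable to within one quantization step. The sketch below walks through that arithmetic, assuming the standard affine quantization scheme used by the test harness; kQuantizedTolerance itself is defined elsewhere in the test file.

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // Range used by LeakyReluQuantized: [8 * kMin, 8 * kMax] = [-8, 7.9375].
  const float rmin = -8.0f;
  const float rmax = 8.0f * 127.0f / 128.0f;
  const float scale = (rmax - rmin) / 255.0f;                          // 0.0625
  const int zero_point = static_cast<int>(std::round(-rmin / scale));  // 128

  // Round-trip one expected output (-0.5f) through uint8 quantization.
  const float expected = -0.5f;
  const uint8_t q =
      static_cast<uint8_t>(std::round(expected / scale) + zero_point);
  const float dequantized = scale * static_cast<float>(q - zero_point);
  std::printf("scale=%g zero_point=%d round_trip=%g\n", scale, zero_point,
              dequantized);  // scale=0.0625 zero_point=128 round_trip=-0.5
  return 0;
}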