diff --git a/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc b/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc index 67404405c2d..47b531b71b2 100644 --- a/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc +++ b/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc @@ -25,6 +25,7 @@ limitations under the License. #include "tensorflow/lite/builtin_op_data.h" #include "tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/c_api_internal.h" #include "tensorflow/lite/context_util.h" #include "tensorflow/lite/kernels/kernel_util.h" @@ -135,6 +136,14 @@ bool IsHybridOperator(const TfLiteContext* context, int builtin_code, const TfLiteType weights_type = context->tensors[weights_id].type; return IsFloat(input_type) && IsQuantized(weights_type); } + case kTfLiteBuiltinBidirectionalSequenceLstm: { + const int input_id = node->inputs->data[0]; + // Input #1 is optional so use #2 to determine if hybrid. + const int weights_id = node->inputs->data[2]; + const TfLiteType input_type = context->tensors[input_id].type; + const TfLiteType weights_type = context->tensors[weights_id].type; + return IsFloat(input_type) && IsQuantized(weights_type); + } case kTfLiteBuiltinUnidirectionalSequenceRnn: { const int input_id = node->inputs->data[0]; const int weights_id = node->inputs->data[1]; @@ -1633,6 +1642,31 @@ class NNAPIDelegateKernel { }; } } break; + case kTfLiteBuiltinBidirectionalSequenceLstm: + if (version == 1 && android_sdk_version >= kMinSdkVersionForNNAPI12) { + if (IsHybridOperator(context, builtin_code, node)) { + // Hybrid version of this op is not supported by NN API. 
+ return nullptr; + } + return [](const NNAPIOpMappingArgs& mapping_args) + -> ANeuralNetworksOperationType { + auto builtin = + reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>( + mapping_args.node->builtin_data); + mapping_args.builder->AddScalarInt32Operand(builtin->activation); + mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip); + mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip); + mapping_args.builder->AddScalarBoolOperand(builtin->merge_outputs); + mapping_args.builder->AddScalarBoolOperand(builtin->time_major); + // TF Lite doesn't support layer normalization in bidirectional + // sequence LSTM, so we insert optional tensors for NNAPI + for (int i = 0; i < 8; ++i) { + mapping_args.builder->AddVectorFloat32Operand(nullptr, 0); + } + return ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM; + }; + } + break; default: // All other operators are not mapped. return nullptr; @@ -2037,7 +2071,8 @@ class NNAPIDelegateKernel { if (input_index == kOptionalTensor && (reg->builtin_code == kTfLiteBuiltinLstm || - reg->builtin_code == kTfLiteBuiltinSvdf)) { + reg->builtin_code == kTfLiteBuiltinSvdf || + reg->builtin_code == kTfLiteBuiltinBidirectionalSequenceLstm)) { // properly handle the optional tensor for LSTM and SVDF. // currently only support float32.
// TODO(miaowang): make sure this is also able to handle quantized diff --git a/tensorflow/lite/kernels/BUILD b/tensorflow/lite/kernels/BUILD index ee27e27145a..923b8e08c54 100644 --- a/tensorflow/lite/kernels/BUILD +++ b/tensorflow/lite/kernels/BUILD @@ -754,6 +754,7 @@ cc_test( name = "bidirectional_sequence_lstm_test", size = "small", srcs = ["bidirectional_sequence_lstm_test.cc"], + tags = ["tflite_nnapi"], deps = [ ":builtin_ops", ":test_main", diff --git a/tensorflow/lite/nnapi/NeuralNetworksTypes.h b/tensorflow/lite/nnapi/NeuralNetworksTypes.h index 8d308628b8a..1199c571d71 100644 --- a/tensorflow/lite/nnapi/NeuralNetworksTypes.h +++ b/tensorflow/lite/nnapi/NeuralNetworksTypes.h @@ -89,6 +89,7 @@ enum { ANEURALNETWORKS_SUB = 36, ANEURALNETWORKS_TRANSPOSE = 37, ANEURALNETWORKS_ABS = 38, + ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42, ANEURALNETWORKS_EQUAL = 48, ANEURALNETWORKS_EXP = 49, ANEURALNETWORKS_GATHER = 51,