NNAPI TransposeConv op takes tensor inputs from TFLite node

PiperOrigin-RevId: 267384015
T.J. Alumbaugh 2019-09-05 09:04:26 -07:00 committed by Jared Duke
parent 705083378c
commit cdb35502d8

@@ -1719,9 +1719,13 @@ NNAPIDelegateKernel::MappingFn NNAPIDelegateKernel::Map(
           const bool hybrid_op =
               IsHybridOperator(mapping_args.context,
                                kTfLiteBuiltinTransposeConv, mapping_args.node);
-          mapping_args.builder->AddTensorInput(/*kDataInputTensor*/ 2,
-                                               hybrid_op);
-          mapping_args.builder->AddTensorInput(/*kWeightsTensor*/ 1, hybrid_op);
+          mapping_args.builder->AddTensorInput(
+              mapping_args.node->inputs->data[/* kDataInputTensor */ 2],
+              hybrid_op);
+          mapping_args.builder->AddTensorInput(
+              mapping_args.node->inputs->data[/* kWeightsTensor */ 1],
+              hybrid_op);
+
           // NNAPI requires a bias tensor, so we allocate a new tensor to fill
           // it with zeroes. It is deleted with other tensors in the context
@@ -1780,7 +1784,8 @@ NNAPIDelegateKernel::MappingFn NNAPIDelegateKernel::Map(
               /*zero_point=*/0);
           }
-          mapping_args.builder->AddTensorInput(/*kOutputShapeTensor*/ 0,
-                                               hybrid_op);
+          mapping_args.builder->AddTensorInput(
+              mapping_args.node->inputs->data[/* kOutputShapeTensor */ 0],
+              hybrid_op);
           auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(
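
The substance of the change is one level of indirection: the constants 0/1/2 are TransposeConv's op-relative input slots, while AddTensorInput needs the graph-level tensor index that the TFLite node stores in node->inputs->data. A minimal, self-contained sketch of that distinction follows; the structs are simplified stand-ins for the real types in tensorflow/lite/c/common.h and the tensor indices are made up for illustration, so this is not the delegate's actual code.

// Sketch only: simplified stand-ins for the TFLite C structs.
#include <cstdio>

struct TfLiteIntArray {
  int size;
  int data[8];
};
struct TfLiteNode {
  TfLiteIntArray* inputs;
};

// TransposeConv's op-relative input slots, matching the comments in the diff.
constexpr int kOutputShapeTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kDataInputTensor = 2;

int main() {
  // Suppose the graph wired tensors 7, 3 and 12 into this node (made-up values).
  TfLiteIntArray inputs = {3, {7, 3, 12}};
  TfLiteNode node = {&inputs};

  // Before the fix, the slot constant itself (e.g. 2) was handed to
  // AddTensorInput, i.e. always "tensor 2" regardless of the graph.
  // After the fix, the slot is translated into the graph tensor index:
  std::printf("output shape tensor: %d, weights tensor: %d, data tensor: %d\n",
              node.inputs->data[kOutputShapeTensor],   // 7
              node.inputs->data[kWeightsTensor],       // 3
              node.inputs->data[kDataInputTensor]);    // 12
  return 0;
}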