NNAPI TransposeConv op now resolves its tensor inputs through the TFLite node's input list (node->inputs->data[...]) instead of hard-coded tensor indices

PiperOrigin-RevId: 267384015
This commit is contained in:
T.J. Alumbaugh 2019-09-05 09:04:26 -07:00 committed by Jared Duke
parent 705083378c
commit cdb35502d8

View File

@ -1719,9 +1719,13 @@ NNAPIDelegateKernel::MappingFn NNAPIDelegateKernel::Map(
const bool hybrid_op =
IsHybridOperator(mapping_args.context,
kTfLiteBuiltinTransposeConv, mapping_args.node);
mapping_args.builder->AddTensorInput(/*kDataInputTensor*/ 2,
hybrid_op);
mapping_args.builder->AddTensorInput(/*kWeightsTensor*/ 1, hybrid_op);
mapping_args.builder->AddTensorInput(
mapping_args.node->inputs->data[/* kDataInputTensor */ 2],
hybrid_op);
mapping_args.builder->AddTensorInput(
mapping_args.node->inputs->data[/* kWeightsTensor */ 1],
hybrid_op);
// NNAPI requires a bias tensor, so we allocate a new tensor to fill
// it with zeroes. It is deleted with other tensors in the context
@ -1780,8 +1784,9 @@ NNAPIDelegateKernel::MappingFn NNAPIDelegateKernel::Map(
/*zero_point=*/0);
}
mapping_args.builder->AddTensorInput(/*kOutputShapeTensor*/ 0,
hybrid_op);
mapping_args.builder->AddTensorInput(
mapping_args.node->inputs->data[/* kOutputShapeTensor */ 0],
hybrid_op);
auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(
mapping_args.node->builtin_data);