Refactoring, and avoid calling function twice in CHECK_NN.

PiperOrigin-RevId: 225461929

parent bc99c3db7f
commit 352a08f34f
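Note: the old CHECK_NN macro pasted its code argument into both the comparison
and the ReportError call, so a failing NNAPI call at the call site was executed
twice. A minimal sketch of the hazard (the call site is illustrative, not taken
from this diff; ANeuralNetworksModel_addOperand is a real NNAPI entry point):

    // Old expansion: the addOperand call runs once for the comparison and,
    // on failure, a second time as the ReportError argument.
    CHECK_NN(context, ANeuralNetworksModel_addOperand(nn_model_, &operand_type));

The new macro binds the argument to a local _code exactly once, as the first
hunk below shows.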
@@ -37,11 +37,15 @@ namespace {
 // TODO(b/80621585): Consider printing error string, but don't for now to
 // minimize binary size.
 #define CHECK_NN(context, code)                                               \
-  if (code != ANEURALNETWORKS_NO_ERROR) {                                     \
-    context->ReportError(context, "NN API returned error (%d).\n", code);     \
-    return kTfLiteError;                                                      \
-  }
+  do {                                                                        \
+    const auto _code = (code);                                                \
+    if (_code != ANEURALNETWORKS_NO_ERROR) {                                  \
+      context->ReportError(context, "NN API returned error (%d, line %d).\n", \
+                           _code, __LINE__);                                  \
+      return kTfLiteError;                                                    \
+    }                                                                         \
+  } while (0)
 
 namespace {
 int32_t GetAndroidSdkVersion() {
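Note: besides evaluating its argument once, wrapping the body in
do { ... } while (0) makes the macro expand to exactly one statement, so it
composes safely with if/else at call sites. A short illustration of the idiom
(FallBackToCpu is a placeholder, not from this diff):

    // With do/while(0), the macro plus its trailing semicolon form a single
    // statement, so the else binds to the outer if as intended.
    if (use_nnapi)
      CHECK_NN(context, code);
    else
      FallBackToCpu();

The old expansion ended in a bare if block, which could steal a following else
or misbehave as the unbraced body of an if.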
@@ -349,19 +353,18 @@ class NNAPIOpBuilder {
     return kTfLiteOk;
   }
 
-  // TfLiteContext for error handling. Must be named context for macros to
-  // work.
-  TfLiteContext* context_;
+  // TfLiteContext for error handling.
+  TfLiteContext* const context_;
 
-  // Tracks relationship between indices
+  // Tracks relationship between indices.
   OperandMapping* operand_mapping_;
 
-  // The model
-  ANeuralNetworksModel* nn_model_;
+  // The NNAPI model.
+  ANeuralNetworksModel* const nn_model_;
 
   // Inputs and outputs for the current op. These are augmented in the sense
   // that NN API uses operands for all arguments, not just tensors, unlike
-  // TensorFlow lite.
+  // TensorFlow Lite.
   std::vector<uint32_t> augmented_inputs_;
   std::vector<uint32_t> augmented_outputs_;
 };
@@ -374,6 +377,14 @@ struct NNAPIOpMappingArgs {
   std::vector<int>* model_state_tfl_inputs;
 };
 
+// Mapping function simply returning the operation type without adding any
+// additional parameter.
+template <ANeuralNetworksOperationType OperationType>
+ANeuralNetworksOperationType BasicMappingFn(
+    const NNAPIOpMappingArgs& mapping_args) {
+  return OperationType;
+}
+
 // The kernel that represents the node sub set of TF Lite being run on NN API.
 class NNAPIDelegateKernel {
  public:
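Note: BasicMappingFn replaces the repeated capture-free lambdas because each
template instantiation is an ordinary function with the MappingFn signature.
A sketch of the assumed MappingFn type (the exact typedef sits outside this
diff):

    // Assumed shape of MappingFn: a plain function pointer, which both
    // capture-free lambdas and BasicMappingFn instantiations convert to.
    using MappingFn =
        ANeuralNetworksOperationType (*)(const NNAPIOpMappingArgs& mapping_args);

    // One instantiation per trivial operator, for example:
    MappingFn fn = BasicMappingFn<ANEURALNETWORKS_FLOOR>;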
@@ -385,8 +396,8 @@ class NNAPIDelegateKernel {
   // Return a function that knows how to translate a node into its operands
   // when called. You can use this function to see if a node is supported
   // (i.e. that MappingFn is not nullptr).
-  MappingFn Map(TfLiteContext* context, int builtin_code, int version,
-                TfLiteNode* node) {
+  static MappingFn Map(TfLiteContext* context, int builtin_code, int version,
+                       TfLiteNode* node) {
     switch (builtin_code) {
       case kTfLiteBuiltinAdd:
         if (version == 1) {
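Note: making Map static lets callers probe operator support without
constructing an NNAPIDelegateKernel first; the matching call-site change
appears in the delegate-registration hunk near the end of this diff:

    // After this change, a support probe needs no dummy instance:
    if (NNAPIDelegateKernel::Map(context, registration->builtin_code,
                                 registration->version, node)) {
      supported_nodes.push_back(node_index);
    }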
@@ -397,8 +408,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_ADD;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinMul:
@@ -410,8 +419,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_MUL;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinAveragePool2d:
@@ -422,8 +429,6 @@ class NNAPIDelegateKernel {
                 mapping_args.node->builtin_data);
             return ANEURALNETWORKS_AVERAGE_POOL_2D;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinMaxPool2d:
@@ -434,8 +439,6 @@ class NNAPIDelegateKernel {
                 mapping_args.node->builtin_data);
             return ANEURALNETWORKS_MAX_POOL_2D;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinL2Pool2d:
@@ -446,8 +449,6 @@ class NNAPIDelegateKernel {
                 mapping_args.node->builtin_data);
             return ANEURALNETWORKS_L2_POOL_2D;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinConv2d:
@@ -469,8 +470,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_CONV_2D;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinDepthwiseConv2d:
@@ -487,8 +486,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_DEPTHWISE_CONV_2D;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinFullyConnected:
@@ -500,8 +497,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_FULLY_CONNECTED;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinSoftmax:
@@ -513,18 +508,11 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
             return ANEURALNETWORKS_SOFTMAX;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinReshape:
         if (version == 1 && node->inputs->size == 2) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_RESHAPE;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_RESHAPE>;
         }
         break;
       case kTfLiteBuiltinSqueeze:
@@ -540,20 +528,15 @@ class NNAPIDelegateKernel {
                 static_cast<uint32_t>(builtin->num_squeeze_dims));
             return ANEURALNETWORKS_SQUEEZE;
           };
-        } else {
-          return nullptr;
         }
+        break;
       case kTfLiteBuiltinL2Normalization: {
         auto builtin =
             reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
-        if (builtin->activation != kTfLiteActNone) {
-          // NNAPI does not support activations
-          return nullptr;
+        if (builtin->activation == kTfLiteActNone) {
+          return BasicMappingFn<ANEURALNETWORKS_L2_NORMALIZATION>;
         }
-        return [](const NNAPIOpMappingArgs& mapping_args)
-            -> ANeuralNetworksOperationType {
-          return ANEURALNETWORKS_L2_NORMALIZATION;
-        };
+        break;
       }
       case kTfLiteBuiltinLocalResponseNormalization:
         if (version == 1) {
@@ -567,10 +550,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
             return ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION;
           };
-        } else {
-          // TODO(miaowang): clean-up code and return early in the unsupported
-          // case.
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinLshProjection:
@@ -587,8 +566,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->type);
             return ANEURALNETWORKS_LSH_PROJECTION;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinConcatenation:
@@ -599,7 +576,7 @@ class NNAPIDelegateKernel {
           // NNAPI only support concatenating quantized tensor of the same
           // scale and offset.
           auto first_param = context->tensors[node->inputs->data[0]].params;
-          for (int i = 0; i < node->inputs->size; i++) {
+          for (int i = 1; i < node->inputs->size; i++) {
             auto curr_param = context->tensors[node->inputs->data[i]].params;
             if (curr_param.scale != first_param.scale ||
                 curr_param.zero_point != first_param.zero_point) {
@@ -614,68 +591,36 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->axis);
             return ANEURALNETWORKS_CONCATENATION;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinDequantize:
         if (version == 1) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_DEQUANTIZE;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_DEQUANTIZE>;
         }
         break;
       case kTfLiteBuiltinFloor:
         if (version == 1) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_FLOOR;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_FLOOR>;
         }
         break;
       case kTfLiteBuiltinRelu:
         if (version == 1) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_RELU;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_RELU>;
         }
         break;
       case kTfLiteBuiltinReluN1To1:
         if (version == 1) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_RELU1;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_RELU1>;
         }
         break;
       case kTfLiteBuiltinRelu6:
         if (version == 1) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_RELU6;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_RELU6>;
         }
         break;
       case kTfLiteBuiltinLogistic:
         if (version == 1) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_LOGISTIC;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_LOGISTIC>;
         }
         break;
       case kTfLiteBuiltinTanh:
@@ -683,12 +628,7 @@ class NNAPIDelegateKernel {
         if (version == 1 &&
             context->tensors[node->inputs->data[0]].type == kTfLiteFloat32) {
           // NNAPI only support float tanh.
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_TANH;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_TANH>;
         }
         break;
       case kTfLiteBuiltinSub:
@@ -702,8 +642,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_SUB;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinDiv:
@@ -717,8 +655,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_DIV;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinPad:
@@ -728,22 +664,12 @@ class NNAPIDelegateKernel {
           // NNAPI does not support specifying the padding value.
          // NNAPI pads physical zero for quantized tensors, so only delegate
          // float pad to NNAPI.
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_PAD;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_PAD>;
         }
         break;
       case kTfLiteBuiltinSpaceToBatchNd:
         if (version == 1 && kAndroidSdkVersion >= kMinSdkVersionForNNAPI11) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_SPACE_TO_BATCH_ND;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_SPACE_TO_BATCH_ND>;
         }
         break;
       case kTfLiteBuiltinStridedSlice:
@@ -758,8 +684,6 @@ class NNAPIDelegateKernel {
                 builtin->shrink_axis_mask);
             return ANEURALNETWORKS_STRIDED_SLICE;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinTranspose:
@@ -771,12 +695,7 @@ class NNAPIDelegateKernel {
             (node->inputs->size > 1) &&
             (context->tensors[node->inputs->data[1]].allocation_type ==
              kTfLiteMmapRo)) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_TRANSPOSE;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_TRANSPOSE>;
         }
         break;
       case kTfLiteBuiltinRnn:
@@ -799,8 +718,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_RNN;
           };
-        } else {
-          return nullptr;
         }
         break;
      case kTfLiteBuiltinSvdf:
@@ -827,8 +744,6 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(builtin->activation);
             return ANEURALNETWORKS_SVDF;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinLstm:
@@ -870,8 +785,6 @@ class NNAPIDelegateKernel {
 
             return ANEURALNETWORKS_LSTM;
           };
-        } else {
-          return nullptr;
         }
         break;
       case kTfLiteBuiltinMean:
@@ -888,36 +801,27 @@ class NNAPIDelegateKernel {
             mapping_args.builder->AddScalarInt32Operand(keep_dims);
             return ANEURALNETWORKS_MEAN;
           };
-        } else {
-          return nullptr;
         }
+        break;
       case kTfLiteBuiltinEmbeddingLookup:
         // NNAPI only support float32 values.
         if (version == 1 &&
             context->tensors[node->inputs->data[1]].type == kTfLiteFloat32) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_EMBEDDING_LOOKUP;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_EMBEDDING_LOOKUP>;
         }
         break;
       case kTfLiteBuiltinHashtableLookup:
         // NNAPI only support float32 output.
         if (version == 1 &&
             context->tensors[node->outputs->data[0]].type == kTfLiteFloat32) {
-          return [](const NNAPIOpMappingArgs& mapping_args)
-              -> ANeuralNetworksOperationType {
-            return ANEURALNETWORKS_HASHTABLE_LOOKUP;
-          };
-        } else {
-          return nullptr;
+          return BasicMappingFn<ANEURALNETWORKS_HASHTABLE_LOOKUP>;
         }
         break;
       default:
+        // All other operators are not mapped.
         return nullptr;
     }
+    return nullptr;
   }
 
   // Initialize the kernel (a NN model).
@@ -1090,7 +994,7 @@ class NNAPIDelegateKernel {
     outputs.reserve(output_tensors->size);
 
     size_t total_input_byte_size = 0;
-    // Make the TensorFlow lite inputs and outputs to ann_indices.
+    // Make the TensorFlow Lite inputs and outputs to ann_indices.
     for (int i : TfLiteIntArrayView(input_tensors)) {
       // Constant tensors are not NNAPI inputs.
       if (i != kOptionalTensor &&
@@ -1149,12 +1053,14 @@ TfLiteDelegate* NnApiDelegate() {
          return kTfLiteOk;
        }
 
+        // Allocate one element in vector already since TensorFlow Lite uses
+        // the first value as the number of nodes. The actual value will be set
+        // later, after the vector has been filled.
        std::vector<int> supported_nodes(1);
        // We don't care about all nodes_, we only care about ones in the
        // current plan.
        TfLiteIntArray* plan;
        TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan));
-        int total_supported_nodes = 0;
 
        // Check for every node if it is supported
        // TODO(b/80625235): Fix this to do more careful checking of versioning.
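Note: the reserved first element exists because the vector is later handed to
TFLite as a TfLiteIntArray via reinterpret_cast (see the registration hunk
below), and that struct stores its length in front of its data. A sketch of
the layout being relied on (the real definition lives in the TFLite C headers):

    // Approximate TfLiteIntArray layout: element 0 of the int vector plays
    // the role of size, the remaining elements are data.
    typedef struct {
      int size;
      int data[];  // flexible array member
    } TfLiteIntArray;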
@@ -1163,14 +1069,12 @@ TfLiteDelegate* NnApiDelegate() {
          TfLiteRegistration* registration;
          TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
              context, node_index, &node, &registration));
-          NNAPIDelegateKernel dummy_kernel;
-          if (dummy_kernel.Map(context, registration->builtin_code,
-                               registration->version, node)) {
+          if (NNAPIDelegateKernel::Map(context, registration->builtin_code,
+                                       registration->version, node)) {
            supported_nodes.push_back(node_index);
          }
-          total_supported_nodes += 1;
        }
-        // Put the size at the beginning of the array.
+        // First element in vector must be the number of actual nodes.
        supported_nodes[0] = supported_nodes.size() - 1;
 
        // NN API Delegate Registration (the pseudo kernel that will invoke NN
@@ -1208,11 +1112,10 @@ TfLiteDelegate* NnApiDelegate() {
 
        // Request TFLite to partition the graph and make kernels
        // for each independent node sub set a new nnapi_delegate_kernel.
-        context->ReplaceNodeSubsetsWithDelegateKernels(
+        return context->ReplaceNodeSubsetsWithDelegateKernels(
            context, nnapi_delegate_kernel,
            reinterpret_cast<TfLiteIntArray*>(supported_nodes.data()),
            delegate);
-        return kTfLiteOk;
      }};
 
  return &delegate;
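Note: returning the status of ReplaceNodeSubsetsWithDelegateKernels instead of
an unconditional kTfLiteOk lets partitioning failures propagate out of the
delegate's Prepare callback. Roughly, the interpreter side behaves like the
sketch below (illustrative, not from this diff):

    // TfLiteDelegate::Prepare returns TfLiteStatus; a non-ok result now
    // surfaces instead of being masked by kTfLiteOk.
    TfLiteStatus status = delegate->Prepare(context, delegate);
    if (status != kTfLiteOk) {
      // The delegate is not applied.
    }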