Log the human-readable name of current types in "not supported" messages.

PiperOrigin-RevId: 305104719
Change-Id: I61ec15ad55b6d37d7cc78740a6d47efc1dbcc5ad
Author: Robert David (2020-04-06 13:28:45 -07:00), committed by TensorFlower Gardener
parent d120689f46
commit 76ac3a41aa
17 changed files with 52 additions and 70 deletions
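
Every hunk below applies the same substitution: the kernel-specific "not supported" message, which listed the accepted types by hand, is replaced with one uniform message that prints both the human-readable name of the offending tensor type (via TfLiteTypeGetName) and its numeric enum value. A schematic before/after of the pattern, as a sketch only; the surrounding switch and the `input` tensor are illustrative and not taken from any single file in this commit:

  switch (input->type) {
    case kTfLiteFloat32:
      // ... kernel-specific evaluation ...
      break;
    default:
      // Old style: a hand-maintained list of supported types plus the raw
      // enum value, e.g. "Only float32, uint8_t and int8_t input supported
      // currently, got %d."
      //
      // New style: one shared format string. TfLiteTypeGetName() converts the
      // TfLiteType enum into its printable name; the raw value is still
      // printed alongside it.
      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
                         TfLiteTypeGetName(input->type), input->type);
      return kTfLiteError;
  }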

View File

@@ -177,8 +177,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, &data,
                                                 input1, input2, output));
   } else {
-    TF_LITE_KERNEL_LOG(context,
-                       "Inputs and outputs not all float|uint8|int8 types.");
+    TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                       TfLiteTypeGetName(output->type), output->type);
     return kTfLiteError;
   }

View File

@@ -167,9 +167,8 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
       TF_LITE_FULLY_CONNECTED(int16_t);
       break;
     default:
-      TF_LITE_KERNEL_LOG(
-          context,
-          "Quantized FullyConnected expects output data type uint8 or int16");
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(output->type), output->type);
       return kTfLiteError;
   }
@@ -222,8 +221,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                         output);
     default:
-      TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.",
-                         filter->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(filter->type), filter->type);
       return kTfLiteError;
   }
   return kTfLiteOk;

View File

@@ -186,8 +186,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, &data,
                                                 input1, input2, output));
   } else {
-    TF_LITE_KERNEL_LOG(context,
-                       "Inputs and outputs not all float|uint8|int8 types.");
+    TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                       TfLiteTypeGetName(output->type), output->type);
     return kTfLiteError;
   }

View File

@@ -186,9 +186,8 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
       TF_LITE_FULLY_CONNECTED(int16_t);
       break;
     default:
-      TF_LITE_KERNEL_LOG(
-          context,
-          "Quantized FullyConnected expects output data type uint8 or int16");
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(output->type), output->type);
       return kTfLiteError;
   }
@@ -241,8 +240,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                         output);
     default:
-      TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.",
-                         filter->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input->type), input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;

View File

@@ -124,10 +124,8 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     }
     default:
-      TF_LITE_KERNEL_LOG(
-          context,
-          "Only float32, uint8_t and int8_t input supported currently, got %d.",
-          input->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input->type), input->type);
       return kTfLiteError;
   }
 }

View File

@@ -124,9 +124,8 @@ TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) {
                             requires_broadcast);
       break;
     default:
-      TF_LITE_KERNEL_LOG(
-          context, "Does not support type %d, requires bool|float|int|uint8",
-          input1->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input1->type), input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -160,9 +159,8 @@ TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) {
                             requires_broadcast);
       break;
     default:
-      TF_LITE_KERNEL_LOG(
-          context, "Does not support type %d, requires bool|float|int|uint8",
-          input1->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input1->type), input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -192,9 +190,8 @@ TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) {
                             requires_broadcast);
       break;
     default:
-      TF_LITE_KERNEL_LOG(context,
-                         "Does not support type %d, requires float|int|uint8",
-                         input1->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input1->type), input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -224,9 +221,8 @@ TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
                             requires_broadcast);
       break;
     default:
-      TF_LITE_KERNEL_LOG(context,
-                         "Does not support type %d, requires float|int|uint8",
-                         input1->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input1->type), input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -256,9 +252,8 @@ TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
                             requires_broadcast);
       break;
     default:
-      TF_LITE_KERNEL_LOG(context,
-                         "Does not support type %d, requires float|int|uint8",
-                         input1->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input1->type), input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -288,9 +283,8 @@ TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) {
                             requires_broadcast);
       break;
     default:
-      TF_LITE_KERNEL_LOG(context,
-                         "Does not support type %d, requires float|int|uint8",
-                         input1->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input1->type), input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;

View File

@@ -157,9 +157,8 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
       TF_LITE_FULLY_CONNECTED(int16_t);
       break;
     default:
-      TF_LITE_KERNEL_LOG(
-          context,
-          "Quantized FullyConnected expects output data type uint8 or int16");
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(output->type), output->type);
       return kTfLiteError;
   }
@@ -209,8 +208,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                         output);
     default:
-      TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.",
-                         filter->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input->type), input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;

View File

@@ -149,8 +149,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       EvalFloat(context, node, params, &data, input1, input2, output);
       break;
     default:
-      TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.",
-                         input1->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input1->type), input1->type);
       return kTfLiteError;
   }

View File

@@ -38,8 +38,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                         GetTensorData<float>(output));
       break;
     default:
-      TF_LITE_KERNEL_LOG(
-          context, "Neg only currently supports float32, got %d.", input->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input->type), input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;

View File

@@ -129,10 +129,8 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     }
     default:
-      TF_LITE_KERNEL_LOG(
-          context,
-          "Only float32, uint8_t and int8_t input supported currently, got %d.",
-          input->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input->type), input->type);
       return kTfLiteError;
   }
 }

View File

@@ -157,9 +157,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       }
       break;
     default:
-      TF_LITE_KERNEL_LOG(context,
-                         "Type %d is currently not supported "
-                         "by StridedSlice.",
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(op_context.input->type),
                          op_context.input->type);
       return kTfLiteError;
   }

View File

@@ -174,8 +174,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE_OK(context, EvalSubQuantized(context, node, params, &data,
                                                 input1, input2, output));
   } else {
-    TF_LITE_KERNEL_LOG(context,
-                       "Inputs and outputs not all float|uint8|int8 types.");
+    TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                       TfLiteTypeGetName(output->type), output->type);
     return kTfLiteError;
   }

View File

@@ -173,9 +173,8 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
       TF_LITE_FULLY_CONNECTED(int16_t);
       break;
     default:
-      TF_LITE_KERNEL_LOG(
-          context,
-          "Quantized FullyConnected expects output data type uint8 or int16");
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(output->type), output->type);
       return kTfLiteError;
   }
@@ -244,8 +243,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                         output);
     default:
-      TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.",
-                         filter->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(filter->type), filter->type);
       return kTfLiteError;
   }
   return kTfLiteOk;

View File

@@ -198,10 +198,8 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
       return SoftmaxQuantized(context, input, output, op_data);
     }
     default:
-      TF_LITE_KERNEL_LOG(
-          context,
-          "Only float32, uint8_t and int8_t input supported currently, got %d.",
-          input->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input->type), input->type);
       return kTfLiteError;
   }
 }

View File

@@ -235,8 +235,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                         bias, output);
     default:
-      TF_LITE_KERNEL_LOG(context, "Type %d not currently supported.",
-                         filter->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(filter->type), filter->type);
       return kTfLiteError;
   }
   return kTfLiteOk;

View File

@@ -118,9 +118,8 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     }
     default:
-      TF_LITE_KERNEL_LOG(context,
-                         "Only int8_t input supported currently, got %d.",
-                         input->type);
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(input->type), input->type);
       return kTfLiteError;
   }
 }

View File

@@ -288,8 +288,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const int memory_size = weights_time->dims->data[1];
   if (input->type != kTfLiteInt8) {
-    TF_LITE_KERNEL_LOG(context,
-                       "HiFi Mini kernel SVDF only supports full integer.");
+    TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                       TfLiteTypeGetName(input->type), input->type);
     return kTfLiteError;
   }