Rollback: Report unsupported operators when the given operators have unknown dimensions

PiperOrigin-RevId: 356182411
Change-Id: I4b91e2935923b201c8bd6b6404e997a56a799117
This commit is contained in:
Jaesung Chung 2021-02-07 20:25:09 -08:00 committed by TensorFlower Gardener
parent eb88a73aa8
commit fcbc424700
7 changed files with 9 additions and 153 deletions

View File

@ -47,15 +47,4 @@ TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
return kTfLiteOk;
}
// Returns true when any entry of the tensor's shape signature is the
// dynamic-dimension sentinel (-1). Shape signatures are compiled out of
// static-memory builds, so those builds always report false.
bool HasUnspecifiedDimension(const TfLiteTensor* tensor) {
#ifndef TF_LITE_STATIC_MEMORY
  const auto* signature = tensor->dims_signature;
  if (signature == nullptr) return false;
  for (int d = 0; d < signature->size; ++d) {
    if (signature->data[d] == -1) return true;
  }
#endif
  return false;
}
} // namespace tflite

View File

@ -23,9 +23,6 @@ namespace tflite {
// Resets a variable tensor to the default value.
TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor);
// Returns true when the given tensor has unknown dimensions.
bool HasUnspecifiedDimension(const TfLiteTensor* tensor);
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_

View File

@ -162,7 +162,6 @@ cc_library(
"//tensorflow/lite/delegates:utils",
"//tensorflow/lite:kernel_api",
"//tensorflow/lite:util",
"//tensorflow/lite/core/api",
"//tensorflow/lite/c:common",
"//tensorflow/lite/delegates/gpu/common/transformations:model_transformations",
"//tensorflow/lite/kernels:kernel_util",

View File

@ -33,7 +33,6 @@ limitations under the License.
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
#include "tensorflow/lite/delegates/gpu/common/custom_parsers.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/lstm_parser.h"
@ -2412,24 +2411,6 @@ std::unique_ptr<TFLiteOperationParser> NewOperationParser(
// Decides whether the GPU delegate can take the given node. Nodes that touch
// any tensor with an unknown (dynamic) dimension are rejected up front;
// otherwise the matching operation parser makes the call.
absl::Status IsSupported(const TfLiteContext* context, TfLiteNode* node,
                         const TfLiteRegistration* registration,
                         bool allow_quant_ops = false) {
  // Report it unsupported if there are unknown shapes in input/output tensors.
  auto has_dynamic_tensor = [context](const TfLiteIntArray* ids) {
    for (int i = 0; i < ids->size; ++i) {
      if (HasUnspecifiedDimension(&context->tensors[ids->data[i]])) {
        return true;
      }
    }
    return false;
  };
  if (has_dynamic_tensor(node->inputs)) {
    return absl::UnimplementedError(
        "one of input tensors has unknown dimension(s)");
  }
  if (has_dynamic_tensor(node->outputs)) {
    return absl::UnimplementedError(
        "one of output tensors has unknown dimension(s)");
  }
  return NewOperationParser(registration, allow_quant_ops)
      ->IsSupported(context, node, registration);
}

View File

@ -1248,122 +1248,6 @@ TEST(ModelBuilderTest, GetOpsToReplace_AllowQuantOps) {
TfLiteIntArrayFree(ops_to_replace_without_quant);
}
// Test fixture graph with three nodes (Dequantize -> binary op, plus a
// parallel binary op) over five tensors. Tensors t0 and t3 carry a {-1}
// shape signature, i.e. an unknown dimension, so every node in the graph
// touches at least one dynamic tensor.
class InterpreterDynamicDimensions : public DelegatedInterpreter {
 public:
  InterpreterDynamicDimensions() : DelegatedInterpreter(3) {
    // Shared builtin data for the two binary-op nodes below.
    // NOTE(review): the same malloc'd pointer is handed to two nodes — if the
    // interpreter frees builtin_data per node this double-frees; confirm the
    // interpreter's ownership rules for builtin_data.
    void* builtin_data = malloc(sizeof(int));
    EXPECT_EQ(interpreter_.AddTensors(5), kTfLiteOk);
    EXPECT_EQ(interpreter_.SetInputs({0, 2}), kTfLiteOk);
    EXPECT_EQ(interpreter_.SetOutputs({3, 4}), kTfLiteOk);
    // Add a Dequantize Node with uint8 input.
    const TfLiteRegistration reg_dequant0 = {/*init=*/nullptr,
                                             /*free=*/nullptr,
                                             /*prepare=*/nullptr,
                                             /*invoke=*/nullptr,
                                             /*profiling_string=*/nullptr,
                                             kTfLiteBuiltinDequantize};
    EXPECT_EQ(interpreter_.AddNodeWithParameters(
                  /*inputs=*/{0}, /*outputs=*/{1}, /*init_data=*/nullptr,
                  /*init_data_size=*/0, /*builtin_data=*/nullptr,
                  /*registration=*/&reg_dequant0),
              kTfLiteOk);
    // Registration shared by both binary nodes. Its init allocates an int and
    // free deletes it. NOTE(review): the variable is named reg_add and the
    // comments below say "Add", but the builtin code is kTfLiteBuiltinSub —
    // presumably only the builtin code matters for this test; verify.
    const TfLiteRegistration reg_add = {
        [](TfLiteContext* context, const char* buffer, size_t length) {
          return reinterpret_cast<void*>(new int(1));
        },
        [](TfLiteContext* context, void* buffer) {
          delete reinterpret_cast<int*>(buffer);
        },
        nullptr,
        nullptr,
        nullptr,
        kTfLiteBuiltinSub};
    // Add an Add node that contains a dynamic output (t3 has a -1 signature).
    EXPECT_EQ(interpreter_.AddNodeWithParameters(
                  /*inputs=*/{1, 2}, /*outputs=*/{3}, /*init_data=*/nullptr,
                  /*init_data_size=*/0,
                  /*builtin_data=*/builtin_data,
                  /*registration=*/&reg_add),
              kTfLiteOk);
    // Add an Add node that contains a dynamic input (t0 has a -1 signature).
    EXPECT_EQ(interpreter_.AddNodeWithParameters(
                  /*inputs=*/{0, 2}, /*outputs=*/{4}, /*init_data=*/nullptr,
                  /*init_data_size=*/0,
                  /*builtin_data=*/builtin_data,
                  /*registration=*/&reg_add),
              kTfLiteOk);
    // All tensors are concretely shaped {1}; t0 and t3 additionally get a
    // {-1} dims_signature marking one dimension as unknown.
    const std::vector<int> dims = {1};
    const std::vector<int> dims_signature = {-1};
    TfLiteQuantizationParams quantization;
    EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
                  0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false,
                  &dims_signature),
              kTfLiteOk);
    EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
                  1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization,
                  false, &dims),
              kTfLiteOk);
    EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
                  2, TfLiteType::kTfLiteFloat32, "t2", dims, quantization,
                  false, &dims),
              kTfLiteOk);
    EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
                  3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization,
                  false, &dims_signature),
              kTfLiteOk);
    EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
                  4, TfLiteType::kTfLiteFloat32, "t4", dims, quantization,
                  false, &dims),
              kTfLiteOk);
    // Execution plan runs the three nodes in insertion order.
    exec_plan()->data[0] = 0;
    exec_plan()->data[1] = 1;
    exec_plan()->data[2] = 2;
  }
};
// File-scope fixture shared by the test below; referenced from capture-less
// lambdas installed as TfLiteContext function pointers, which cannot capture
// a local. No delete is visible — presumably leaked for the test binary's
// lifetime by design.
InterpreterDynamicDimensions* interpreter_dynamic_dimensions =
    new InterpreterDynamicDimensions();
// Verifies that GetOpsToReplace selects no ops when the graph's tensors carry
// unknown (-1) dimensions in their shape signatures.
TEST(ModelBuilderTest, GetOpsToReplacePrunesDynamicDimensions) {
  TfLiteContext* context = interpreter_dynamic_dimensions->context();
  // Stub the context callbacks to serve data from the file-scope fixture;
  // they must be capture-less to convert to plain function pointers.
  context->GetExecutionPlan = [](struct TfLiteContext* context,
                                 TfLiteIntArray** execution_plan) {
    *execution_plan = interpreter_dynamic_dimensions->exec_plan();
    return kTfLiteOk;
  };
  context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
                                       TfLiteNode** node,
                                       TfLiteRegistration** registration) {
    *node = interpreter_dynamic_dimensions->node(node_index);
    *registration = interpreter_dynamic_dimensions->registration(node_index);
    return kTfLiteOk;
  };
  context->PreviewDelegatePartitioning =
      [](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
         TfLiteDelegateParams** partition_params_array, int* num_partitions) {
        // No selected nodes.
        EXPECT_EQ(nodes_to_replace->size, 0);
        *partition_params_array = nullptr;
        *num_partitions = 0;
        return kTfLiteOk;
      };
  TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
  // Reject all the ops since they are based on dynamic dimensions.
  EXPECT_EQ(ops_to_replace->size, 0);
  TfLiteIntArrayFree(ops_to_replace);
}
} // namespace
} // namespace gpu
} // namespace tflite

View File

@ -34,7 +34,6 @@ cc_library(
"//tensorflow/lite:minimal_logging",
"//tensorflow/lite:util",
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
"//tensorflow/lite/delegates:utils",
"//tensorflow/lite/kernels:kernel_util",
"//tensorflow/lite/nnapi:nnapi_implementation",
@ -69,7 +68,6 @@ cc_library(
"//tensorflow/lite:minimal_logging",
"//tensorflow/lite:util",
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
"//tensorflow/lite/delegates:utils",
"//tensorflow/lite/kernels:kernel_util",
"//tensorflow/lite/nnapi:nnapi_implementation",

View File

@ -49,7 +49,6 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include "tensorflow/lite/delegates/utils.h"
@ -381,6 +380,15 @@ bool IsHybridOperator(const TfLiteContext* context, int builtin_code,
}
}
// Returns true when the tensor's shape signature marks at least one
// dimension as dynamic (encoded as -1). Tensors without a signature are
// treated as fully specified.
bool HasUnspecifiedDimension(const TfLiteTensor* tensor) {
  const auto* signature = tensor->dims_signature;
  if (signature == nullptr) return false;
  for (int d = 0; d < signature->size; ++d) {
    if (signature->data[d] == -1) return true;
  }
  return false;
}
ANeuralNetworksOperandType ConvertTensorTypeToNNType(
const TfLiteTensor* tensor, TfLiteType ann_type_equivalent) {
int32_t nn_type = 0;