[Lite] Support Int8 Unpack Operator

Added int8 quantization support for the Unpack operator.
Added relevant tests.
Mohamed Nour Abouelseoud 2019-08-27 16:29:28 +01:00
parent 875f14e490
commit 51cec9092e
6 changed files with 63 additions and 1 deletion
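
For context: TFLite's Unpack (the op behind tf.unstack) splits a tensor along a given axis into N separate output tensors. Quantizing it to int8 is cheap because, as the operator property added below requires, every output reuses the input's scale and zero point, so the op stays a pure data movement with no requantization step. A minimal sketch of that invariant (illustrative only, not code from this commit):

#include <cstdint>
#include <vector>

// Unpack a row-major [n, m] int8 tensor along axis 0 into n tensors of
// shape [m]. Because the input and all outputs share quantization
// parameters, real = scale * (q - zero_point) is preserved by a raw copy.
std::vector<std::vector<int8_t>> UnpackAxis0(const std::vector<int8_t>& input,
                                             int n, int m) {
  std::vector<std::vector<int8_t>> outputs(n, std::vector<int8_t>(m));
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < m; ++j) {
      outputs[i][j] = input[i * m + j];  // no requantization needed
    }
  }
  return outputs;
}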

tensorflow/lite/tools/optimize/BUILD

@@ -242,6 +242,7 @@ tf_cc_test(
         "//tensorflow/lite/tools/optimize:testdata/single_conv_weights_min_minus_127_max_plus_127.bin",
         "//tensorflow/lite/tools/optimize:testdata/single_softmax_min_minus_5_max_plus_5.bin",
         "//tensorflow/lite/tools/optimize:testdata/split.bin",
+        "//tensorflow/lite/tools/optimize:testdata/unpack.bin",
     ],
     tags = [
         "tflite_not_portable_android",

tensorflow/lite/tools/optimize/operator_property.cc

@@ -70,9 +70,9 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
       property.version = 2;
       break;
     case BuiltinOperator_SPLIT:
-      property.arbitrary_outputs = true;
       // We skip input 0 since it is the split dim which is not real valued.
       property.inputs = {{1, {}}};
+      property.arbitrary_outputs = true;
       property.restrict_same_input_output_scale = true;
       property.version = 2;
       break;
@@ -383,6 +383,12 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
       property.restrict_same_input_output_scale = true;
       property.version = 2;
       break;
+    case BuiltinOperator_UNPACK:
+      property.inputs = {{0, {}}};
+      property.arbitrary_outputs = true;
+      property.restrict_same_input_output_scale = true;
+      property.version = 1;
+      break;
     default:
       // No quantized implementation exists for this operation.
       property.quantizable = false;
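
The three properties set for BuiltinOperator_UNPACK encode how the quantizer must treat the op: inputs = {{0, {}}} marks input 0 as a real-valued tensor to quantize, arbitrary_outputs allows any number of outputs, and restrict_same_input_output_scale forces every output to share the input's quantization parameters. A minimal sketch of how a pass could apply that last constraint (illustrative; this is not the actual quantize_model.cc implementation):

#include "tensorflow/lite/schema/schema_generated.h"

// For an op whose property sets restrict_same_input_output_scale and
// arbitrary_outputs, copy the already-chosen input quantization
// parameters onto every output tensor.
void PropagateInputScaleToOutputs(tflite::SubGraphT* subgraph,
                                  tflite::OperatorT* op) {
  const tflite::TensorT* input = subgraph->tensors[op->inputs[0]].get();
  for (int32_t output_index : op->outputs) {
    tflite::TensorT* output = subgraph->tensors[output_index].get();
    // Assumes output->quantization was already allocated by the pass.
    output->quantization->scale = input->quantization->scale;
    output->quantization->zero_point = input->quantization->zero_point;
  }
}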

tensorflow/lite/tools/optimize/quantize_model_test.cc

@@ -1124,6 +1124,56 @@ TEST_F(QuantizeCustomOpTest, VerifyMixedQuantization) {
   }
 }
 
+class QuantizeUnpackTest : public QuantizeModelTest {
+ protected:
+  QuantizeUnpackTest() {
+    input_model_ = ReadModel(internal::kModelWithUnpack);
+    readonly_model_ = input_model_->GetModel();
+    readonly_model_->UnPackTo(&model_);
+  }
+};
+
+TEST_F(QuantizeUnpackTest, VerifyUnpack) {
+  auto status = QuantizeModel(&builder_, &model_, &error_reporter_);
+
+  ASSERT_EQ(kTfLiteOk, status);
+
+  const auto subgraph = model_.subgraphs[0].get();
+  auto op = subgraph->operators[1].get();
+
+  auto float_graph = readonly_model_->subgraphs()->Get(0);
+
+  ASSERT_EQ(model_.operator_codes[op->opcode_index].get()->builtin_code,
+            BuiltinOperator_UNPACK);
+
+  // Get the unpack input and output tensors.
+  auto unpack_input = subgraph->tensors[op->inputs[0]].get();
+  auto unpack_output_0 = subgraph->tensors[op->outputs[0]].get();
+  auto unpack_output_1 = subgraph->tensors[op->outputs[1]].get();
+
+  // Verify the unpack input is quantized.
+  ASSERT_EQ(float_graph->tensors()->Get(op->inputs[0])->type(),
+            TensorType_FLOAT32);
+  EXPECT_EQ(unpack_input->type, TensorType_INT8);
+
+  // The model should only have one input and two outputs.
+  EXPECT_EQ(subgraph->inputs.size(), 1);
+  EXPECT_EQ(subgraph->outputs.size(), 2);
+
+  // Ensure quantization parameters before and after unpack
+  // are preserved after quantization for all outputs of unpack.
+  EXPECT_FLOAT_EQ(unpack_input->quantization->scale[0],
+                  unpack_output_0->quantization->scale[0]);
+  EXPECT_FLOAT_EQ(unpack_input->quantization->scale[0],
+                  unpack_output_1->quantization->scale[0]);
+  EXPECT_FLOAT_EQ(unpack_input->quantization->zero_point[0],
+                  unpack_output_0->quantization->zero_point[0]);
+  EXPECT_FLOAT_EQ(unpack_input->quantization->zero_point[0],
+                  unpack_output_1->quantization->zero_point[0]);
+}
+
 }  // namespace
 }  // namespace optimize
 }  // namespace tflite
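
The test mirrors the existing QuantizeModelTest fixtures: quantize the float model, find the unpack op, and assert that its input was converted to int8 while both outputs inherit the input's exact scale and zero point. Assuming the tf_cc_test rule in the BUILD hunk above defines the usual quantize_model_test target (the target name is not shown in this diff), the test can presumably be run with bazel test //tensorflow/lite/tools/optimize:quantize_model_test.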

tensorflow/lite/tools/optimize/test_util.cc

@@ -52,6 +52,8 @@ const char* kModelSplit = "split.bin";
 const char* kLstmCalibrated = "lstm_calibrated.bin";
 const char* kLstmQuantized = "lstm_quantized.bin";
 
+const char* kModelWithUnpack = "unpack.bin";
+
 int FailOnErrorReporter::Report(const char* format, va_list args) {
   char buf[1024];
   vsnprintf(buf, sizeof(buf), format, args);

tensorflow/lite/tools/optimize/test_util.h

@@ -80,6 +80,9 @@ extern const char* kModelSplit;
 extern const char* kLstmCalibrated;
 extern const char* kLstmQuantized;
 
+// Test model with an unpack op.
+extern const char* kModelWithUnpack;
+
 // An error reporter that fails on testing.
 class FailOnErrorReporter : public ErrorReporter {
  public:

tensorflow/lite/tools/optimize/testdata/unpack.bin (new binary file; not shown)