Add quantizer test for UnidirectionalSequenceLSTM.

PiperOrigin-RevId: 337864606
Change-Id: I32d315126edc886fff78b123f29dd60a09a2f3b5

commit b3695af15e (parent 4a988e4792)
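In outline, the new coverage added by this change (condensed here from the test hunks below; all names are taken from the diff itself) loads the calibrated UnidirectionalSequenceLSTM model, runs the quantizer over all operators with int8 activations and float32 inputs/outputs, and checks the result tensor-by-tensor and buffer-by-buffer against the checked-in quantized golden model via the new ExpectSameModels helper:

// Condensed sketch of the new test flow; see the full hunks below.
TEST_F(QuantizeUnidirectionalSequenceLSTMTest,
       VerifyUnidirectionalSequenceLSTM) {
  // Quantize the calibrated model loaded by the fixture.
  auto status = QuantizeModelAllOperators(
      &builder_, &model_, TensorType_FLOAT32, TensorType_FLOAT32, false,
      TensorType_INT8, &error_reporter_);
  ASSERT_EQ(kTfLiteOk, status);

  // Unpack the quantized golden model and compare it field-by-field
  // with the freshly quantized model.
  auto expected_fb_model =
      ReadModel(internal::kUnidirectionalSequenceLstmQuantized);
  ModelT expected_model;
  expected_fb_model->GetModel()->UnPackTo(&expected_model);
  ExpectSameModels(model_, expected_model);
}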
@@ -312,6 +312,8 @@ tf_cc_test(
         "//tensorflow/lite/tools/optimize:testdata/svdf_calibrated.bin",
         "//tensorflow/lite/tools/optimize:testdata/svdf_quantized.bin",
         "//tensorflow/lite/tools/optimize:testdata/transpose.bin",
+        "//tensorflow/lite/tools/optimize:testdata/unidirectional_sequence_lstm_calibrated.bin",
+        "//tensorflow/lite/tools/optimize:testdata/unidirectional_sequence_lstm_quantized.bin",
         "//tensorflow/lite/tools/optimize:testdata/unpack.bin",
     ],
     tags = [
@@ -829,12 +829,13 @@ TfLiteStatus QuantizeIntemediateTensors(ModelT* model,
           utils::QuantizeActivation(tensor, activations_type,
                                     error_reporter);
         } else {
-          TF_LITE_REPORT_ERROR(
-              error_reporter,
-              "Unable to find min/max value for output %d in %s in "
-              "subgraph %d, node: %d",
-              tensor, EnumNameBuiltinOperator(op_code), subgraph_idx,
-              op_idx);
+          TF_LITE_REPORT_ERROR(error_reporter,
+                               "Unable to find min/max value for "
+                               "intermediate tensor %d in %s in "
+                               "subgraph %d, node: %d",
+                               index_local,
+                               EnumNameBuiltinOperator(op_code),
+                               subgraph_idx, op_idx);
           return kTfLiteError;
         }
       } else if (input.second.number_of_bits == 16 &&
@@ -81,6 +81,44 @@ class QuantizeModelTest : public testing::Test {
   internal::FailOnErrorReporter error_reporter_;
 };

+void ExpectSameModels(const ModelT& model, const ModelT& expected_model) {
+  ASSERT_EQ(model.subgraphs.size(), expected_model.subgraphs.size());
+  for (size_t subgraph_idx = 0; subgraph_idx < model.subgraphs.size();
+       subgraph_idx++) {
+    const auto graph = model.subgraphs[subgraph_idx].get();
+    const auto expected_graph = expected_model.subgraphs[subgraph_idx].get();
+    ASSERT_EQ(graph->tensors.size(), expected_graph->tensors.size());
+    for (size_t i = 0; i < graph->tensors.size(); i++) {
+      const auto tensor = graph->tensors[i].get();
+      const auto expected_tensor = expected_graph->tensors[i].get();
+      EXPECT_EQ(tensor->buffer, expected_tensor->buffer);
+      EXPECT_EQ(tensor->is_variable, expected_tensor->is_variable);
+      EXPECT_EQ(tensor->shape, expected_tensor->shape);
+      EXPECT_EQ(tensor->name, expected_tensor->name);
+      EXPECT_EQ(tensor->type, expected_tensor->type);
+      const auto quantization_params = tensor->quantization.get();
+      const auto expected_quantization_params =
+          expected_tensor->quantization.get();
+      if (quantization_params != nullptr ||
+          expected_quantization_params != nullptr) {
+        EXPECT_NE(quantization_params, nullptr);
+        EXPECT_NE(expected_quantization_params, nullptr);
+        EXPECT_EQ(quantization_params->scale,
+                  expected_quantization_params->scale);
+        EXPECT_EQ(quantization_params->zero_point,
+                  expected_quantization_params->zero_point);
+      }
+    }
+  }
+  ASSERT_EQ(model.buffers.size(), expected_model.buffers.size());
+  for (size_t buffer_idx = 0; buffer_idx < model.buffers.size(); ++buffer_idx) {
+    const auto buffer = model.buffers[buffer_idx].get()->data;
+    const auto expected_buffer = expected_model.buffers[buffer_idx].get()->data;
+    EXPECT_EQ(buffer, expected_buffer);
+  }
+  // TODO(jianlijianli): Compare operators as well.
+}
+
 class QuantizeConvModelTest : public QuantizeModelTest,
                               public testing::WithParamInterface<TensorType> {
  protected:
@@ -1121,42 +1159,7 @@ TEST_F(QuantizeLSTMTest, VerifyLSTM) {
   ModelT expected_model;
   expected_read_only_model->UnPackTo(&expected_model);

-  // Comparison.
-  ASSERT_EQ(model_.subgraphs.size(), expected_model.subgraphs.size());
-  for (size_t subgraph_idx = 0; subgraph_idx < model_.subgraphs.size();
-       subgraph_idx++) {
-    const auto graph = model_.subgraphs[subgraph_idx].get();
-    const auto expected_graph = expected_model.subgraphs[subgraph_idx].get();
-    ASSERT_EQ(graph->tensors.size(), expected_graph->tensors.size());
-    for (size_t i = 0; i < graph->tensors.size(); i++) {
-      const auto tensor = graph->tensors[i].get();
-      const auto expected_tensor = expected_graph->tensors[i].get();
-      EXPECT_EQ(tensor->buffer, expected_tensor->buffer);
-      EXPECT_EQ(tensor->is_variable, expected_tensor->is_variable);
-      EXPECT_EQ(tensor->shape, expected_tensor->shape);
-      EXPECT_EQ(tensor->name, expected_tensor->name);
-      EXPECT_EQ(tensor->type, expected_tensor->type);
-      const auto quantization_params = tensor->quantization.get();
-      const auto expected_quantization_params =
-          expected_tensor->quantization.get();
-      if (quantization_params != nullptr ||
-          expected_quantization_params != nullptr) {
-        EXPECT_NE(quantization_params, nullptr);
-        EXPECT_NE(expected_quantization_params, nullptr);
-        EXPECT_EQ(quantization_params->scale,
-                  expected_quantization_params->scale);
-        EXPECT_EQ(quantization_params->zero_point,
-                  expected_quantization_params->zero_point);
-      }
-    }
-  }
-  ASSERT_EQ(model_.buffers.size(), expected_model.buffers.size());
-  for (size_t buffer_idx = 0; buffer_idx < model_.buffers.size();
-       ++buffer_idx) {
-    const auto buffer = model_.buffers[buffer_idx].get()->data;
-    const auto expected_buffer = expected_model.buffers[buffer_idx].get()->data;
-    EXPECT_EQ(buffer, expected_buffer);
-  }
+  ExpectSameModels(model_, expected_model);
 }

 class QuantizeLSTM2Test : public QuantizeModelTest {
@@ -1181,42 +1184,34 @@ TEST_F(QuantizeLSTM2Test, VerifyLSTM) {
   ModelT expected_model;
   expected_read_only_model->UnPackTo(&expected_model);

-  // Comparison.
-  ASSERT_EQ(model_.subgraphs.size(), expected_model.subgraphs.size());
-  for (size_t subgraph_idx = 0; subgraph_idx < model_.subgraphs.size();
-       subgraph_idx++) {
-    const auto graph = model_.subgraphs[subgraph_idx].get();
-    const auto expected_graph = expected_model.subgraphs[subgraph_idx].get();
-    ASSERT_EQ(graph->tensors.size(), expected_graph->tensors.size());
-    for (size_t i = 0; i < graph->tensors.size(); i++) {
-      const auto tensor = graph->tensors[i].get();
-      const auto expected_tensor = expected_graph->tensors[i].get();
-      EXPECT_EQ(tensor->buffer, expected_tensor->buffer);
-      EXPECT_EQ(tensor->is_variable, expected_tensor->is_variable);
-      EXPECT_EQ(tensor->shape, expected_tensor->shape);
-      EXPECT_EQ(tensor->name, expected_tensor->name);
-      EXPECT_EQ(tensor->type, expected_tensor->type);
-      const auto quantization_params = tensor->quantization.get();
-      const auto expected_quantization_params =
-          expected_tensor->quantization.get();
-      if (quantization_params != nullptr ||
-          expected_quantization_params != nullptr) {
-        EXPECT_NE(quantization_params, nullptr);
-        EXPECT_NE(expected_quantization_params, nullptr);
-        EXPECT_EQ(quantization_params->scale,
-                  expected_quantization_params->scale);
-        EXPECT_EQ(quantization_params->zero_point,
-                  expected_quantization_params->zero_point);
-      }
-    }
-  }
-  ASSERT_EQ(model_.buffers.size(), expected_model.buffers.size());
-  for (size_t buffer_idx = 0; buffer_idx < model_.buffers.size();
-       ++buffer_idx) {
-    const auto buffer = model_.buffers[buffer_idx].get()->data;
-    const auto expected_buffer = expected_model.buffers[buffer_idx].get()->data;
-    EXPECT_EQ(buffer, expected_buffer);
-  }
+  ExpectSameModels(model_, expected_model);
+}
+
+class QuantizeUnidirectionalSequenceLSTMTest : public QuantizeModelTest {
+ protected:
+  QuantizeUnidirectionalSequenceLSTMTest() {
+    input_model_ = ReadModel(internal::kUnidirectionalSequenceLstmCalibrated);
+    readonly_model_ = input_model_->GetModel();
+    readonly_model_->UnPackTo(&model_);
+  }
+};
+
+TEST_F(QuantizeUnidirectionalSequenceLSTMTest,
+       VerifyUnidirectionalSequenceLSTM) {
+  // Quantize model.
+  auto status = QuantizeModelAllOperators(
+      &builder_, &model_, TensorType_FLOAT32, TensorType_FLOAT32, false,
+      TensorType_INT8, &error_reporter_);
+  ASSERT_EQ(kTfLiteOk, status);
+
+  // Read expected model.
+  auto expected_fb_model =
+      ReadModel(internal::kUnidirectionalSequenceLstmQuantized);
+  auto expected_read_only_model = expected_fb_model->GetModel();
+  ModelT expected_model;
+  expected_read_only_model->UnPackTo(&expected_model);
+
+  ExpectSameModels(model_, expected_model);
 }

 class QuantizeSVDFTest : public QuantizeModelTest {
@@ -57,6 +57,11 @@ const char* kModelPack = "pack.bin";
 const char* kLstmCalibrated = "lstm_calibrated.bin";
 const char* kLstmQuantized = "lstm_quantized.bin";

+const char* kUnidirectionalSequenceLstmCalibrated =
+    "unidirectional_sequence_lstm_calibrated.bin";
+const char* kUnidirectionalSequenceLstmQuantized =
+    "unidirectional_sequence_lstm_quantized.bin";
+
 const char* kModelWithMinimumOp = "minimum.bin";
 const char* kModelWithMaximumOp = "maximum.bin";
 const char* kLstmCalibrated2 = "lstm_calibrated2.bin";
@@ -92,17 +92,20 @@ extern const char* kModelPack;
 extern const char* kLstmCalibrated;
 extern const char* kLstmQuantized;

+// Test model with LSTM op that has peephole, without layer norm, without
+// projection, without cifg.
+extern const char* kLstmCalibrated2;
+extern const char* kLstmQuantized2;
+
+extern const char* kUnidirectionalSequenceLstmCalibrated;
+extern const char* kUnidirectionalSequenceLstmQuantized;
+
 // Test model with a minimum op.
 extern const char* kModelWithMinimumOp;

 // Test model with a maximum op.
 extern const char* kModelWithMaximumOp;

-// Test model with LSTM op that has peephole, without layer norm, without
-// projection, without cifg.
-extern const char* kLstmCalibrated2;
-extern const char* kLstmQuantized2;
-
 // Test model with a transpose op.
 extern const char* kModelWithTranspose;

BIN  tensorflow/lite/tools/optimize/testdata/unidirectional_sequence_lstm_calibrated.bin (new file, binary not shown)
BIN  tensorflow/lite/tools/optimize/testdata/unidirectional_sequence_lstm_quantized.bin (new file, binary not shown)