Quantization propagation shouldn't change the graph if the value has been quantized

Since the same pass will be used for post-training quantization and partial
quantization, we should allow the quantization pass to handle these cases.

PiperOrigin-RevId: 275334735
Change-Id: I66a01cc07454118f3cf9079bb37e07cc8e6f3704
This commit is contained in:
Feng Liu 2019-10-17 14:06:33 -07:00 committed by TensorFlower Gardener
parent b700b798ba
commit b78675a3bd
2 changed files with 13 additions and 1 deletion
tensorflow/compiler/mlir/lite

View File

@ -688,7 +688,10 @@ bool QuantizationDriver::PropagateParams() {
auto key = std::make_pair(8, is_signed_);
auto &restricted_outputs = spec->restricted_output_params[key];
for (int i = 0, e = restricted_outputs.size(); i != e; ++i) {
changed |= SetResultParams(op, i, restricted_outputs[i]);
// The restricted output params entry can be nullptr if the result has already been quantized.
if (auto params = restricted_outputs[i]) {
changed |= SetResultParams(op, i, params);
}
}
for (auto &it : spec->biases_params) {

View File

@ -292,6 +292,15 @@ func @QuantizeLogistic(tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>
// CHECK: return %3 : tensor<1x6x6x16xf32>
}
// CHECK-LABEL: NotRescaleLogistic
func @NotRescaleLogistic(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>> {
%0 = "tfl.logistic"(%arg0) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>>
return %0 : tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>>
// CHECK: %[[log:.*]] = "tfl.logistic"(%arg0)
// CHECK: return %[[log]]
}
// CHECK-LABEL: NotQuantizeConcatConstantOperand
func @NotQuantizeConcatConstantOperand(%arg0: tensor<1x2xf32>) -> tensor<2x2xf32> {
%0 = constant dense<1.0> : tensor<1x2xf32>