Add quantization spec to the tfl.l2_normalization op

It has a fixed output scale of 1/128 (7.8125e-3), with zero point 0 for int8 and 128 for uint8.

PiperOrigin-RevId: 276628662
Change-Id: Iafbeacf1fb401aa47c5489c3a9487f0bd3f73ca9
This commit is contained in:
Feng Liu 2019-10-24 22:00:09 -07:00 committed by TensorFlower Gardener
parent 1aee2ca527
commit 44848dc0b3
2 changed files with 19 additions and 1 deletion

View File

@ -1181,7 +1181,12 @@ def TFL_InputOp : Op<TFL_Dialect, "pseudo_input", [SameOperandsAndResultType]> {
let results = (outs AnyTensor:$output);
}
def TFL_L2NormalizationOp : TFL_Op<"l2_normalization", [NoSideEffect]> {
def TFL_L2NormalizationOp : TFL_Op<"l2_normalization", [NoSideEffect,
// central_value = min_value / 2 + (max_value - 1) / 2 + 1
// zero_point = central_value
// scale = 1. / (central_value - min_value)
FixedResultScale<Int8UniformQuantizedType<0, 78125, -7>>,
FixedResultScale<UInt8UniformQuantizedType<128, 78125, -7>>]> {
let summary = "L2 Normalize Operator";
let description = [{

View File

@ -301,6 +301,19 @@ func @NotRescaleLogistic(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e
// CHECK: return %[[log]]
}
// CHECK-LABEL: QuantizeL2Norm
// Checks that a dequantize -> l2_normalization sequence gets a quantize/
// dequantize pair appended after the op, and that the inserted quantize uses
// the op's fixed output quantization parameters for uint8
// (scale = 7.8125e-3 = 1/128, zero point = 128), regardless of the input
// scale (1.0 here).
func @QuantizeL2Norm(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 1.0>>) -> tensor<1x6x6x16xf32> {
%0 = "tfl.dequantize"(%arg0) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 1.0>>) -> tensor<1x6x6x16xf32>
%1 = "tfl.l2_normalization"(%0) {fused_activation_function = "NONE"} : (tensor<1x6x6x16xf32>) -> tensor<1x6x6x16xf32>
return %1 : tensor<1x6x6x16xf32>
// CHECK: %[[in:.*]] = "tfl.dequantize"(%arg0)
// CHECK: %[[l2:.*]] = "tfl.l2_normalization"(%[[in]])
// CHECK: %[[q:.*]] = "tfl.quantize"(%[[l2]]) {qtype = tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>}
// CHECK: %[[dq:.*]] = "tfl.dequantize"(%[[q]])
// CHECK: return %[[dq]] : tensor<1x6x6x16xf32>
}
// CHECK-LABEL: NotQuantizeConcatConstantOperand
func @NotQuantizeConcatConstantOperand(%arg0: tensor<1x2xf32>) -> tensor<2x2xf32> {
%0 = constant dense<1.0> : tensor<1x2xf32>