Fix pylint errors: wrap lines that exceed the line-length limit.
Change-Id: Idd96d7a41fd459c86ab0f6fbb63e5d543509145d
commit 507c754931 (parent dbc7faeecd)
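For orientation, a minimal sketch of the fix pattern repeated in every hunk below, assuming pylint's line-too-long check (C0301); the names and strings are illustrative only:

# Flagged by pylint C0301 when it exceeds the configured column limit:
LONG_CONSTANT = "a single string literal long enough to push this assignment past the limit"

# The fix used throughout this commit: a backslash continuation.
LONG_CONSTANT_WRAPPED = \
    "a single string literal long enough to push this assignment past the limit"

# An equivalent alternative: implicit continuation inside parentheses.
LONG_CONSTANT_PAREN = (
    "a single string literal long enough to push this assignment past the limit")

assert LONG_CONSTANT == LONG_CONSTANT_WRAPPED == LONG_CONSTANT_PAREN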
@@ -103,7 +103,8 @@ class OpsSet(enum.Enum):
   # significantly, but only slightly increases the model size.
   # WARNING: These ops are currently experimental and have not yet been finalized.
   # They are only compatible with CPU execution, and have not been optimized for production.
-  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
+  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = \
+      "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"

   def __str__(self):
     return self.value
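For context on the enum member being wrapped, a hedged sketch of how it is consumed through the public converter API; the saved-model path and calibration generator are placeholders, not part of this change:

import numpy as np
import tensorflow as tf

saved_model_dir = "/tmp/saved_model"  # placeholder path

def representative_gen():
  # Placeholder calibration data; match your model's real input shape.
  for _ in range(10):
    yield [np.random.rand(1, 8, 8, 3).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_gen
# Opt in to the experimental 16-bit-activation / 8-bit-weight scheme
# flagged by the WARNING comments above.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
tflite_model = converter.convert()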
@@ -251,7 +251,8 @@ class QuantizationMode(object):
             self.post_training_fp16())

   def activations_type(self):
-    return constants.INT16 if self._is_int16x8_target_required() else constants.INT8
+    return constants.INT16 if self._is_int16x8_target_required() \
+        else constants.INT8

   def converter_flags(self, inference_ty=None, inference_input_ty=None):
     """Flags to the converter."""
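A self-contained sketch of the selection this method expresses, with hypothetical string stand-ins for the `constants` dtypes (the real module exposes tf dtypes):

INT8 = "int8"    # stand-in for constants.INT8
INT16 = "int16"  # stand-in for constants.INT16

def activations_type(int16x8_target_required):
  # Mirrors the hunk above: int16 activations only when the experimental
  # 16x8 ops set is requested, otherwise int8.
  return INT16 if int16x8_target_required \
      else INT8

assert activations_type(True) == INT16
assert activations_type(False) == INT8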
@@ -262,7 +263,8 @@ class QuantizationMode(object):

     if self.training_time_int8_allow_float():
       return {
-          "inference_type": inference_ty if inference_ty else self.activations_type(),
+          "inference_type": inference_ty if inference_ty else \
+              self.activations_type(),
           "inference_input_type":
               inference_input_ty if inference_input_ty else constants.FLOAT,
           "post_training_quantize": False,  # disable dynamic range quantization
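A runnable sketch of the defaulting logic in this hunk, again with hypothetical string stand-ins for the `constants` dtypes:

FLOAT, INT8 = "float32", "int8"  # stand-ins for constants.FLOAT / constants.INT8

def converter_flags(inference_ty=None, inference_input_ty=None,
                    activations_ty=INT8):
  # Mirrors the hunk above: an explicit argument wins, otherwise the
  # quantization mode supplies the type.
  return {
      "inference_type": inference_ty if inference_ty else activations_ty,
      "inference_input_type":
          inference_input_ty if inference_input_ty else FLOAT,
      "post_training_quantize": False,  # disable dynamic range quantization
  }

assert converter_flags()["inference_type"] == INT8
assert converter_flags(inference_ty=FLOAT)["inference_type"] == FLOAT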
@@ -441,7 +443,8 @@ class TFLiteConverterBase(object):
     return _get_grappler_config(optimizers)

   def _calibrate_quantize_model(self, result, inference_input_type,
-                                inference_output_type, activations_type, allow_float):
+                                inference_output_type, activations_type,
+                                allow_float):
     """Calibrate and quantize the model."""
     if not isinstance(self.representative_dataset, RepresentativeDataset):
       self.representative_dataset = RepresentativeDataset(
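The `isinstance` branch above wraps a bare generator for callers; a brief sketch of the equivalent explicit wrapping through the public `tf.lite.RepresentativeDataset` alias (the sample data is a placeholder):

import numpy as np
import tensorflow as tf

def calibration_gen():
  yield [np.zeros((1, 8, 8, 3), dtype=np.float32)]  # placeholder sample

# Equivalent to what the branch above does for bare generators.
dataset = tf.lite.RepresentativeDataset(calibration_gen)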
@@ -882,11 +882,15 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   @parameterized.named_parameters(
       # Quantize model to Int8: with enable mlir
-      ('UseTfliteBuiltinsIntEnableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], True),
+      ('UseTfliteBuiltinsIntEnableMLIR',
+       [lite.OpsSet.TFLITE_BUILTINS_INT8], True),
       # Quantize model to Int8: with disable mlir
-      ('UseTfliteBuiltinsIntDisableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
+      ('UseTfliteBuiltinsIntDisableMLIR',
+       [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
       # Quantize model to Int16: with disable mlir
-      ('UseTfliteBuiltinsInt16DisableMLIR', [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8], False))
+      ('UseTfliteBuiltinsInt16DisableMLIR',
+       [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8],
+       False))
   def testCalibrateAndQuantizeBuiltinInt(self, supported_ops, enable_mlir):
     with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
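For readers unfamiliar with the decorator being reflowed here, a minimal self-contained sketch of `absl.testing.parameterized.named_parameters`, which generates one test per tuple; the class and test names are illustrative:

from absl.testing import parameterized

class OpsSetNameTest(parameterized.TestCase):

  @parameterized.named_parameters(
      # Each tuple: a test-name suffix followed by the test arguments.
      ('Int8', 'TFLITE_BUILTINS_INT8', False),
      ('Int16',
       'EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8', False))
  def test_name_is_uppercase(self, ops_set_name, enable_mlir):
    del enable_mlir  # unused in this sketch
    self.assertEqual(ops_set_name, ops_set_name.upper())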
@@ -78,7 +78,8 @@ class Calibrator(object):
         computation, useful when targeting an integer-only backend.
         If False, an error will be thrown if an operation cannot be
         quantized, otherwise the model will fallback to float ops.
-      activations_type: A tf.dtype representing the desired type for activations.
+      activations_type: A tf.dtype representing the desired type for
+        activations.
       resize_input: A boolean. True if the shape of the sample data is different
         from the input.
     """
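A minimal sketch of the docstring convention applied in this hunk, assuming the codebase's Google-style two-space hanging indent for wrapped `Args` descriptions; the function itself is a placeholder:

def calibrate(activations_type, resize_input):
  """Illustrative only: wrapped Args lines get a two-space hanging indent.

  Args:
    activations_type: A tf.dtype representing the desired type for
      activations.
    resize_input: A boolean. True if the shape of the sample data is
      different from the input.
  """
  del activations_type, resize_input  # sketch has no body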
@@ -96,7 +96,8 @@ class CalibratorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       ('UseActivationTypeInt8 - EnableMlirQuantizer', constants.INT8),
       # Activation type Int16
       ('UseActivationTypeInt16 - DisableEnableMlirQuantizer', constants.INT16))
-  def test_calibration_with_quantization_multiple_inputs(self, activations_type):
+  def test_calibration_with_quantization_multiple_inputs(self,
+                                                         activations_type):
     # Load multi add model from test data.
     # This model has 4 inputs of size (1, 8, 8, 3).
     model_path = resource_loader.get_path_to_datafile(
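The `get_path_to_datafile` call above is truncated in this hunk; it resolves a path relative to the test module's directory. A hedged sketch with a placeholder file name (the real argument is elided in the diff):

from tensorflow.python.platform import resource_loader

# Placeholder file name; the actual argument is truncated above.
model_path = resource_loader.get_path_to_datafile('test_data/example_model.bin')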