Fix for pylint errors.

Change-Id: Idd96d7a41fd459c86ab0f6fbb63e5d543509145d
Author: Elena Zhelezina  2020-06-09 17:47:16 +01:00
parent dbc7faeecd
commit 507c754931
5 changed files with 28 additions and 18 deletions

View File

@@ -103,7 +103,8 @@ class OpsSet(enum.Enum):
   # significantly, but only slightly increases the model size.
   # WARNING: These ops are currently experimental and have not yet been finalized.
   # They are only compatible with CPU execution, and have not been optimized for production.
-  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
+  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = \
+      "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
 
   def __str__(self):
     return self.value

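For context, this is roughly how a user opts into the experimental 16x8 scheme declared above through the public converter API. A hedged sketch, assuming the enum is exported as tf.lite.OpsSet; the saved-model path and input shape are placeholders:

import numpy as np
import tensorflow as tf

def rep_data_gen():
  # Placeholder calibration data; shapes must match the real model inputs.
  for _ in range(100):
    yield [np.random.rand(1, 8, 8, 3).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/saved_model")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = rep_data_gen
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
tflite_model = converter.convert()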
View File

@@ -251,7 +251,8 @@ class QuantizationMode(object):
             self.post_training_fp16())
 
   def activations_type(self):
-    return constants.INT16 if self._is_int16x8_target_required() else constants.INT8
+    return constants.INT16 if self._is_int16x8_target_required() \
+        else constants.INT8
 
   def converter_flags(self, inference_ty=None, inference_input_ty=None):
     """Flags to the converter."""
@@ -262,7 +263,8 @@ class QuantizationMode(object):
     if self.training_time_int8_allow_float():
       return {
-          "inference_type": inference_ty if inference_ty else self.activations_type(),
+          "inference_type": inference_ty if inference_ty else \
+              self.activations_type(),
           "inference_input_type":
               inference_input_ty if inference_input_ty else constants.FLOAT,
           "post_training_quantize": False,  # disable dynamic range quantization
@@ -359,15 +361,15 @@ class QuantizationMode(object):
   def _is_int16x8_target_required(self):
     return bool(
-      set(self._target_spec.supported_ops).intersection([
-        OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
-      ]))
+        set(self._target_spec.supported_ops).intersection([
+            OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
+        ]))
 
   def _is_allow_float(self):
     return bool(
-      set(self._target_spec.supported_ops).intersection([
-        OpsSet.TFLITE_BUILTINS
-      ]))
+        set(self._target_spec.supported_ops).intersection([
+            OpsSet.TFLITE_BUILTINS
+        ]))
 
   def _any_optimization_enabled(self):
     return bool(
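An illustrative-only restatement of the two predicates above: each reduces to a set intersection over target_spec.supported_ops. The standalone functions are hypothetical; the OpsSet values are the ones used in this commit:

import tensorflow as tf

def is_int16x8_target_required(supported_ops):
  # True when the experimental 16x8 ops set was requested.
  return bool(set(supported_ops).intersection([
      tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
  ]))

def is_allow_float(supported_ops):
  # True when plain TFLITE_BUILTINS is also allowed, i.e. ops may stay float.
  return bool(set(supported_ops).intersection([tf.lite.OpsSet.TFLITE_BUILTINS]))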
@@ -441,7 +443,8 @@ class TFLiteConverterBase(object):
     return _get_grappler_config(optimizers)
 
   def _calibrate_quantize_model(self, result, inference_input_type,
-                                inference_output_type, activations_type, allow_float):
+                                inference_output_type, activations_type,
+                                allow_float):
     """Calibrate and quantize the model."""
     if not isinstance(self.representative_dataset, RepresentativeDataset):
       self.representative_dataset = RepresentativeDataset(
@@ -458,8 +461,8 @@ class TFLiteConverterBase(object):
       return _mlir_quantize(calibrated)
     else:
       return calibrate_quantize.calibrate_and_quantize(
-        self.representative_dataset.input_gen, inference_input_type,
-        inference_output_type, allow_float, activations_type)
+          self.representative_dataset.input_gen, inference_input_type,
+          inference_output_type, allow_float, activations_type)
 
   def _is_unknown_shapes_allowed(self):
     # Unknown dimensions are only allowed with the new converter.
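The representative_dataset consumed by the calibration path above is a thin wrapper around a user-supplied generator. A minimal sketch using the public tf.lite.RepresentativeDataset class; the input shape is an arbitrary assumption:

import numpy as np
import tensorflow as tf

def input_gen():
  # One list per calibration step, one array per model input.
  for _ in range(100):
    yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

rep_ds = tf.lite.RepresentativeDataset(input_gen)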
@@ -1992,7 +1995,7 @@ class TocoConverter(object):
   @classmethod
   @_deprecation.deprecated(
-    None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
+      None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
   def from_keras_model_file(cls,
                             model_file,
                             input_arrays=None,

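The decorator being re-indented above is TensorFlow's internal deprecation helper; a hedged sketch of the pattern (the wrapper function is hypothetical, and a None date yields a generic "in a future version" warning):

from tensorflow.python.util import deprecation as _deprecation

@_deprecation.deprecated(
    None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
def from_keras_model_file_compat(model_file):
  # Hypothetical stand-in; the real classmethod delegates to TFLiteConverter.
  raise NotImplementedError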
View File

@@ -882,11 +882,15 @@ class FromSessionTest(TestModels, parameterized.TestCase):
   @parameterized.named_parameters(
       # Quantize model to Int8: with enable mlir
-      ('UseTfliteBuiltinsIntEnableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], True),
+      ('UseTfliteBuiltinsIntEnableMLIR',
+       [lite.OpsSet.TFLITE_BUILTINS_INT8], True),
       # Quantize model to Int8: with disable mlir
-      ('UseTfliteBuiltinsIntDisableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
+      ('UseTfliteBuiltinsIntDisableMLIR',
+       [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
       # Quantize model to Int16: with disable mlir
-      ('UseTfliteBuiltinsInt16DisableMLIR', [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8], False))
+      ('UseTfliteBuiltinsInt16DisableMLIR',
+       [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8],
+       False))
   def testCalibrateAndQuantizeBuiltinInt(self, supported_ops, enable_mlir):
     with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()

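For readers unfamiliar with the harness used above: absl's parameterized.named_parameters generates one test method per tuple, taking the first element as a name suffix and passing the rest as arguments. A minimal self-contained sketch; the parameter values here are made up:

from absl.testing import parameterized

class ExampleTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ('EnableMLIR', True),
      ('DisableMLIR', False))
  def testFlag(self, enable_mlir):
    # Expands to testFlagEnableMLIR and testFlagDisableMLIR.
    self.assertIsInstance(enable_mlir, bool)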
View File

@@ -78,7 +78,8 @@ class Calibrator(object):
         computation, useful when targeting an integer-only backend.
         If False, an error will be thrown if an operation cannot be
         quantized, otherwise the model will fallback to float ops.
-      activations_type: A tf.dtype representing the desired type for activations.
+      activations_type: A tf.dtype representing the desired type for
+        activations.
       resize_input: A boolean. True if the shape of the sample data is different
         from the input.
     """

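A hedged usage sketch for the internal Calibrator API whose docstring is re-wrapped above. float_model (a float TFLite flatbuffer) is assumed; the argument order follows the calibrate_and_quantize call shown earlier in this commit:

import numpy as np
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.optimize import calibrator

def input_gen():
  for _ in range(10):
    yield [np.ones((1, 8, 8, 3), dtype=np.float32)]

quantizer = calibrator.Calibrator(float_model)
quantized = quantizer.calibrate_and_quantize(
    input_gen, constants.FLOAT, constants.FLOAT,
    False,            # allow_float
    constants.INT16)  # activations_type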
View File

@@ -96,7 +96,8 @@ class CalibratorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       ('UseActivationTypeInt8 - EnableMlirQuantizer', constants.INT8),
       # Activation type Int16
       ('UseActivationTypeInt16 - DisableEnableMlirQuantizer', constants.INT16))
-  def test_calibration_with_quantization_multiple_inputs(self, activations_type):
+  def test_calibration_with_quantization_multiple_inputs(self,
+                                                         activations_type):
     # Load multi add model from test data.
     # This model has 4 inputs of size (1, 8, 8, 3).
     model_path = resource_loader.get_path_to_datafile(
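The test above feeds a model with four inputs, so every calibration sample must be a list of four arrays. A minimal matching generator, with the count and shape taken from the test's own comment:

import numpy as np

def input_gen():
  for _ in range(10):
    # Four inputs, each of shape (1, 8, 8, 3).
    yield [np.ones((1, 8, 8, 3), dtype=np.float32) for _ in range(4)]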