Fix for pylint errors.
Change-Id: Idd96d7a41fd459c86ab0f6fbb63e5d543509145d
parent dbc7faeecd
commit 507c754931
@@ -103,7 +103,8 @@ class OpsSet(enum.Enum):
   # significantly, but only slightly increases the model size.
   # WARNING: These ops are currently experimental and have not yet been finalized.
   # They are only compatible with CPU execution, and have not been optimized for production.
-  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
+  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = \
+      "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"

   def __str__(self):
     return self.value
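For context, the enum member wrapped above is the user-facing opt-in for the experimental 16x8 scheme (int16 activations, int8 weights). A minimal sketch of how a caller would select it; the saved-model path, input shape, and sample count are illustrative assumptions, not part of this change:

import numpy as np
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model("./saved_model")  # hypothetical path
converter.optimizations = [tf.lite.Optimize.DEFAULT]

def representative_dataset():
  # Yield sample inputs matching the model's input signature (shape assumed).
  for _ in range(100):
    yield [np.random.rand(1, 8, 8, 3).astype(np.float32)]

converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
tflite_model = converter.convert()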
@@ -251,7 +251,8 @@ class QuantizationMode(object):
             self.post_training_fp16())

   def activations_type(self):
-    return constants.INT16 if self._is_int16x8_target_required() else constants.INT8
+    return constants.INT16 if self._is_int16x8_target_required() \
+        else constants.INT8

   def converter_flags(self, inference_ty=None, inference_input_ty=None):
     """Flags to the converter."""
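The backslash continuation above satisfies pylint's line-length limit; wrapping the conditional in parentheses is an equivalent fix that avoids explicit continuations (a sketch, not part of this change):

def activations_type(self):
  return (constants.INT16 if self._is_int16x8_target_required()
          else constants.INT8)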
@@ -262,7 +263,8 @@ class QuantizationMode(object):

     if self.training_time_int8_allow_float():
       return {
-          "inference_type": inference_ty if inference_ty else self.activations_type(),
+          "inference_type": inference_ty if inference_ty else \
+              self.activations_type(),
           "inference_input_type":
               inference_input_ty if inference_input_ty else constants.FLOAT,
           "post_training_quantize": False,  # disable dynamic range quantization
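For reference, with both arguments left as None the branch above evaluates to a dict along these lines (reconstructed from the visible lines only; keys in the elided rest of the method are omitted):

{
    "inference_type": constants.INT8,  # constants.INT16 when the 16x8 target is set
    "inference_input_type": constants.FLOAT,
    "post_training_quantize": False,  # disable dynamic range quantization
}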
@@ -359,15 +361,15 @@ class QuantizationMode(object):

   def _is_int16x8_target_required(self):
     return bool(
         set(self._target_spec.supported_ops).intersection([
             OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
         ]))

   def _is_allow_float(self):
     return bool(
         set(self._target_spec.supported_ops).intersection([
             OpsSet.TFLITE_BUILTINS
         ]))

   def _any_optimization_enabled(self):
     return bool(
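The bool(set(...).intersection([...])) pattern in these helpers is a membership test against target_spec.supported_ops; a standalone equivalent for illustration:

# Equivalent membership check (illustrative only).
supported = set(self._target_spec.supported_ops)
is_int16x8 = OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 in supported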
@@ -441,7 +443,8 @@ class TFLiteConverterBase(object):
     return _get_grappler_config(optimizers)

   def _calibrate_quantize_model(self, result, inference_input_type,
-                                inference_output_type, activations_type, allow_float):
+                                inference_output_type, activations_type,
+                                allow_float):
     """Calibrate and quantize the model."""
     if not isinstance(self.representative_dataset, RepresentativeDataset):
       self.representative_dataset = RepresentativeDataset(
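The isinstance check above lets callers assign either a bare generator callable or an explicit RepresentativeDataset; both assignments below end up equivalent (the generator name and input shape are hypothetical):

import numpy as np
import tensorflow as tf

def calibration_gen():
  for _ in range(10):
    yield [np.zeros((1, 8, 8, 3), np.float32)]

converter.representative_dataset = calibration_gen  # wrapped internally
converter.representative_dataset = tf.lite.RepresentativeDataset(calibration_gen)  # explicit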
@@ -458,8 +461,8 @@ class TFLiteConverterBase(object):
       return _mlir_quantize(calibrated)
     else:
       return calibrate_quantize.calibrate_and_quantize(
           self.representative_dataset.input_gen, inference_input_type,
           inference_output_type, allow_float, activations_type)

   def _is_unknown_shapes_allowed(self):
     # Unknown dimensions are only allowed with the new converter.
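The _mlir_quantize branch corresponds to the converter's experimental MLIR-based quantizer; in TF releases of this period users toggle it through a converter attribute (attribute name stated as an assumption, not taken from this diff):

converter.experimental_new_quantizer = True   # route through the MLIR quantizer
converter.experimental_new_quantizer = False  # use calibrate_and_quantize instead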
@@ -1992,7 +1995,7 @@ class TocoConverter(object):

   @classmethod
   @_deprecation.deprecated(
       None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
   def from_keras_model_file(cls,
                             model_file,
                             input_arrays=None,
@@ -882,11 +882,15 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   @parameterized.named_parameters(
       # Quantize model to Int8: with enable mlir
-      ('UseTfliteBuiltinsIntEnableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], True),
+      ('UseTfliteBuiltinsIntEnableMLIR',
+       [lite.OpsSet.TFLITE_BUILTINS_INT8], True),
       # Quantize model to Int8: with disable mlir
-      ('UseTfliteBuiltinsIntDisableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
+      ('UseTfliteBuiltinsIntDisableMLIR',
+       [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
       # Quantize model to Int16: with disable mlir
-      ('UseTfliteBuiltinsInt16DisableMLIR', [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8], False))
+      ('UseTfliteBuiltinsInt16DisableMLIR',
+       [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8],
+       False))
   def testCalibrateAndQuantizeBuiltinInt(self, supported_ops, enable_mlir):
     with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
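For readers unfamiliar with absl's parameterized.named_parameters, used above: the first element of each tuple names the generated test case and the remaining elements become arguments, so the decorator produces three independent tests. A minimal self-contained sketch:

from absl.testing import parameterized

class BitWidthTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ('Int8', 8),
      ('Int16', 16))
  def test_bit_width(self, bits):
    # Runs as test_bit_width_Int8 and test_bit_width_Int16.
    self.assertIn(bits, (8, 16))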
@@ -78,7 +78,8 @@ class Calibrator(object):
         computation, useful when targeting an integer-only backend.
         If False, an error will be thrown if an operation cannot be
         quantized, otherwise the model will fallback to float ops.
-      activations_type: A tf.dtype representing the desired type for activations.
+      activations_type: A tf.dtype representing the desired type for
+        activations.
       resize_input: A boolean. True if the shape of the sample data is different
         from the input.
     """
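Putting the documented arguments together, a hedged sketch of driving the calibrator directly, mirroring the calibrate_and_quantize call shown earlier in this diff (module path and constants module are assumptions about the TF Lite tree of this period):

from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.optimize import calibrator

quantizer = calibrator.Calibrator(float_model_bytes)  # float_model_bytes: a serialized TFLite model
quantized_model = quantizer.calibrate_and_quantize(
    calibration_gen,   # generator yielding lists of sample input arrays
    constants.FLOAT,   # inference_input_type
    constants.FLOAT,   # inference_output_type
    False,             # allow_float
    constants.INT16)   # activations_type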
@@ -96,7 +96,8 @@ class CalibratorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       ('UseActivationTypeInt8 - EnableMlirQuantizer', constants.INT8),
       # Activation type Int16
       ('UseActivationTypeInt16 - DisableEnableMlirQuantizer', constants.INT16))
-  def test_calibration_with_quantization_multiple_inputs(self, activations_type):
+  def test_calibration_with_quantization_multiple_inputs(self,
+                                                         activations_type):
     # Load multi add model from test data.
     # This model has 4 inputs of size (1, 8, 8, 3).
     model_path = resource_loader.get_path_to_datafile(