Renamed the option by adding the EXPERIMENTAL_ prefix.

Change-Id: Idb84736507d5c07ebdf182b8a15d55906d0d7fc0
This commit is contained in:
Elena Zhelezina 2020-06-03 17:56:39 +01:00
parent 29fdee8e85
commit 761d850ac6
3 changed files with 3 additions and 3 deletions

View File

@ -98,7 +98,7 @@ class OpsSet(enum.Enum):
# and int16 activations.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = "TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
def __str__(self):
return self.value

View File

@ -356,7 +356,7 @@ class QuantizationMode(object):
def _is_int16x8_target_required(self):
return bool(
set(self._target_spec.supported_ops).intersection([
OpsSet.TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]))
def _is_allow_float(self):

View File

@ -885,7 +885,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
# Quantize model to Int8: with disable mlir
('UseTfliteBuiltinsIntDisableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
# Quantize model to Int16: with disable mlir
('UseTfliteBuiltinsInt16DisableMLIR', [lite.OpsSet.TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8], False))
('UseTfliteBuiltinsInt16DisableMLIR', [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8], False))
def testCalibrateAndQuantizeBuiltinInt(self, supported_ops, enable_mlir):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getCalibrationQuantizeModel()