From 761d850ac6456aed93ab250ff49af3f0a6a62960 Mon Sep 17 00:00:00 2001
From: Elena Zhelezina
Date: Wed, 3 Jun 2020 17:56:39 +0100
Subject: [PATCH] Renamed option with the prefix EXPERIMENTAL_.

Change-Id: Idb84736507d5c07ebdf182b8a15d55906d0d7fc0
---
 tensorflow/lite/python/convert.py   | 2 +-
 tensorflow/lite/python/lite.py      | 2 +-
 tensorflow/lite/python/lite_test.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tensorflow/lite/python/convert.py b/tensorflow/lite/python/convert.py
index c30987a5898..939de61c608 100644
--- a/tensorflow/lite/python/convert.py
+++ b/tensorflow/lite/python/convert.py
@@ -98,7 +98,7 @@ class OpsSet(enum.Enum):
   # and int16 activations.
   # Specifying this will throw an error for operations that do not yet have
   # quantized implementations.
-  TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = "TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
+  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
 
   def __str__(self):
     return self.value
diff --git a/tensorflow/lite/python/lite.py b/tensorflow/lite/python/lite.py
index 010952820b9..781007241b4 100644
--- a/tensorflow/lite/python/lite.py
+++ b/tensorflow/lite/python/lite.py
@@ -356,7 +356,7 @@ class QuantizationMode(object):
   def _is_int16x8_target_required(self):
     return bool(
         set(self._target_spec.supported_ops).intersection([
-            OpsSet.TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
+            OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
         ]))
 
   def _is_allow_float(self):
diff --git a/tensorflow/lite/python/lite_test.py b/tensorflow/lite/python/lite_test.py
index 4075a887943..1d052b88c10 100644
--- a/tensorflow/lite/python/lite_test.py
+++ b/tensorflow/lite/python/lite_test.py
@@ -885,7 +885,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
       # Quantize model to Int8: with disable mlir
       ('UseTfliteBuiltinsIntDisableMLIR', [lite.OpsSet.TFLITE_BUILTINS_INT8], False),
       # Quantize model to Int16: with disable mlir
-      ('UseTfliteBuiltinsInt16DisableMLIR', [lite.OpsSet.TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8], False))
+      ('UseTfliteBuiltinsInt16DisableMLIR', [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8], False))
   def testCalibrateAndQuantizeBuiltinInt(self, supported_ops, enable_mlir):
     with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
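
For context, here is a minimal sketch (not part of the patch) of how a user opts in to the renamed 16x8 quantization mode through the public TF 2.x converter API. The saved-model path and the representative_dataset() generator below are placeholder assumptions for illustration, not names taken from this diff:

    # Minimal sketch, assuming a SavedModel exists at "saved_model_dir" and a
    # float input of shape [1, 224, 224, 3]; both are placeholders, not names
    # from this patch.
    import tensorflow as tf

    def representative_dataset():
      # Yield a few calibration samples shaped like the model's input.
      for _ in range(100):
        yield [tf.random.normal([1, 224, 224, 3])]

    converter = tf.lite.TFLiteConverter.from_saved_model("saved_model_dir")
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    # After this patch, the 16x8 mode is spelled with the EXPERIMENTAL_ prefix.
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
    ]
    tflite_model = converter.convert()

As the enum comment in convert.py notes, conversion with this ops set raises an error for any operation that does not yet have a 16x8 quantized implementation.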