Clarify that MLIR is now the default for the TFLite converter.
PiperOrigin-RevId: 314625466
Change-Id: I0000e7f3fe91f0024d2871a2d1e8be3af0c0cad5
commit 778ed688df
parent 911c0ec205
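The diff below reworks the deprecation warning and documents that experimental_new_converter now defaults to True. As a quick illustration of what that default means for users (this sketch is not part of the commit, and the tiny tf.function is just a stand-in model), a minimal TF2 conversion might look like:

import tensorflow as tf


# A tiny stand-in model so the sketch is self-contained.
@tf.function(input_signature=[tf.TensorSpec(shape=[1, 4], dtype=tf.float32)])
def double(x):
  return x * 2.0


converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [double.get_concrete_function()])

# The MLIR-based converter is now the default (experimental_new_converter=True),
# so nothing needs to be set to use it. To fall back to the deprecated TOCO
# flow, opt out explicitly:
# converter.experimental_new_converter = False

tflite_model = converter.convert()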
@@ -579,10 +579,9 @@ class TFLiteConverterBaseV2(TFLiteConverterBase):
     converter_kwargs.update(quant_mode.converter_flags())
     if not self.experimental_new_converter:
       logging.warning(
-          "Please consider switching to use new converter by setting "
-          "experimental_new_converter to true. "
-          "Old converter (TOCO) is deprecated and flow will be switched on "
-          "by default to use new converter soon.")
+          "Please consider switching to the new converter by setting "
+          "experimental_new_converter=True. "
+          "The old converter (TOCO) is deprecated.")
     else:
       logging.info("Using experimental converter: If you encountered a problem "
                    "please file a bug. You can opt-out "
@@ -875,7 +874,7 @@ class TFLiteConverterV2(TFLiteFrozenGraphConverterV2):
       training integer quantization. (default tf.float32, must be in
       {tf.float32, tf.int8, tf.uint8})
     experimental_new_converter: Experimental flag, subject to change. Enables
-      MLIR-based conversion instead of TOCO conversion.
+      MLIR-based conversion instead of TOCO conversion. (default True)
 
   Example usage:
 
@@ -1095,7 +1094,7 @@ class TFLiteConverterBaseV1(TFLiteConverterBase):
       generate input and output samples for the model. The converter can use the
       dataset to evaluate different optimizations.
     experimental_new_converter: Experimental flag, subject to change. Enables
-      MLIR-based conversion instead of TOCO conversion.
+      MLIR-based conversion instead of TOCO conversion. (default True)
   """
 
   def __init__(self, experimental_debug_info_func):
@@ -1256,10 +1255,9 @@ class TFLiteConverterBaseV1(TFLiteConverterBase):
 
     if not self.experimental_new_converter:
       logging.warning(
-          "Please consider switching to use new converter by setting "
-          "experimental_new_converter to true. "
-          "Old converter (TOCO) is deprecated and flow will be switched on "
-          "by default to use new converter soon.")
+          "Please consider switching to the new converter by setting "
+          "experimental_new_converter=True. "
+          "The old converter (TOCO) is deprecated.")
     else:
       logging.info("Using experimental converter: If you encountered a problem "
                    "please file a bug. You can opt-out "
@@ -1637,7 +1635,7 @@ class TFLiteConverter(TFLiteFrozenGraphConverter):
       generate input and output samples for the model. The converter can use
       the dataset to evaluate different optimizations.
     experimental_new_converter: Experimental flag, subject to change.
-      Enables MLIR-based conversion instead of TOCO conversion.
+      Enables MLIR-based conversion instead of TOCO conversion. (default True)
 
   Example usage:
 
@@ -1767,7 +1767,7 @@ class FromSavedModelTest(TestModels):
     log = io.BytesIO() if six.PY2 else io.StringIO()
     handler = logging.StreamHandler(log)
     logging.root.addHandler(handler)
-    warning_message = 'Please consider switching to use new converter'
+    warning_message = 'Please consider switching to the new converter'
     # Convert model and ensure model is not None.
     converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
     converter.experimental_new_converter = False
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Python command line interface for running TOCO."""
+"""Python command line interface for converting TF models to TFLite models."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -71,7 +71,7 @@ def _parse_inference_type(value, flag):
         "QUANTIZED_UINT8 are supported.".format(flag))
 
 
-def _get_toco_converter(flags):
+def _get_tflite_converter(flags):
   """Makes a TFLiteConverter object based on the flags provided.
 
   Args:
@@ -129,7 +129,7 @@ def _convert_tf1_model(flags):
     ValueError: Invalid flags.
   """
   # Create converter.
-  converter = _get_toco_converter(flags)
+  converter = _get_tflite_converter(flags)
   if flags.inference_type:
     converter.inference_type = _parse_inference_type(flags.inference_type,
                                                      "inference_type")
@@ -589,7 +589,7 @@ def _get_parser(use_v2_converter):
       action=_ParseExperimentalNewConverter,
       nargs="?",
       help=("Experimental flag, subject to change. Enables MLIR-based "
-            "conversion instead of TOCO conversion."))
+            "conversion instead of TOCO conversion. (default True)"))
   return parser
 
 
@@ -281,11 +281,11 @@ class TfLiteConvertV1Test(TestModels):
         self._input_shapes,
         custom_opdefs_str))
 
-    # Ensure --experimental_new_converter.
+    # Ensure --allow_custom_ops.
     flags_str_final = ('{} --allow_custom_ops').format(flags_str)
     self._run(flags_str_final, should_succeed=False)
 
-    # Ensure --allow_custom_ops.
+    # Ensure --experimental_new_converter.
     flags_str_final = ('{} --experimental_new_converter').format(flags_str)
     self._run(flags_str_final, should_succeed=False)
 
@@ -344,15 +344,18 @@ class ArgParserTest(test_util.TensorFlowTestCase):
         '--output_file=/tmp/output.tflite',
     ]
 
+    # Note that when the flag parses to None, the converter uses the default
+    # value, which is True.
+
     # V1 parser.
-    parser = tflite_convert._get_parser(False)
+    parser = tflite_convert._get_parser(use_v2_converter=False)
     parsed_args = parser.parse_args(args)
-    self.assertFalse(parsed_args.experimental_new_converter)
+    self.assertIsNone(parsed_args.experimental_new_converter)
 
     # V2 parser.
-    parser = tflite_convert._get_parser(True)
+    parser = tflite_convert._get_parser(use_v2_converter=True)
     parsed_args = parser.parse_args(args)
-    self.assertFalse(parsed_args.experimental_new_converter)
+    self.assertIsNone(parsed_args.experimental_new_converter)
 
   def test_experimental_new_converter(self):
     args = [
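The hunk above notes that when the flag parses to None, the converter falls back to its default of True. The following argparse sketch approximates that tri-state behavior (flag omitted gives None, a bare flag gives True, an explicit value is parsed as a boolean); it is an illustration only, not the actual _ParseExperimentalNewConverter action:

import argparse


class _IllustrativeNewConverterAction(argparse.Action):
  """Stand-in for the converter flag's custom parse action (assumption)."""

  def __call__(self, parser, namespace, values, option_string=None):
    if values is None:
      # A bare `--experimental_new_converter` enables the new converter.
      setattr(namespace, self.dest, True)
    else:
      # An explicit value, e.g. `--experimental_new_converter=false`.
      setattr(namespace, self.dest, values.lower() in ("true", "1"))


parser = argparse.ArgumentParser()
parser.add_argument(
    "--experimental_new_converter",
    action=_IllustrativeNewConverterAction,
    nargs="?",
    default=None,  # Omitted flag parses to None; the converter then uses True.
    help="Enables MLIR-based conversion instead of TOCO conversion.")

print(parser.parse_args([]).experimental_new_converter)  # None
print(parser.parse_args(
    ["--experimental_new_converter"]).experimental_new_converter)  # True
print(parser.parse_args(
    ["--experimental_new_converter=false"]).experimental_new_converter)  # False

This is why the tests now assert assertIsNone rather than assertFalse when the flag is omitted.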
@@ -362,12 +365,12 @@ class ArgParserTest(test_util.TensorFlowTestCase):
     ]
 
     # V1 parser.
-    parser = tflite_convert._get_parser(False)
+    parser = tflite_convert._get_parser(use_v2_converter=False)
     parsed_args = parser.parse_args(args)
     self.assertTrue(parsed_args.experimental_new_converter)
 
     # V2 parser.
-    parser = tflite_convert._get_parser(True)
+    parser = tflite_convert._get_parser(use_v2_converter=True)
     parsed_args = parser.parse_args(args)
     self.assertTrue(parsed_args.experimental_new_converter)
 
@@ -396,12 +399,12 @@ class ArgParserTest(test_util.TensorFlowTestCase):
     ]
 
     # V1 parser.
-    parser = tflite_convert._get_parser(False)
+    parser = tflite_convert._get_parser(use_v2_converter=False)
     parsed_args = parser.parse_args(args)
     self.assertFalse(parsed_args.experimental_new_converter)
 
     # V2 parser.
-    parser = tflite_convert._get_parser(True)
+    parser = tflite_convert._get_parser(use_v2_converter=True)
     parsed_args = parser.parse_args(args)
     self.assertFalse(parsed_args.experimental_new_converter)
 