diff --git a/tensorflow/lite/testing/op_tests/abs.py b/tensorflow/lite/testing/op_tests/abs.py index 581831db120..6122d9e1f81 100644 --- a/tensorflow/lite/testing/op_tests/abs.py +++ b/tensorflow/lite/testing/op_tests/abs.py @@ -32,6 +32,7 @@ def make_abs_tests(options): test_parameters = [{ "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]], + "dynamic_range_quantize": [False, True] }] def build_graph(parameters): diff --git a/tensorflow/lite/testing/toco_convert.py b/tensorflow/lite/testing/toco_convert.py index f244fde55e3..4db2cf32fb1 100644 --- a/tensorflow/lite/testing/toco_convert.py +++ b/tensorflow/lite/testing/toco_convert.py @@ -103,10 +103,9 @@ def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs): input_arrays = [x[0] for x in input_tensors] data_types = [zip_test_utils.TF_TYPE_INFO[x[2]][1] for x in input_tensors] - if test_params.get("fully_quantize", False): - # Read the input range for the representative dataset from parameters. 
- min_value, max_value = test_params.get("input_range", (-1, 1)) - + fully_quantize = test_params.get("fully_quantize", False) + dynamic_range_quantize = test_params.get("dynamic_range_quantize", False) + if dynamic_range_quantize or fully_quantize: with tempfile.NamedTemporaryFile() as graphdef_file: graphdef_file.write(graph_def_str) graphdef_file.flush() @@ -115,32 +114,38 @@ def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs): converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph( graphdef_file.name, input_arrays, output_tensors, input_shapes) - def representative_dataset(input_tensors): - calibration_inputs = [] - for _, shape, _ in input_tensors: - if shape: - dims = [dim.value for dim in shape.dims] - calibration_inputs.append( - np.random.uniform(min_value, max_value, - tuple(dims)).astype(np.float32)) - return calibration_inputs + converter.optimizations = [tf.lite.Optimize.DEFAULT] - def representative_dataset_gen(): - for _ in range(100): - yield representative_dataset(input_tensors) + if fully_quantize: + # Read the input range for the representative dataset from parameters.
+ min_value, max_value = test_params.get("input_range", (-1, 1)) - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS_INT8 - ] - converter.representative_dataset = representative_dataset_gen - if extra_toco_options.inference_input_type: - converter.inference_input_type = ( - extra_toco_options.inference_input_type) - if extra_toco_options.inference_output_type: - converter.inference_output_type = ( - extra_toco_options.inference_output_type) - else: - converter.inference_output_type = tf.int8 + def representative_dataset(input_tensors): + calibration_inputs = [] + for _, shape, _ in input_tensors: + if shape: + dims = [dim.value for dim in shape.dims] + calibration_inputs.append( + np.random.uniform(min_value, max_value, + tuple(dims)).astype(np.float32)) + return calibration_inputs + + def representative_dataset_gen(): + for _ in range(100): + yield representative_dataset(input_tensors) + + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS_INT8 + ] + converter.representative_dataset = representative_dataset_gen + if extra_toco_options.inference_input_type: + converter.inference_input_type = ( + extra_toco_options.inference_input_type) + if extra_toco_options.inference_output_type: + converter.inference_output_type = ( + extra_toco_options.inference_output_type) + else: + converter.inference_output_type = tf.int8 try: tflite_model = converter.convert()