Fix documentation for int16x8 quantization

PiperOrigin-RevId: 322704757
Change-Id: Id3c785de088c04ce5c261e7152fcdefeb6d7db29
Author: Meghna Natraj
Date: 2020-07-22 19:20:04 -07:00
Committed by: TensorFlower Gardener
Parent: f112d4bd7d
Commit: fffa1e6548


@@ -161,7 +161,7 @@ def representative_dataset_gen():
     yield [input]
 converter.representative_dataset = representative_dataset_gen
 <b>converter.optimizations = [tf.lite.Optimize.DEFAULT]
-converter.target_spec.supported_ops = [tf.lite.constants.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]</b>
+converter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]</b>
 tflite_quant_model = converter.convert()
 </pre>
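
For context, a minimal end-to-end sketch of the flow this hunk documents: full integer-style post-training quantization with 16-bit activations and 8-bit weights, using the corrected `tf.lite.OpsSet` enum rather than the invalid `tf.lite.constants` path. The `saved_model_dir` path, input shape, and random calibration data are placeholders, not values from this commit.

```python
import numpy as np
import tensorflow as tf

# Placeholder: path to an existing SavedModel (assumption, not from the diff).
saved_model_dir = "/tmp/my_saved_model"

def representative_dataset_gen():
    # Yield a few calibration samples. Random data is a stand-in here;
    # real inputs matching the model's input signature should be used.
    for _ in range(100):
        yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.representative_dataset = representative_dataset_gen
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# The fix in this commit: the ops set lives on tf.lite.OpsSet,
# not tf.lite.constants.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
tflite_quant_model = converter.convert()
```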
@@ -177,7 +177,7 @@ def representative_dataset_gen():
     yield [input]
 converter.representative_dataset = representative_dataset_gen
 converter.optimizations = [tf.lite.Optimize.DEFAULT]
-converter.target_spec.supported_ops = [tf.lite.constants.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
+converter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
 <b>tf.lite.OpsSet.TFLITE_BUILTINS</b>]
 tflite_quant_model = converter.convert()
 </pre>
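
The second hunk documents the fallback variant: listing `tf.lite.OpsSet.TFLITE_BUILTINS` alongside the experimental 16x8 ops set lets conversion succeed even when some operators lack a 16x8 kernel, with those operators left in their regular builtin form. Continuing from the sketch above, only the `supported_ops` assignment changes:

```python
# Fallback variant: allow regular TFLite builtins for any op without
# 16x8 support, so conversion does not fail on such ops.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
    tf.lite.OpsSet.TFLITE_BUILTINS,
]
tflite_quant_model = converter.convert()
```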