diff --git a/tensorflow/lite/g3doc/performance/post_training_quantization.md b/tensorflow/lite/g3doc/performance/post_training_quantization.md
index ac584dd4c1c..dcf251e6d3d 100644
--- a/tensorflow/lite/g3doc/performance/post_training_quantization.md
+++ b/tensorflow/lite/g3doc/performance/post_training_quantization.md
@@ -34,7 +34,7 @@ weights from floating point to integer, which has 8-bits of precision:
 import tensorflow as tf
-converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
 converter.optimizations = [tf.lite.Optimize.DEFAULT]
 tflite_quant_model = converter.convert()
@@ -68,7 +68,7 @@ the following steps:
 import tensorflow as tf
-converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
 converter.optimizations = [tf.lite.Optimize.DEFAULT]
 def representative_dataset_gen():
   for _ in range(num_calibration_steps):
@@ -96,7 +96,7 @@ the following steps:
 import tensorflow as tf
-converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
 converter.optimizations = [tf.lite.Optimize.DEFAULT]
 def representative_dataset_gen():
   for _ in range(num_calibration_steps):
@@ -120,7 +120,7 @@ quantization of weights, use the following steps:
 import tensorflow as tf
-converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
 converter.optimizations = [tf.lite.Optimize.DEFAULT]
 converter.target_spec.supported_types = [tf.float16]
 tflite_quant_model = converter.convert()