Remove the precision mode restriction in create_inference_graph(), since we can still apply INT8 for quantization-aware trained models.

PiperOrigin-RevId: 238311183
Guangda Lai 2019-03-13 14:34:42 -07:00 committed by TensorFlower Gardener
parent d8a1d6d450
commit 38d9711a73

@@ -855,8 +855,6 @@ def create_inference_graph(
   Raises:
     ValueError: if the combination of the parameters is invalid.
   """
-  if precision_mode not in [TrtPrecisionMode.FP32, TrtPrecisionMode.FP16]:
-    raise ValueError("Invalid precision mode: {}".format(precision_mode))
   trt_converter = TrtGraphConverter(
       input_saved_model_dir=input_saved_model_dir,
       input_saved_model_tags=input_saved_model_tags,
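
With the check removed, a caller can now pass INT8 straight through this function for a quantization-aware trained model. A minimal sketch of such a call, assuming the TF 1.x trt_convert API at this revision (the SavedModel paths are hypothetical):

    from tensorflow.python.compiler.tensorrt import trt_convert as trt

    # Convert a quantization-aware trained SavedModel, requesting INT8
    # TensorRT engines. Before this commit, create_inference_graph()
    # rejected any precision mode other than FP32 or FP16.
    trt_graph_def = trt.create_inference_graph(
        input_graph_def=None,   # the graph comes from the SavedModel below
        outputs=None,
        input_saved_model_dir='/tmp/qat_saved_model',   # hypothetical path
        output_saved_model_dir='/tmp/qat_trt_model',    # hypothetical path
        precision_mode=trt.TrtPrecisionMode.INT8,
    )

Quantization-aware training already embeds quantization ranges in the graph, which is why INT8 can work here without a separate calibration step; models without such training would still need TensorRT calibration before their INT8 engines are usable.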