Remove the precision mode restriction in create_inference_graph() since we can
still apply INT8 for quantization-aware trained models.

PiperOrigin-RevId: 238311183
commit 38d9711a73
parent d8a1d6d450
@@ -855,8 +855,6 @@ def create_inference_graph(
   Raises:
     ValueError: if the combination of the parameters is invalid.
   """
-  if precision_mode not in [TrtPrecisionMode.FP32, TrtPrecisionMode.FP16]:
-    raise ValueError("Invalid precision mode: {}".format(precision_mode))
   trt_converter = TrtGraphConverter(
       input_saved_model_dir=input_saved_model_dir,
       input_saved_model_tags=input_saved_model_tags,
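With the check removed, a SavedModel produced by quantization-aware training can be converted with INT8 precision directly. A minimal usage sketch, assuming the TF 1.x TF-TRT API shown in this file (the import path varies across TensorFlow releases, and the SavedModel directories are hypothetical placeholders):

from tensorflow.python.compiler.tensorrt import trt_convert as trt

# Sketch only: the paths below are placeholders, not from this commit.
converted_graph_def = trt.create_inference_graph(
    input_graph_def=None,   # None: the model is loaded from the SavedModel
    outputs=None,           # None: outputs come from the SavedModel signature
    input_saved_model_dir="/tmp/qat_saved_model",       # hypothetical
    output_saved_model_dir="/tmp/qat_saved_model_trt",  # hypothetical
    # Previously rejected by the removed check; now accepted, since a
    # quantization-aware trained graph already carries quantization ranges.
    precision_mode=trt.TrtPrecisionMode.INT8)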