From 38d9711a73b66af4aeba273d6e5cb105ce4f4f15 Mon Sep 17 00:00:00 2001
From: Guangda Lai
Date: Wed, 13 Mar 2019 14:34:42 -0700
Subject: [PATCH] Remove the precision mode restriction in
 create_inference_graph() since we can still apply INT8 for
 quantization-aware trained models.

PiperOrigin-RevId: 238311183
---
 tensorflow/python/compiler/tensorrt/trt_convert.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tensorflow/python/compiler/tensorrt/trt_convert.py b/tensorflow/python/compiler/tensorrt/trt_convert.py
index a6087703fc7..1363c872812 100644
--- a/tensorflow/python/compiler/tensorrt/trt_convert.py
+++ b/tensorflow/python/compiler/tensorrt/trt_convert.py
@@ -855,8 +855,6 @@ def create_inference_graph(
   Raises:
     ValueError: if the combination of the parameters is invalid.
   """
-  if precision_mode not in [TrtPrecisionMode.FP32, TrtPrecisionMode.FP16]:
-    raise ValueError("Invalid precision mode: {}".format(precision_mode))
   trt_converter = TrtGraphConverter(
       input_saved_model_dir=input_saved_model_dir,
       input_saved_model_tags=input_saved_model_tags,
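
Note (not part of the patch): with the check above removed, create_inference_graph()
should now accept TrtPrecisionMode.INT8, letting quantization-aware trained models
pass through without a ValueError. Below is a minimal sketch of such a call against
the TF 1.x API at this revision; the SavedModel paths and batch size are hypothetical,
and input_graph_def/outputs are left as None to convert from a SavedModel, as the
converter arguments in the diff suggest:

  # Hypothetical usage sketch, not taken from the patch.
  from tensorflow.python.compiler.tensorrt import trt_convert as trt

  trt.create_inference_graph(
      input_graph_def=None,        # convert from a SavedModel, not a frozen GraphDef
      outputs=None,
      max_batch_size=8,                                   # hypothetical
      precision_mode=trt.TrtPrecisionMode.INT8,           # previously rejected here
      input_saved_model_dir="/tmp/qat_saved_model",       # hypothetical path
      output_saved_model_dir="/tmp/qat_saved_model_trt")  # hypothetical path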