From ea8f7346035761b8019a0fe334001c37fe2f3381 Mon Sep 17 00:00:00 2001
From: Bixia Zheng <bixia@google.com>
Date: Thu, 20 Aug 2020 11:24:58 -0700
Subject: [PATCH] [TF:TRT] Clean up the implementation for getting the
 TensorRT version.

Move the implementation from utils/py_utils.cc to common/utils.cc.

Delete the string returning version of the routines from convert/utils.cc

PiperOrigin-RevId: 327663715
Change-Id: Ic8652e03677ebad0730c9685cd43c14079a741e9
---
 tensorflow/compiler/tf2tensorrt/BUILD         |  2 +
 .../compiler/tf2tensorrt/common/utils.cc      | 48 +++++++++++++++++++
 .../compiler/tf2tensorrt/common/utils.h       | 14 ++++++
 .../tf2tensorrt/convert/convert_nodes.cc      |  9 ++--
 .../compiler/tf2tensorrt/convert/utils.cc     | 30 ------------
 .../compiler/tf2tensorrt/convert/utils.h      |  8 ----
 .../compiler/tf2tensorrt/utils/py_utils.cc    | 26 ----------
 .../compiler/tf2tensorrt/utils/py_utils.h     |  6 ---
 .../tf2tensorrt/utils/py_utils_wrapper.cc     |  9 ++--
 9 files changed, 73 insertions(+), 79 deletions(-)
 create mode 100644 tensorflow/compiler/tf2tensorrt/common/utils.cc

diff --git a/tensorflow/compiler/tf2tensorrt/BUILD b/tensorflow/compiler/tf2tensorrt/BUILD
index d429097625f..44fb5513886 100644
--- a/tensorflow/compiler/tf2tensorrt/BUILD
+++ b/tensorflow/compiler/tf2tensorrt/BUILD
@@ -80,6 +80,7 @@ tf_cuda_cc_test(
 
 cc_library(
     name = "common_utils",
+    srcs = ["common/utils.cc"],
     hdrs = ["common/utils.h"],
     copts = tf_copts(),
     deps = [
@@ -587,6 +588,7 @@ pybind_extension(
     link_in_framework = True,
     module_name = "_pywrap_py_utils",
     deps = [
+        ":common_utils",
        ":py_utils",
         "//tensorflow/core/platform:env",
         "//tensorflow/core/platform:logging",
diff --git a/tensorflow/compiler/tf2tensorrt/common/utils.cc b/tensorflow/compiler/tf2tensorrt/common/utils.cc
new file mode 100644
index 00000000000..c305b6942dc
--- /dev/null
+++ b/tensorflow/compiler/tf2tensorrt/common/utils.cc
@@ -0,0 +1,48 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
+
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#include "third_party/tensorrt/NvInfer.h"
+#endif  // GOOGLE_CUDA && GOOGLE_TENSORRT
+
+namespace tensorflow {
+namespace tensorrt {
+
+std::tuple<int, int, int> GetLinkedTensorRTVersion() {
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
+  return std::tuple<int, int, int>{NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR,
+                                   NV_TENSORRT_PATCH};
+#else
+  return std::tuple<int, int, int>{0, 0, 0};
+#endif
+}
+
+std::tuple<int, int, int> GetLoadedTensorRTVersion() {
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
+  int ver = getInferLibVersion();
+  int major = ver / 1000;
+  ver = ver - major * 1000;
+  int minor = ver / 100;
+  int patch = ver - minor * 100;
+  return std::tuple<int, int, int>{major, minor, patch};
+#else
+  return std::tuple<int, int, int>{0, 0, 0};
+#endif
+}
+
+}  // namespace tensorrt
+}  // namespace tensorflow
diff --git a/tensorflow/compiler/tf2tensorrt/common/utils.h b/tensorflow/compiler/tf2tensorrt/common/utils.h
index b428733ecd4..51a21a93ca4 100644
--- a/tensorflow/compiler/tf2tensorrt/common/utils.h
+++ b/tensorflow/compiler/tf2tensorrt/common/utils.h
@@ -16,6 +16,20 @@ limitations under the License.
 #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
 #define TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
 
+#include <tuple>
+
+namespace tensorflow {
+namespace tensorrt {
+// Returns the compile time TensorRT library version information
+// {Maj, Min, Patch}.
+std::tuple<int, int, int> GetLinkedTensorRTVersion();
+
+// Returns the runtime time TensorRT library version information
+// {Maj, Min, Patch}.
+std::tuple<int, int, int> GetLoadedTensorRTVersion();
+}  // namespace tensorrt
+}  // namespace tensorflow
+
 #if GOOGLE_CUDA && GOOGLE_TENSORRT
 
 #include "tensorflow/core/platform/logging.h"
diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
index f80c0f42eca..c51981aadab 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
@@ -1203,8 +1203,10 @@ static void InitializeTrtPlugins(nvinfer1::ILogger* trt_logger) {
   mutex_lock lock(plugin_mutex);
   if (plugin_initialized) return;
 
-  LOG(INFO) << "Linked TensorRT version: " << GetLinkedTensorRTVersion();
-  LOG(INFO) << "Loaded TensorRT version: " << GetLoadedTensorRTVersion();
+  LOG(INFO) << "Linked TensorRT version: "
+            << absl::StrJoin(GetLinkedTensorRTVersion(), ".");
+  LOG(INFO) << "Loaded TensorRT version: "
+            << absl::StrJoin(GetLoadedTensorRTVersion(), ".");
 
   plugin_initialized = initLibNvInferPlugins(trt_logger, "");
   if (!plugin_initialized) {
@@ -1434,7 +1436,8 @@ Status Converter::BuildCudaEngine(
   TF_RETURN_IF_ERROR(
       TrtPrecisionModeToName(precision_mode_, &precision_mode_str));
   string trt_network_name = StrCat(
-      "TF:", TF_VERSION_STRING, ", ", "TRT:", GetLoadedTensorRTVersion(), "-",
+      "TF:", TF_VERSION_STRING, ", ",
+      "TRT:", absl::StrJoin(GetLoadedTensorRTVersion(), "."), "-",
       "Precision:", precision_mode_str, ", ", "Calibration:", use_calibration_,
       ", ", "Max-Batch-Size:", max_batch_size, ", ",
       "Max-Workspace-Size:", max_workspace_size_bytes);
diff --git a/tensorflow/compiler/tf2tensorrt/convert/utils.cc b/tensorflow/compiler/tf2tensorrt/convert/utils.cc
index a69960005fc..1fc0d13c993 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/utils.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/utils.cc
@@ -241,36 +241,6 @@ int GetNumberOfEngineInputs(const nvinfer1::ICudaEngine* engine) {
 }
 #endif
 
-string GetLinkedTensorRTVersion() {
-  int major, minor, patch;
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
-  major = NV_TENSORRT_MAJOR;
-  minor = NV_TENSORRT_MINOR;
-  patch = NV_TENSORRT_PATCH;
-#else
-  major = 0;
-  minor = 0;
-  patch = 0;
-#endif
-  return absl::StrCat(major, ".", minor, ".", patch);
-}
-
-string GetLoadedTensorRTVersion() {
-  int major, minor, patch;
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
-  int ver = getInferLibVersion();
-  major = ver / 1000;
-  ver = ver - major * 1000;
-  minor = ver / 100;
-  patch = ver - minor * 100;
-#else
-  major = 0;
-  minor = 0;
-  patch = 0;
-#endif
-  return absl::StrCat(major, ".", minor, ".", patch);
-}
-
 absl::string_view GetDeviceName(const Node* node) {
   if (node->has_assigned_device_name()) {
     return node->assigned_device_name();
diff --git a/tensorflow/compiler/tf2tensorrt/convert/utils.h b/tensorflow/compiler/tf2tensorrt/convert/utils.h
index a0505c3f922..7570dff1c9d 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/utils.h
+++ b/tensorflow/compiler/tf2tensorrt/convert/utils.h
@@ -117,14 +117,6 @@ Status TrtDimsToTensorShape(const nvinfer1::Dims trt_dims,
 Status TfTypeToTrtType(DataType tf_type, nvinfer1::DataType* trt_type);
 Status TrtTypeToTfType(nvinfer1::DataType trt_type, DataType* tf_type);
 
-// Returns a string that includes compile time TensorRT library version
-// information {Maj, Min, Patch}.
-string GetLinkedTensorRTVersion();
-
-// Returns a string that includes runtime time TensorRT library version
-// information {Maj, Min, Patch}.
-string GetLoadedTensorRTVersion();
-
 // Returns true if an engine built for cached_shapes can also run actual_shapes.
 bool AreShapesCompatible(const std::vector<TensorShape>& actual_shapes,
                          const std::vector<TensorShape>& cached_shapes);
diff --git a/tensorflow/compiler/tf2tensorrt/utils/py_utils.cc b/tensorflow/compiler/tf2tensorrt/utils/py_utils.cc
index a8e24aa8983..3f8a11f7410 100644
--- a/tensorflow/compiler/tf2tensorrt/utils/py_utils.cc
+++ b/tensorflow/compiler/tf2tensorrt/utils/py_utils.cc
@@ -41,31 +41,5 @@ bool IsGoogleTensorRTEnabled() {
 #endif
 }
 
-void GetLinkedTensorRTVersion(int* major, int* minor, int* patch) {
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
-  *major = NV_TENSORRT_MAJOR;
-  *minor = NV_TENSORRT_MINOR;
-  *patch = NV_TENSORRT_PATCH;
-#else
-  *major = 0;
-  *minor = 0;
-  *patch = 0;
-#endif
-}
-
-void GetLoadedTensorRTVersion(int* major, int* minor, int* patch) {
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
-  int ver = getInferLibVersion();
-  *major = ver / 1000;
-  ver = ver - *major * 1000;
-  *minor = ver / 100;
-  *patch = ver - *minor * 100;
-#else
-  *major = 0;
-  *minor = 0;
-  *patch = 0;
-#endif
-}
-
 }  // namespace tensorrt
 }  // namespace tensorflow
diff --git a/tensorflow/compiler/tf2tensorrt/utils/py_utils.h b/tensorflow/compiler/tf2tensorrt/utils/py_utils.h
index f52bb6f1bad..9b24eb36cf9 100644
--- a/tensorflow/compiler/tf2tensorrt/utils/py_utils.h
+++ b/tensorflow/compiler/tf2tensorrt/utils/py_utils.h
@@ -21,12 +21,6 @@ namespace tensorrt {
 
 bool IsGoogleTensorRTEnabled();
 
-// Return compile time TensorRT library version information {Maj, Min, Patch}.
-void GetLinkedTensorRTVersion(int* major, int* minor, int* patch);
-
-// Return runtime time TensorRT library version information {Maj, Min, Patch}.
-void GetLoadedTensorRTVersion(int* major, int* minor, int* patch);
-
 }  // namespace tensorrt
 }  // namespace tensorflow
 
diff --git a/tensorflow/compiler/tf2tensorrt/utils/py_utils_wrapper.cc b/tensorflow/compiler/tf2tensorrt/utils/py_utils_wrapper.cc
index 03f77c6bd5f..52252f125ac 100644
--- a/tensorflow/compiler/tf2tensorrt/utils/py_utils_wrapper.cc
+++ b/tensorflow/compiler/tf2tensorrt/utils/py_utils_wrapper.cc
@@ -16,18 +16,15 @@ limitations under the License.
 #include <tuple>
 
 #include "pybind11/pybind11.h"
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/utils/py_utils.h"
 
 std::tuple<int, int, int> get_linked_tensorrt_version() {
-  int major, minor, patch;
-  tensorflow::tensorrt::GetLinkedTensorRTVersion(&major, &minor, &patch);
-  return std::tuple<int, int, int>{major, minor, patch};
+  return tensorflow::tensorrt::GetLinkedTensorRTVersion();
 }
 
 std::tuple<int, int, int> get_loaded_tensorrt_version() {
-  int major, minor, patch;
-  tensorflow::tensorrt::GetLoadedTensorRTVersion(&major, &minor, &patch);
-  return std::tuple<int, int, int>{major, minor, patch};
+  return tensorflow::tensorrt::GetLoadedTensorRTVersion();
 }
 
 PYBIND11_MODULE(_pywrap_py_utils, m) {