[TF:TRT] Cosmetic fix.

Rewrite the two nested #if lines (#if GOOGLE_CUDA / #if GOOGLE_TENSORRT) into a single #if GOOGLE_CUDA && GOOGLE_TENSORRT.

PiperOrigin-RevId: 317386436
Change-Id: Icc8ae27a17900b6f0a198d32c6d73345084eab50
Authored by Bixia Zheng on 2020-06-19 14:59:47 -07:00; committed by TensorFlower Gardener
parent 8e88146931
commit f129485019
39 changed files with 83 additions and 162 deletions
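
Every file in this change follows the same pattern: a nested pair of preprocessor guards is collapsed into one conjunctive guard. A minimal before/after sketch of the pattern (illustrative only; the surrounding code and trailing comments vary per file):

Before:
    #if GOOGLE_CUDA
    #if GOOGLE_TENSORRT
    // ... TensorRT-specific code ...
    #endif // GOOGLE_TENSORRT
    #endif // GOOGLE_CUDA

After:
    #if GOOGLE_CUDA && GOOGLE_TENSORRT
    // ... TensorRT-specific code ...
    #endif // GOOGLE_CUDA && GOOGLE_TENSORRT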

View File

@@ -16,8 +16,7 @@ limitations under the License.
 #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
 #define TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "tensorflow/core/platform/logging.h"
@@ -29,7 +28,6 @@ namespace tensorrt {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif
-#endif
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_

View File

@@ -53,8 +53,7 @@ limitations under the License.
 #include "tensorflow/core/util/device_name_utils.h"
 #include "tensorflow/tools/graph_transforms/transform_utils.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -884,5 +883,4 @@ Status ConvertAfterShapes(const ConversionParams& params) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -24,8 +24,7 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -66,7 +65,6 @@ Status RegisterGraphToFunctionLibrary(const GraphDef& segment_graph_def,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_GRAPH_H_

View File

@@ -34,8 +34,7 @@ limitations under the License.
 #include "tensorflow/core/protobuf/config.pb.h" // NOLINT
 #include "tensorflow/core/public/session.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -231,5 +230,4 @@ TEST_F(ConvertAfterShapesTest, DirectlyConnectedEngines) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -59,8 +59,7 @@ limitations under the License.
 #include "tensorflow/core/util/env_var.h"
 #include "tensorflow/core/util/strided_slice_op.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 #include "third_party/tensorrt/NvInferPlugin.h"
@@ -6258,5 +6257,4 @@ bool OutputEdgeValidator::operator()(const Edge* out_edge) const {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -33,8 +33,7 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/stream_executor/lib/statusor.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -694,7 +693,6 @@ BinaryOperationMap();
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_NODES_H_

View File

@@ -21,8 +21,7 @@ limitations under the License.
 #include <unordered_map>
 #include <vector>
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
@@ -6636,5 +6635,4 @@ TEST_F(OpConverterTest, ConvertPad) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -12,8 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
@@ -58,5 +57,4 @@ LoggerRegistry* GetLoggerRegistry() {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -19,7 +19,8 @@ limitations under the License.
 #include "tensorflow/core/platform/macros.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -53,5 +54,5 @@ class RegisterLogger {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_LOGGER_REGISTRY_H_

View File

@@ -28,8 +28,7 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/stacktrace.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
 namespace convert {
@@ -302,5 +301,4 @@ static VerboseCustomGraphOptimizerRegistrar TRTOptimizationPass_Registrar(
 } // namespace tensorrt
 } // namespace tensorflow
-#endif
-#endif
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -23,8 +23,7 @@ limitations under the License.
 #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -83,6 +82,5 @@ class TRTOptimizationPass : public grappler::CustomGraphOptimizer {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA
-#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_TRT_OPTIMIZATION_PASS_H_

View File

@@ -22,8 +22,7 @@ limitations under the License.
 #include "tensorflow/core/framework/resource_mgr.h"
 #include "tensorflow/core/lib/core/refcount.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -67,5 +66,4 @@ REGISTER_KERNEL_BUILDER(Name("GetCalibrationDataOp").Device(DEVICE_GPU),
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -48,8 +48,7 @@ limitations under the License.
 #include "tensorflow/core/util/env_var.h"
 #include "tensorflow/stream_executor/lib/statusor.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
@@ -1009,5 +1008,4 @@ REGISTER_KERNEL_BUILDER(Name("TRTEngineOp").Device(DEVICE_GPU), TRTEngineOp);
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -50,8 +50,7 @@ limitations under the License.
 #include "tensorflow/core/platform/status.h"
 #include "tensorflow/core/public/version.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -306,5 +305,4 @@ TYPED_TEST(TRTEngineOpTest, Basic) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -33,8 +33,7 @@ limitations under the License.
 #include "tensorflow/core/platform/mutex.h"
 #include "tensorflow/core/platform/thread_annotations.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -251,5 +250,4 @@ REGISTER_KERNEL_BUILDER(Name("SerializeTRTResource").Device(DEVICE_GPU),
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -48,8 +48,7 @@ limitations under the License.
 #include "tensorflow/core/platform/tstring.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -246,5 +245,4 @@ TEST_F(TRTEngineResourceOpsTest, Basic) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "tensorflow/core/framework/common_shape_fns.h"
 #include "tensorflow/core/framework/op.h"
@@ -34,5 +33,4 @@ Returns calibration data for the given resource name
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "tensorflow/core/framework/common_shape_fns.h"
 #include "tensorflow/core/framework/op.h"
@@ -59,5 +58,4 @@ REGISTER_OP("TRTEngineOp")
 .Attr("static_engine: bool = true");
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "tensorflow/core/framework/common_shape_fns.h"
 #include "tensorflow/core/framework/op.h"
@@ -46,5 +45,4 @@ REGISTER_OP("SerializeTRTResource")
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -17,8 +17,7 @@ limitations under the License.
 #include "tensorflow/compiler/tf2tensorrt/plugin/trt_plugin.h"
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #define EIGEN_USE_GPU // For definition of Eigen::GpuDevice.
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "tensorflow/core/util/gpu_kernel_helper.h"
@@ -234,5 +233,4 @@ REGISTER_TFTRT_PLUGIN(CastPluginCreator);
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA
-#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -17,8 +17,7 @@ limitations under the License.
 #include <cstring>
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -30,5 +29,4 @@ const char* kTfTrtPluginNamespace = "TF";
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA
-#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -20,8 +20,7 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -90,7 +89,6 @@ class TrtPluginRegistrar {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_PLUGIN_TRT_PLUGIN_H_

View File

@@ -35,8 +35,7 @@ limitations under the License.
 #include "tensorflow/core/platform/types.h"
 #include "tensorflow/core/util/env_var.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -1062,5 +1061,4 @@ Status SegmentGraph(const Graph* tf_graph,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -25,8 +25,7 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -67,7 +66,6 @@ Status SegmentGraph(const Graph* tf_graph,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_SEGMENT_SEGMENT_H_

View File

@@ -26,8 +26,7 @@ limitations under the License.
 #include "tensorflow/core/platform/types.h"
 #include "tensorflow/core/public/session.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -522,5 +521,4 @@ TEST_F(SegmentTest, IncompatibleBatchSizes) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -19,8 +19,7 @@ limitations under the License.
 #include "absl/strings/str_format.h"
 #include "absl/types/optional.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -217,7 +216,6 @@ UnionFind<T>* UnionFind<T>::FindRoot() {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_SEGMENT_UNION_FIND_H_

View File

@@ -18,8 +18,7 @@ limitations under the License.
 #include "tensorflow/core/platform/stream_executor.h"
 #include "tensorflow/core/platform/test.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda.h"
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
@@ -164,5 +163,4 @@ TEST(TensorrtTest, BasicFunctions) {
 } // namespace
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -17,11 +17,9 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -52,8 +50,7 @@ void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space) {
 } // namespace tensorrt
 } // namespace tensorflow
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -113,5 +110,4 @@ void TRTDeviceAllocator::free(void* memory) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -20,11 +20,9 @@ limitations under the License.
 #include "tensorflow/core/framework/allocator.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -33,8 +31,7 @@ void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space);
 } // namespace tensorrt
 } // namespace tensorflow
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -69,6 +66,5 @@ class TRTDeviceAllocator : public TRTBaseAllocator {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_

View File

@@ -25,8 +25,7 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/platform/errors.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -257,5 +256,4 @@ Status TrtEnqueue(nvinfer1::IExecutionContext* execution_context,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -24,8 +24,7 @@ limitations under the License.
 #include "tensorflow/core/framework/tensor_shape.h"
 #include "tensorflow/core/lib/core/status.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -91,7 +90,6 @@ Status TrtEnqueue(nvinfer1::IExecutionContext* execution_context,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ENGINE_UTILS_H_

View File

@@ -20,8 +20,7 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 namespace tensorflow {
@@ -147,5 +146,4 @@ TRTInt8Calibrator::~TRTInt8Calibrator() {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif
-#endif
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -22,8 +22,7 @@ limitations under the License.
 #include <utility>
 #include "tensorflow/core/platform/mutex.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
@@ -101,6 +100,5 @@ struct TRTInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif
-#endif
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_INT8_CALIBRATOR_H_

View File

@@ -15,8 +15,7 @@ limitations under the License.
 #include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
 #include "tensorflow/core/platform/logging.h"
@@ -68,5 +67,4 @@ REGISTER_TENSORRT_LOGGER("DefaultLogger", Logger::GetLogger());
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA
-#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -18,8 +18,7 @@ limitations under the License.
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -40,7 +39,6 @@ class Logger : public nvinfer1::ILogger {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_LOGGER_H_

View File

@@ -23,8 +23,7 @@ limitations under the License.
 #include "tensorflow/core/framework/tensor_shape.h"
 #include "tensorflow/core/platform/mutex.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -141,5 +140,4 @@ EngineContext* TRTEngineCacheResource::GetEngineContext(const int profile_id) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT

View File

@@ -115,8 +115,7 @@ class LRUCache {
   }
 };
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 struct EngineContext {
   EngineContext() {} // Creates an empty context.
@@ -223,8 +222,7 @@ class TRTEngineCacheResource : public ResourceBase {
   TrtShapeOptimizationProfile profiles_;
 };
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 } // namespace tensorrt
 } // namespace tensorflow

View File

@@ -29,8 +29,7 @@ limitations under the License.
 #include "tensorflow/core/lib/strings/str_util.h"
 #include "tensorflow/core/lib/strings/strcat.h"
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
@@ -173,6 +172,5 @@ class TrtShapeOptimizationProfile {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_SHAPE_OPTIMIZATION_PROFILES_H_

View File

@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA
-#if GOOGLE_TENSORRT
+#if GOOGLE_CUDA && GOOGLE_TENSORRT
 #include <string.h>
@@ -214,5 +213,4 @@ TEST_F(TrtShapeOptimizationProfileTest, Dynamic) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_TENSORRT
-#endif // GOOGLE_CUDA
+#endif // GOOGLE_CUDA && GOOGLE_TENSORRT