Rollback "[TF:TRT] Cosmetic fix."

PiperOrigin-RevId: 317407274
Change-Id: I73cd486acf9091e6678e553ab9b0545288f73324
Authored by Sanjoy Das on 2020-06-19 17:08:17 -07:00; committed by TensorFlower Gardener
parent 6116b7f911
commit f840a62268
39 changed files with 162 additions and 83 deletions
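The change being rolled back had collapsed TF2TensorRT's nested build guards into a single combined condition; this revert restores the nested form throughout the listed files. A minimal sketch of the two equivalent guard styles (the guarded body is a placeholder, not taken from any particular file below):

    // Combined guard introduced by the cosmetic fix (removed by this rollback):
    #if GOOGLE_CUDA && GOOGLE_TENSORRT
    // ... CUDA/TensorRT-only code ...
    #endif  // GOOGLE_CUDA && GOOGLE_TENSORRT

    // Nested guards restored by this rollback:
    #if GOOGLE_CUDA
    #if GOOGLE_TENSORRT
    // ... CUDA/TensorRT-only code ...
    #endif  // GOOGLE_TENSORRT
    #endif  // GOOGLE_CUDA

Both forms compile the enclosed code only when both GOOGLE_CUDA and GOOGLE_TENSORRT are set; the hunks below change only the guard style.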

View File

@@ -16,7 +16,8 @@ limitations under the License.
 #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
 #define TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "tensorflow/core/platform/logging.h"
@@ -28,6 +29,7 @@ namespace tensorrt {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif
+#endif
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_

View File

@@ -53,7 +53,8 @@ limitations under the License.
 #include "tensorflow/core/util/device_name_utils.h"
 #include "tensorflow/tools/graph_transforms/transform_utils.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -883,4 +884,5 @@ Status ConvertAfterShapes(const ConversionParams& params) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -24,7 +24,8 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -65,6 +66,7 @@ Status RegisterGraphToFunctionLibrary(const GraphDef& segment_graph_def,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_GRAPH_H_

View File

@@ -34,7 +34,8 @@ limitations under the License.
 #include "tensorflow/core/protobuf/config.pb.h" // NOLINT
 #include "tensorflow/core/public/session.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -230,4 +231,5 @@ TEST_F(ConvertAfterShapesTest, DirectlyConnectedEngines) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -59,7 +59,8 @@ limitations under the License.
 #include "tensorflow/core/util/env_var.h"
 #include "tensorflow/core/util/strided_slice_op.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 #include "third_party/tensorrt/NvInferPlugin.h"
@@ -6257,4 +6258,5 @@ bool OutputEdgeValidator::operator()(const Edge* out_edge) const {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -33,7 +33,8 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/stream_executor/lib/statusor.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -693,6 +694,7 @@ BinaryOperationMap();
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_CONVERT_NODES_H_

View File

@@ -21,7 +21,8 @@ limitations under the License.
 #include <unordered_map>
 #include <vector>
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
@@ -6635,4 +6636,5 @@ TEST_F(OpConverterTest, ConvertPad) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -12,7 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
@@ -57,4 +58,5 @@ LoggerRegistry* GetLoggerRegistry() {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -19,8 +19,7 @@ limitations under the License.
 #include "tensorflow/core/platform/macros.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -54,5 +53,5 @@ class RegisterLogger {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_LOGGER_REGISTRY_H_

View File

@@ -28,7 +28,8 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/stacktrace.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
 namespace convert {
@@ -301,4 +302,5 @@ static VerboseCustomGraphOptimizerRegistrar TRTOptimizationPass_Registrar(
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif
+#endif

View File

@@ -23,7 +23,8 @@ limitations under the License.
 #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -82,5 +83,6 @@ class TRTOptimizationPass : public grappler::CustomGraphOptimizer {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
+#endif // GOOGLE_TENSORRT
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_TRT_OPTIMIZATION_PASS_H_

View File

@@ -22,7 +22,8 @@ limitations under the License.
 #include "tensorflow/core/framework/resource_mgr.h"
 #include "tensorflow/core/lib/core/refcount.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -66,4 +67,5 @@ REGISTER_KERNEL_BUILDER(Name("GetCalibrationDataOp").Device(DEVICE_GPU),
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -48,7 +48,8 @@ limitations under the License.
 #include "tensorflow/core/util/env_var.h"
 #include "tensorflow/stream_executor/lib/statusor.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
@@ -1008,4 +1009,5 @@ REGISTER_KERNEL_BUILDER(Name("TRTEngineOp").Device(DEVICE_GPU), TRTEngineOp);
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -50,7 +50,8 @@ limitations under the License.
 #include "tensorflow/core/platform/status.h"
 #include "tensorflow/core/public/version.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -305,4 +306,5 @@ TYPED_TEST(TRTEngineOpTest, Basic) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -33,7 +33,8 @@ limitations under the License.
 #include "tensorflow/core/platform/mutex.h"
 #include "tensorflow/core/platform/thread_annotations.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -250,4 +251,5 @@ REGISTER_KERNEL_BUILDER(Name("SerializeTRTResource").Device(DEVICE_GPU),
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -48,7 +48,8 @@ limitations under the License.
 #include "tensorflow/core/platform/tstring.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -245,4 +246,5 @@ TEST_F(TRTEngineResourceOpsTest, Basic) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -13,7 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "tensorflow/core/framework/common_shape_fns.h"
 #include "tensorflow/core/framework/op.h"
@@ -33,4 +34,5 @@ Returns calibration data for the given resource name
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -13,7 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "tensorflow/core/framework/common_shape_fns.h"
 #include "tensorflow/core/framework/op.h"
@@ -58,4 +59,5 @@ REGISTER_OP("TRTEngineOp")
 .Attr("static_engine: bool = true");
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -13,7 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "tensorflow/core/framework/common_shape_fns.h"
 #include "tensorflow/core/framework/op.h"
@@ -45,4 +46,5 @@ REGISTER_OP("SerializeTRTResource")
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -17,7 +17,8 @@ limitations under the License.
 #include "tensorflow/compiler/tf2tensorrt/plugin/trt_plugin.h"
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #define EIGEN_USE_GPU // For definition of Eigen::GpuDevice.
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "tensorflow/core/util/gpu_kernel_helper.h"
@@ -233,4 +234,5 @@ REGISTER_TFTRT_PLUGIN(CastPluginCreator);
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
+#endif // GOOGLE_TENSORRT

View File

@@ -17,7 +17,8 @@ limitations under the License.
 #include <cstring>
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -29,4 +30,5 @@ const char* kTfTrtPluginNamespace = "TF";
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
+#endif // GOOGLE_TENSORRT

View File

@@ -20,7 +20,8 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -89,6 +90,7 @@ class TrtPluginRegistrar {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_PLUGIN_TRT_PLUGIN_H_

View File

@@ -35,7 +35,8 @@ limitations under the License.
 #include "tensorflow/core/platform/types.h"
 #include "tensorflow/core/util/env_var.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -1061,4 +1062,5 @@ Status SegmentGraph(const Graph* tf_graph,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -25,7 +25,8 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -66,6 +67,7 @@ Status SegmentGraph(const Graph* tf_graph,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_SEGMENT_SEGMENT_H_

View File

@@ -26,7 +26,8 @@ limitations under the License.
 #include "tensorflow/core/platform/types.h"
 #include "tensorflow/core/public/session.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -521,4 +522,5 @@ TEST_F(SegmentTest, IncompatibleBatchSizes) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -19,7 +19,8 @@ limitations under the License.
 #include "absl/strings/str_format.h"
 #include "absl/types/optional.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -216,6 +217,7 @@ UnionFind<T>* UnionFind<T>::FindRoot() {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_SEGMENT_UNION_FIND_H_

View File

@@ -18,7 +18,8 @@ limitations under the License.
 #include "tensorflow/core/platform/stream_executor.h"
 #include "tensorflow/core/platform/test.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda.h"
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
@@ -163,4 +164,5 @@ TEST(TensorrtTest, BasicFunctions) {
 } // namespace
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -17,9 +17,11 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 namespace tensorflow {
 namespace tensorrt {
@@ -50,7 +52,8 @@ void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space) {
 } // namespace tensorrt
 } // namespace tensorflow
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -110,4 +113,5 @@ void TRTDeviceAllocator::free(void* memory) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -20,9 +20,11 @@ limitations under the License.
 #include "tensorflow/core/framework/allocator.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 namespace tensorflow {
 namespace tensorrt {
@@ -31,7 +33,8 @@ void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space);
 } // namespace tensorrt
 } // namespace tensorflow
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 namespace tensorflow {
 namespace tensorrt {
@@ -66,5 +69,6 @@ class TRTDeviceAllocator : public TRTBaseAllocator {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_

View File

@@ -25,7 +25,8 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/platform/errors.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -256,4 +257,5 @@ Status TrtEnqueue(nvinfer1::IExecutionContext* execution_context,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -24,7 +24,8 @@ limitations under the License.
 #include "tensorflow/core/framework/tensor_shape.h"
 #include "tensorflow/core/lib/core/status.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -90,6 +91,7 @@ Status TrtEnqueue(nvinfer1::IExecutionContext* execution_context,
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ENGINE_UTILS_H_

View File

@@ -20,7 +20,8 @@ limitations under the License.
 #include "tensorflow/core/platform/logging.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 namespace tensorflow {
@@ -146,4 +147,5 @@ TRTInt8Calibrator::~TRTInt8Calibrator() {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif
+#endif

View File

@@ -22,7 +22,8 @@ limitations under the License.
 #include <utility>
 #include "tensorflow/core/platform/mutex.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/gpus/cuda/include/cuda_runtime_api.h"
 #include "third_party/tensorrt/NvInfer.h"
@@ -100,5 +101,6 @@ struct TRTInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif
+#endif
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_INT8_CALIBRATOR_H_

View File

@@ -15,7 +15,8 @@ limitations under the License.
 #include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
 #include "tensorflow/core/platform/logging.h"
@@ -67,4 +68,5 @@ REGISTER_TENSORRT_LOGGER("DefaultLogger", Logger::GetLogger());
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
+#endif // GOOGLE_TENSORRT

View File

@@ -18,7 +18,8 @@ limitations under the License.
 #include "tensorflow/core/platform/types.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -39,6 +40,7 @@ class Logger : public nvinfer1::ILogger {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_LOGGER_H_

View File

@@ -23,7 +23,8 @@ limitations under the License.
 #include "tensorflow/core/framework/tensor_shape.h"
 #include "tensorflow/core/platform/mutex.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
 namespace tensorflow {
@@ -140,4 +141,5 @@ EngineContext* TRTEngineCacheResource::GetEngineContext(const int profile_id) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA

View File

@@ -115,7 +115,8 @@ class LRUCache {
 }
 };
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 struct EngineContext {
 EngineContext() {} // Creates an empty context.
@@ -222,7 +223,8 @@ class TRTEngineCacheResource : public ResourceBase {
 TrtShapeOptimizationProfile profiles_;
 };
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 } // namespace tensorrt
 } // namespace tensorflow

View File

@@ -29,7 +29,8 @@ limitations under the License.
 #include "tensorflow/core/lib/strings/str_util.h"
 #include "tensorflow/core/lib/strings/strcat.h"
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include "third_party/tensorrt/NvInfer.h"
@@ -172,5 +173,6 @@ class TrtShapeOptimizationProfile {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
 #endif // TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_SHAPE_OPTIMIZATION_PROFILES_H_

View File

@@ -13,7 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#if GOOGLE_CUDA && GOOGLE_TENSORRT
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
 #include <string.h>
@@ -213,4 +214,5 @@ TEST_F(TrtShapeOptimizationProfileTest, Dynamic) {
 } // namespace tensorrt
 } // namespace tensorflow
-#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA