[TF:TRT] Add a prefix to the warning messages from TF-TRT.

Add a LOG_WARNING_WITH_PREFIX macro to common/utils.h and replace uses of
LOG(WARNING) with this new macro.
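
For context, the macro is a thin wrapper that starts each warning record with a
fixed prefix before the caller's message is streamed in. A minimal sketch of the
definition and the before/after call pattern (the message text is illustrative,
not taken from the diff below):

    #include "tensorflow/core/platform/logging.h"

    // As added in common/utils.h: expands to an open LOG(WARNING) stream that
    // already carries the "TF-TRT Warning: " prefix, so operands chained with
    // << are appended after the prefix.
    #define LOG_WARNING_WITH_PREFIX LOG(WARNING) << "TF-TRT Warning: "

    void Example() {
      // Before: plain warning, indistinguishable from other TF warnings.
      LOG(WARNING) << "engine creation failed";
      // After: logs "TF-TRT Warning: engine creation failed".
      LOG_WARNING_WITH_PREFIX << "engine creation failed";
    }

This makes TF-TRT warnings easy to grep for in mixed TensorFlow logs without
changing the underlying logging machinery.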

PiperOrigin-RevId: 317330336
Change-Id: Ife0aa0347dd72f6eb0f8805af4d46a7d4cb099ea
Bixia Zheng 2020-06-19 10:17:18 -07:00 committed by TensorFlower Gardener
parent d00691f7aa
commit 57f9d638c0
8 changed files with 108 additions and 44 deletions

tensorflow/compiler/tf2tensorrt/BUILD

@@ -79,6 +79,15 @@ tf_cuda_cc_test(
     ]),
 )
 
+cc_library(
+    name = "common_utils",
+    hdrs = ["common/utils.h"],
+    copts = tf_copts(),
+    deps = [
+        "//tensorflow/core/platform:logging",
+    ] + if_tensorrt([":tensorrt_lib"]),
+)
+
 cc_library(
     name = "trt_op_kernels",
     srcs = [
@@ -95,6 +104,7 @@ cc_library(
         ":trt_plugins",
         ":trt_resources",
         ":utils",
+        ":common_utils",
         "@com_google_absl//absl/memory",
         "@com_google_absl//absl/strings",
         "@local_config_cuda//cuda:cuda_headers",
@@ -240,6 +250,7 @@ tf_cuda_library(
     hdrs = ["utils/trt_logger.h"],
     visibility = ["//visibility:public"],
     deps = [
+        ":common_utils",
         ":logger_registry",
         "//tensorflow/core:lib_proto_parsing",
     ] + if_tensorrt([":tensorrt_lib"]),
@@ -375,6 +386,7 @@ tf_cuda_library(
         "convert/trt_optimization_pass.h",
     ],
     deps = [
+        ":common_utils",
        ":logger_registry",
        ":segment",
        ":trt_allocator",
@@ -488,6 +500,7 @@ cc_library(
     ],
     copts = tf_copts(),
     deps = [
+        ":common_utils",
        "//tensorflow/core:graph",
        "//tensorflow/core:lib",
        "//tensorflow/core:lib_internal",
@@ -575,6 +588,7 @@ cc_library(
     hdrs = ["utils/py_utils.h"],
     copts = tf_copts(),
     deps = if_tensorrt([
+        ":common_utils",
         ":tensorrt_lib",
         "//tensorflow/stream_executor/platform:dso_loader",
     ]),

tensorflow/compiler/tf2tensorrt/common/utils.h (new file)

@@ -0,0 +1,35 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
+#define TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
+
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
+
+#include "tensorflow/core/platform/logging.h"
+
+namespace tensorflow {
+namespace tensorrt {
+
+#define LOG_WARNING_WITH_PREFIX LOG(WARNING) << "TF-TRT Warning: "
+
+}  // namespace tensorrt
+}  // namespace tensorflow
+
+#endif
+#endif
+
+#endif  // TENSORFLOW_COMPILER_TF2TENSORRT_COMMON_UTILS_H_
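
Note that although the #define sits inside the tensorflow::tensorrt namespaces,
preprocessor macros are not scoped by namespaces: LOG_WARNING_WITH_PREFIX is
visible to any translation unit that includes this header, and only when both
GOOGLE_CUDA and GOOGLE_TENSORRT are set, matching the guards above.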

tensorflow/compiler/tf2tensorrt/convert/convert_graph.cc

@@ -25,6 +25,7 @@ limitations under the License.
 #include <vector>
 
 #include "absl/strings/str_cat.h"
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
@@ -276,7 +277,8 @@ Status GetEngineInfo(const Graph* g,
   if (segment_devices.size() == 1) {
     info->device = *segment_devices.begin();
   } else if (segment_devices.size() > 1) {
-    LOG(WARNING) << "Detected multiple (" << segment_devices.size()
+    LOG_WARNING_WITH_PREFIX
+        << "Detected multiple (" << segment_devices.size()
         << ") devices for the segment. Picking first one to continue.";
     info->device = *segment_devices.begin();
   } else {
@@ -663,7 +665,7 @@ std::pair<int, Allocator*> GetDeviceAndAllocator(const ConversionParams& params,
       StrAppend(&msg, engine.device, "': ");
       for (auto d : devices) StrAppend(&msg, d->name(), ", ");
       StrAppend(&msg, ". Will get the allocator from first one.");
-      LOG(WARNING) << msg;
+      LOG_WARNING_WITH_PREFIX << msg;
     }
     AllocatorAttributes alloc_attr;
     cuda_device_id = devices[0]->tensorflow_gpu_device_info()->gpu_id;
@@ -671,7 +673,7 @@ std::pair<int, Allocator*> GetDeviceAndAllocator(const ConversionParams& params,
     VLOG(1) << "Using allocator " << dev_allocator->Name()
             << " and cuda_device_id " << cuda_device_id;
   } else {
-    LOG(WARNING) << "Cluster is set but device '" << engine.device
+    LOG_WARNING_WITH_PREFIX << "Cluster is set but device '" << engine.device
         << "' is not found in the cluster";
   }
   return std::make_pair(cuda_device_id, dev_allocator);
@@ -770,8 +772,8 @@ Status ConvertAfterShapes(const ConversionParams& params) {
     Status status = GetEngineInfo(&graph, static_graph_properties, curr_segment,
                                   node_map, reverse_topo_order, &curr_engine);
     if (!status.ok()) {
-      LOG(WARNING) << "Failed to get engine info for segment " << t << ": "
-                   << status;
+      LOG_WARNING_WITH_PREFIX << "Failed to get engine info for segment " << t
+                              << ": " << status;
       continue;
     }
     curr_engine.precision_mode = params.precision_mode;
@@ -784,8 +786,9 @@ Status ConvertAfterShapes(const ConversionParams& params) {
                                             &graph, curr_engine.engine_name);
     if (!status.ok()) {
-      LOG(WARNING) << "Failed to register segment graphdef to the library " << t
-                   << ": " << status;
+      LOG_WARNING_WITH_PREFIX
+          << "Failed to register segment graphdef to the library " << t << ": "
+          << status;
       continue;
     }
@@ -836,7 +839,8 @@ Status ConvertAfterShapes(const ConversionParams& params) {
       alloc.reset(new TRTDeviceAllocator(device_alloc.second));
     } else {
       // Setting allocator as nullptr should get revert to the cudamalloc
-      LOG(WARNING) << "Can't identify the cuda device. Running on device 0 ";
+      LOG_WARNING_WITH_PREFIX
+          << "Can't identify the cuda device. Running on device 0 ";
     }
     cudaSetDevice(cuda_device_id);
     auto status =
@@ -850,7 +854,7 @@ Status ConvertAfterShapes(const ConversionParams& params) {
       LOG(INFO) << "Replaced " << msg << ".";
     } else {
       // Graph is not modified.
-      LOG(WARNING) << "Cannot replace " << msg
+      LOG_WARNING_WITH_PREFIX << "Cannot replace " << msg
           << " reason: " << status.error_message()
          << " (keeping original segment).";
     }

tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc

@@ -31,6 +31,7 @@ limitations under the License.
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_format.h"
 #include "absl/strings/string_view.h"
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
 #include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
@@ -1214,13 +1215,14 @@ static void InitializeTrtPlugins(nvinfer1::ILogger* trt_logger) {
   nvinfer1::IPluginCreator* const* trt_plugin_creator_list =
       getPluginRegistry()->getPluginCreatorList(&num_trt_plugins);
   if (!trt_plugin_creator_list) {
-    LOG(WARNING) << "Can not find any TensorRT plugins in registry.";
+    LOG_WARNING_WITH_PREFIX << "Can not find any TensorRT plugins in registry.";
   } else {
     VLOG(1) << "Found the following " << num_trt_plugins
             << " TensorRT plugins in registry:";
     for (int i = 0; i < num_trt_plugins; ++i) {
       if (!trt_plugin_creator_list[i]) {
-        LOG(WARNING) << "TensorRT plugin at index " << i
+        LOG_WARNING_WITH_PREFIX
+            << "TensorRT plugin at index " << i
             << " is not accessible (null pointer returned by "
             "getPluginCreatorList for this plugin)";
       } else {
@@ -1827,7 +1829,7 @@ void Converter::MaybeApplyQuantizationRanges() {
       // are tensors which are created internally by TF-TRT. The ranges for
       // these unnamed ITensors are always inferred from user provided ranges,
       // thus there will also be a warning for the range(s) the user missed.
-      LOG(WARNING) << "Quantization range was not found for "
+      LOG_WARNING_WITH_PREFIX << "Quantization range was not found for "
           << tensor->getName() << ". "
          << "Setting invalid quantization range.";
       // Set the range to something unusable so the engine will fail if it
@@ -4898,7 +4900,8 @@ Status ConvertFusedBatchNorm(OpConverterParams* params) {
   // Trying to use batchnorm in training mode is a very common problem.
   // Because the error message will only be printed in VLOG(1) by the
   // segmenter, we issue a special warning so that users will actually see it.
-  LOG(WARNING) << node_def.op() << " only supports is_training=false. If you "
+  LOG_WARNING_WITH_PREFIX
+      << node_def.op() << " only supports is_training=false. If you "
      << "are using Keras, please call "
      << "keras.backend.set_learning_phase(0) before constructing "
      << "your model. At " << node_def.name();
@@ -6039,7 +6042,7 @@ Status ConvertGraphDefToEngine(
       const string error_message =
           StrCat("Validation failed for ", node_name, " and input slot ",
                  slot_number, ": ", status.error_message());
-      LOG(WARNING) << error_message;
+      LOG_WARNING_WITH_PREFIX << error_message;
       return Status(status.code(), error_message);
     }
     VLOG(2) << "Adding engine input tensor " << node_name << " with shape "

tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc

@@ -20,6 +20,7 @@ limitations under the License.
 #include "absl/strings/ascii.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/string_view.h"
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
@@ -613,7 +614,7 @@ void TRTEngineOp::ComputeAsync(OpKernelContext* ctx,
   }
   Status stat = ExecuteTrtEngine(ctx, engine_context, trt_context_idx);
   if (!stat.ok()) {
-    LOG(WARNING) << "Failed to execute engine: " << stat
+    LOG_WARNING_WITH_PREFIX << "Failed to execute engine: " << stat
         << " Retrying with native segment for " << name();
     // Release any outputs that are allocated, ExecuteNativeSegment will
     // re-allocate them and fail if they are currently allocated.
@@ -727,7 +728,7 @@ StatusOr<TrtUniquePtrType<nvinfer1::ICudaEngine>> TRTEngineOp::BuildEngine(
       calibrator, &engine, use_calibration, use_implicit_batch_, nullptr,
       &cache_resource->profiles_);
   if (!status.ok()) {
-    LOG(WARNING) << "Engine creation for " << name() << " failed. "
+    LOG_WARNING_WITH_PREFIX << "Engine creation for " << name() << " failed. "
        << "The native segment will be used instead. "
        << "Reason: " << status;
     // Store an empty engine in the cache for these input shapes so we don't try
@@ -791,7 +792,8 @@ StatusOr<std::pair<EngineContext*, int>> TRTEngineOp::GetEngine(
           FunctionDefToGraphDef(func_handle_, lib, &segment_graph_def_);
     }
     if (!status.ok()) {
-      LOG(WARNING) << "Getting segment graph for " << name() << " failed. "
+      LOG_WARNING_WITH_PREFIX << "Getting segment graph for " << name()
+                              << " failed. "
          << "Reason: " << status;
     }
   }
@@ -851,7 +853,8 @@ StatusOr<std::pair<EngineContext*, int>> TRTEngineOp::GetEngine(
   // If cache does not have a compatible engine then create a new engine.
   if (engine_contexts == nullptr) {
     if (!allow_build_at_runtime_) {
-      LOG(WARNING) << "Found no engine in cache matching input shapes. "
+      LOG_WARNING_WITH_PREFIX
+          << "Found no engine in cache matching input shapes. "
          << "Not building a new engine because "
          << "allow_build_at_runtime=False. "
          << "The native segment will be used instead.";

tensorflow/compiler/tf2tensorrt/segment/segment.cc

@@ -22,6 +22,7 @@ limitations under the License.
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_format.h"
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/segment/union_find.h"
 #include "tensorflow/core/common_runtime/graph_constructor.h"
 #include "tensorflow/core/graph/algorithm.h"
@@ -748,7 +749,8 @@ Status SegmentGraph(const Graph* tf_graph,
       exclude_node(status.error_message());
     } else if (tftrt_op_blacklist.count(node->tf_node()->type_string())) {
       // WARNING verbosity since the user explicitly requests this behavior.
-      LOG(WARNING) << "Blacklisted as TF-TRT candidate, "
+      LOG_WARNING_WITH_PREFIX
+          << "Blacklisted as TF-TRT candidate, "
          << "(Op type: " << node->tf_node()->type_string() << "), "
          << "(Op name: " << node->name() << ")";
       exclude_node("Blacklisted with the env var TF_TRT_OP_BLACKLIST");
@@ -1038,7 +1040,7 @@ Status SegmentGraph(const Graph* tf_graph,
       for (const auto& dev : dev_itr->second) {
         StrAppend(&s, dev, ", ");
       }
-      LOG(WARNING) << s;
+      LOG_WARNING_WITH_PREFIX << s;
     }
     segments->emplace_back(segment_nodes);

tensorflow/compiler/tf2tensorrt/utils/py_utils.cc

@@ -16,6 +16,7 @@ limitations under the License.
 #include "tensorflow/compiler/tf2tensorrt/utils/py_utils.h"
 
 #if GOOGLE_CUDA && GOOGLE_TENSORRT
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/stream_executor/platform/dso_loader.h"
 #include "third_party/tensorrt/NvInfer.h"
 #endif
@@ -27,7 +28,8 @@ bool IsGoogleTensorRTEnabled() {
 #if GOOGLE_CUDA && GOOGLE_TENSORRT
   auto handle_or = se::internal::DsoLoader::TryDlopenTensorRTLibraries();
   if (!handle_or.ok()) {
-    LOG(WARNING) << "Cannot dlopen some TensorRT libraries. If you would like "
+    LOG_WARNING_WITH_PREFIX
+        << "Cannot dlopen some TensorRT libraries. If you would like "
           "to use Nvidia GPU with TensorRT, please make sure the "
           "missing libraries mentioned above are installed properly.";
     return false;

tensorflow/compiler/tf2tensorrt/utils/trt_logger.cc

@@ -17,6 +17,7 @@ limitations under the License.
 #if GOOGLE_CUDA
 #if GOOGLE_TENSORRT
 
+#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
 #include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
 #include "tensorflow/core/platform/logging.h"
@@ -35,7 +36,7 @@ void Logger::log(Severity severity, const char* msg) {
       break;
     }
     case Severity::kWARNING: {
-      LOG(WARNING) << name_ << " " << msg;
+      LOG_WARNING_WITH_PREFIX << name_ << " " << msg;
       break;
     }
     case Severity::kERROR: {