From 8187dd591f67862fe57a95dfa14434a46f52ca62 Mon Sep 17 00:00:00 2001
From: Adrian Kuegel
Date: Thu, 27 Feb 2020 06:36:46 -0800
Subject: [PATCH] Add logging in case we fallback to the secondary backend.

This makes it possible to see from the logs whether the failover compiler
used the secondary (classic xla gpu) backend instead of the mlir_gpu
backend.

PiperOrigin-RevId: 297583118
Change-Id: Ic76736684abec9764a16408fda1d52a7b2323bfc
---
 .../compiler/xla/service/mlir_gpu/failover_compiler.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc b/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc
index 7855f1da1cf..f71267935cd 100644
--- a/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc
+++ b/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc
@@ -32,6 +32,8 @@ StatusOr<std::unique_ptr<HloModule>> FailoverCompiler::RunHloPasses(
   auto result =
       primary_->RunHloPasses(module->Clone(), stream_exec, device_allocator);
   if (IsUnimplemented(result)) {
+    VLOG(2) << "RunHloPasses resulted in " << result.status()
+            << ", falling back to secondary backend";
     return secondary_->RunHloPasses(std::move(module), stream_exec,
                                     device_allocator);
   }
@@ -44,6 +46,8 @@ StatusOr<std::unique_ptr<Executable>> FailoverCompiler::RunBackend(
   auto result =
       primary_->RunBackend(module->Clone(), stream_exec, device_allocator);
   if (IsUnimplemented(result)) {
+    VLOG(2) << "RunBackend resulted in " << result.status()
+            << ", falling back to secondary backend";
     return secondary_->RunBackend(std::move(module), stream_exec,
                                   device_allocator);
   }
@@ -78,6 +82,8 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> FailoverCompiler::Compile(
     }(modules[i]->Clone());
 
     if (IsUnimplemented(executable)) {
+      VLOG(2) << "Compile resulted in " << executable.status()
+              << ", falling back to secondary backend";
       TF_ASSIGN_OR_RETURN(
           modules[i],
          secondary_->RunHloPasses(std::move(modules[i]), stream_execs[i][0],
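
Note on surfacing these messages: the new log statements are emitted at VLOG
level 2, so they do not appear with default logging. Assuming a standard
TensorFlow build, setting the environment variable
TF_CPP_VMODULE=failover_compiler=2 (or, more coarsely, TF_CPP_MIN_VLOG_LEVEL=2)
before running an XLA GPU workload should surface them, making it possible to
confirm from the logs whether the secondary (classic XLA GPU) backend was used
instead of mlir_gpu.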