From 85f156915b9213fc21f3096d22e04cc7e0aea4fc Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 31 Mar 2020 18:33:38 -0700
Subject: [PATCH] don't "print xla expression" in trunk annotation.

PiperOrigin-RevId: 304090402
Change-Id: Iff60d4b92ce38d8e1d6204b5013f5b8d9fcf459b
---
 tensorflow/compiler/xla/service/gpu/gpu_executable.cc | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
index d4797e094fd..247413d4961 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
@@ -96,10 +96,8 @@ void GpuExecutable::ComputeThunkAnnotations() {
     const HloInstruction* hlo = thunk->hlo_instruction();
     CHECK(hlo);
     thunk_annotations_[thunk] =
-        absl::StrFormat("%s:#hlo_op=%s,hlo_module=%s#",
-                        hlo->ToStringWithCanonicalNameMap(
-                            HloPrintOptions::Canonical(), &canonical_name_map),
-                        hlo->name(), hlo->GetModule()->name());
+        absl::StrFormat("Thunk#hlo_op=%s,hlo_module=%s#", hlo->name(),
+                        hlo->GetModule()->name());
   }
 }
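
For illustration only (not part of the patch): a minimal standalone sketch of the annotation string produced by the new code path. The hlo_op and hlo_module values below are hypothetical stand-ins for hlo->name() and hlo->GetModule()->name(), which the real code reads from the HloInstruction attached to each Thunk.

// Sketch of the new thunk annotation format, assuming hypothetical
// op/module names; the patch drops the expensive canonical HLO
// expression and keeps only a fixed "Thunk" prefix plus these names.
#include <iostream>
#include <string>

#include "absl/strings/str_format.h"

int main() {
  const std::string hlo_op = "fusion.42";      // hypothetical HLO op name
  const std::string hlo_module = "cluster_0";  // hypothetical module name

  const std::string annotation =
      absl::StrFormat("Thunk#hlo_op=%s,hlo_module=%s#", hlo_op, hlo_module);

  std::cout << annotation << std::endl;
  // Prints: Thunk#hlo_op=fusion.42,hlo_module=cluster_0#
  return 0;
}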