diff --git a/tensorflow/compiler/xla/debug_options_flags.cc b/tensorflow/compiler/xla/debug_options_flags.cc index 716fcf217b1..aaef4a15fae 100644 --- a/tensorflow/compiler/xla/debug_options_flags.cc +++ b/tensorflow/compiler/xla/debug_options_flags.cc @@ -355,11 +355,6 @@ static void AllocateFlags() { "If specified, dumps HLO before and after optimization passes which " "match this regular expression, in addition to dumping at the very " "beginning and end of compilation."), - tensorflow::Flag("xla_dump_ir", - bool_setter_for(&DebugOptions::set_xla_dump_ir), - flag_values->xla_dump_ir(), - "If specified, dumps intermediate results (e.g. LLVM " - "IR) to the directory specified by --xla_dump_to."), tensorflow::Flag( "xla_hlo_graph_addresses", bool_setter_for(&DebugOptions::set_xla_hlo_graph_addresses), diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc index 04c99a9417a..d5845577be7 100644 --- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc +++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc @@ -441,7 +441,7 @@ Status InitializeModuleHooks( // // * Calls the user supplied module hook. // * Writes out the IR to a file in the output directory designated by - // --xla_dump_ir + // --xla_dump_to const HloModule* hlo_module_ptr = &hlo_module; auto hook = [user_pre_optimization_hook, user_post_optimization_hook, hlo_module_ptr](bool optimized, @@ -474,7 +474,7 @@ Status VerifyLlvmModule(const llvm::Module& llvm_module) { << "Invalid LLVM IR before optimizations:\n" << err_stream.str() << "\nThis probably indicates a bug in the HLO -> LLVM IR lowering. " - "Rerun with --xla_dump_ir to get the IR. "; + "Rerun with --xla_dump_to to get the IR. ";
return Status::OK(); } diff --git a/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc index 28d11a5c109..17b7c784396 100644 --- a/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc +++ b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc @@ -717,7 +717,7 @@ StatusOr> NVPTXCompiler::RunBackend( << "Invalid LLVM IR before optimizations:\n" << err_stream.str() << "\nThis probably indicates a bug in the HLO -> LLVM IR lowering. " - "Rerun with --xla_dump_ir to get the IR. "; + "Rerun with --xla_dump_to to get the IR. "; } string libdevice_dir; @@ -756,8 +756,7 @@ StatusOr> NVPTXCompiler::RunBackend( TF_CHECK_OK(user_post_optimization_hook_(llvm_module)); } // Write PTX to IR dump directory, if IR dumping was requested. - const auto& debug_opts = module->config().debug_options(); - if (DumpingEnabledForHloModule(*module) && debug_opts.xla_dump_ir()) { + if (DumpingEnabledForHloModule(*module)) { DumpToFileInDirOrStdout(*module, "ptx", ptx); } diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc index 3bd2762187e..cc21a9f60a9 100644 --- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc +++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc @@ -584,7 +584,7 @@ static Status CreateAndWriteStringToFile(const string& directory_name, void DumpIrIfEnabled(const HloModule& hlo_module, const llvm::Module& llvm_module, bool optimized) { const auto& debug_opts = hlo_module.config().debug_options(); - if (!debug_opts.xla_dump_ir() || !DumpingEnabledForHloModule(hlo_module)) { + if (!DumpingEnabledForHloModule(hlo_module)) { return; } // We can end up compiling different modules with the same name when using diff --git a/tensorflow/compiler/xla/xla.proto b/tensorflow/compiler/xla/xla.proto index 395b9451202..879929697a4 100644 --- a/tensorflow/compiler/xla/xla.proto +++ b/tensorflow/compiler/xla/xla.proto @@ -64,8 +64,6 @@ message 
DebugOptions { // Show addresses of HLO ops in graph dump. bool xla_hlo_graph_addresses = 2; - reserved 5; // Was xla_hlo_dump_as_graphdef - // Instrument the computation to collect per-HLO cycle counts. bool xla_hlo_profile = 9; @@ -243,10 +241,6 @@ message DebugOptions { // Dump HLO graphs as an HTML (DOT -> SVG inlined in HTML) bool xla_dump_hlo_as_html = 116; - // If true, we will dump intermediate results (e.g. LLVM IR) to the - // --xla_dump_to directory. - bool xla_dump_ir = 117; - // If true, every time an HLO module is run, we will dump an HloSnapshot // (essentially, a serialized module plus its inputs) to the --xla_dump_to // directory. @@ -261,6 +255,9 @@ message DebugOptions { // Extra options to pass to the compilation backend (e.g. LLVM); specific // interpretation of these values is left to the backend. map xla_backend_extra_options = 500; + + reserved 117; // Was xla_dump_ir + reserved 5; // Was xla_hlo_dump_as_graphdef } // These settings control how XLA compiles and/or runs code. Not all settings