[XLA] Remove --xla_dump_ir flag.

Just dump IR whenever we're dumping the HLO modules.

PiperOrigin-RevId: 238120677
This commit is contained in:
Justin Lebar 2019-03-12 16:19:20 -07:00 committed by TensorFlower Gardener
parent b4db28ca0d
commit 130d901289
5 changed files with 8 additions and 17 deletions

View File

@ -355,11 +355,6 @@ static void AllocateFlags() {
"If specified, dumps HLO before and after optimization passes which "
"match this regular expression, in addition to dumping at the very "
"beginning and end of compilation."),
-      tensorflow::Flag("xla_dump_ir",
-                       bool_setter_for(&DebugOptions::set_xla_dump_ir),
-                       flag_values->xla_dump_ir(),
-                       "If specified, dumps intermediate results (e.g. LLVM "
-                       "IR) to the directory specified by --xla_dump_to."),
tensorflow::Flag(
"xla_hlo_graph_addresses",
bool_setter_for(&DebugOptions::set_xla_hlo_graph_addresses),

View File

@ -441,7 +441,7 @@ Status InitializeModuleHooks(
//
// * Calls the user supplied module hook.
// * Writes out the IR to a file in the output directory designated by
-//     --xla_dump_ir
+//     --xla_dump_to
const HloModule* hlo_module_ptr = &hlo_module;
auto hook = [user_pre_optimization_hook, user_post_optimization_hook,
hlo_module_ptr](bool optimized,
@ -474,7 +474,7 @@ Status VerifyLlvmModule(const llvm::Module& llvm_module) {
<< "Invalid LLVM IR before optimizations:\n"
<< err_stream.str()
<< "\nThis probably indicates a bug in the HLO -> LLVM IR lowering. "
-         "Rerun with --xla_dump_ir to get the IR. ";
+         "Rerun with --xla_dump_to to get the IR. ";
return Status::OK();
}

View File

@ -717,7 +717,7 @@ StatusOr<std::unique_ptr<Executable>> NVPTXCompiler::RunBackend(
<< "Invalid LLVM IR before optimizations:\n"
<< err_stream.str()
<< "\nThis probably indicates a bug in the HLO -> LLVM IR lowering. "
-           "Rerun with --xla_dump_ir to get the IR. ";
+           "Rerun with --xla_dump_to to get the IR. ";
}
string libdevice_dir;
@ -756,8 +756,7 @@ StatusOr<std::unique_ptr<Executable>> NVPTXCompiler::RunBackend(
TF_CHECK_OK(user_post_optimization_hook_(llvm_module));
}
// Write PTX to IR dump directory, if IR dumping was requested.
-  const auto& debug_opts = module->config().debug_options();
-  if (DumpingEnabledForHloModule(*module) && debug_opts.xla_dump_ir()) {
+  if (DumpingEnabledForHloModule(*module)) {
DumpToFileInDirOrStdout(*module, "ptx", ptx);
}

View File

@ -584,7 +584,7 @@ static Status CreateAndWriteStringToFile(const string& directory_name,
void DumpIrIfEnabled(const HloModule& hlo_module,
const llvm::Module& llvm_module, bool optimized) {
const auto& debug_opts = hlo_module.config().debug_options();
-  if (!debug_opts.xla_dump_ir() || !DumpingEnabledForHloModule(hlo_module)) {
+  if (!DumpingEnabledForHloModule(hlo_module)) {
return;
}
// We can end up compiling different modules with the same name when using

View File

@ -64,8 +64,6 @@ message DebugOptions {
// Show addresses of HLO ops in graph dump.
bool xla_hlo_graph_addresses = 2;
-  reserved 5;  // Was xla_hlo_dump_as_graphdef
// Instrument the computation to collect per-HLO cycle counts.
bool xla_hlo_profile = 9;
@ -243,10 +241,6 @@ message DebugOptions {
// Dump HLO graphs as an HTML (DOT -> SVG inlined in HTML)
bool xla_dump_hlo_as_html = 116;
-  // If true, we will dump intermediate results (e.g. LLVM IR) to the
-  // --xla_dump_to directory.
-  bool xla_dump_ir = 117;
// If true, every time an HLO module is run, we will dump an HloSnapshot
// (essentially, a serialized module plus its inputs) to the --xla_dump_to
// directory.
@ -261,6 +255,9 @@ message DebugOptions {
// Extra options to pass to the compilation backend (e.g. LLVM); specific
// interpretation of these values is left to the backend.
map<string, string> xla_backend_extra_options = 500;
+  reserved 117;  // was xla_dump_ir
+  reserved 5;    // Was xla_hlo_dump_as_graphdef
}
// These settings control how XLA compiles and/or runs code. Not all settings