[TF:XLA] Remove flag --xla_enable_buffer_reuse.

The corresponding issues have been fixed, so the flag should no longer be needed.

PiperOrigin-RevId: 161120874
parent a22dad9836
commit b87774c9d8
@@ -42,7 +42,6 @@ struct DebugOptionsFlags {
   string xla_dump_ir_to;
   string xla_dump_debug_json_to;
   bool xla_eliminate_hlo_implicit_broadcast;
-  bool xla_enable_buffer_reuse;
   bool xla_cpu_multi_thread_eigen;
   string xla_gpu_cuda_data_dir;
   bool xla_gpu_ftz;
@@ -79,7 +78,6 @@ void AllocateFlags() {
   flag_values->xla_dump_ir_to = "";
   flag_values->xla_dump_debug_json_to = "";
   flag_values->xla_eliminate_hlo_implicit_broadcast = false;
-  flag_values->xla_enable_buffer_reuse = true;
   flag_values->xla_cpu_multi_thread_eigen = true;
   flag_values->xla_gpu_cuda_data_dir = "./cuda_sdk_lib";
   flag_values->xla_gpu_ftz = false;
@@ -146,9 +144,6 @@ void AllocateFlags() {
                        "Eliminate implicit broadcasts when lowering user "
                        "computations to HLO instructions; use explicit "
                        "broadcast instead."),
-      tensorflow::Flag("xla_enable_buffer_reuse",
-                       &flag_values->xla_enable_buffer_reuse,
-                       "Enable reuse of buffers between HLO operations."),
       tensorflow::Flag("xla_cpu_multi_thread_eigen",
                        &flag_values->xla_cpu_multi_thread_eigen,
                        "When generating calls to Eigen in the CPU backend, "
@@ -207,7 +202,6 @@ xla::DebugOptions GetDebugOptionsFromFlags() {
   options.set_xla_dump_ir_to(flag_values->xla_dump_ir_to);
   options.set_xla_eliminate_hlo_implicit_broadcast(
       flag_values->xla_eliminate_hlo_implicit_broadcast);
-  options.set_xla_enable_buffer_reuse(flag_values->xla_enable_buffer_reuse);
   options.set_xla_dump_debug_json_to(flag_values->xla_dump_debug_json_to);
   options.set_xla_cpu_multi_thread_eigen(
       flag_values->xla_cpu_multi_thread_eigen);
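Taken together, the four hunks above show why deleting one debug flag touches four places in the same file: each XLA debug flag has a struct field for storage, a default assigned in AllocateFlags(), a tensorflow::Flag binding for the command line, and a copy into the xla::DebugOptions proto in GetDebugOptionsFromFlags(). A minimal sketch of that plumbing, using a hypothetical flag xla_example_flag (not a real flag; the real file declares many more):

    #include <vector>

    #include "tensorflow/core/util/command_line_flags.h"

    // Hypothetical flag "xla_example_flag", condensing the pattern that the
    // hunks above remove xla_enable_buffer_reuse from.
    struct DebugOptionsFlags {
      bool xla_example_flag;  // 1. Storage for the parsed value.
    };

    static DebugOptionsFlags* flag_values = nullptr;
    static std::vector<tensorflow::Flag>* flag_objects = nullptr;

    void AllocateFlags() {
      flag_values = new DebugOptionsFlags;
      flag_values->xla_example_flag = true;  // 2. Default value.
      flag_objects = new std::vector<tensorflow::Flag>({
          // 3. Command-line binding, handed to tensorflow::Flags::Parse().
          tensorflow::Flag("xla_example_flag", &flag_values->xla_example_flag,
                           "Description shown in help output."),
      });
      // 4. GetDebugOptionsFromFlags() then copies the parsed value into the
      //    proto: options.set_xla_example_flag(flag_values->xla_example_flag);
    }

Removing a flag therefore means deleting all four of these sites, which is exactly the shape of the hunks above.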
@@ -796,8 +796,7 @@ Status BufferAssigner::AssignBuffersForComputation(
       continue;
     }
 
-    if (!debug_options.xla_enable_buffer_reuse() || is_thread_local ||
-        instruction->opcode() == HloOpcode::kCustomCall) {
+    if (is_thread_local || instruction->opcode() == HloOpcode::kCustomCall) {
       // Custom call operations never have reusable buffers. Also we do not
       // reuse thread-local buffers for now, because they are dynamically
       // allocated and their lifetimes are hard to compute.
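The change above in BufferAssigner::AssignBuffersForComputation is the behavioral core of the commit: buffer reuse is no longer gated on a flag, and only two structural cases still opt out. A minimal stand-alone sketch of the resulting predicate (the enum is a stub for the real xla::HloOpcode, listed here only so the snippet compiles):

    // Stub for xla::HloOpcode; only the value used below is listed.
    enum class HloOpcode { kCustomCall /* , ... */ };

    // Mirrors the new condition: reuse is always considered except for
    // custom calls (which never have reusable buffers) and thread-local
    // buffers (dynamically allocated, so lifetimes are hard to compute).
    bool MayConsiderBufferReuse(bool is_thread_local, HloOpcode opcode) {
      return !is_thread_local && opcode != HloOpcode::kCustomCall;
    }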
@@ -99,9 +99,6 @@ message DebugOptions {
   // the generated IR.
   bool xla_llvm_enable_invariant_load_metadata = 65;
 
-  // Enable reuse of buffers between HLO operations.
-  bool xla_enable_buffer_reuse = 66;
-
   // Extra options to pass to the compilation backend; specific interpretation
   // of these values is left to the backend.
   map<string, string> xla_backend_extra_options = 500;
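One note on the proto hunk: field number 66 is deleted outright rather than reserved. In proto3, a reserved statement is the usual guard against a later field accidentally reusing a retired tag or name with different semantics; a sketch of that alternative (an illustration only, not something this commit does):

    message DebugOptions {
      // ... existing fields ...

      // Retire the deleted tag and name so they cannot be reused.
      reserved 66;
      reserved "xla_enable_buffer_reuse";
    }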