From b87774c9d8f0eae067bd65793f52f90f0b2d8228 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Thu, 6 Jul 2017 13:17:39 -0700
Subject: [PATCH] [TF:XLA] Remove flag --xla_enable_buffer_reuse.

The corresponding issues have been fixed, so this should not be needed
anymore.

PiperOrigin-RevId: 161120874
---
 tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc | 6 ------
 tensorflow/compiler/xla/service/buffer_assignment.cc        | 3 +--
 tensorflow/compiler/xla/xla.proto                           | 3 ---
 3 files changed, 1 insertion(+), 11 deletions(-)

diff --git a/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc b/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc
index 4a6b0b581c8..2f3ec403a05 100644
--- a/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc
+++ b/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc
@@ -42,7 +42,6 @@ struct DebugOptionsFlags {
   string xla_dump_ir_to;
   string xla_dump_debug_json_to;
   bool xla_eliminate_hlo_implicit_broadcast;
-  bool xla_enable_buffer_reuse;
   bool xla_cpu_multi_thread_eigen;
   string xla_gpu_cuda_data_dir;
   bool xla_gpu_ftz;
@@ -79,7 +78,6 @@ void AllocateFlags() {
   flag_values->xla_dump_ir_to = "";
   flag_values->xla_dump_debug_json_to = "";
   flag_values->xla_eliminate_hlo_implicit_broadcast = false;
-  flag_values->xla_enable_buffer_reuse = true;
   flag_values->xla_cpu_multi_thread_eigen = true;
   flag_values->xla_gpu_cuda_data_dir = "./cuda_sdk_lib";
   flag_values->xla_gpu_ftz = false;
@@ -146,9 +144,6 @@ void AllocateFlags() {
                        "Eliminate implicit broadcasts when lowering user "
                        "computations to HLO instructions; use explicit "
                        "broadcast instead."),
-      tensorflow::Flag("xla_enable_buffer_reuse",
-                       &flag_values->xla_enable_buffer_reuse,
-                       "Enable reuse of buffers between HLO operations."),
       tensorflow::Flag("xla_cpu_multi_thread_eigen",
                        &flag_values->xla_cpu_multi_thread_eigen,
                        "When generating calls to Eigen in the CPU backend, "
@@ -207,7 +202,6 @@ xla::DebugOptions GetDebugOptionsFromFlags() {
   options.set_xla_dump_ir_to(flag_values->xla_dump_ir_to);
   options.set_xla_eliminate_hlo_implicit_broadcast(
       flag_values->xla_eliminate_hlo_implicit_broadcast);
-  options.set_xla_enable_buffer_reuse(flag_values->xla_enable_buffer_reuse);
   options.set_xla_dump_debug_json_to(flag_values->xla_dump_debug_json_to);
   options.set_xla_cpu_multi_thread_eigen(
       flag_values->xla_cpu_multi_thread_eigen);
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc
index d00749247e7..f372b18f7e7 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment.cc
@@ -796,8 +796,7 @@ Status BufferAssigner::AssignBuffersForComputation(
       continue;
     }
 
-    if (!debug_options.xla_enable_buffer_reuse() || is_thread_local ||
-        instruction->opcode() == HloOpcode::kCustomCall) {
+    if (is_thread_local || instruction->opcode() == HloOpcode::kCustomCall) {
       // Custom call operations never have reusable buffers. Also we do not
       // reuse thread-local buffers for now, because they are dynamically
       // allocated and their lifetimes are hard to compute.
diff --git a/tensorflow/compiler/xla/xla.proto b/tensorflow/compiler/xla/xla.proto
index c4badfaede5..72b47c4223f 100644
--- a/tensorflow/compiler/xla/xla.proto
+++ b/tensorflow/compiler/xla/xla.proto
@@ -99,9 +99,6 @@ message DebugOptions {
   // the generated IR.
   bool xla_llvm_enable_invariant_load_metadata = 65;
 
-  // Enable reuse of buffers between HLO operations.
-  bool xla_enable_buffer_reuse = 66;
-
   // Extra options to pass to the compilation backend; specific interpretation
   // of these values is left to the backend.
   map<string, string> xla_backend_extra_options = 500;