diff --git a/tensorflow/compiler/xla/service/compiler.h b/tensorflow/compiler/xla/service/compiler.h
index 685b0d15142..eee2e26ec9f 100644
--- a/tensorflow/compiler/xla/service/compiler.h
+++ b/tensorflow/compiler/xla/service/compiler.h
@@ -165,14 +165,6 @@ class Compiler {
       std::unique_ptr<HloModule> module, se::StreamExecutor* executor,
       se::DeviceMemoryAllocator* device_allocator) = 0;
 
-  // Compiles a set of HLO modules that can run in parallel, potentially
-  // communicating data between the modules.
-  virtual StatusOr<std::vector<std::unique_ptr<Executable>>>
-  RunBackendOnModuleGroup(
-      std::unique_ptr<HloModuleGroup> module_group,
-      std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-      se::DeviceMemoryAllocator* device_allocator) = 0;
-
   // Compiles a set of HLO modules that can run in parallel, potentially
   // communicating data between the modules, and returns a corresponding
   // sequence of executable objects.
diff --git a/tensorflow/compiler/xla/service/interpreter/compiler.cc b/tensorflow/compiler/xla/service/interpreter/compiler.cc
index 94194d6145d..85768225892 100644
--- a/tensorflow/compiler/xla/service/interpreter/compiler.cc
+++ b/tensorflow/compiler/xla/service/interpreter/compiler.cc
@@ -126,15 +126,6 @@ StatusOr<std::unique_ptr<Executable>> InterpreterCompiler::RunBackend(
   return std::move(executable);
 }
 
-StatusOr<std::vector<std::unique_ptr<Executable>>>
-InterpreterCompiler::RunBackendOnModuleGroup(
-    std::unique_ptr<HloModuleGroup> module_group,
-    std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-    se::DeviceMemoryAllocator* device_allocator) {
-  return Unimplemented(
-      "Module group compilation is not supported on Interpreter.");
-}
-
 StatusOr<std::vector<std::unique_ptr<Executable>>> InterpreterCompiler::Compile(
     std::unique_ptr<HloModuleGroup> module_group,
     std::vector<std::vector<se::StreamExecutor*>> stream_exec,
diff --git a/tensorflow/compiler/xla/service/interpreter/compiler.h b/tensorflow/compiler/xla/service/interpreter/compiler.h
index fa99779309b..824594dfd84 100644
--- a/tensorflow/compiler/xla/service/interpreter/compiler.h
+++ b/tensorflow/compiler/xla/service/interpreter/compiler.h
@@ -49,11 +49,6 @@ class InterpreterCompiler : public Compiler {
   StatusOr<std::unique_ptr<Executable>> RunBackend(
       std::unique_ptr<HloModule> hlo_module, se::StreamExecutor* stream_exec,
       se::DeviceMemoryAllocator* device_allocator) override;
-  StatusOr<std::vector<std::unique_ptr<Executable>>> RunBackendOnModuleGroup(
-      std::unique_ptr<HloModuleGroup> module_group,
-      std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-      se::DeviceMemoryAllocator* device_allocator) override;
-
   StatusOr<std::vector<std::unique_ptr<Executable>>> Compile(
       std::unique_ptr<HloModuleGroup> module_group,
       std::vector<std::vector<se::StreamExecutor*>> stream_exec,
diff --git a/tensorflow/compiler/xla/service/llvm_compiler.cc b/tensorflow/compiler/xla/service/llvm_compiler.cc
index 50bae1d39a4..aa759b26226 100644
--- a/tensorflow/compiler/xla/service/llvm_compiler.cc
+++ b/tensorflow/compiler/xla/service/llvm_compiler.cc
@@ -21,15 +21,6 @@ limitations under the License.
 #endif
 
 namespace xla {
-StatusOr<std::vector<std::unique_ptr<Executable>>>
-LLVMCompiler::RunBackendOnModuleGroup(
-    std::unique_ptr<HloModuleGroup> module_group,
-    std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-    se::DeviceMemoryAllocator* device_allocator) {
-  return Unimplemented(
-      "Model partitioning not implemented for the CPU/GPU compilers!");
-}
-
 StatusOr<std::vector<std::unique_ptr<Executable>>> LLVMCompiler::Compile(
     std::unique_ptr<HloModuleGroup> module_group,
     std::vector<std::vector<se::StreamExecutor*>> stream_execs,
diff --git a/tensorflow/compiler/xla/service/llvm_compiler.h b/tensorflow/compiler/xla/service/llvm_compiler.h
index b983ff575a3..bddda50d3e1 100644
--- a/tensorflow/compiler/xla/service/llvm_compiler.h
+++ b/tensorflow/compiler/xla/service/llvm_compiler.h
@@ -69,11 +69,6 @@ class LLVMCompiler : public Compiler {
   using Compiler::RunBackend;
   using Compiler::RunHloPasses;
 
-  StatusOr<std::vector<std::unique_ptr<Executable>>> RunBackendOnModuleGroup(
-      std::unique_ptr<HloModuleGroup> module_group,
-      std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-      se::DeviceMemoryAllocator* device_allocator) override;
-
   StatusOr<std::vector<std::unique_ptr<Executable>>> Compile(
       std::unique_ptr<HloModuleGroup> module_group,
       std::vector<std::vector<se::StreamExecutor*>> stream_execs,
diff --git a/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc b/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc
index 7db22dc8439..4107d92da7e 100644
--- a/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc
+++ b/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.cc
@@ -50,16 +50,6 @@ StatusOr<std::unique_ptr<Executable>> FailoverCompiler::RunBackend(
   return result;
 }
 
-StatusOr<std::vector<std::unique_ptr<Executable>>>
-FailoverCompiler::RunBackendOnModuleGroup(
-    std::unique_ptr<HloModuleGroup> module_group,
-    std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-    se::DeviceMemoryAllocator* device_allocator) {
-  // This is not supported by GPU compiler anyway.
-  return Unimplemented(
-      "Model partitioning not implemented for the failover compiler!");
-}
-
 StatusOr<std::vector<std::unique_ptr<Executable>>> FailoverCompiler::Compile(
     std::unique_ptr<HloModuleGroup> module_group,
     std::vector<std::vector<se::StreamExecutor*>> stream_execs,
diff --git a/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.h b/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.h
index 653dd95f4d2..5eb3bf188bb 100644
--- a/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.h
+++ b/tensorflow/compiler/xla/service/mlir_gpu/failover_compiler.h
@@ -57,11 +57,6 @@ class FailoverCompiler final : public Compiler {
       std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
       se::DeviceMemoryAllocator* device_allocator) override;
 
-  StatusOr<std::vector<std::unique_ptr<Executable>>> RunBackendOnModuleGroup(
-      std::unique_ptr<HloModuleGroup> module_group,
-      std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-      se::DeviceMemoryAllocator* device_allocator) override;
-
   StatusOr<std::vector<std::unique_ptr<Executable>>> Compile(
       std::unique_ptr<HloModuleGroup> module_group,
       std::vector<std::vector<se::StreamExecutor*>> stream_execs,
diff --git a/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.cc b/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.cc
index 2afbb7389ba..7b7f7b89dde 100644
--- a/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.cc
+++ b/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.cc
@@ -65,14 +65,6 @@ StatusOr<std::unique_ptr<Executable>> MlirCompiler::RunBackend(
   return Unimplemented("Not yet implemented in MLIR compiler");
 }
 
-StatusOr<std::vector<std::unique_ptr<Executable>>>
-MlirCompiler::RunBackendOnModuleGroup(
-    std::unique_ptr<HloModuleGroup> module_group,
-    std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-    se::DeviceMemoryAllocator* device_allocator) {
-  return Unimplemented("Not yet implemented in MLIR compiler");
-}
-
 StatusOr<std::vector<std::unique_ptr<Executable>>> MlirCompiler::Compile(
     std::unique_ptr<HloModuleGroup> module_group,
     std::vector<std::vector<se::StreamExecutor*>> stream_execs,
diff --git a/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.h b/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.h
index a8b46149085..6979f73990e 100644
--- a/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.h
+++ b/tensorflow/compiler/xla/service/mlir_gpu/mlir_compiler.h
@@ -39,11 +39,6 @@ class MlirCompiler : public Compiler {
       std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
       se::DeviceMemoryAllocator* device_allocator) override;
 
-  StatusOr<std::vector<std::unique_ptr<Executable>>> RunBackendOnModuleGroup(
-      std::unique_ptr<HloModuleGroup> module_group,
-      std::vector<std::vector<se::StreamExecutor*>> stream_exec,
-      se::DeviceMemoryAllocator* device_allocator) override;
-
   StatusOr<std::vector<std::unique_ptr<Executable>>> Compile(
       std::unique_ptr<HloModuleGroup> module_group,
       std::vector<std::vector<se::StreamExecutor*>> stream_execs,