diff --git a/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.cc b/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.cc
index 1a17a35ebbe..10d8763133c 100644
--- a/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.cc
@@ -227,7 +227,7 @@ CollectivePermuteConfig GetCollectivePermuteConfig(
 }
 
 CollectivePermuteThunk::CollectivePermuteThunk(
-    ThunkInfo thunk_info, CollectivePermuteConfig&& config,
+    ThunkInfo thunk_info, CollectivePermuteConfig config,
     const BufferAllocation::Slice& src, const BufferAllocation::Slice& dest)
     : Thunk(kCollectivePermute, thunk_info),
       config_(std::move(config)),
diff --git a/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h b/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h
index bef86eec9af..35dda8dad7d 100644
--- a/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h
@@ -33,7 +33,7 @@ CollectivePermuteConfig GetCollectivePermuteConfig(const HloInstruction* instr);
 
 // Thunk that implements the collective-permute HLO.
 class CollectivePermuteThunk : public Thunk {
  public:
-  CollectivePermuteThunk(ThunkInfo thunk_info, CollectivePermuteConfig&& config,
+  CollectivePermuteThunk(ThunkInfo thunk_info, CollectivePermuteConfig config,
                          const BufferAllocation::Slice& src,
                          const BufferAllocation::Slice& dest);
diff --git a/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc b/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
index 6560c1a819c..6156f6a8ba8 100644
--- a/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
@@ -26,8 +26,8 @@ namespace gpu {
 
 ConditionalThunkConfig GetConditionalThunkConfig(
     const HloInstruction* instr,
-    std::vector<ThunkSequence>&& branch_thunk_sequences,
-    std::vector<absl::optional<size_t>>&& branch_profile_indices) {
+    std::vector<ThunkSequence> branch_thunk_sequences,
+    std::vector<absl::optional<size_t>> branch_profile_indices) {
   ConditionalThunkConfig config;
   config.branch_index_is_bool =
       instr->operand(0)->shape().element_type() == PRED;
@@ -45,7 +45,7 @@ ConditionalThunkConfig GetConditionalThunkConfig(
 }
 
 ConditionalThunk::ConditionalThunk(
-    ThunkInfo thunk_info, ConditionalThunkConfig&& config,
+    ThunkInfo thunk_info, ConditionalThunkConfig config,
     const BufferAllocation::Slice& branch_index_buffer_index,
     absl::Span<const BufferAllocation::Slice> branch_operand_buffer_indexes)
     : Thunk(Kind::kConditional, thunk_info),
diff --git a/tensorflow/compiler/xla/service/gpu/conditional_thunk.h b/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
index bf4280cdb12..2acd526e1a0 100644
--- a/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
@@ -39,8 +39,8 @@ struct ConditionalThunkConfig {
 
 ConditionalThunkConfig GetConditionalThunkConfig(
     const HloInstruction* instr,
-    std::vector<ThunkSequence>&& branch_thunk_sequences,
-    std::vector<absl::optional<size_t>>&& branch_profile_indices);
+    std::vector<ThunkSequence> branch_thunk_sequences,
+    std::vector<absl::optional<size_t>> branch_profile_indices);
 
 // ConditionalThunk implements the conditional instruction on GPU by reading the
 // predicate of the conditional and executing the true or the false computation
@@ -55,7 +55,7 @@ ConditionalThunkConfig GetConditionalThunkConfig(
 class ConditionalThunk : public Thunk {
  public:
   ConditionalThunk(
-      ThunkInfo thunk_info, ConditionalThunkConfig&& config,
+      ThunkInfo thunk_info, ConditionalThunkConfig config,
       const BufferAllocation::Slice& branch_index_buffer_index,
       absl::Span<const BufferAllocation::Slice>
           branch_operand_buffer_indexes);
diff --git a/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc b/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
index 85adffc5f80..16855373066 100644
--- a/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
@@ -31,7 +31,7 @@ namespace xla {
 namespace gpu {
 
 ConvolutionThunk::ConvolutionThunk(
-    ThunkInfo thunk_info, GpuConvConfig&& config,
+    ThunkInfo thunk_info, GpuConvConfig config,
     std::vector<BufferAllocation::Slice> operand_slices,
     BufferAllocation::Slice result_slice, BufferAllocation::Slice scratch_slice)
     : Thunk(Kind::kConvolution, thunk_info),
diff --git a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
index 35f130ab36b..36d0e0c3f96 100644
--- a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
@@ -42,7 +42,7 @@ class ConvolutionThunk : public Thunk {
   // Constructs a thunk for launching a DNN convolution.
   //
   // operand_slices should be in the same order as cudnn_call->operands().
-  ConvolutionThunk(ThunkInfo thunk_info, GpuConvConfig&& config,
+  ConvolutionThunk(ThunkInfo thunk_info, GpuConvConfig config,
                    std::vector<BufferAllocation::Slice> operand_slices,
                    BufferAllocation::Slice result_slice,
                    BufferAllocation::Slice scratch_slice);
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
index 8d70bb2f424..9ca130e1d89 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
@@ -32,7 +32,7 @@ namespace gpu {
 namespace dnn = se::dnn;
 
 CudnnBatchNormForwardInferenceThunk::CudnnBatchNormForwardInferenceThunk(
-    ThunkInfo thunk_info, CudnnBatchNormConfig&& config,
+    ThunkInfo thunk_info, CudnnBatchNormConfig config,
     const BufferAllocation::Slice& operand,
     const BufferAllocation::Slice& scale, const BufferAllocation::Slice& offset,
     const BufferAllocation::Slice& mean,
@@ -71,7 +71,7 @@ Status CudnnBatchNormForwardInferenceThunk::ExecuteOnStream(
 }
 
 CudnnBatchNormForwardTrainingThunk::CudnnBatchNormForwardTrainingThunk(
-    ThunkInfo thunk_info, CudnnBatchNormConfig&& config,
+    ThunkInfo thunk_info, CudnnBatchNormConfig config,
     const BufferAllocation::Slice& operand,
     const BufferAllocation::Slice& scale, const BufferAllocation::Slice& offset,
     const BufferAllocation::Slice& output_data,
@@ -115,7 +115,7 @@ Status CudnnBatchNormForwardTrainingThunk::ExecuteOnStream(
 }
 
 CudnnBatchNormBackwardThunk::CudnnBatchNormBackwardThunk(
-    ThunkInfo thunk_info, CudnnBatchNormConfig&& config,
+    ThunkInfo thunk_info, CudnnBatchNormConfig config,
     const BufferAllocation::Slice& operand,
     const BufferAllocation::Slice& scale, const BufferAllocation::Slice& mean,
     const BufferAllocation::Slice& inv_stddev,
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
index 48c46a6bc08..462656fe716 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
@@ -48,7 +48,7 @@ namespace gpu {
 class CudnnBatchNormForwardInferenceThunk : public Thunk {
  public:
   CudnnBatchNormForwardInferenceThunk(ThunkInfo thunk_info,
-                                      CudnnBatchNormConfig&& config,
+                                      CudnnBatchNormConfig config,
                                       const BufferAllocation::Slice& operand,
                                       const BufferAllocation::Slice& scale,
                                       const BufferAllocation::Slice& offset,
@@ -76,7 +76,7 @@ class CudnnBatchNormForwardInferenceThunk : public Thunk {
 class CudnnBatchNormForwardTrainingThunk : public Thunk {
  public:
   CudnnBatchNormForwardTrainingThunk(
-      ThunkInfo thunk_info, CudnnBatchNormConfig&& config,
+      ThunkInfo thunk_info, CudnnBatchNormConfig config,
       const BufferAllocation::Slice& operand,
       const BufferAllocation::Slice& scale,
       const BufferAllocation::Slice& offset,
@@ -104,7 +104,7 @@ class CudnnBatchNormForwardTrainingThunk : public Thunk {
 class CudnnBatchNormBackwardThunk : public Thunk {
  public:
   CudnnBatchNormBackwardThunk(
-      ThunkInfo thunk_info, CudnnBatchNormConfig&& config,
+      ThunkInfo thunk_info, CudnnBatchNormConfig config,
       const BufferAllocation::Slice& operand,
       const BufferAllocation::Slice& scale, const BufferAllocation::Slice& mean,
       const BufferAllocation::Slice& inv_stddev,
diff --git a/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc b/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
index 32e695b6b20..19e02b24343 100644
--- a/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
@@ -43,7 +43,7 @@ GpuGemmConfig GetGpuGemmConfig(const HloInstruction *gemm) {
   return config;
 }
 
-GemmThunk::GemmThunk(ThunkInfo thunk_info, GpuGemmConfig &&config,
+GemmThunk::GemmThunk(ThunkInfo thunk_info, GpuGemmConfig config,
                      const BufferAllocation::Slice &lhs_buffer,
                      const BufferAllocation::Slice &rhs_buffer,
                      const BufferAllocation::Slice &output_buffer,
diff --git a/tensorflow/compiler/xla/service/gpu/gemm_thunk.h b/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
index 9d6613dbe77..9e11763c091 100644
--- a/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
@@ -48,7 +48,7 @@ class GemmThunk : public Thunk {
  public:
   // Constructs a thunk that computes "output = (lhs <dot> rhs) * alpha" using
   // BLAS gemm (alpha is stored in the instruction GemmBackendConfig).
-  GemmThunk(ThunkInfo thunk_info, GpuGemmConfig&& config,
+  GemmThunk(ThunkInfo thunk_info, GpuGemmConfig config,
            const BufferAllocation::Slice& lhs_buffer,
            const BufferAllocation::Slice& rhs_buffer,
            const BufferAllocation::Slice& output_buffer,
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
index df53fb57f2c..1548fed801a 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
@@ -27,7 +27,7 @@ namespace xla {
 namespace gpu {
 
 InfeedThunk::InfeedThunk(ThunkInfo thunk_info,
-                         std::vector<ShapedSlice>&& dest_slices)
+                         std::vector<ShapedSlice> dest_slices)
     : Thunk(Kind::kInfeed, thunk_info), dest_slices_(std::move(dest_slices)) {}
 
 Status InfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_thunk.h b/tensorflow/compiler/xla/service/gpu/infeed_thunk.h
index 6994bd5e54a..7b2b8cf6c88 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/infeed_thunk.h
@@ -34,7 +34,7 @@ class InfeedThunk : public Thunk {
  public:
   // Constructs a InfeedThunk that copies data from the on-device
   // infeed queue into the buffers in the given shape tree.
-  InfeedThunk(ThunkInfo thunk_info, std::vector<ShapedSlice>&& dest_slices);
+  InfeedThunk(ThunkInfo thunk_info, std::vector<ShapedSlice> dest_slices);
 
   InfeedThunk(const InfeedThunk&) = delete;
   InfeedThunk& operator=(const InfeedThunk&) = delete;
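
Every hunk above makes the same change: the thunk constructors stop taking their config structs by rvalue reference (Config&&) and take them by value instead, while the constructor bodies keep std::move-ing the parameter into the member. The snippet below is a minimal sketch of that sink-parameter idiom, not the actual XLA code; Config and Thunk here are simplified stand-in types. A by-value parameter accepts both lvalues and rvalues, so call sites are no longer forced to hand over an rvalue, and the extra cost is at most one move.

#include <string>
#include <utility>
#include <vector>

// Stand-in for the various *Config structs touched by the diff.
struct Config {
  std::vector<int> values;
  std::string name;
};

// Stand-in for a thunk class: the config is a "sink" parameter,
// taken by value and then moved into the member.
class Thunk {
 public:
  explicit Thunk(Config config) : config_(std::move(config)) {}

 private:
  Config config_;
};

int main() {
  Config c{{1, 2, 3}, "example"};
  Thunk from_lvalue(c);             // copies into the parameter, then one move
  Thunk from_rvalue(std::move(c));  // moves all the way through, no copy
  return 0;
}

With the old Config&& signatures, callers could only pass temporaries or explicitly std::move-d objects; the by-value form keeps the cheap move path for rvalues while also accepting lvalues, which is why the member initializers such as config_(std::move(config)) are unchanged in the diff.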