diff --git a/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h b/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h
index 53e82502f54..bef86eec9af 100644
--- a/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/collective_permute_thunk.h
@@ -40,9 +40,9 @@ class CollectivePermuteThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;

  private:
-  CollectivePermuteConfig config_;
-  BufferAllocation::Slice src_;
-  BufferAllocation::Slice dest_;
+  const CollectivePermuteConfig config_;
+  const BufferAllocation::Slice src_;
+  const BufferAllocation::Slice dest_;
 };

 }  // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/conditional_thunk.h b/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
index ce0b26388d3..bf4280cdb12 100644
--- a/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/conditional_thunk.h
@@ -67,7 +67,7 @@ class ConditionalThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;

  private:
-  ConditionalThunkConfig config_;
+  const ConditionalThunkConfig config_;
   BufferAllocation::Slice branch_index_buffer_index_;
   std::vector<BufferAllocation::Slice> branch_operand_buffer_indexes_;
 };
diff --git a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
index ffefe58e229..7f8377ebe4c 100644
--- a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
@@ -61,7 +61,7 @@ class ConvolutionThunk : public Thunk {
   BufferAllocation::Slice tuple_result_buffer_;

   // Convolution config
-  GpuConvConfig config_;
+  const GpuConvConfig config_;
 };

 }  // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
index 79b915b59a7..d45e284ea2c 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.h
@@ -124,7 +124,7 @@ class CudnnBatchNormBackwardThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;

  private:
-  CudnnBatchNormConfig config_;
+  const CudnnBatchNormConfig config_;
   BufferAllocation::Slice operand_;
   BufferAllocation::Slice scale_;
   BufferAllocation::Slice mean_;
diff --git a/tensorflow/compiler/xla/service/gpu/custom_call_thunk.cc b/tensorflow/compiler/xla/service/gpu/custom_call_thunk.cc
index e35a7dece3d..c9b2318af79 100644
--- a/tensorflow/compiler/xla/service/gpu/custom_call_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/custom_call_thunk.cc
@@ -24,12 +24,12 @@ namespace gpu {
 CustomCallThunk::CustomCallThunk(
     ThunkInfo thunk_info, void* call_target,
     std::vector<ShapeTree<BufferAllocation::Slice>> operand_slices,
-    ShapeTree<BufferAllocation::Slice> result_slices, std::string opaque)
+    ShapeTree<BufferAllocation::Slice> result_slices, const std::string& opaque)
     : Thunk(Thunk::kCustomCall, thunk_info),
       call_target_(call_target),
       operand_slices_(std::move(operand_slices)),
       result_slices_(std::move(result_slices)),
-      opaque_(std::move(opaque)) {}
+      opaque_(opaque) {}

 // For each leaf in a preorder traversal of `slices`, appends its device address
 // to `buffers`.
diff --git a/tensorflow/compiler/xla/service/gpu/custom_call_thunk.h b/tensorflow/compiler/xla/service/gpu/custom_call_thunk.h
index 72175daf3dd..f36eaa9cef2 100644
--- a/tensorflow/compiler/xla/service/gpu/custom_call_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/custom_call_thunk.h
@@ -41,7 +41,8 @@ class CustomCallThunk : public Thunk {
   CustomCallThunk(
       ThunkInfo thunk_info, void* call_target,
       std::vector<ShapeTree<BufferAllocation::Slice>> operand_slices,
-      ShapeTree<BufferAllocation::Slice> result_slices, std::string opaque);
+      ShapeTree<BufferAllocation::Slice> result_slices,
+      const std::string& opaque);

   Status ExecuteOnStream(const ExecuteParams& params) override;

@@ -49,7 +50,7 @@ class CustomCallThunk : public Thunk {
   void* call_target_;
   std::vector<ShapeTree<BufferAllocation::Slice>> operand_slices_;
   ShapeTree<BufferAllocation::Slice> result_slices_;
-  std::string opaque_;
+  const std::string opaque_;
 };

 }  // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/for_thunk.h b/tensorflow/compiler/xla/service/gpu/for_thunk.h
index 96f0534cd52..9a8bd069290 100644
--- a/tensorflow/compiler/xla/service/gpu/for_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/for_thunk.h
@@ -44,7 +44,7 @@ class ForThunk : public Thunk {
  private:
   const int64 loop_limit_;
   std::unique_ptr<SequentialThunk> body_thunk_sequence_;
-  absl::optional<size_t> body_profile_index_;
+  const absl::optional<size_t> body_profile_index_;
 };

 }  // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/gemm_thunk.h b/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
index e79a4cba908..9d6613dbe77 100644
--- a/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/gemm_thunk.h
@@ -60,11 +60,11 @@ class GemmThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;

  private:
-  GpuGemmConfig config_;
+  const GpuGemmConfig config_;
   const BufferAllocation::Slice lhs_buffer_;
   const BufferAllocation::Slice rhs_buffer_;
   const BufferAllocation::Slice output_buffer_;
-  bool implements_whole_instruction_;
+  const bool implements_whole_instruction_;
 };

 // Run the given GEMM instruction `gemm` subject to the configuration
diff --git a/tensorflow/compiler/xla/service/gpu/memset_thunk.h b/tensorflow/compiler/xla/service/gpu/memset_thunk.h
index 8a1890a0769..fb18b7041b7 100644
--- a/tensorflow/compiler/xla/service/gpu/memset_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/memset_thunk.h
@@ -55,7 +55,7 @@ class Memset32BitValueThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;

  private:
-  uint32 value_;
+  const uint32 value_;
   const BufferAllocation::Slice dest_;
 };

diff --git a/tensorflow/compiler/xla/service/gpu/nccl_all_reduce_thunk.h b/tensorflow/compiler/xla/service/gpu/nccl_all_reduce_thunk.h
index 1e9b511c4a2..42060e82428 100644
--- a/tensorflow/compiler/xla/service/gpu/nccl_all_reduce_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/nccl_all_reduce_thunk.h
@@ -91,7 +91,7 @@ class NcclAllReduceThunk : public Thunk {
   static bool CanImplement(const HloInstruction* crs);

  private:
-  NcclAllReduceConfig config_;
+  const NcclAllReduceConfig config_;
   const std::vector<Buffer> buffers_;
 };

diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.h b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.h
index a763645ce82..60c64858ee7 100644
--- a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.h
@@ -47,7 +47,7 @@ class OutfeedThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;

  private:
-  OutfeedConfig config_;
+  const OutfeedConfig config_;
   const ShapeTree<BufferAllocation::Slice> outfeed_slices_;
 };

diff --git a/tensorflow/compiler/xla/service/gpu/while_thunk.h b/tensorflow/compiler/xla/service/gpu/while_thunk.h
index dc09c142a88..707edbdc192 100644
--- a/tensorflow/compiler/xla/service/gpu/while_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/while_thunk.h
@@ -53,12 +53,11 @@ class WhileThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;

  private:
-  const HloInstruction* hlo_instruction_;
   const BufferAllocation::Slice condition_result_buffer_index_;
   std::unique_ptr<SequentialThunk> condition_thunk_sequence_;
   std::unique_ptr<SequentialThunk> body_thunk_sequence_;
-  absl::optional<size_t> condition_profile_index_;
-  absl::optional<size_t> body_profile_index_;
+  const absl::optional<size_t> condition_profile_index_;
+  const absl::optional<size_t> body_profile_index_;
 };

 }  // namespace gpu
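
The pattern throughout the diff is the same: thunk state that is fixed at construction time is declared const, and the unused hlo_instruction_ member of WhileThunk is dropped. Below is a minimal, self-contained sketch of that pattern, not the real XLA classes; names such as FakeSlice, FakeConfig, and FakeGemmThunk are hypothetical stand-ins chosen for illustration.

// Sketch only: const members are initialized once in the constructor's
// initializer list and cannot be reassigned afterward, which documents that
// ExecuteOnStream may read but never mutate the thunk's configuration.
#include <cstdint>
#include <iostream>
#include <utility>

struct FakeSlice {   // Stand-in for BufferAllocation::Slice.
  int64_t index;
  int64_t offset;
};

struct FakeConfig {  // Stand-in for GpuGemmConfig and similar config structs.
  double alpha;
  double beta;
};

class FakeGemmThunk {
 public:
  FakeGemmThunk(FakeConfig config, FakeSlice lhs, FakeSlice rhs, FakeSlice out,
                bool implements_whole_instruction)
      : config_(std::move(config)),
        lhs_buffer_(lhs),
        rhs_buffer_(rhs),
        output_buffer_(out),
        implements_whole_instruction_(implements_whole_instruction) {}

  // Execution only reads the stored state; the const members guarantee it.
  void ExecuteOnStream() const {
    std::cout << "gemm alpha=" << config_.alpha
              << " lhs slice=" << lhs_buffer_.index
              << " whole instruction=" << implements_whole_instruction_
              << "\n";
  }

 private:
  // Mirrors the `const` members introduced in the diff above.
  const FakeConfig config_;
  const FakeSlice lhs_buffer_;
  const FakeSlice rhs_buffer_;
  const FakeSlice output_buffer_;
  const bool implements_whole_instruction_;
};

int main() {
  FakeGemmThunk thunk({/*alpha=*/1.0, /*beta=*/0.0}, {0, 0}, {1, 0}, {2, 0},
                      /*implements_whole_instruction=*/true);
  thunk.ExecuteOnStream();
  return 0;
}

One trade-off worth noting: const data members make a class neither copy- nor move-assignable, which is typically acceptable for objects that are heap-allocated once and never reassigned.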