[NFC] Mark Thunk subclass members const when possible
- Also eliminate unused HLO Instruction pointer from WhileThunk.

PiperOrigin-RevId: 335893833
Change-Id: I56c04b552265c1aa0b5874395a49117c6c6624ae
Parent: 37995ed712
Commit: a89724bda9
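The pattern applied in every hunk below is the same: members that are set once in the constructor and never reassigned are qualified with const. A minimal, self-contained sketch of that pattern (the class, config, and member names here are hypothetical stand-ins, not taken from the XLA sources):

#include <cstdint>
#include <utility>

// Hypothetical stand-in for a thunk configuration struct.
struct ExampleConfig {
  int64_t replica_count = 1;
};

// Hypothetical thunk-like class: const members are initialized exactly once,
// in the constructor's member-initializer list, and cannot be reassigned later.
class ExampleThunk {
 public:
  ExampleThunk(ExampleConfig config, bool implements_whole_instruction)
      : config_(std::move(config)),
        implements_whole_instruction_(implements_whole_instruction) {}

 private:
  const ExampleConfig config_;               // immutable after construction
  const bool implements_whole_instruction_;  // immutable after construction
};

Because these members were never reassigned after construction, adding const is a no-functional-change (NFC) cleanup. The one trade-off is that a class with const members loses its implicit copy and move assignment operators, which the thunks touched here evidently do not rely on.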
@@ -40,9 +40,9 @@ class CollectivePermuteThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
  private:
-  CollectivePermuteConfig config_;
-  BufferAllocation::Slice src_;
-  BufferAllocation::Slice dest_;
+  const CollectivePermuteConfig config_;
+  const BufferAllocation::Slice src_;
+  const BufferAllocation::Slice dest_;
 };
 
 }  // namespace gpu
@@ -67,7 +67,7 @@ class ConditionalThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
  private:
-  ConditionalThunkConfig config_;
+  const ConditionalThunkConfig config_;
   BufferAllocation::Slice branch_index_buffer_index_;
   std::vector<BufferAllocation::Slice> branch_operand_buffer_indexes_;
 };
@@ -61,7 +61,7 @@ class ConvolutionThunk : public Thunk {
   BufferAllocation::Slice tuple_result_buffer_;
 
   // Convolution config
-  GpuConvConfig config_;
+  const GpuConvConfig config_;
 };
 
 }  // namespace gpu
@@ -124,7 +124,7 @@ class CudnnBatchNormBackwardThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
  private:
-  CudnnBatchNormConfig config_;
+  const CudnnBatchNormConfig config_;
   BufferAllocation::Slice operand_;
   BufferAllocation::Slice scale_;
   BufferAllocation::Slice mean_;
@@ -24,12 +24,12 @@ namespace gpu {
 CustomCallThunk::CustomCallThunk(
     ThunkInfo thunk_info, void* call_target,
     std::vector<ShapeTree<BufferAllocation::Slice>> operand_slices,
-    ShapeTree<BufferAllocation::Slice> result_slices, std::string opaque)
+    ShapeTree<BufferAllocation::Slice> result_slices, const std::string& opaque)
     : Thunk(Thunk::kCustomCall, thunk_info),
       call_target_(call_target),
       operand_slices_(std::move(operand_slices)),
       result_slices_(std::move(result_slices)),
-      opaque_(std::move(opaque)) {}
+      opaque_(opaque) {}
 
 // For each leaf in a preorder traversal of `slices`, appends its device address
 // to `buffers`.
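A side note on the CustomCallThunk change above: a const std::string member can still be initialized from a const std::string& parameter through the member-initializer list; only later reassignment is ruled out. A minimal standalone sketch with a hypothetical class name:

#include <string>

class OpaqueHolder {
 public:
  // The argument is copied once into the const member at construction time.
  explicit OpaqueHolder(const std::string& opaque) : opaque_(opaque) {}

  const std::string& opaque() const { return opaque_; }

 private:
  const std::string opaque_;  // set at construction, immutable afterwards
};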
@@ -41,7 +41,8 @@ class CustomCallThunk : public Thunk {
   CustomCallThunk(
       ThunkInfo thunk_info, void* call_target,
       std::vector<ShapeTree<BufferAllocation::Slice>> operand_slices,
-      ShapeTree<BufferAllocation::Slice> result_slices, std::string opaque);
+      ShapeTree<BufferAllocation::Slice> result_slices,
+      const std::string& opaque);
 
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
@@ -49,7 +50,7 @@ class CustomCallThunk : public Thunk {
   void* call_target_;
   std::vector<ShapeTree<BufferAllocation::Slice>> operand_slices_;
   ShapeTree<BufferAllocation::Slice> result_slices_;
-  std::string opaque_;
+  const std::string opaque_;
 };
 
 }  // namespace gpu
@@ -44,7 +44,7 @@ class ForThunk : public Thunk {
  private:
   const int64 loop_limit_;
   std::unique_ptr<SequentialThunk> body_thunk_sequence_;
-  absl::optional<size_t> body_profile_index_;
+  const absl::optional<size_t> body_profile_index_;
 };
 
 }  // namespace gpu
@@ -60,11 +60,11 @@ class GemmThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
  private:
-  GpuGemmConfig config_;
+  const GpuGemmConfig config_;
   const BufferAllocation::Slice lhs_buffer_;
   const BufferAllocation::Slice rhs_buffer_;
   const BufferAllocation::Slice output_buffer_;
-  bool implements_whole_instruction_;
+  const bool implements_whole_instruction_;
 };
 
 // Run the given GEMM instruction `gemm` subject to the configuration
@@ -55,7 +55,7 @@ class Memset32BitValueThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
  private:
-  uint32 value_;
+  const uint32 value_;
   const BufferAllocation::Slice dest_;
 };
 
@@ -91,7 +91,7 @@ class NcclAllReduceThunk : public Thunk {
   static bool CanImplement(const HloInstruction* crs);
 
  private:
-  NcclAllReduceConfig config_;
+  const NcclAllReduceConfig config_;
   const std::vector<Buffer> buffers_;
 };
 
@@ -47,7 +47,7 @@ class OutfeedThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
  private:
-  OutfeedConfig config_;
+  const OutfeedConfig config_;
   const ShapeTree<BufferAllocation::Slice> outfeed_slices_;
 };
 
@@ -53,12 +53,11 @@ class WhileThunk : public Thunk {
   Status ExecuteOnStream(const ExecuteParams& params) override;
 
  private:
-  const HloInstruction* hlo_instruction_;
   const BufferAllocation::Slice condition_result_buffer_index_;
   std::unique_ptr<SequentialThunk> condition_thunk_sequence_;
   std::unique_ptr<SequentialThunk> body_thunk_sequence_;
-  absl::optional<size_t> condition_profile_index_;
-  absl::optional<size_t> body_profile_index_;
+  const absl::optional<size_t> condition_profile_index_;
+  const absl::optional<size_t> body_profile_index_;
 };
 
 }  // namespace gpu