[XLA:GPU][NFC] Prefer using * to access absl::optional<> values.
See https://abseil.io/tips/181

PiperOrigin-RevId: 355681618
Change-Id: I6a1ba85e049461de4555647ec123bc91145a4c36
commit 9f9c094ef1
parent 2f4acbfb7c
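
The change applies the guidance in the Abseil tip above: `absl::optional<T>::value()` re-checks engagement and throws `absl::bad_optional_access` (or terminates when exceptions are off) if the optional is empty, whereas `operator*` is an unchecked access whose precondition is that the optional is engaged. Once surrounding code has already established that a value is present, `*opt` states that invariant directly and avoids a redundant check. A minimal standalone sketch of the pattern, with hypothetical names that are not part of this commit:

#include <cstdint>
#include <iostream>

#include "absl/types/optional.h"

// Hypothetical helper mirroring the pattern adopted throughout this commit:
// test the optional once, then read it with * instead of .value().
void PrintChannel(absl::optional<int64_t> channel_id) {
  if (channel_id) {                    // engagement check via operator bool
    std::cout << *channel_id << "\n";  // unchecked access, safe after the check
  } else {
    std::cout << "<no channel>\n";
  }
}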
@@ -959,7 +959,7 @@ mlir::NamedAttribute HloFunctionImporter::ConvertReplicaGroups(
 mlir::NamedAttribute HloFunctionImporter::ConvertChannelHandle(
     absl::optional<tensorflow::int64> channel_id) {
   xla::ChannelHandle channel_handle;
-  if (channel_id.has_value()) channel_handle.set_handle(channel_id.value());
+  if (channel_id) channel_handle.set_handle(*channel_id);
   return ConvertChannelHandle(channel_handle);
 }
 
@@ -1337,7 +1337,7 @@ LogicalResult ConvertToHloModule::Lower(
     xla::OpSharding sharding;
     sharding.set_type(xla::OpSharding::TUPLE);
     for (auto& ret_sharding : ret_shardings)
-      *sharding.add_tuple_shardings() = ret_sharding.value();
+      *sharding.add_tuple_shardings() = *ret_sharding;
 
     builder->SetSharding(sharding);
   }
@@ -1490,8 +1490,7 @@ LogicalResult ConvertToHloModule::SetEntryTupleShardings(
   xla::OpSharding sharding;
   sharding.set_type(xla::OpSharding::TUPLE);
   for (auto arg_sharding : llvm::enumerate(arg_shardings)) {
-    auto hlo_sharding =
-        xla::HloSharding::FromProto(arg_sharding.value().value());
+    auto hlo_sharding = xla::HloSharding::FromProto(*arg_sharding.value());
     if (!hlo_sharding.ok())
       return block->getParentOp()->emitError()
              << hlo_sharding.status().error_message();
@@ -1502,7 +1501,7 @@ LogicalResult ConvertToHloModule::SetEntryTupleShardings(
     if (!status.ok())
       return block->getParentOp()->emitError() << status.error_message();
 
-    *sharding.add_tuple_shardings() = arg_sharding.value().value();
+    *sharding.add_tuple_shardings() = *arg_sharding.value();
   }
 
   builder->SetSharding(sharding);
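
In the two hunks above the two `.value()` calls are unrelated: the outer one belongs to the `llvm::enumerate` element (which pairs `.index()` with `.value()`), and only the inner one is the `absl::optional` accessor being replaced by `*`. A small sketch of that distinction, using hypothetical integer data rather than the real sharding protos:

#include <cstdio>
#include <vector>

#include "absl/types/optional.h"
#include "llvm/ADT/STLExtras.h"

int main() {
  std::vector<absl::optional<int>> shardings = {1, 2, 3};
  for (auto item : llvm::enumerate(shardings)) {
    // item.value() is the vector element (an absl::optional<int>);
    // the leading * then reads the int stored inside that optional.
    std::printf("#%zu -> %d\n", item.index(), *item.value());
  }
}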
@@ -1543,7 +1542,7 @@ LogicalResult ConvertToHloModule::LowerBasicBlockAsFunction(
       !arg_shardings.empty() && AllOptionalShardingsAreSet(arg_shardings);
   for (BlockArgument& arg : block->getArguments()) {
     if (set_tuple_element_sharding)
-      builder->SetSharding(arg_shardings[arg.getArgNumber()].value());
+      builder->SetSharding(*arg_shardings[arg.getArgNumber()]);
     lowering[arg] = xla::GetTupleElement(tuple, arg.getArgNumber());
   }
   builder->ClearSharding();
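
The dereference in this hunk is guarded by `AllOptionalShardingsAreSet`, which validates every optional before the loop runs, so each `*` inside the loop encodes an invariant that was already checked. A sketch of that check-once-then-dereference shape, under hypothetical names:

#include <vector>

#include "absl/algorithm/container.h"
#include "absl/types/optional.h"

// Hypothetical analogue of AllOptionalShardingsAreSet: true iff every
// element of the container is engaged.
bool AllSet(const std::vector<absl::optional<int>>& xs) {
  return absl::c_all_of(
      xs, [](const absl::optional<int>& x) { return x.has_value(); });
}

int Sum(const std::vector<absl::optional<int>>& xs) {
  int sum = 0;
  if (AllSet(xs)) {
    // Every element was verified above, so * is the natural accessor here.
    for (const auto& x : xs) sum += *x;
  }
  return sum;
}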
@@ -37,9 +37,9 @@ TEST_F(AliasPassthroughParamsTest, AliasPassThroughParams) {
           .ValueOrDie();
   EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).ValueOrDie());
   const auto& alias_config = module->input_output_alias_config();
-  EXPECT_EQ(0, alias_config.GetAliasedParameter({0}).value().parameter_number);
+  EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
   EXPECT_FALSE(alias_config.OutputHasAlias({1}));
-  EXPECT_EQ(1, alias_config.GetAliasedParameter({2}).value().parameter_number);
+  EXPECT_EQ(1, alias_config.GetAliasedParameter({2})->parameter_number);
 }
 
 TEST_F(AliasPassthroughParamsTest, DoNotAliasPassThroughParamsMoreThanOnce) {
@@ -53,7 +53,7 @@ TEST_F(AliasPassthroughParamsTest, DoNotAliasPassThroughParamsMoreThanOnce) {
           .ValueOrDie();
   EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).ValueOrDie());
   const auto& alias_config = module->input_output_alias_config();
-  EXPECT_EQ(0, alias_config.GetAliasedParameter({0}).value().parameter_number);
+  EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
   EXPECT_FALSE(alias_config.OutputHasAlias({1}));
 }
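
`GetAliasedParameter` returns an optional alias entry, so the tests can reach its field through the optional's `operator->` instead of spelling out `.value().`; like `*`, `->` is unchecked and relies on the alias actually being present. A minimal sketch with a hypothetical struct standing in for the alias entry:

#include <cassert>

#include "absl/types/optional.h"

struct Alias {  // hypothetical stand-in for the aliasing entry
  int parameter_number;
};

int main() {
  absl::optional<Alias> alias = Alias{0};
  assert(alias.has_value());             // presence is assumed, as in the tests
  assert(alias->parameter_number == 0);  // operator-> forwards to the Alias
}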
@@ -237,7 +237,6 @@ static StatusOr<absl::optional<se::blas::AlgorithmType>> DoGemmAutotune(
   // Don't run autotuning concurrently on the same GPU.
   tensorflow::mutex_lock gpu_lock = LockGpu(stream->parent());
-
 
   GemmCacheKey key =
       std::make_tuple(stream->parent(), lhs->shape(), rhs->shape(),
                       instr->shape(), gemm_config.SerializeAsString());
@@ -253,7 +252,7 @@ static StatusOr<absl::optional<se::blas::AlgorithmType>> DoGemmAutotune(
   if (it != autotune_cache.end()) {
     cache_hits++;
     VLOG(4) << "Autotuning cache hit, using algorithm: "
-            << (it->second.has_value() ? absl::StrCat(it->second.value())
+            << (it->second.has_value() ? absl::StrCat(*(it->second))
                                        : "<generic>");
     return it->second;
   }
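
`absl::StrCat` does not accept an optional directly, so the cached algorithm is unwrapped with `*` on the engaged branch, while the other branch of the ternary never evaluates the dereference. A standalone sketch of that logging expression, with a hypothetical variable in place of the cache entry:

#include <cstdint>
#include <iostream>
#include <string>

#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"

int main() {
  absl::optional<int64_t> algorithm = 7;
  // *algorithm is evaluated only when has_value() is true, so the
  // unchecked dereference inside the ternary is safe.
  std::string label =
      algorithm.has_value() ? absl::StrCat(*algorithm) : "<generic>";
  std::cout << label << "\n";  // prints "7"
}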
@@ -93,7 +93,7 @@ void HloExecutionProfiler::FinishHloComputation(
     absl::optional<size_t> profile_index) {
   if (do_profile_) {
     profile_->SetCyclesTakenBy(
-        profile_index.value(),
+        *profile_index,
         GetCyclesTaken(&timers_, sub_streams_, stream_, clock_rate_ghz_));
   }
 }
@@ -104,7 +104,7 @@ static NcclAllReduceConfig GetNcclAllReduceConfig(mlir::lmhlo::AllReduceOp op,
 
   NcclAllReduceConfig config;
   config.config = GetNcclCollectiveConfigForMlir(op, replica_count);
-  config.reduction_kind = reduction_kind.value();
+  config.reduction_kind = *reduction_kind;
   return config;
 }
@@ -68,7 +68,7 @@ NcclCollectiveConfig GetNcclCollectiveConfig(const HloInstruction* hlo,
 
   if (hlo->channel_id().has_value()) {
     config.collective_op_kind = RendezvousKey::kCrossModule;
-    config.op_id = hlo->channel_id().value();
+    config.op_id = *hlo->channel_id();
   } else {
     config.collective_op_kind = RendezvousKey::kCrossReplica;
     config.op_id = static_cast<int64>(hlo->GetModule()->unique_id());