[XLA] gtl::optional->absl::optional
PiperOrigin-RevId: 209686671
This commit is contained in:
parent 95d718a8a4
commit 3cb3a450ed
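The change is mechanical: tensorflow::gtl::optional, gtl::nullopt, and
gtl::make_optional become absl::optional, absl::nullopt, and
absl::make_optional; includes of "tensorflow/core/lib/gtl/optional.h" are
replaced with "absl/types/optional.h"; and the affected BUILD targets gain a
dependency on "@com_google_absl//absl/types:optional". Call sites are
otherwise untouched, since the gtl type exposed the same interface used
throughout this diff (operator bool, has_value(), value(), operator*). A
minimal before/after sketch of the pattern, using a hypothetical
ParseTilingFactor helper that is not part of this change:

    // Before:
    //   #include "tensorflow/core/lib/gtl/optional.h"
    //   tensorflow::gtl::optional<int> ParseTilingFactor(
    //       const std::map<std::string, int>& options);
    //
    // After:
    #include <map>
    #include <string>

    #include "absl/types/optional.h"

    // Returns the configured tiling factor, or nullopt if none is set.
    absl::optional<int> ParseTilingFactor(
        const std::map<std::string, int>& options) {
      auto it = options.find("tiling_factor");
      if (it == options.end()) {
        return absl::nullopt;  // was: tensorflow::gtl::nullopt
      }
      return it->second;  // wraps into absl::optional implicitly
    }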
@@ -619,6 +619,7 @@ Status GenerateMetadata(const CodegenOpts& opts,
if (opts.gen_program_shape) {
program_shape =
absl::make_unique<xla::ProgramShape>(compile_result.program_shape);

// The parameter names are currently meaningless, and redundant with the
// rest of our metadata, so clear them out to avoid confusion and save
// space.
@@ -362,9 +362,9 @@ cc_library(
"//tensorflow/compiler/jit/graphcycles",
"//tensorflow/core:framework",
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/kernels:bounds_check",
"@com_google_absl//absl/types:optional",
],
)
@@ -565,7 +565,7 @@ static void VLogClusteringSummary(const Graph& g) {
int clustered_node_count = 0;

for (Node* n : g.nodes()) {
gtl::optional<StringPiece> cluster_name = GetXlaClusterForNode(*n);
absl::optional<StringPiece> cluster_name = GetXlaClusterForNode(*n);
if (cluster_name) {
clustered_node_count++;
cluster_name_to_size[*cluster_name]++;
@@ -30,7 +30,7 @@ Status FindNodesToDecluster(const Graph& graph, gtl::FlatSet<Node*>* result,
MemoryTypeVector input_mtypes, output_mtypes;

for (Node* n : post_order) {
gtl::optional<StringPiece> from_cluster = GetXlaClusterForNode(*n);
absl::optional<StringPiece> from_cluster = GetXlaClusterForNode(*n);
if (!from_cluster) {
continue;
}
@@ -79,8 +79,8 @@ Status FindNodesToDecluster(const Graph& graph, gtl::FlatSet<Node*>* result,
// Check if `dst` is in a different cluster, unclustered, or about to be
// partially declustered (here we rely on the post-order traversal order).
// If yes, decluster `n` to avoid the device-to-host memcpy.
gtl::optional<StringPiece> dst_cluster =
result->count(dst) ? gtl::nullopt : GetXlaClusterForNode(*dst);
absl::optional<StringPiece> dst_cluster =
result->count(dst) ? absl::nullopt : GetXlaClusterForNode(*dst);
if (from_cluster != dst_cluster) {
CHECK(result->insert(n).second);
break;
@@ -99,7 +99,7 @@ Status PartiallyDeclusterNode(Graph* graph, Node* n) {
}

Node* dst = out_edge->dst();
gtl::optional<StringPiece> dst_cluster_name = GetXlaClusterForNode(*dst);
absl::optional<StringPiece> dst_cluster_name = GetXlaClusterForNode(*dst);
if (dst_cluster_name != cluster_name) {
out_edges_to_clone.push_back(out_edge);
}
@@ -185,14 +185,14 @@ Status CreateCycleDetectionGraph(const Graph* graph, GraphCycles* cycles) {
return Status::OK();
}

gtl::optional<StringPiece> GetXlaClusterForNode(const Node& node) {
absl::optional<StringPiece> GetXlaClusterForNode(const Node& node) {
const AttrValue* attr_value = node.attrs().Find(kXlaClusterAttr);
if (attr_value == nullptr) {
return gtl::nullopt;
return absl::nullopt;
}
Status s = AttrValueHasType(*attr_value, "string");
if (!s.ok()) {
return gtl::nullopt;
return absl::nullopt;
}
return attr_value->s();
}
@@ -18,9 +18,9 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_
#define TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_

#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/graphcycles/graphcycles.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/gtl/optional.h"

namespace tensorflow {

@@ -47,7 +47,7 @@ Status CreateCycleDetectionGraph(const Graph* graph, GraphCycles* cycles);

// Returns the XLA cluster in which `node` is placed if it is in an XLA cluster,
// otherwise returns nullopt.
gtl::optional<StringPiece> GetXlaClusterForNode(const Node& node);
absl::optional<StringPiece> GetXlaClusterForNode(const Node& node);

// Removes `node_def` its XLA cluster (by clearing its _XlaCluster attribute).
void RemoveFromXlaCluster(NodeDef* node_def);
@@ -288,6 +288,7 @@ cc_library(
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"@com_google_absl//absl/types:optional",
],
)
@@ -480,8 +481,8 @@ cc_library(
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)
@@ -506,8 +507,8 @@ cc_library(
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)
@@ -530,8 +531,8 @@ cc_library(
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)
@@ -22,6 +22,7 @@ limitations under the License.
#include <vector>

#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/union_find.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
@@ -32,7 +33,6 @@ limitations under the License.
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/gtl/optional.h"

using xla::StatusOr;

@@ -863,7 +863,7 @@ CondStateMap::ContainsResult CondStateMap::LhsHoldsWhereverRhsHolds(

BranchType CondStateMap::FindBranchOf(CondId id, OutputTensor predicate) const {
if (IsEmpty(id)) return BranchType::kNeither;
gtl::optional<BranchType> b;
absl::optional<BranchType> b;
const CondState& nodes = *id;
for (auto it = nodes.rbegin(); it != nodes.rend(); ++it) {
if (it->type == CondStateMap::CondNode::Type::kSwitch &&
@@ -22,6 +22,7 @@ limitations under the License.
#include <vector>

#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/union_find.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/functionalize_cond.h"
@@ -35,7 +36,6 @@ limitations under the License.
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/gtl/optional.h"

namespace tensorflow {

@@ -22,6 +22,7 @@ limitations under the License.
#include <vector>

#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/union_find.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
@@ -33,7 +34,6 @@ limitations under the License.
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/gtl/optional.h"

namespace tensorflow {
namespace {
@@ -27,10 +27,10 @@ const char kShardingAttribute[] = "_XlaSharding";
} // namespace

namespace {
xla::StatusOr<tensorflow::gtl::optional<xla::OpSharding>>
GetShardingFromNodeDef(const NodeDef& node_def) {
xla::StatusOr<absl::optional<xla::OpSharding>> GetShardingFromNodeDef(
const NodeDef& node_def) {
if (!HasNodeAttr(node_def, kShardingAttribute)) {
return tensorflow::gtl::optional<xla::OpSharding>();
return absl::optional<xla::OpSharding>();
}
string value;
xla::OpSharding sharding;
@@ -40,7 +40,7 @@ GetShardingFromNodeDef(const NodeDef& node_def) {
"Experimental _XlaSharding attribute was not a valid encoded "
"xla::OpSharding proto.");
}
return tensorflow::gtl::optional<xla::OpSharding>(sharding);
return absl::optional<xla::OpSharding>(sharding);
}

Status CoreOutOfRangeError(int core, int num_cores_per_replica) {
@@ -50,12 +50,11 @@ Status CoreOutOfRangeError(int core, int num_cores_per_replica) {
}
} // namespace

xla::StatusOr<tensorflow::gtl::optional<xla::OpSharding>>
ParseShardingFromDevice(
xla::StatusOr<absl::optional<xla::OpSharding>> ParseShardingFromDevice(
const string& device_name, int num_cores_per_replica,
tensorflow::gtl::optional<xla::OpSharding> explicit_sharding) {
absl::optional<xla::OpSharding> explicit_sharding) {
if (device_name.empty()) {
return tensorflow::gtl::optional<xla::OpSharding>();
return absl::optional<xla::OpSharding>();
}
DeviceNameUtils::ParsedName parsed_device;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_device)) {
@@ -68,32 +67,32 @@ ParseShardingFromDevice(
} else if (!parsed_device.has_type || !parsed_device.has_id ||
!str_util::StrContains(parsed_device.type,
kDeviceSuffixReplicatedCore)) {
return tensorflow::gtl::optional<xla::OpSharding>();
return absl::optional<xla::OpSharding>();
} else {
const int core = parsed_device.id;
if (core < 0 || core >= num_cores_per_replica) {
return CoreOutOfRangeError(core, num_cores_per_replica);
}
return tensorflow::gtl::optional<xla::OpSharding>(
return absl::optional<xla::OpSharding>(
xla::sharding_builder::AssignDevice(core));
}
}

xla::StatusOr<tensorflow::gtl::optional<xla::OpSharding>>
ParseShardingFromDevice(const NodeDef& node_def, int num_cores_per_replica) {
xla::StatusOr<absl::optional<xla::OpSharding>> ParseShardingFromDevice(
const NodeDef& node_def, int num_cores_per_replica) {
const string& device_name = node_def.device();
TF_ASSIGN_OR_RETURN(tensorflow::gtl::optional<xla::OpSharding> sharding,
TF_ASSIGN_OR_RETURN(absl::optional<xla::OpSharding> sharding,
GetShardingFromNodeDef(node_def));
return ParseShardingFromDevice(device_name, num_cores_per_replica, sharding);
}

xla::StatusOr<tensorflow::gtl::optional<xla::OpSharding>>
ParseShardingFromDevice(const Node& node, int num_cores_per_replica) {
xla::StatusOr<absl::optional<xla::OpSharding>> ParseShardingFromDevice(
const Node& node, int num_cores_per_replica) {
string device_name = node.assigned_device_name();
if (device_name.empty()) {
device_name = node.requested_device();
}
TF_ASSIGN_OR_RETURN(tensorflow::gtl::optional<xla::OpSharding> sharding,
TF_ASSIGN_OR_RETURN(absl::optional<xla::OpSharding> sharding,
GetShardingFromNodeDef(node.def()));
return ParseShardingFromDevice(device_name, num_cores_per_replica, sharding);
}
@@ -33,16 +33,15 @@ namespace tensorflow {
// - explicit_sharding if explicit_sharding.has_value()
// - a non-value if there is no assigned core or
// - a sharding set as per xla::sharding_builder::AssignDevice.
xla::StatusOr<tensorflow::gtl::optional<xla::OpSharding>>
ParseShardingFromDevice(const string& device_name, int num_cores_per_replica,
tensorflow::gtl::optional<xla::OpSharding>
explicit_sharding = tensorflow::gtl::nullopt);
xla::StatusOr<absl::optional<xla::OpSharding>> ParseShardingFromDevice(
const string& device_name, int num_cores_per_replica,
absl::optional<xla::OpSharding> explicit_sharding = absl::nullopt);

xla::StatusOr<tensorflow::gtl::optional<xla::OpSharding>>
ParseShardingFromDevice(const Node& node, int num_cores_per_replica);
xla::StatusOr<absl::optional<xla::OpSharding>> ParseShardingFromDevice(
const Node& node, int num_cores_per_replica);

xla::StatusOr<tensorflow::gtl::optional<xla::OpSharding>>
ParseShardingFromDevice(const NodeDef& node_def, int num_cores_per_replica);
xla::StatusOr<absl::optional<xla::OpSharding>> ParseShardingFromDevice(
const NodeDef& node_def, int num_cores_per_replica);

void SetShardingDeviceAssignmentFromNode(const Node& src, Node* dst);

@@ -23,7 +23,7 @@ TEST(CoreUtilTest, ParseShardingFromDevice) {
Graph graph(OpRegistry::Global());

auto core_from_sharding =
[](tensorflow::gtl::optional<xla::OpSharding> sharding) -> int64 {
[](absl::optional<xla::OpSharding> sharding) -> int64 {
if (sharding.has_value() &&
sharding.value().type() ==
xla::OpSharding::Type::OpSharding_Type_MAXIMAL) {
@@ -20,6 +20,7 @@ limitations under the License.
#include <set>
#include <unordered_map>

#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
@@ -32,7 +33,6 @@ limitations under the License.
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/strings/strcat.h"

namespace tensorflow {
@@ -268,7 +268,7 @@ Status SetNodeShardingFromNeighbors(Node* n, bool out_edges) {
if (edge->IsControlEdge()) continue;
const Node* possible_match = out_edges ? edge->dst() : edge->src();
TF_ASSIGN_OR_RETURN(
tensorflow::gtl::optional<xla::OpSharding> sharding,
absl::optional<xla::OpSharding> sharding,
ParseShardingFromDevice(
*possible_match,
/*num_cores_per_replica=*/std::numeric_limits<int32>::max()));
@@ -103,7 +103,7 @@ void XlaCompilationDevice::Compute(OpKernel* op_kernel,
auto sharding_parse_result = ParseShardingFromDevice(
op_kernel->def(), std::numeric_limits<int>::max());
OP_REQUIRES_OK(context, sharding_parse_result.status());
tensorflow::gtl::optional<xla::OpSharding> op_sharding =
absl::optional<xla::OpSharding> op_sharding =
sharding_parse_result.ValueOrDie();

// If no sharding metadata is found, XLA is free to use whatever device it
@@ -414,7 +414,7 @@ Status BuildComputation(

// Request that the value be returned on a specific core.
xla::XlaScopedShardingAssignment assign_sharding(
builder, core == -1 ? tensorflow::gtl::optional<xla::OpSharding>()
builder, core == -1 ? absl::optional<xla::OpSharding>()
: xla::sharding_builder::AssignDevice(core));

xla::XlaOp handle;
@@ -571,7 +571,7 @@ Status XlaCompiler::BuildArguments(
for (std::vector<int>::size_type i = 0; i < input_mapping->size(); ++i) {
const int core = (*arg_cores)[input_mapping->at(i)];
xla::XlaScopedShardingAssignment assign_sharding(
builder, core == -1 ? tensorflow::gtl::optional<xla::OpSharding>()
builder, core == -1 ? absl::optional<xla::OpSharding>()
: xla::sharding_builder::AssignDevice(core));
arg_handles[i] = xla::GetTupleElement(tuple, i);
}
@@ -579,7 +579,7 @@ Status XlaCompiler::BuildArguments(
for (std::vector<int>::size_type i = 0; i < input_mapping->size(); ++i) {
const int core = (*arg_cores)[input_mapping->at(i)];
xla::XlaScopedShardingAssignment assign_sharding(
builder, core == -1 ? tensorflow::gtl::optional<xla::OpSharding>()
builder, core == -1 ? absl::optional<xla::OpSharding>()
: xla::sharding_builder::AssignDevice(core));
arg_handles[i] = xla::Parameter(builder, i, (*input_shapes)[i],
strings::StrCat("arg", i));
@@ -237,10 +237,12 @@ cc_library(
":types",
":util",
":xla_data_proto",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:regexp_internal",
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/types:optional",
],
)

@@ -586,6 +588,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)

@@ -90,6 +90,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service:device_memory_allocator",
"//tensorflow/core:lib",
"@com_google_absl//absl/types:optional",
],
)
@@ -71,8 +71,8 @@ ExecutableBuildOptions& ExecutableBuildOptions::set_generate_hlo_graph(
return *this;
}

const tensorflow::gtl::optional<string>&
ExecutableBuildOptions::generate_hlo_graph() const {
const absl::optional<string>& ExecutableBuildOptions::generate_hlo_graph()
const {
return generate_hlo_graph_;
}

@@ -82,7 +82,7 @@ ExecutableBuildOptions& ExecutableBuildOptions::set_dump_optimized_hlo_proto_to(
return *this;
}

const tensorflow::gtl::optional<string>&
const absl::optional<string>&
ExecutableBuildOptions::dump_optimized_hlo_proto_to() const {
return dump_optimized_hlo_proto_to_;
}
@@ -94,7 +94,7 @@ ExecutableBuildOptions::set_dump_unoptimized_hlo_proto_to(
return *this;
}

const tensorflow::gtl::optional<string>&
const absl::optional<string>&
ExecutableBuildOptions::dump_unoptimized_hlo_proto_to() const {
return dump_unoptimized_hlo_proto_to_;
}
@@ -105,7 +105,7 @@ ExecutableBuildOptions& ExecutableBuildOptions::set_dump_per_pass_hlo_proto_to(
return *this;
}

const tensorflow::gtl::optional<string>&
const absl::optional<string>&
ExecutableBuildOptions::dump_per_pass_hlo_proto_to() const {
return dump_per_pass_hlo_proto_to_;
}
@@ -115,7 +115,7 @@ ExecutableBuildOptions& ExecutableBuildOptions::set_hlo_profile(bool enabled) {
return *this;
}

tensorflow::gtl::optional<bool> ExecutableBuildOptions::hlo_profile() const {
absl::optional<bool> ExecutableBuildOptions::hlo_profile() const {
return hlo_profile_;
}
@@ -16,11 +16,11 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_EXECUTABLE_BUILD_OPTIONS_H_
#define TENSORFLOW_COMPILER_XLA_CLIENT_EXECUTABLE_BUILD_OPTIONS_H_

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/optional.h"

namespace xla {

@@ -57,32 +57,31 @@ class ExecutableBuildOptions {

// If set, specifies a regexp of HLO graphs to dump (as in DebugOptions).
ExecutableBuildOptions& set_generate_hlo_graph(string regex);
const tensorflow::gtl::optional<string>& generate_hlo_graph() const;
const absl::optional<string>& generate_hlo_graph() const;

// If set, specifies a dirpath to dump the end-of-optimization-pipeline HLO
// protobuf to (as in DebugOptions).
ExecutableBuildOptions& set_dump_optimized_hlo_proto_to(
tensorflow::StringPiece dirpath);
const tensorflow::gtl::optional<string>& dump_optimized_hlo_proto_to() const;
const absl::optional<string>& dump_optimized_hlo_proto_to() const;

// If set, specifies a dirpath to dump the start-of-optimization-pipeline HLO
// protobuf to (as in DebugOptions).
ExecutableBuildOptions& set_dump_unoptimized_hlo_proto_to(
tensorflow::StringPiece dirpath);
const tensorflow::gtl::optional<string>& dump_unoptimized_hlo_proto_to()
const;
const absl::optional<string>& dump_unoptimized_hlo_proto_to() const;

// If set, specifies a dirpath to dump the per-pass-in-pipeline HLO protobufs
// to (as in DebugOptions).
ExecutableBuildOptions& set_dump_per_pass_hlo_proto_to(
tensorflow::StringPiece dirpath);
const tensorflow::gtl::optional<string>& dump_per_pass_hlo_proto_to() const;
const absl::optional<string>& dump_per_pass_hlo_proto_to() const;

// If true, specifies that we should record an HLO profile during execution
// and log it after execution (as in DebugOptions). If nullopt the default is
// used.
ExecutableBuildOptions& set_hlo_profile(bool enabled);
tensorflow::gtl::optional<bool> hlo_profile() const;
absl::optional<bool> hlo_profile() const;

void add_disabled_hlo_pass(tensorflow::StringPiece pass_name) {
disabled_hlo_passes_.push_back(std::string(pass_name));
@@ -96,14 +95,14 @@ class ExecutableBuildOptions {
string ToString() const;

private:
tensorflow::gtl::optional<bool> hlo_profile_;
absl::optional<bool> hlo_profile_;
int device_ordinal_ = -1;
Shape result_layout_;
bool result_layout_set_ = false;
tensorflow::gtl::optional<string> generate_hlo_graph_;
tensorflow::gtl::optional<string> dump_optimized_hlo_proto_to_;
tensorflow::gtl::optional<string> dump_unoptimized_hlo_proto_to_;
tensorflow::gtl::optional<string> dump_per_pass_hlo_proto_to_;
absl::optional<string> generate_hlo_graph_;
absl::optional<string> dump_optimized_hlo_proto_to_;
absl::optional<string> dump_unoptimized_hlo_proto_to_;
absl::optional<string> dump_per_pass_hlo_proto_to_;
DeviceMemoryAllocator* device_allocator_ = nullptr;
std::vector<std::string> disabled_hlo_passes_;
};
@@ -1451,7 +1451,7 @@ XlaOp XlaBuilder::Rev(const XlaOp& operand,
});
}

XlaOp XlaBuilder::Sort(XlaOp keys, tensorflow::gtl::optional<XlaOp> values,
XlaOp XlaBuilder::Sort(XlaOp keys, absl::optional<XlaOp> values,
int64 dimension) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -1872,14 +1872,14 @@ XlaOp XlaBuilder::CrossReplicaSum(
b->Parameter(/*parameter_number=*/1, scalar_shape, "y"));
TF_ASSIGN_OR_RETURN(auto computation, b->Build());
return CrossReplicaSum(operand, computation, replica_group_ids,
/*channel_id=*/tensorflow::gtl::nullopt);
/*channel_id=*/absl::nullopt);
});
}

XlaOp XlaBuilder::CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
const tensorflow::gtl::optional<ChannelHandle>& channel_id) {
const absl::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -2744,10 +2744,9 @@ XlaOp CrossReplicaSum(const XlaOp& operand,
return operand.builder()->CrossReplicaSum(operand, replica_group_ids);
}

XlaOp CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
const tensorflow::gtl::optional<ChannelHandle>& channel_id) {
XlaOp CrossReplicaSum(const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
const absl::optional<ChannelHandle>& channel_id) {
return operand.builder()->CrossReplicaSum(operand, computation,
replica_group_ids, channel_id);
}
@@ -2844,8 +2843,7 @@ XlaOp Rev(const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
return operand.builder()->Rev(operand, dimensions);
}

XlaOp Sort(XlaOp keys, tensorflow::gtl::optional<XlaOp> values,
int64 dimension) {
XlaOp Sort(XlaOp keys, absl::optional<XlaOp> values, int64 dimension) {
return keys.builder()->Sort(keys, std::move(values), dimension);
}
@@ -154,12 +154,10 @@ class XlaBuilder {

// Clears the sharding. Ops will be sharded according to the default placement
// policy.
void ClearSharding() { sharding_ = tensorflow::gtl::nullopt; }
void ClearSharding() { sharding_ = absl::nullopt; }

// Returns the OpSharding that will be attached to all instructions.
const tensorflow::gtl::optional<OpSharding>& sharding() const {
return sharding_;
}
const absl::optional<OpSharding>& sharding() const { return sharding_; }

// Sets the builder to a mode where it will die immediately when an error is
// encountered, rather than producing it in a deferred fashion when Build() is
@@ -701,8 +699,7 @@ class XlaBuilder {
XlaOp CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids = {},
const tensorflow::gtl::optional<ChannelHandle>& channel_id =
tensorflow::gtl::nullopt);
const absl::optional<ChannelHandle>& channel_id = absl::nullopt);

// Enqueues an operation that do an Alltoall of the operand cross cores.
//
@@ -831,8 +828,7 @@ class XlaBuilder {
// * The result is a tuple that consists of a sorted tensor of keys (along the
// provided dimension, as above) as the first element, and a tensor with their
// corresponding values as the second element.
XlaOp Sort(XlaOp keys,
tensorflow::gtl::optional<XlaOp> values = tensorflow::gtl::nullopt,
XlaOp Sort(XlaOp keys, absl::optional<XlaOp> values = absl::nullopt,
int64 dimension = -1);

// Enqueues a clamp instruction onto the computation.
@@ -1039,7 +1035,7 @@ class XlaBuilder {

// Sharding for this operator. This is structured as a "model"-like operation,
// in order to simplify client code, similar to metadata_.
tensorflow::gtl::optional<OpSharding> sharding_;
absl::optional<OpSharding> sharding_;

// Mode bit that indicates whether to die when a first error is encountered.
bool die_immediately_on_error_ = false;
@@ -1246,7 +1242,7 @@ class XlaBuilder {
friend XlaOp CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
const tensorflow::gtl::optional<ChannelHandle>& channel_id);
const absl::optional<ChannelHandle>& channel_id);
friend XlaOp AllToAll(const XlaOp& operand, int64 split_dimension,
int64 concat_dimension, int64 split_count,
const std::vector<ReplicaGroup>& replica_groups);
@@ -1295,8 +1291,7 @@ class XlaBuilder {
tensorflow::gtl::ArraySlice<int64> permutation);
friend XlaOp Rev(const XlaOp& operand,
tensorflow::gtl::ArraySlice<int64> dimensions);
friend XlaOp Sort(XlaOp keys, tensorflow::gtl::optional<XlaOp> values,
int64 dimension);
friend XlaOp Sort(XlaOp keys, absl::optional<XlaOp> values, int64 dimension);
friend XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max);
friend XlaOp Map(XlaBuilder* builder,
tensorflow::gtl::ArraySlice<XlaOp> operands,
@@ -1359,7 +1354,7 @@
class XlaScopedShardingAssignment {
public:
XlaScopedShardingAssignment(xla::XlaBuilder* builder,
tensorflow::gtl::optional<OpSharding> sharding)
absl::optional<OpSharding> sharding)
: builder_(builder), prev_sharding_(builder->sharding()) {
SetSharding(sharding);
}
@@ -1371,7 +1366,7 @@ class XlaScopedShardingAssignment {
~XlaScopedShardingAssignment() { SetSharding(prev_sharding_); }

private:
void SetSharding(const tensorflow::gtl::optional<OpSharding>& sharding) {
void SetSharding(const absl::optional<OpSharding>& sharding) {
if (sharding.has_value()) {
builder_->SetSharding(sharding.value());
} else {
@@ -1380,7 +1375,7 @@ class XlaScopedShardingAssignment {
}

xla::XlaBuilder* const builder_;
tensorflow::gtl::optional<OpSharding> prev_sharding_;
absl::optional<OpSharding> prev_sharding_;
};

// Free functions for building XlaOps. The intention is that these will
@@ -1835,10 +1830,10 @@ XlaOp CrossReplicaSum(
// applied cross modules.
//
// TODO(b/79737069): Rename this to AllReduce when it's ready to use.
XlaOp CrossReplicaSum(const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids = {},
const tensorflow::gtl::optional<ChannelHandle>&
channel_id = tensorflow::gtl::nullopt);
XlaOp CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids = {},
const absl::optional<ChannelHandle>& channel_id = absl::nullopt);

// Enqueues an operation that do an Alltoall of the operand cross cores.
//
@@ -1963,8 +1958,7 @@ XlaOp Rev(const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions);
// * The result is a tuple that consists of a sorted tensor of keys (along the
// provided dimension, as above) as the first element, and a tensor with their
// corresponding values as the second element.
XlaOp Sort(XlaOp keys,
tensorflow::gtl::optional<XlaOp> values = tensorflow::gtl::nullopt,
XlaOp Sort(XlaOp keys, absl::optional<XlaOp> values = absl::nullopt,
int64 dimension = -1);

// Enqueues a clamp instruction onto the computation.
@@ -137,8 +137,7 @@ static StatusOr<ScopedShapedBuffer> ToBuffer(LocalClient* client,

/* static */
StatusOr<LocalShapedBuffer*> LocalShapedBuffer::FromLiteral(
const Literal& argument,
const tensorflow::gtl::optional<Shape>& shape_with_layout) {
const Literal& argument, const absl::optional<Shape>& shape_with_layout) {
LocalClient* client = GetOrCreateLocalClient();
StatusOr<ScopedShapedBuffer> buf = [&] {
if (shape_with_layout) {
@@ -163,7 +162,7 @@ CompiledLocalComputation::CompiledLocalComputation(

StatusOr<std::unique_ptr<Literal>> CompiledLocalComputation::Execute(
const std::vector<Literal>& arguments,
const std::vector<tensorflow::gtl::optional<Shape>>& shapes_with_layout) {
const std::vector<absl::optional<Shape>>& shapes_with_layout) {
LocalClient* client = GetOrCreateLocalClient();

VLOG(1) << "Execution requested with " << GetReplicaCount() << " replicas.";
@@ -194,7 +193,7 @@ StatusOr<std::unique_ptr<Literal>> CompiledLocalComputation::Execute(
scoped_buffers.reserve(arguments.size());
for (int i = 0; i < arguments.size(); ++i) {
const Literal& argument = arguments[i];
const tensorflow::gtl::optional<Shape>& shape_with_layout =
const absl::optional<Shape>& shape_with_layout =
shapes_with_layout[i];

StatusOr<ScopedShapedBuffer> pushed;
@@ -576,7 +575,7 @@ StatusOr<bool> LocalComputationBuilder::IsConstant(const LocalOp& operand) {
}

LocalOp LocalComputationBuilder::Sort(const LocalOp& operand, int64 dimension) {
return xla::Sort(operand.op(), tensorflow::gtl::nullopt, dimension);
return xla::Sort(operand.op(), absl::nullopt, dimension);
}

LocalOp LocalComputationBuilder::SortKeyVal(const LocalOp& keys,
@@ -60,8 +60,7 @@ StatusOr<std::unique_ptr<Literal> > TransferFromOutfeedLocalReplica(
class LocalShapedBuffer {
public:
static StatusOr<LocalShapedBuffer*> FromLiteral(
const Literal& argument,
const tensorflow::gtl::optional<Shape>& shape_with_layout);
const Literal& argument, const absl::optional<Shape>& shape_with_layout);

LocalShapedBuffer(ScopedShapedBuffer shaped_buffer);
const ScopedShapedBuffer* shaped_buffer() const;
@@ -120,7 +119,7 @@ class CompiledLocalComputation {
// shapes_with_layout.
StatusOr<std::unique_ptr<Literal> > Execute(
const std::vector<Literal>& arguments,
const std::vector<tensorflow::gtl::optional<Shape> >& shapes_with_layout);
const std::vector<absl::optional<Shape> >& shapes_with_layout);

LocalShapedBuffer* ExecuteWithShapedBuffers(
tensorflow::gtl::ArraySlice<LocalShapedBuffer*> argument_handles);
@@ -409,10 +409,10 @@ tensorflow::ImportNumpy();
$1 = &temp;
}

%typemap(in) const tensorflow::gtl::optional<Shape>& (
tensorflow::gtl::optional<Shape> temp) {
%typemap(in) const absl::optional<Shape>& (
absl::optional<Shape> temp) {
if ($input == Py_None) {
temp = tensorflow::gtl::nullopt;
temp = absl::nullopt;
$1 = &temp;
} else {
StatusOr<Shape> statusor = numpy::XlaShapeFromPyShape($input);
@@ -448,8 +448,8 @@ tensorflow::ImportNumpy();
$1 = &temps;
}

%typemap(in) const std::vector<tensorflow::gtl::optional<Shape> >& (
std::vector<tensorflow::gtl::optional<Shape> > temps) {
%typemap(in) const std::vector<absl::optional<Shape> >& (
std::vector<absl::optional<Shape> > temps) {
if (!PySequence_Check($input)) {
PyErr_SetString(PyExc_TypeError, "Argument is not a sequence");
SWIG_fail;
@@ -458,7 +458,7 @@ tensorflow::ImportNumpy();
for (int i = 0; i < size; ++i) {
PyObject* o = PySequence_GetItem($input, i);
if (o == Py_None) {
temps.push_back(tensorflow::gtl::nullopt);
temps.push_back(absl::nullopt);
} else {
StatusOr<Shape> statusor = numpy::XlaShapeFromPyShape(o);
Py_DECREF(o);
@@ -281,15 +281,15 @@ StatusOr<Shape> XlaShapeFromPyShape(PyObject* o) {

// Helper that retrieves the member with attr_name, stringifies it if is not
// None, and returns it as a C++ string.
static tensorflow::gtl::optional<string> GetAttrAsString(
PyObject* o, const string& attr_name) {
static absl::optional<string> GetAttrAsString(PyObject* o,
const string& attr_name) {
if (!PyObject_HasAttrString(o, attr_name.c_str())) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
PyObject* attr = PyObject_GetAttrString(o, attr_name.c_str());
if (attr == Py_None) {
Py_DECREF(attr);
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
string result = PyObjectCppStr(attr);
Py_DECREF(attr);
@@ -298,48 +298,46 @@ static tensorflow::gtl::optional<string> GetAttrAsString(

// Helper that retrieves the member with attr_name, checks that it is an integer
// if it is not None, and returns it as an int32 value.
static tensorflow::gtl::optional<int32> GetAttrAsInt32(
PyObject* o, const string& attr_name) {
static absl::optional<int32> GetAttrAsInt32(PyObject* o,
const string& attr_name) {
if (!PyObject_HasAttrString(o, attr_name.c_str())) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
PyObject* attr = PyObject_GetAttrString(o, attr_name.c_str());
if (attr == Py_None) {
Py_DECREF(attr);
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
if (!CheckPyIntOrLong(attr)) {
Py_DECREF(attr);
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
long value = PyIntOrPyLongToLong(attr); // NOLINT
Py_DECREF(attr);
if (value == -1 && PyErr_Occurred() != nullptr) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
if (static_cast<int32>(value) != value) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
return value;
}

StatusOr<OpMetadata> OpMetadataFromPyObject(PyObject* o) {
OpMetadata result;
tensorflow::gtl::optional<string> op_type = GetAttrAsString(o, "op_type");
absl::optional<string> op_type = GetAttrAsString(o, "op_type");
if (op_type.has_value()) {
result.set_op_type(op_type.value());
}
tensorflow::gtl::optional<string> op_name = GetAttrAsString(o, "op_name");
absl::optional<string> op_name = GetAttrAsString(o, "op_name");
if (op_name.has_value()) {
result.set_op_name(op_name.value());
}
tensorflow::gtl::optional<string> source_file =
GetAttrAsString(o, "source_file");
absl::optional<string> source_file = GetAttrAsString(o, "source_file");
if (source_file.has_value()) {
result.set_source_file(source_file.value());
}
tensorflow::gtl::optional<int32> source_line =
GetAttrAsInt32(o, "source_line");
absl::optional<int32> source_line = GetAttrAsInt32(o, "source_line");
if (source_line.has_value()) {
result.set_source_line(source_line.value());
}
@@ -241,6 +241,7 @@ cc_library(
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)

@@ -398,6 +399,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/types:optional",
],
)

@@ -1091,6 +1093,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)

@@ -1248,6 +1251,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/types:optional",
],
)

@@ -1326,6 +1330,7 @@ cc_library(
"//tensorflow/core:lib",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)

@@ -1477,8 +1482,7 @@ cc_library(
deps = [
":hlo",
":hlo_evaluator",
"//tensorflow/compiler/xla:literal",
"//tensorflow/core:lib",
"@com_google_absl//absl/types:optional",
],
)

@@ -1493,6 +1497,7 @@ cc_library(
":while_loop_analysis",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/core:lib",
"@com_google_absl//absl/types:optional",
],
)

@@ -2663,6 +2668,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)

@@ -2746,6 +2752,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:regexp_internal",
"@com_google_absl//absl/types:optional",
],
alwayslink = 1,
)
@@ -3107,6 +3114,7 @@ cc_library(
"//tensorflow/core:ptr_util",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/types:optional",
],
)

@@ -3173,6 +3181,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:regexp_internal",
"@com_google_absl//absl/types:optional",
],
)
@@ -24,6 +24,7 @@ limitations under the License.

#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
@@ -43,7 +44,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
@@ -20,6 +20,7 @@ limitations under the License.
#include <utility>
#include <vector>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
@@ -35,7 +36,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
@@ -43,7 +43,7 @@ namespace xla {

namespace {

using tensorflow::gtl::optional;
using absl::optional;

// BatchNormExpanderVisitor traverses the HLO computation and rewrites BatchNorm
// operations into smaller operations.
@@ -236,7 +236,7 @@ TEST_F(BFloat16ConversionFoldingTest, FoldCrossReplicaSumTupleOutput) {
builder.AddInstruction(HloInstruction::CreateCrossReplicaSum(
ShapeUtil::MakeTupleShape({f32_shape, f32_shape}), {convert_a, b},
sum, /*replica_group_ids=*/{}, /*barrier=*/"",
/*all_reduce_id=*/tensorflow::gtl::nullopt));
/*all_reduce_id=*/absl::nullopt));
HloInstruction* gte_a = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, crs, 0));
HloInstruction* gte_b = builder.AddInstruction(
@@ -252,7 +252,7 @@ TEST_F(BFloat16NormalizationTest, ResolveMixedPrecisionTupleCrossReplicaSum) {
builder.AddInstruction(HloInstruction::CreateCrossReplicaSum(
ShapeUtil::MakeTupleShape({f32_shape, bf16_shape}), {a, b}, reduction,
/*replica_group_ids=*/{}, /*barrier=*/"",
/*all_reduce_id=*/tensorflow::gtl::nullopt));
/*all_reduce_id=*/absl::nullopt));
HloInstruction* gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(bf16_shape, crs, 1));
@@ -34,8 +34,8 @@ namespace cpu {
// instruction stream.

namespace {
using ::tensorflow::gtl::nullopt;
using ::tensorflow::gtl::optional;
using ::absl::nullopt;
using ::absl::optional;

using ShouldMakeOperandColMajorCache =
tensorflow::gtl::FlatMap<const HloInstruction*, bool>;
@@ -45,8 +45,7 @@ bool VectorizedReduceDisabled(const HloModuleConfig& config) {
return extra_options_map.count(kXlaOptimizeForSizeCpuOption) > 0;
}

tensorflow::gtl::optional<int64> LlvmIrGemvTilingFactor(
const HloModuleConfig& config) {
absl::optional<int64> LlvmIrGemvTilingFactor(const HloModuleConfig& config) {
const auto& extra_options_map =
config.debug_options().xla_backend_extra_options();
auto it = extra_options_map.find(kLlvmIrDotTilingFactor);
@@ -55,7 +54,7 @@ tensorflow::gtl::optional<int64> LlvmIrGemvTilingFactor(
tensorflow::strings::safe_strto64(it->second, &tiling_factor)) {
return tiling_factor;
}
return tensorflow::gtl::nullopt;
return absl::nullopt;
}

bool EnableExperimentalLlvmIrGemm(const HloModuleConfig& config) {
@@ -71,13 +70,13 @@ static tensorflow::StringPiece RemoveSuffix(tensorflow::StringPiece str,
return str.substr(0, str.size() - suffix.size());
}

tensorflow::gtl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
absl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
const HloModuleConfig& config) {
const auto& extra_options_map =
config.debug_options().xla_backend_extra_options();
auto it = extra_options_map.find(kLlvmIrGemmTileSize);
if (it == extra_options_map.end()) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}

std::vector<string> tile_components =
@@ -27,9 +27,8 @@ namespace options {
bool OptimizeForSizeRequested(const HloModuleConfig& config);
bool VectorizedReduceDisabled(const HloModuleConfig& config);
bool EnableExperimentalLlvmIrGemm(const HloModuleConfig& config);
tensorflow::gtl::optional<int64> LlvmIrGemvTilingFactor(
const HloModuleConfig& config);
tensorflow::gtl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
absl::optional<int64> LlvmIrGemvTilingFactor(const HloModuleConfig& config);
absl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
const HloModuleConfig& config);

} // namespace options
@@ -1620,7 +1620,7 @@ bool PotentiallyImplementedAsEigenDot(

// For vector-matrix dot products, it is always profitable to make the Rhs
// column major.
tensorflow::gtl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
absl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kDot && hlo.shape().dimensions_size() == 2 &&
hlo.shape().dimensions(0) == 1) {
@@ -38,7 +38,7 @@ bool PotentiallyImplementedAsEigenDot(
// Returns the index for an operand to `hlo` that should ideally be column
// major. Returns nullopt if there is no such operand or if `hlo` is not a dot
// or a fusion containing a dot.
tensorflow::gtl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
absl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
const HloInstruction& hlo);

// Returns true to indicate that we can generate a tiled LLVM IR implementation
@@ -24,7 +24,7 @@ limitations under the License.
namespace xla {
namespace {

using tensorflow::gtl::nullopt;
using absl::nullopt;

class ElementalIrEmitterExecutionTest : public HloTestBase {
protected:
@@ -186,6 +186,7 @@ cc_library(
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
"@llvm//:core",
"@llvm//:support",
],
@@ -346,6 +347,7 @@ cc_library(
"//tensorflow/core/platform/default/build_config:stream_executor_cuda", # build_cleaner: keep
"//tensorflow/stream_executor",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)

@@ -382,6 +384,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo_pass",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/types:optional",
],
)

@@ -684,6 +687,7 @@ cc_library(
"//tensorflow/core:regexp_internal",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
"@llvm//:core",
],
alwayslink = True, # Contains compiler registration
@@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CONVOLUTION_THUNK_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CONVOLUTION_THUNK_H_

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.h"
@@ -26,7 +27,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
@@ -14,12 +14,12 @@ limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/backend_configs.pb.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_comparator.h"
#include "tensorflow/compiler/xla/service/gpu/convolution_thunk.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/mutex.h"
@@ -28,10 +28,10 @@ namespace xla {
namespace gpu {
namespace {

using absl::optional;
using se::DeviceMemoryBase;
using se::dnn::AlgorithmConfig;
using se::dnn::AlgorithmDesc;
using tensorflow::gtl::optional;

class ScratchAllocator : public se::ScratchAllocator {
public:
@@ -16,12 +16,12 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CUDNN_CONVOLUTION_ALGORITHM_PICKER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_CUDNN_CONVOLUTION_ALGORITHM_PICKER_H_

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/compiler.h"
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
@@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_FFT_THUNK_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_FFT_THUNK_H_

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
@@ -25,7 +26,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
@@ -112,7 +112,7 @@ Status GpuExecutable::ExecuteThunks(
//
// TODO(jlebar): Should we cache the results of HloInstruction::ToString(),
// since we expect it to be an expensive call?
tensorflow::gtl::optional<ScopedAnnotation> op_annotation;
absl::optional<ScopedAnnotation> op_annotation;
if (top_level_annotation.IsEnabled()) {
op_annotation.emplace(
thunk->hlo_instruction() != nullptr
@@ -19,6 +19,7 @@ limitations under the License.
#include <memory>
#include <string>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
#include "tensorflow/compiler/xla/service/executable.h"
@@ -35,7 +36,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -24,6 +24,7 @@ limitations under the License.
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
@@ -79,7 +80,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {
@@ -88,11 +88,11 @@ namespace gpu {
namespace {

using absl::InlinedVector;
using absl::nullopt;
using absl::optional;
using llvm_ir::IrArray;
using llvm_ir::IrName;
using tensorflow::gtl::ArraySlice;
using tensorflow::gtl::nullopt;
using tensorflow::gtl::optional;
using tensorflow::strings::StrCat;

// If a dimensions is smaller than this, untiled transposition may be more
@@ -2098,9 +2098,9 @@ Status IrEmitterUnnested::HandleSort(HloInstruction* sort) {

TF_RETURN_IF_ERROR(llvm_ir::EmitSortInPlace(
dimension_to_sort, GetIrArray(*sort, *sort, keys_shape_index),
values != nullptr ? tensorflow::gtl::make_optional<IrArray>(
values != nullptr ? absl::make_optional<IrArray>(
GetIrArray(*sort, *sort, values_shape_index))
: tensorflow::gtl::nullopt,
: absl::nullopt,
IrName(sort), xor_mask, &b_, &launch_dimensions));
}
}
@@ -2308,7 +2308,7 @@ std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
for (const auto& kv : hlo_slices) {
buffers_needed.insert(kv.second.first.allocation());
}
tensorflow::gtl::optional<const BufferAllocation*> temp_buffer;
absl::optional<const BufferAllocation*> temp_buffer;
for (const BufferAllocation& alloc : buffer_assn.Allocations()) {
if (alloc.IsPreallocatedTempBuffer()) {
if (!temp_buffer.has_value()) {
@@ -20,13 +20,13 @@ limitations under the License.
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/executable.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/llvm_compiler.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
@ -19,11 +19,11 @@ limitations under the License.
|
||||
#include "absl/algorithm/container.h"
|
||||
#include "absl/container/inlined_vector.h"
|
||||
#include "absl/memory/memory.h"
|
||||
#include "absl/types/optional.h"
|
||||
#include "tensorflow/compiler/xla/literal_util.h"
|
||||
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
|
||||
#include "tensorflow/compiler/xla/service/shape_inference.h"
|
||||
#include "tensorflow/core/lib/core/casts.h"
|
||||
#include "tensorflow/core/lib/gtl/optional.h"
|
||||
|
||||
namespace xla {
|
||||
|
||||
@ -1672,8 +1672,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
|
||||
// 2. Using the selected index, scatter value from `source` to result. We
|
||||
// do this by iterating through the window, and compare each index with
|
||||
// the selected index.
|
||||
tensorflow::gtl::optional<ReturnT> selected_val;
|
||||
tensorflow::gtl::optional<std::vector<int64>> selected_index;
|
||||
absl::optional<ReturnT> selected_val;
|
||||
absl::optional<std::vector<int64>> selected_index;
|
||||
|
||||
IterateThroughWindow(
|
||||
window_shape, window, operand_literal.shape(), source_index,
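
The selected_val/selected_index declarations above use an optional as an accumulator: an empty optional means "no candidate selected yet", which avoids reserving a sentinel element value. A hedged sketch of that pattern over a plain vector (MaxOf is illustrative, not part of the evaluator):

  #include <vector>

  #include "absl/types/optional.h"

  // Returns the largest element, or an empty optional for an empty window.
  absl::optional<int> MaxOf(const std::vector<int>& window) {
    absl::optional<int> selected;
    for (int v : window) {
      if (!selected.has_value() || v > *selected) {
        selected = v;  // first candidate, or a strictly larger one
      }
    }
    return selected;
  }
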

@ -26,6 +26,7 @@ limitations under the License.
#include <unordered_map>
#include <vector>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
@ -37,7 +38,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/window_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
@ -47,10 +47,10 @@ limitations under the License.
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/regexp.h"

using ::absl::nullopt;
using ::absl::optional;
using ::tensorflow::Env;
using ::tensorflow::WriteStringToFile;
using ::tensorflow::gtl::nullopt;
using ::tensorflow::gtl::optional;
using ::tensorflow::io::JoinPath;
using ::tensorflow::str_util::Join;
using ::tensorflow::str_util::StringReplace;

@ -296,7 +296,7 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "CrossReplicaSum should have 1 called computation but sees "
<< proto.called_computation_ids_size();
tensorflow::gtl::optional<int64> all_reduce_id;
absl::optional<int64> all_reduce_id;
if (proto.all_reduce_id() > 0) {
all_reduce_id = proto.all_reduce_id();
}
@ -666,7 +666,7 @@ HloInstruction::CreateCrossReplicaSum(
HloComputation* reduce_computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
tensorflow::StringPiece barrier,
const tensorflow::gtl::optional<int64>& all_reduce_id) {
const absl::optional<int64>& all_reduce_id) {
return absl::make_unique<HloAllReduceInstruction>(
shape, operands, reduce_computation, replica_group_ids, barrier,
all_reduce_id);
@ -1836,7 +1836,7 @@ string HloInstruction::ToString(const HloPrintOptions& options) const {
}

bool HloInstruction::IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const {
const absl::optional<int64>& operand_idx) const {
switch (opcode_) {
// Unary elementwise operations.
case HloOpcode::kAbs:
@ -2623,7 +2623,7 @@ bool HloInstruction::IsElementwiseBinary() const {
}

bool HloInstruction::IsElementwise() const {
return IsElementwiseImpl(tensorflow::gtl::nullopt);
return IsElementwiseImpl(absl::nullopt);
}

bool HloInstruction::ImplicitlyBroadcastsOperand(int64 operand_idx) const {
@ -3156,7 +3156,7 @@ void HloInstruction::set_cross_replica_sum_barrier(const string& barrier) {
barrier);
}

tensorflow::gtl::optional<int64> HloInstruction::all_reduce_id() const {
absl::optional<int64> HloInstruction::all_reduce_id() const {
return Cast<HloAllReduceInstruction>(this)->all_reduce_id();
}

@ -449,7 +449,7 @@ class HloInstruction {
HloComputation* reduce_computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
tensorflow::StringPiece barrier,
const tensorflow::gtl::optional<int64>& all_reduce_id);
const absl::optional<int64>& all_reduce_id);

// This op handles the communication of an Alltoall operation. On each core,
// the operands are N ops in the same shape, where N is the number of cores
@ -1038,9 +1038,9 @@ class HloInstruction {
return sharding_ ? *sharding_ : default_;
}
// Returns the sharding unique device, if any.
tensorflow::gtl::optional<int64> sharding_unique_device() const {
absl::optional<int64> sharding_unique_device() const {
if (sharding_ == nullptr) {
return tensorflow::gtl::optional<int64>();
return absl::optional<int64>();
}
return sharding_->UniqueDevice();
}
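
Note that the rewritten sharding_unique_device keeps the default-constructed form absl::optional<int64>() for the no-sharding case; a default-constructed absl::optional is empty, so it behaves exactly like returning absl::nullopt. A quick check of that equivalence:

  #include <cassert>

  #include "absl/types/optional.h"

  int main() {
    absl::optional<long> a;                   // default-constructed: empty
    absl::optional<long> b = absl::nullopt;   // explicitly empty
    assert(a == b);                           // empty optionals compare equal
    assert(!a.has_value() && !b.has_value());
    return 0;
  }
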
@ -1427,7 +1427,7 @@ class HloInstruction {
void set_cross_replica_sum_barrier(const string& barrier);

// Delegates to HloAllReduceInstruction::all_reduce_id.
tensorflow::gtl::optional<int64> all_reduce_id() const;
absl::optional<int64> all_reduce_id() const;

// Returns data on the window in a windowed operation such as
// convolution.
@ -1557,7 +1557,7 @@ class HloInstruction {
// NOTE: For all instructions other than kFusion, being elementwise on one of
// the operands is equivalent to being elementwise on all the operands.
virtual bool IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const;
const absl::optional<int64>& operand_idx) const;
// Prints an instruction to a string.
//
// The canonical string representation needs to name operands and instruction

@ -301,8 +301,7 @@ HloAllReduceInstruction::HloAllReduceInstruction(
const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
HloComputation* reduce_computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
tensorflow::StringPiece barrier,
const tensorflow::gtl::optional<int64>& all_reduce_id)
tensorflow::StringPiece barrier, const absl::optional<int64>& all_reduce_id)
: HloInstruction(HloOpcode::kCrossReplicaSum, shape),
replica_group_ids_(replica_group_ids.begin(), replica_group_ids.end()),
cross_replica_sum_barrier_(barrier.begin(), barrier.end()),
@ -702,7 +701,7 @@ HloInstructionProto HloMapInstruction::ToProto() const {
}

bool HloMapInstruction::IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const {
const absl::optional<int64>& operand_idx) const {
if (!dimensions().empty()) {
// Check that the map is executed in elementwise compatible dimensions.
if (dimensions().size() != shape().dimensions_size()) {
@ -815,7 +814,7 @@ HloInstructionProto HloConstantInstruction::ToProto() const {
}

bool HloConstantInstruction::IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const {
const absl::optional<int64>& operand_idx) const {
return true;
}

@ -955,7 +954,7 @@ HloInstructionProto HloFusionInstruction::ToProto() const {
}

bool HloFusionInstruction::IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const {
const absl::optional<int64>& operand_idx) const {
if (!operand_idx.has_value()) {
for (auto* fused : fused_instructions()) {
if (fused->opcode() != HloOpcode::kParameter && !fused->IsElementwise()) {
@ -1387,7 +1386,7 @@ std::vector<string> HloRngInstruction::ExtraAttributesToStringImpl(
}

bool HloRngInstruction::IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const {
const absl::optional<int64>& operand_idx) const {
return true;
}

@ -225,7 +225,7 @@ class HloAllReduceInstruction : public HloInstruction {
HloComputation* reduce_computation,
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
tensorflow::StringPiece barrier,
const tensorflow::gtl::optional<int64>& all_reduce_id);
const absl::optional<int64>& all_reduce_id);

// Returns the group ids of each replica for CrossReplicaSum op.
const std::vector<int64>& replica_group_ids() const {
@ -241,9 +241,7 @@ class HloAllReduceInstruction : public HloInstruction {
cross_replica_sum_barrier_ = barrier;
}

tensorflow::gtl::optional<int64> all_reduce_id() const {
return all_reduce_id_;
}
absl::optional<int64> all_reduce_id() const { return all_reduce_id_; }

// Returns a serialized representation of this instruction.
HloInstructionProto ToProto() const override;
@ -271,7 +269,7 @@ class HloAllReduceInstruction : public HloInstruction {
// For Allreduce nodes from different modules, if they have the same
// all_reduce_id, they will be 'Allreduce'd. If empty, Allreduce will not be
// applied cross modules.
tensorflow::gtl::optional<int64> all_reduce_id_;
absl::optional<int64> all_reduce_id_;
};

class HloAllToAllInstruction : public HloInstruction {
@ -508,7 +506,7 @@ class HloMapInstruction : public HloInstruction {

private:
bool IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const override;
const absl::optional<int64>& operand_idx) const override;
std::vector<string> ExtraAttributesToStringImpl(
const HloPrintOptions& options) const override;
bool IdenticalSlowPath(
@ -601,7 +599,7 @@ class HloConstantInstruction : public HloInstruction {

private:
bool IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const override;
const absl::optional<int64>& operand_idx) const override;
bool IdenticalSlowPath(
const HloInstruction& other,
const std::function<bool(const HloComputation*, const HloComputation*)>&
@ -752,7 +750,7 @@ class HloFusionInstruction : public HloInstruction {
bool add_output = false);

bool IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const override;
const absl::optional<int64>& operand_idx) const override;
std::vector<string> ExtraAttributesToStringImpl(
const HloPrintOptions& options) const override;
bool IdenticalSlowPath(
@ -781,7 +779,7 @@ class HloRngInstruction : public HloInstruction {

private:
bool IsElementwiseImpl(
const tensorflow::gtl::optional<int64>& operand_idx) const override;
const absl::optional<int64>& operand_idx) const override;
std::vector<string> ExtraAttributesToStringImpl(
const HloPrintOptions& options) const override;
bool IdenticalSlowPath(

@ -17,10 +17,10 @@ limitations under the License.

#include <unordered_map>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/regexp.h"

@ -16,10 +16,10 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_MATCHERS_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_MATCHERS_H_

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/core/lib/gtl/optional.h"

namespace xla {
namespace testing {
@ -120,8 +120,7 @@ class HloShapeAndLayoutMatcher
class HloShardingMatcher
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
explicit HloShardingMatcher(
const tensorflow::gtl::optional<HloSharding>& sharding)
explicit HloShardingMatcher(const absl::optional<HloSharding>& sharding)
: sharding_(sharding) {}

bool MatchAndExplain(const HloInstruction* instruction,
@ -129,7 +128,7 @@ class HloShardingMatcher
void DescribeTo(std::ostream* os) const override;

private:
tensorflow::gtl::optional<HloSharding> sharding_;
absl::optional<HloSharding> sharding_;
};

// Matches a Dot HLO instruction with specific LHS and RHS contracting
@ -337,7 +336,7 @@ inline ::testing::Matcher<const ::xla::HloInstruction*> Sharding(
// Verifies that no HloSharding is set for an HLO instruction.
inline ::testing::Matcher<const ::xla::HloInstruction*> NoSharding() {
return ::testing::MakeMatcher(
new ::xla::testing::HloShardingMatcher(tensorflow::gtl::nullopt));
new ::xla::testing::HloShardingMatcher(absl::nullopt));
}

inline ::testing::Matcher<const ::xla::HloInstruction*> Dot(

@ -18,11 +18,11 @@ limitations under the License.

#include <string>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/computation_layout.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla.pb.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/optional.h"

namespace xla {

@ -104,7 +104,7 @@ class HloModuleConfig {
private:
// If you add new members, be sure to update compilation_cache_key.

tensorflow::gtl::optional<ComputationLayout> entry_computation_layout_;
absl::optional<ComputationLayout> entry_computation_layout_;

// Whether this is a 'host module'.
bool is_host_module_ = false;

@ -271,15 +271,14 @@ int64 HloModuleGroupMetadata::GetModuleId(const HloModule* module) const {
LOG(FATAL) << "unknown module";
}

tensorflow::gtl::optional<int64> HloModuleGroupMetadata::GetInstructionDevice(
absl::optional<int64> HloModuleGroupMetadata::GetInstructionDevice(
const HloInstruction& instruction) const {
// The module group metadata can be created in both "single module, multiple
// devices" and "multiple modules, no explicit devices" fashions.
// The API returns an optional even though the current implementation always
// returns a device, to account for cases where we cannot guess a device.
// In such cases the VerifyChannelInstructions() will return proper errors.
tensorflow::gtl::optional<int64> device =
instruction.sharding_unique_device();
absl::optional<int64> device = instruction.sharding_unique_device();
if (!device) {
device = GetModuleId(instruction.parent()->parent());
}

@ -22,6 +22,7 @@ limitations under the License.
#include <unordered_set>
#include <vector>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
@ -29,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/types.h"

namespace xla {
@ -159,7 +159,7 @@ class HloModuleGroupMetadata {
// Retrieves the device an instruction is assigned to. Either from the
// sharding information, or from the ordinal of the module the instruction
// is in.
tensorflow::gtl::optional<int64> GetInstructionDevice(
absl::optional<int64> GetInstructionDevice(
const HloInstruction& instruction) const;

// Returns the number of modules for devices (excluding the host module).

@ -33,8 +33,8 @@ namespace xla {

namespace {

using ::absl::optional;
using ::tensorflow::StringPiece;
using ::tensorflow::gtl::optional;
using ::tensorflow::str_util::Join;
using ::tensorflow::str_util::Split;
using ::tensorflow::str_util::SplitAndParseAsInts;

@ -244,16 +244,16 @@ StatusOr<HloSharding> HloSharding::GetTupleSharding(const Shape& shape) const {
return Tuple(ShapeTree<HloSharding>(shape, *this));
}

tensorflow::gtl::optional<int64> HloSharding::UniqueDevice() const {
absl::optional<int64> HloSharding::UniqueDevice() const {
if (IsTuple()) {
if (tuple_elements_.empty()) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
tensorflow::gtl::optional<int64> unique_device;
absl::optional<int64> unique_device;
for (auto& tuple_sharding : tuple_elements_) {
auto device = tuple_sharding.UniqueDevice();
if (!device || (unique_device && *device != *unique_device)) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
unique_device = device;
}
@ -262,7 +262,7 @@ tensorflow::gtl::optional<int64> HloSharding::UniqueDevice() const {
if (!replicated_ && maximal_) {
return static_cast<int64>(*tile_assignment_.begin());
}
return tensorflow::gtl::nullopt;
return absl::nullopt;
}

int64 HloSharding::GetUniqueDevice() const {
@ -439,14 +439,13 @@ HloSharding HloSharding::GetSubSharding(const Shape& shape,
: sub_shape_tree.element(ShapeIndex({}));
}

tensorflow::gtl::optional<HloSharding> HloSharding::ExtractSingleSharding()
const {
absl::optional<HloSharding> HloSharding::ExtractSingleSharding() const {
if (!IsTuple()) {
return *this;
}
for (int64 i = 1; i < tuple_elements_.size(); ++i) {
if (tuple_elements_[0] != tuple_elements_[i]) {
return tensorflow::gtl::optional<HloSharding>();
return absl::optional<HloSharding>();
}
}
return tuple_elements_.front();

@ -151,7 +151,7 @@ class HloSharding {
// span a single device, the return value will be empty.
// In order for a sharding to span a single device, every leaf sharding must
// be maximal and not replicated, and the used device must match.
tensorflow::gtl::optional<int64> UniqueDevice() const;
absl::optional<int64> UniqueDevice() const;

// Retrieves the unique device or fails with a CHECK.
int64 GetUniqueDevice() const;
@ -182,7 +182,7 @@ class HloSharding {
// be returned. If it is a tuple, and all the tuple elements are common, the
// common element will be returned. Otherwise the optional will contain no
// value.
tensorflow::gtl::optional<HloSharding> ExtractSingleSharding() const;
absl::optional<HloSharding> ExtractSingleSharding() const;

bool operator==(const HloSharding& other) const {
return replicated_ == other.replicated_ && maximal_ == other.maximal_ &&

@ -14,13 +14,14 @@ limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/indexed_array_analysis.h"

#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/strings/strcat.h"

namespace xla {
@ -971,15 +972,15 @@ namespace {

// Returns the non-contracting non-batch dimension (as per `contracting_dims`
// and `batch_dims`) if there is exactly one, otherwise returns nullopt.
gtl::optional<int64> GetOnlyNonContractingNonBatchDim(
absl::optional<int64> GetOnlyNonContractingNonBatchDim(
int64 rank, ArraySlice<int64> contracting_dims,
ArraySlice<int64> batch_dims) {
gtl::optional<int64> result;
absl::optional<int64> result;
for (int64 dim = 0; dim < rank; dim++) {
if (!ArrayContains(contracting_dims, dim) &&
!ArrayContains(batch_dims, dim)) {
if (result.has_value()) {
return gtl::nullopt;
return absl::nullopt;
}
result = dim;
}
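
GetOnlyNonContractingNonBatchDim above is the "exactly one" idiom: the optional records the first match, and a second match collapses the answer back to nullopt, exactly as the comment before the function promises. A generic sketch of the same control flow (OnlyMatch and pred are hypothetical, for illustration only):

  #include "absl/types/optional.h"

  // Returns the unique index in [0, n) satisfying pred, or absl::nullopt if
  // there are zero or multiple matches.
  template <typename Pred>
  absl::optional<long> OnlyMatch(long n, Pred pred) {
    absl::optional<long> result;
    for (long i = 0; i < n; ++i) {
      if (pred(i)) {
        if (result.has_value()) return absl::nullopt;  // second match: ambiguous
        result = i;
      }
    }
    return result;  // still empty if nothing matched
  }
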
@ -999,7 +1000,7 @@ bool CanFoldDotIntoIndexedArray(
tensorflow::StringPiece tag,
Analysis::ScalarIndexedConstantArray* indexed_array,
ArraySlice<int64> contracting_dims, ArraySlice<int64> batch_dims) {
gtl::optional<int64> non_contracting_non_batch_dim =
absl::optional<int64> non_contracting_non_batch_dim =
GetOnlyNonContractingNonBatchDim(ShapeUtil::Rank(indexed_array->shape()),
contracting_dims, batch_dims);
if (!non_contracting_non_batch_dim.has_value()) {

@ -134,9 +134,7 @@ cc_library(
":llvm_util",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/core:lib",
"@llvm//:core",
@ -194,6 +192,7 @@ cc_library(
"//tensorflow/compiler/xla/service/gpu:parallel_loop_emitter",
"//tensorflow/compiler/xla/service/gpu:partition_assignment",
"//tensorflow/core:lib",
"@com_google_absl//absl/types:optional",
"@llvm//:core",
],
)

@ -55,10 +55,10 @@ Shape MergeDimensions(tensorflow::gtl::ArraySlice<size_t> segs,
}
} // namespace

tensorflow::gtl::optional<std::vector<int64> > FindTranspose021(
const Shape& a, const Shape& b) {
absl::optional<std::vector<int64> > FindTranspose021(const Shape& a,
const Shape& b) {
if (!ShapeUtil::CompatibleIgnoringElementType(a, b)) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}

std::vector<int64> perm(a.dimensions().size());
@ -88,7 +88,7 @@ tensorflow::gtl::optional<std::vector<int64> > FindTranspose021(
return dims_021;
}

return tensorflow::gtl::nullopt;
return absl::nullopt;
}

IrArray::Index GetUnreducedOutputIndex(

@ -36,8 +36,8 @@ namespace llvm_ir {

// If `b` is a 0-2-1 transpose of `a` in 0-1-2, return the dimensions for the
// reduced shape of `b` or the 0-2-1 shape.
tensorflow::gtl::optional<std::vector<int64> > FindTranspose021(const Shape& a,
const Shape& b);
absl::optional<std::vector<int64> > FindTranspose021(const Shape& a,
const Shape& b);

// Return the unreduced output index corresponding to the given reduced output
// index.
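
Callers of an optional-returning query like FindTranspose021 typically test engagement before touching the payload. A hedged sketch of such a caller (Transpose021Dims is a hypothetical stand-in for the real function, not part of this commit):

  #include <vector>

  #include "absl/types/optional.h"

  // Hypothetical query in the spirit of FindTranspose021; the real analysis
  // lives elsewhere, so this stub just reports "no transpose found".
  absl::optional<std::vector<long>> Transpose021Dims() { return absl::nullopt; }

  void MaybeEmitTiledTranspose() {
    // optional converts (explicitly) to bool in a condition, so the branch
    // body runs only when a permutation was actually found.
    if (absl::optional<std::vector<long>> dims = Transpose021Dims()) {
      // Safe to dereference here, e.g. (*dims)[0], (*dims)[1], (*dims)[2].
    }
  }
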

@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/llvm_ir/sort_util.h"

// IWYU pragma: no_include "llvm/IR/Intrinsics.gen.inc"
#include "absl/types/optional.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
@ -30,7 +31,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/types.h"

namespace xla {
@ -42,7 +42,7 @@ namespace {
void EmitCompareLoop(int64 dimension_to_sort, const IrArray::Index& keys_index,
const IrArray::Index& compare_keys_index,
const IrArray& keys_array,
const tensorflow::gtl::optional<IrArray>& values_array,
const absl::optional<IrArray>& values_array,
llvm::IRBuilder<>* b) {
// if (is_smaller_index &&
// compare_keys[dimension_to_sort] < dimension_to_sort_bound)
@ -87,7 +87,7 @@ void EmitCompareLoop(int64 dimension_to_sort, const IrArray::Index& keys_index,
} // namespace

Status EmitSortInPlace(int64 dimension_to_sort, const IrArray& keys_array,
const tensorflow::gtl::optional<IrArray>& values_array,
const absl::optional<IrArray>& values_array,
tensorflow::StringPiece name, llvm::Value* xor_mask,
llvm::IRBuilder<>* b,
const gpu::LaunchDimensions* launch_dimensions) {

@ -16,12 +16,12 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_SORT_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_SORT_UTIL_H_

#include "absl/types/optional.h"
#include "llvm/IR/Value.h"
#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/types.h"

namespace xla {
@ -31,7 +31,7 @@ namespace llvm_ir {
// implements the inner loop of BitonicSort. If 'launch_dimensions' is nullptr,
// the inner compare loop will not be parallelized.
Status EmitSortInPlace(int64 dimension_to_sort, const IrArray& keys_array,
const tensorflow::gtl::optional<IrArray>& values_array,
const absl::optional<IrArray>& values_array,
tensorflow::StringPiece name, llvm::Value* xor_mask,
llvm::IRBuilder<>* b,
const gpu::LaunchDimensions* launch_dimensions);

@ -73,7 +73,7 @@ namespace {
// If the parameter number is invalid for this computation, nullopt is
// returned. When the return value has_value(), nullptr will never be
// the held value.
tensorflow::gtl::optional<const OpMetadata*> ParameterMetadata(
absl::optional<const OpMetadata*> ParameterMetadata(
const XlaComputation& computation, int parameter_number) {
for (const HloComputationProto& comp : computation.proto().computations()) {
if (comp.id() == computation.proto().entry_computation_id()) {
@ -81,14 +81,14 @@ tensorflow::gtl::optional<const OpMetadata*> ParameterMetadata(
if (instr.opcode() == HloOpcodeString(HloOpcode::kParameter) &&
instr.parameter_number() == parameter_number) {
if (!instr.has_metadata()) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
return &instr.metadata();
}
}
}
}
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
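
ParameterMetadata above returns absl::optional<const OpMetadata*>, a type with two distinct "absent" states: a disengaged optional, and an engaged optional holding nullptr. The comment before the function pins down the contract (engaged implies non-null), which is what lets callers get away with a single check. A sketch of that contract (FindMetadata and Metadata are hypothetical):

  #include "absl/types/optional.h"

  struct Metadata {
    int line;
  };

  // Contract, as in ParameterMetadata: if the result has_value(), the held
  // pointer is never nullptr, so callers need only one emptiness check.
  absl::optional<const Metadata*> FindMetadata(const Metadata* m) {
    if (m == nullptr) return absl::nullopt;  // fold "null" into "absent"
    return m;
  }
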

ExecutionOptions CreateExecutionOptions(
@ -158,7 +158,7 @@ StatusOr<std::unique_ptr<Executable>> LocalService::CompileExecutable(
TF_RETURN_IF_ERROR(
ShapeUtil::ValidateShapeWithOptionalLayout(argument_shape));
if (!ShapeUtil::Compatible(argument_shape, program_shape.parameters(i))) {
tensorflow::gtl::optional<const OpMetadata*> metadata =
absl::optional<const OpMetadata*> metadata =
ParameterMetadata(computation, /*parameter_number=*/i);
auto metadata_string = [&metadata]() -> string {
if (!metadata.has_value()) {

@ -18,8 +18,8 @@ limitations under the License.

namespace xla {

using tensorflow::gtl::nullopt;
using tensorflow::gtl::optional;
using absl::nullopt;
using absl::optional;

// Finds and returns the non-constant operand in instr.
//

@ -16,8 +16,8 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_WHILE_LOOP_ANALYSIS_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_WHILE_LOOP_ANALYSIS_H_

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/lib/gtl/optional.h"

namespace xla {

@ -25,8 +25,8 @@ namespace xla {
// nullopt otherwise. max_value_returned limits the number of steps that are
// evaluated while trying to brute force a loop trip count, trip counts larger
// than max_value_returned result in nullopt.
tensorflow::gtl::optional<int64> ComputeWhileLoopTripCount(
HloInstruction *while_op, int64 max_value_returned = 128);
absl::optional<int64> ComputeWhileLoopTripCount(HloInstruction *while_op,
int64 max_value_returned = 128);

} // namespace xla
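
A hedged sketch of how a caller might consume ComputeWhileLoopTripCount's optional result: value_or collapses "unknown trip count" into a conservative default (UnrollFactor and its policy are illustrative, not part of this commit):

  #include "absl/types/optional.h"

  // Picks an unroll factor from an optional trip count; unknown or large
  // counts fall back to 1 (no unrolling).
  long UnrollFactor(absl::optional<long> trip_count) {
    long n = trip_count.value_or(0);  // 0 encodes "unknown" past this point
    return (n > 0 && n <= 8) ? n : 1;
  }
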

@ -14,17 +14,16 @@ limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/while_loop_simplifier.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/call_inliner.h"
#include "tensorflow/compiler/xla/service/while_loop_analysis.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"

namespace xla {

using tensorflow::gtl::nullopt;
using tensorflow::gtl::optional;
using absl::optional;

// Determines whether the given instruction is a send/recv node, or has a
// subcomputation which contains a send/recv node.

@ -22,6 +22,7 @@ limitations under the License.
#include <vector>

#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -30,7 +31,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/iterator_range.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"

@ -22,6 +22,7 @@ limitations under the License.
#include <utility>
#include <vector>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/overflow_util.h"
@ -32,7 +33,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/iterator_range.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
@ -1460,7 +1460,7 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape,
check_input_unit_indices(output_shape, input_shape);
}

/* static */ tensorflow::gtl::optional<Shape> ShapeUtil::AlignLayouts(
/* static */ absl::optional<Shape> ShapeUtil::AlignLayouts(
const Shape& input_shape, const Shape& output_shape) {
CHECK(IsArray(input_shape));
CHECK(IsArray(output_shape));
@ -1499,7 +1499,7 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape,
if (input_dimension_product < output_dimension_product ||
j == output_rank) {
if (i == input_rank) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
dimension_to_alignment_index[i] = alignment.size() - 1;
input_dimension_product *= input_shape.dimensions(i);
@ -1510,7 +1510,7 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape,
}
}
if (input_dimension_product != output_dimension_product) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
// We also need to store an end element so that we know where the last
// alignment part ends.
@ -1554,7 +1554,7 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape,
for (int64 j = 0; j < num_non_trivial_dimensions_in_alignment_part;
++i, ++j) {
if (i == input_rank) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
// Skip trivial dimensions with a bound of 1.
if (input_shape.dimensions(input_dimension_numbers[i]) == 1) {
@ -1567,7 +1567,7 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape,
if (dimension_to_alignment_index[input_dimension_numbers[i]] !=
current_alignment_index ||
input_dimension_numbers[i] > current_dimension_number) {
return tensorflow::gtl::nullopt;
return absl::nullopt;
}
current_dimension_number = input_dimension_numbers[i];
}

@ -23,6 +23,7 @@ limitations under the License.
#include <string>

#include "absl/container/inlined_vector.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -32,7 +33,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
@ -597,8 +597,8 @@ class ShapeUtil {
// layout). The layout of 'input_shape' is kept fixed. Returns
// 'output_shape_with_layout' if such a layout can be found, and an error
// otherwise.
static tensorflow::gtl::optional<Shape> AlignLayouts(
const Shape& input_shape, const Shape& output_shape);
static absl::optional<Shape> AlignLayouts(const Shape& input_shape,
const Shape& output_shape);

// Returns a shape with the given dimension deleted.
// For example:
@ -737,13 +737,13 @@ class ShapeUtil {
int64 n = -1;
std::vector<int64> indexes(base.begin(), base.end());
const int kNumThreads = tensorflow::port::NumSchedulableCPUs();
tensorflow::gtl::optional<tensorflow::thread::ThreadPool> pool;
absl::optional<tensorflow::thread::ThreadPool> pool;
if (parallel) {
pool.emplace(tensorflow::Env::Default(), "foreach", kNumThreads);
}

while (n < rank) {
if (pool != tensorflow::gtl::nullopt) {
if (pool != absl::nullopt) {
pool->Schedule(
[indexes, &visitor_function] { visitor_function(indexes); });
} else {
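
The pool variable above shows the other big use of optional in this commit: deferring construction of a type with no default constructor. The optional starts empty, emplace builds the ThreadPool in place only when parallelism is requested, and the pool != absl::nullopt test guards its use. A sketch with a stand-in type (Expensive is hypothetical, standing in for tensorflow::thread::ThreadPool):

  #include "absl/types/optional.h"

  struct Expensive {
    explicit Expensive(int size) : size(size) {}
    int size;
  };

  void Run(bool parallel) {
    absl::optional<Expensive> pool;  // empty; nothing constructed yet
    if (parallel) {
      pool.emplace(/*size=*/8);      // construct in place, on demand
    }
    if (pool != absl::nullopt) {     // same guard as the loop above
      // use pool->size ...
    }
  }  // if engaged, the Expensive is destroyed here
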

@ -98,6 +98,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:test",
"@com_google_absl//absl/types:optional",
],
)

@ -128,6 +129,7 @@ cc_library(
"//tensorflow/core:test",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:optional",
],
)

@ -719,10 +721,8 @@ xla_test(
deps = [
":client_library_test_base",
":hlo_test_base",
"//tensorflow/compiler/xla:execution_options_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:test",
"//tensorflow/compiler/xla/client:xla_builder",
"//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
],
@ -1892,7 +1892,6 @@ xla_test(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla/client:local_client",
"//tensorflow/compiler/xla/client:xla_builder",
"//tensorflow/compiler/xla/client:xla_computation",
"//tensorflow/compiler/xla/service:local_service",
"//tensorflow/compiler/xla/service:shaped_buffer",
"//tensorflow/compiler/xla/tests:literal_test_util",
@ -1900,6 +1899,7 @@ xla_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"//tensorflow/core:test",
"@com_google_absl//absl/types:optional",
],
)

@ -2067,6 +2067,7 @@ xla_test(
"//tensorflow/compiler/xla/tests:literal_test_util",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/types:optional",
],
)

@ -25,7 +25,7 @@ limitations under the License.
namespace xla {
namespace {

using tensorflow::gtl::nullopt;
using absl::nullopt;

class GatherOperationTest : public HloTestBase {
protected:

@ -42,9 +42,9 @@ namespace xla {

namespace {

using absl::optional;
using tensorflow::StringPiece;
using tensorflow::gtl::ArraySlice;
using tensorflow::gtl::optional;

constexpr char kInterpreter[] = "interpreter";

@ -239,8 +239,7 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
}

::testing::AssertionResult HloTestBase::RunAndCompare(
const StringPiece hlo_string,
const tensorflow::gtl::optional<ErrorSpec>& error,
const StringPiece hlo_string, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor) {
auto module_or_status =
HloRunner::CreateModuleFromString(hlo_string, GetDebugOptionsForTest());
@ -277,7 +276,7 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
}

::testing::AssertionResult HloTestBase::RunAndCompareFromFile(
const string& filename, const tensorflow::gtl::optional<ErrorSpec>& error,
const string& filename, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor) {
auto module_or_status =
HloRunner::ReadModuleFromHloTextFile(filename, GetDebugOptionsForTest());
@ -290,8 +289,7 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
}

::testing::AssertionResult HloTestBase::RunAndCompareNoHloPasses(
const StringPiece hlo_string,
const tensorflow::gtl::optional<ErrorSpec>& error,
const StringPiece hlo_string, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor) {
auto module_or_status =
HloRunner::CreateModuleFromString(hlo_string, GetDebugOptionsForTest());
@ -305,7 +303,7 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
}

::testing::AssertionResult HloTestBase::RunAndCompareNoHloPassesFromFile(
const string& filename, const tensorflow::gtl::optional<ErrorSpec>& error,
const string& filename, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor) {
auto module_or_status =
HloRunner::ReadModuleFromHloTextFile(filename, GetDebugOptionsForTest());

@ -20,6 +20,7 @@ limitations under the License.
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/computation_layout.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
@ -32,7 +33,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/platform/test.h"

@ -139,7 +139,7 @@ class HloTestBase : public ::testing::Test {
::testing::AssertionResult RunAndCompare(
std::unique_ptr<HloModule> module,
const tensorflow::gtl::ArraySlice<Literal*> arguments,
const tensorflow::gtl::optional<ErrorSpec>& error,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;

@ -148,22 +148,20 @@ class HloTestBase : public ::testing::Test {
::testing::AssertionResult RunAndCompareNoHloPasses(
std::unique_ptr<HloModule> module,
const tensorflow::gtl::ArraySlice<Literal*> arguments,
const tensorflow::gtl::optional<ErrorSpec>& error,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;

// Executes an hlo module with fake inputs and compares the results.
::testing::AssertionResult RunAndCompare(
std::unique_ptr<HloModule> module,
const tensorflow::gtl::optional<ErrorSpec>& error,
std::unique_ptr<HloModule> module, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;

// Same as above, except that the module will be executed without Hlo
// optimization.
::testing::AssertionResult RunAndCompareNoHloPasses(
std::unique_ptr<HloModule> module,
const tensorflow::gtl::optional<ErrorSpec>& error,
std::unique_ptr<HloModule> module, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;

@ -172,22 +170,22 @@ class HloTestBase : public ::testing::Test {
// or loaded from a file.
::testing::AssertionResult RunAndCompare(
const tensorflow::StringPiece hlo_string,
const tensorflow::gtl::optional<ErrorSpec>& error,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;
::testing::AssertionResult Run(const tensorflow::StringPiece hlo_string)
TF_MUST_USE_RESULT;
::testing::AssertionResult RunAndCompareFromFile(
const string& filename, const tensorflow::gtl::optional<ErrorSpec>& error,
const string& filename, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;
::testing::AssertionResult RunAndCompareNoHloPasses(
const tensorflow::StringPiece hlo_string,
const tensorflow::gtl::optional<ErrorSpec>& error,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;
::testing::AssertionResult RunAndCompareNoHloPassesFromFile(
const string& filename, const tensorflow::gtl::optional<ErrorSpec>& error,
const string& filename, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;

@ -264,7 +262,7 @@ class HloTestBase : public ::testing::Test {
StatusOr<::testing::AssertionResult> RunAndCompareInternal(
std::unique_ptr<HloModule> module,
const tensorflow::gtl::ArraySlice<Literal*> arguments,
const tensorflow::gtl::optional<ErrorSpec>& error, bool run_hlo_passes,
const absl::optional<ErrorSpec>& error, bool run_hlo_passes,
const std::function<void(HloModule*)>& reference_preprocessor);
};

@ -94,7 +94,7 @@ void OnMiscompare(const LiteralSlice& expected, const LiteralSlice& actual,

/* static */ ::testing::AssertionResult LiteralTestUtil::NearOrEqual(
const LiteralSlice& expected, const LiteralSlice& actual,
const tensorflow::gtl::optional<ErrorSpec>& error) {
const absl::optional<ErrorSpec>& error) {
if (error.has_value()) {
VLOG(1) << "Expects near";
return StatusToAssertion(literal_comparison::Near(

@ -21,6 +21,7 @@ limitations under the License.
#include <random>
#include <string>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
@ -33,7 +34,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
@ -146,7 +146,7 @@ class LiteralTestUtil {
// will be compared recursively.
static ::testing::AssertionResult NearOrEqual(
const LiteralSlice& expected, const LiteralSlice& actual,
const tensorflow::gtl::optional<ErrorSpec>& error) TF_MUST_USE_RESULT;
const absl::optional<ErrorSpec>& error) TF_MUST_USE_RESULT;

private:
TF_DISALLOW_COPY_AND_ASSIGN(LiteralTestUtil);

@ -15,6 +15,7 @@ limitations under the License.

#include <memory>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/literal.h"
@ -24,7 +25,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
#include "tensorflow/compiler/xla/tests/local_client_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"

@ -53,7 +53,7 @@ XLA_TEST_F(LocalClientAllocationTest, AddVectors) {
// deallocation happen on the right allocator.
ExecutableRunOptions options;
options.set_allocator(allocator);
tensorflow::gtl::optional<ScopedShapedBuffer> result =
absl::optional<ScopedShapedBuffer> result =
ExecuteLocallyOrDie(builder.Build().ValueOrDie(), {},
DefaultExecutableBuildOptions(), options);

@ -1449,7 +1449,7 @@ ENTRY reduce-window-identity {
}

)";
EXPECT_TRUE(RunAndCompare(hlo_string, tensorflow::gtl::nullopt));
EXPECT_TRUE(RunAndCompare(hlo_string, absl::nullopt));
}

XLA_TEST_F(HloTestBase, ReduceWindowS32) {
@ -1468,7 +1468,7 @@ ENTRY %reduce-window (parameter.0: s32[81,8], parameter.1: s32[]) -> s32[82,8] {
}

)";
EXPECT_TRUE(RunAndCompare(hlo_string, tensorflow::gtl::nullopt));
EXPECT_TRUE(RunAndCompare(hlo_string, absl::nullopt));
}

XLA_TEST_F(HloTestBase, ReduceWindowF16) {
@ -1487,7 +1487,7 @@ ENTRY %reduce-window (parameter.0: f16[81,8], parameter.1: f16[]) -> f16[82,8] {
}

)";
EXPECT_TRUE(RunAndCompare(hlo_string, tensorflow::gtl::nullopt));
EXPECT_TRUE(RunAndCompare(hlo_string, absl::nullopt));
}

} // namespace
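
In the tests above, absl::nullopt is passed directly as the ErrorSpec argument; per the NearOrEqual logic shown earlier, an empty error spec selects the exact-comparison path while an engaged one selects the tolerance-based path. A schematic test in the same style (the HLO text is elided; this is a sketch, not a test from this commit):

  XLA_TEST_F(HloTestBase, ExactVersusNear) {
    const string hlo_string = "...";  // some HLO module text (elided)
    // Empty error spec: results must match the reference exactly.
    EXPECT_TRUE(RunAndCompare(hlo_string, absl::nullopt));
    // Engaged error spec: results may differ within the given tolerance.
    EXPECT_TRUE(RunAndCompare(hlo_string, ErrorSpec(1e-5)));
  }
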

@ -19,18 +19,18 @@ limitations under the License.
#include <string>
#include <vector>

#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/types.h"

namespace xla {
namespace {

using tensorflow::gtl::nullopt;
using absl::nullopt;

class SampleTextTest : public HloTestBase {};

@ -23,7 +23,7 @@ limitations under the License.
namespace xla {
namespace {

using tensorflow::gtl::nullopt;
using absl::nullopt;

class ScatterTest : public HloTestBase {
protected:

@ -160,7 +160,7 @@ StatusOr<Literal> ReplayComputation(const HloSnapshot& module,
// concurrent infeed occur via the fake_infeed_shape, or when
// --generate_fake_infeed is passed and there exists an infeed operation in
// the HloSnapshot.
tensorflow::gtl::optional<tensorflow::thread::ThreadPool> pool;
absl::optional<tensorflow::thread::ThreadPool> pool;
std::unique_ptr<Literal> data;
if (provide_infeed) {
data = std::move(MakeFakeLiteral(infeed_shape)).ValueOrDie();
@ -196,7 +196,7 @@ StatusOr<Literal> ReplayComputation(const HloSnapshot& module,
StreamExecutorMemoryAllocator allocator(
client->platform(),
{client->platform()->ExecutorForDevice(0).ValueOrDie()});
tensorflow::gtl::optional<ScopedShapedBuffer> result;
absl::optional<ScopedShapedBuffer> result;
for (int i = 0; i < opts.num_runs; ++i) {
// If xla_hlo_profile is enabled, print a noisy message before the last run,
// making it easier to separate this profile from the others in the logspam.