[XLA] Use absl::make_unique instead of xla::MakeUnique.

Same for WrapUnique.

PiperOrigin-RevId: 209531124
Justin Lebar 2018-08-20 20:20:14 -07:00 committed by TensorFlower Gardener
parent 49115abfd3
commit e924d67bff
164 changed files with 978 additions and 797 deletions
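For illustration, a minimal C++ sketch of the pattern this commit applies mechanically across the tree; Widget, MakeWidget, and AdoptWidget are hypothetical names, and only the absl::make_unique / absl::WrapUnique calls and the absl/memory include reflect the actual change.

// Illustrative sketch only; not part of the diff below.
#include <memory>
#include <string>

#include "absl/memory/memory.h"

struct Widget {
  explicit Widget(std::string name) : name(std::move(name)) {}
  std::string name;
};

std::unique_ptr<Widget> MakeWidget() {
  // Was: xla::MakeUnique<Widget>("w") (a shim over tensorflow::MakeUnique).
  return absl::make_unique<Widget>("w");
}

std::unique_ptr<Widget> AdoptWidget(Widget* raw) {
  // Was: xla::WrapUnique(raw); absl::WrapUnique likewise takes ownership of raw.
  return absl::WrapUnique(raw);
}

The accompanying BUILD edits below add "@com_google_absl//absl/memory" to each target's deps so that absl/memory/memory.h resolves, and the source edits swap the include of tensorflow/compiler/xla/ptr_util.h for absl/memory/memory.h.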

View File

@ -55,6 +55,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
"@com_google_absl//absl/memory",
],
)
@ -193,6 +194,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@llvm//:core",
"@llvm//:support",
"@llvm//:target",

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/aot/embedded_protocol_buffers.h"
#include "tensorflow/compiler/tf2xla/cpu_function_runtime.h"
#include "tensorflow/compiler/tf2xla/str_util.h"
@ -617,7 +618,7 @@ Status GenerateMetadata(const CodegenOpts& opts,
if (opts.gen_program_shape) {
program_shape =
tensorflow::MakeUnique<xla::ProgramShape>(compile_result.program_shape);
absl::make_unique<xla::ProgramShape>(compile_result.program_shape);
// The parameter names are currently meaningless, and redundant with the
// rest of our metadata, so clear them out to avoid confusion and save
// space.

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
@ -27,7 +28,6 @@ limitations under the License.
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "tensorflow/compiler/tf2xla/str_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
#include "tensorflow/compiler/xla/util.h"
@ -105,7 +105,7 @@ GetTargetMachineFromTriple(StringPiece target_triple) {
error.c_str());
}
return WrapUnique(target->createTargetMachine(
return absl::WrapUnique(target->createTargetMachine(
normalized_triple, /*CPU=*/"",
/*Features=*/"", llvm::TargetOptions(), llvm::None));
}
@ -118,7 +118,7 @@ StatusOr<EmbeddedProtocolBuffers> CreateEmbeddedProtocolBuffers(
llvm::LLVMContext llvm_context;
std::unique_ptr<llvm::Module> module_with_serialized_proto =
MakeUnique<llvm::Module>("embedded_data_module", llvm_context);
absl::make_unique<llvm::Module>("embedded_data_module", llvm_context);
EmbeddedProtocolBuffers result;

View File

@ -128,11 +128,11 @@ cc_library(
"//tensorflow/compiler/tf2xla:common",
"//tensorflow/compiler/xla/client:local_client",
"//tensorflow/compiler/xla/service:shaped_buffer",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
],
)
@ -191,6 +191,7 @@ cc_library(
"//tensorflow/core/kernels/data:generator_dataset_op",
"//tensorflow/core/kernels/data:iterator_ops",
"//tensorflow/core/kernels/data:prefetch_dataset_op",
"@com_google_absl//absl/memory",
],
)
@ -235,6 +236,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/kernels:variable_ops",
"@com_google_absl//absl/memory",
],
)
@ -283,6 +285,7 @@ cc_library(
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"@com_google_absl//absl/memory",
],
alwayslink = 1,
)
@ -303,6 +306,7 @@ tf_cc_test(
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
"@com_google_absl//absl/memory",
],
)

View File

@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/jit/create_xla_launch_op.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/kernels/xla_launch_op.h"
#include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
@ -223,8 +224,8 @@ Status CreateXlaLaunchOp(FunctionLibraryRuntime* flr, const NodeDef& node_def,
&fbody->fdef.signature(), flr, fbody->arg_types, input_memory_types,
fbody->ret_types, output_memory_types, flr->graph_def_version(), &s);
*kernel = MakeUnique<XlaLocalLaunchBase>(&construction, constant_arg_indices,
resource_arg_indices, function);
*kernel = absl::make_unique<XlaLocalLaunchBase>(
&construction, constant_arg_indices, resource_arg_indices, function);
return s;
}

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/jit/create_xla_launch_op.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function_testlib.h"
@ -65,11 +66,11 @@ class CreateXlaLaunchOpTest : public ::testing::Test {
for (const auto& fdef : flib) {
*(proto.add_function()) = fdef;
}
lib_def_ =
MakeUnique<FunctionLibraryDefinition>(OpRegistry::Global(), proto);
lib_def_ = absl::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), proto);
OptimizerOptions opts;
device_mgr_ = MakeUnique<DeviceMgr>(devices_);
pflr_ = MakeUnique<ProcessFunctionLibraryRuntime>(
device_mgr_ = absl::make_unique<DeviceMgr>(devices_);
pflr_ = absl::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), TF_GRAPH_DEF_VERSION, lib_def_.get(),
opts, /*default_thread_pool=*/nullptr, /*cluster_flr=*/nullptr);
flr_ = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0");

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <stdlib.h>
#include <unordered_set>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/xla_compile_on_demand_op.h"
#include "tensorflow/compiler/jit/xla_device_context.h"
@ -101,7 +102,7 @@ XlaDeviceAllocator* XlaDeviceAllocatorState::GetOrCreateXlaDeviceAllocator(
}
std::unique_ptr<XlaDeviceAllocator> alloc =
xla::MakeUnique<XlaDeviceAllocator>();
absl::make_unique<XlaDeviceAllocator>();
XlaDeviceAllocator* alloc_ptr = alloc.get();
state.allocators_[{backend, device_ordinal}] = std::move(alloc);
return alloc_ptr;
@ -327,7 +328,7 @@ xla::StatusOr<XlaDeviceContext*> XlaDevice::GetDeviceContextLocked() {
// to those methods; see the bug for details. Our only saving grace at the
// moment is that this race doesn't seem to occur in practice.
if (use_gpu_device_info_) {
auto gpu_device_info = MakeUnique<GpuDeviceInfo>();
auto gpu_device_info = absl::make_unique<GpuDeviceInfo>();
gpu_device_info->stream = stream_.get();
gpu_device_info->default_context = device_context_;
set_tensorflow_gpu_device_info(gpu_device_info.get());

View File

@ -17,6 +17,7 @@ limitations under the License.
#include <memory>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
@ -175,7 +176,7 @@ void XlaComputationLaunchContext::PopulateInputs(
<< " not the same as on-host shape "
<< xla::ShapeUtil::HumanStringWithLayout(shape);
se::DeviceMemoryBase dmem = XlaTensor::DeviceMemoryFromTensor(*t);
arg_buffers_[i] = xla::MakeUnique<ShapedBuffer>(
arg_buffers_[i] = absl::make_unique<ShapedBuffer>(
/*on_host_shape=*/shape, /*on_device_shape=*/shape,
client_->platform(), client_->default_device_ordinal());
arg_buffers_[i]->set_buffer(dmem, /*index=*/{});

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <memory>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/core/framework/allocator.h"
@ -70,7 +71,7 @@ class XlaTensor {
// Mutates the XlaTensor to set the ShapedBuffer.
void set_shaped_buffer(xla::ScopedShapedBuffer shaped_buffer) {
shaped_buffer_ =
xla::MakeUnique<xla::ScopedShapedBuffer>(std::move(shaped_buffer));
absl::make_unique<xla::ScopedShapedBuffer>(std::move(shaped_buffer));
}
// Some tensors on the device may have known values on the host. We use these

View File

@ -211,6 +211,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
alwayslink = 1,
)
@ -475,12 +476,12 @@ cc_library(
"//tensorflow/compiler/tf2xla:dump_graph",
"//tensorflow/compiler/tf2xla/ops:xla_ops",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -501,12 +502,12 @@ cc_library(
"//tensorflow/compiler/tf2xla:dump_graph",
"//tensorflow/compiler/tf2xla/ops:xla_ops",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -525,12 +526,12 @@ cc_library(
"//tensorflow/compiler/tf2xla:dump_graph",
"//tensorflow/compiler/tf2xla/ops:xla_ops",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)

View File

@ -21,11 +21,11 @@ limitations under the License.
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/jit/union_find.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
@ -399,7 +399,8 @@ Status Conditional::BuildArgumentNodes() {
Status Conditional::ExtractBodies(Graph* graph) {
VLOG(2) << "Extracting bodies for " << name();
for (auto b : {BranchType::kElseBranch, BranchType::kThenBranch}) {
bodies_[static_cast<int>(b)] = xla::MakeUnique<Graph>(graph->op_registry());
bodies_[static_cast<int>(b)] =
absl::make_unique<Graph>(graph->op_registry());
}
auto find_branch = [&](const Edge* e) {

View File

@ -21,13 +21,13 @@ limitations under the License.
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/jit/union_find.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/functionalize_while.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"

View File

@ -21,11 +21,11 @@ limitations under the License.
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/jit/union_find.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
@ -143,7 +143,7 @@ StatusOr<Node*> BuildArgNode(Graph* graph, DataType type, int index) {
Status BuildLoopCondition(const Graph& graph, Frame* frame,
std::unique_ptr<Graph>* cond_output) {
VLOG(2) << "Building loop condition for " << frame->name;
*cond_output = xla::MakeUnique<Graph>(graph.op_registry());
*cond_output = absl::make_unique<Graph>(graph.op_registry());
Graph* output = cond_output->get();
// Map from nodes in the original graph to the condition graph.
@ -180,7 +180,7 @@ Status BuildLoopBody(const Graph& graph, Frame* frame,
DataTypeVector* arg_types,
std::unique_ptr<Graph>* body_output) {
VLOG(2) << "Building loop body for " << frame->name;
*body_output = xla::MakeUnique<Graph>(graph.op_registry());
*body_output = absl::make_unique<Graph>(graph.op_registry());
Graph* output = body_output->get();
// Map from nodes in the original graph to the condition graph.

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <numeric>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow.h"
#include "tensorflow/compiler/tf2xla/graph_compiler.h"
@ -310,7 +311,7 @@ Status ExecuteGraph(XlaContext* xla_context, std::unique_ptr<Graph> graph,
// unique_ptr so we can capture the cleanup status in the end.
xla_context->Ref();
Status status;
auto step_container = xla::MakeUnique<ScopedStepContainer>(
auto step_container = absl::make_unique<ScopedStepContainer>(
step_id, [&status, device](const string& name) {
status = device->resource_manager()->Cleanup(name);
});

View File

@ -161,7 +161,6 @@ cc_library(
"iterator_util.h",
"map_util.h",
"overflow_util.h",
"ptr_util.h",
"util.h",
],
visibility = ["//visibility:public"],
@ -172,8 +171,8 @@ cc_library(
":types",
":xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:ptr_util",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)
@ -211,6 +210,7 @@ tf_cc_test(
":test",
":util",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)
@ -298,6 +298,7 @@ cc_library(
":util",
":xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -316,6 +317,7 @@ tf_cc_test(
"//tensorflow/core:lib",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)
@ -336,6 +338,7 @@ cc_library(
":util",
":xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -406,8 +409,8 @@ cc_library(
deps = [
":array",
":types",
":util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -490,6 +493,7 @@ cc_library(
":util",
":xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -522,6 +526,7 @@ cc_library(
":xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
],
)
@ -577,10 +582,10 @@ cc_library(
deps = [
":shape_util",
":status_macros",
":util",
":xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
],
)
@ -594,6 +599,7 @@ tf_cc_test(
":xla_data_proto",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)
@ -643,6 +649,7 @@ cc_library(
"//tensorflow/compiler/xla/service:shape_inference",
"//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_matmul",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -661,6 +668,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/client:padding",
"//tensorflow/compiler/xla/tests:literal_test_util",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)

View File

@ -24,8 +24,8 @@ limitations under the License.
#include <random>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/array.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/lib/strings/str_util.h"
@ -101,7 +101,7 @@ class Array2D : public Array<T> {
template <typename NativeT = float>
std::unique_ptr<Array2D<NativeT>> MakeLinspaceArray2D(double from, double to,
int64 n1, int64 n2) {
auto array = MakeUnique<Array2D<NativeT>>(n1, n2);
auto array = absl::make_unique<Array2D<NativeT>>(n1, n2);
int64 count = n1 * n2;
NativeT step =
static_cast<NativeT>((count > 1) ? (to - from) / (count - 1) : 0);

View File

@ -71,12 +71,12 @@ cc_library(
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla:xla_proto",
"//tensorflow/compiler/xla/legacy_flags:debug_options_flags",
"//tensorflow/compiler/xla/service:hlo_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -104,7 +104,6 @@ cc_library(
"//tensorflow/compiler/xla:executable_run_options",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service:backend",
"//tensorflow/compiler/xla/service:compiler",
@ -117,6 +116,7 @@ cc_library(
"//tensorflow/compiler/xla/service:stream_pool",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@llvm//:support",
],
)
@ -130,11 +130,11 @@ cc_library(
":xla_computation",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service:compile_only_service",
"//tensorflow/compiler/xla/service:compiler",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@llvm//:support",
],
)
@ -159,6 +159,7 @@ cc_library(
"//tensorflow/compiler/xla/service:platform_util",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -186,6 +187,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service:hlo_proto",
"@com_google_absl//absl/memory",
],
)
@ -212,6 +214,7 @@ cc_library(
"//tensorflow/compiler/xla/service:shape_inference",
"//tensorflow/core:lib",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)

View File

@ -18,11 +18,11 @@ limitations under the License.
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/errors.h"
@ -89,7 +89,7 @@ StatusOr<std::unique_ptr<GlobalData>> Client::TransferToServer(
"TransferToServer request");
}
return MakeUnique<GlobalData>(stub_, response.data());
return absl::make_unique<GlobalData>(stub_, response.data());
}
Status Client::TransferToInfeed(const LiteralSlice& literal, int64 replica_id,
@ -248,7 +248,7 @@ StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
}
}
return MakeUnique<GlobalData>(stub_, response.output());
return absl::make_unique<GlobalData>(stub_, response.output());
}
StatusOr<std::vector<std::unique_ptr<GlobalData>>> Client::ExecuteParallel(
@ -278,7 +278,7 @@ StatusOr<std::vector<std::unique_ptr<GlobalData>>> Client::ExecuteParallel(
std::vector<std::unique_ptr<GlobalData>> outputs;
for (size_t i = 0; i < computations.size(); ++i) {
outputs.push_back(
MakeUnique<GlobalData>(stub_, response.responses(i).output()));
absl::make_unique<GlobalData>(stub_, response.responses(i).output()));
if (computations[i].execution_profile != nullptr) {
*computations[i].execution_profile = response.responses(i).profile();
}
@ -340,7 +340,7 @@ StatusOr<std::vector<std::unique_ptr<GlobalData>>> Client::DeconstructTuple(
std::vector<std::unique_ptr<GlobalData>> handles;
for (auto& handle : response.element_handles()) {
handles.push_back(MakeUnique<GlobalData>(stub_, handle));
handles.push_back(absl::make_unique<GlobalData>(stub_, handle));
}
return std::move(handles);
}
@ -369,7 +369,7 @@ StatusOr<ComputationStats> Client::GetComputationStats(
StatusOr<std::unique_ptr<ProgramShape>> Client::GetComputationShape(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(const auto& result, computation.GetProgramShape());
return MakeUnique<ProgramShape>(result);
return absl::make_unique<ProgramShape>(result);
}
StatusOr<Shape> Client::GetShape(const GlobalData& data) {

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/client_library.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/platform_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -94,10 +95,10 @@ ClientLibrary::~ClientLibrary() = default;
service_options.set_intra_op_parallelism_threads(
options.intra_op_parallelism_threads());
auto instance = MakeUnique<LocalInstance>();
auto instance = absl::make_unique<LocalInstance>();
TF_ASSIGN_OR_RETURN(instance->service,
LocalService::NewService(service_options));
instance->client = MakeUnique<LocalClient>(instance->service.get());
instance->client = absl::make_unique<LocalClient>(instance->service.get());
LocalClient* cl = instance->client.get();
client_library.local_instances_.insert(
@ -134,10 +135,11 @@ ClientLibrary::GetOrCreateCompileOnlyClient(se::Platform* platform) {
return it->second->client.get();
}
auto instance = MakeUnique<CompileOnlyInstance>();
auto instance = absl::make_unique<CompileOnlyInstance>();
TF_ASSIGN_OR_RETURN(instance->service,
CompileOnlyService::NewService(platform));
instance->client = MakeUnique<CompileOnlyClient>(instance->service.get());
instance->client =
absl::make_unique<CompileOnlyClient>(instance->service.get());
CompileOnlyClient* cl = instance->client.get();
client_library.compile_only_instances_.insert(

View File

@ -15,8 +15,8 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/compile_only_client.h"
#include "absl/memory/memory.h"
#include "llvm/ADT/Triple.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
namespace xla {

View File

@ -17,9 +17,9 @@ limitations under the License.
#include <utility>
#include "absl/memory/memory.h"
#include "llvm/ADT/Triple.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/service_executable_run_options.h"
#include "tensorflow/compiler/xla/service/source_map_util.h"
@ -257,9 +257,9 @@ StatusOr<std::unique_ptr<LocalExecutable>> LocalClient::Compile(
TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> executable,
local_service_->CompileExecutable(
computation, argument_layouts, updated_options));
return WrapUnique(new LocalExecutable(std::move(executable),
local_service_->mutable_backend(),
updated_options));
return absl::WrapUnique(new LocalExecutable(std::move(executable),
local_service_->mutable_backend(),
updated_options));
}
StatusOr<ScopedShapedBuffer> LocalClient::LiteralToShapedBuffer(

View File

@ -22,6 +22,7 @@ limitations under the License.
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/client/sharding_builder.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
@ -2297,7 +2298,7 @@ StatusOr<XlaComputation> XlaBuilder::BuildConstantSubGraph(
std::unique_ptr<XlaBuilder> XlaBuilder::CreateSubBuilder(
const string& computation_name) {
auto sub_builder = MakeUnique<XlaBuilder>(computation_name);
auto sub_builder = absl::make_unique<XlaBuilder>(computation_name);
sub_builder->parent_builder_ = this;
sub_builder->die_immediately_on_error_ = this->die_immediately_on_error_;
return sub_builder;

View File

@ -17,7 +17,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/util.h"
@ -32,7 +32,7 @@ StatusOr<std::unique_ptr<HloSnapshot>> XlaComputation::Snapshot() const {
if (IsNull()) {
return InvalidArgument("Computation is invalid.");
}
auto session = MakeUnique<HloSnapshot>();
auto session = absl::make_unique<HloSnapshot>();
*session->mutable_hlo()->mutable_hlo_module() = proto_;
return std::move(session);
}

View File

@ -18,7 +18,7 @@ limitations under the License.
#include <algorithm>
#include <list>
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/test.h"
namespace xla {
@ -27,7 +27,7 @@ namespace {
TEST(UnwrappingIteratorTest, Simple) {
std::vector<std::unique_ptr<int>> v;
for (int i = 0; i < 3; ++i) {
v.push_back(MakeUnique<int>(i));
v.push_back(absl::make_unique<int>(i));
}
int i = 0;
for (auto iter = MakeUnwrappingIterator(v.begin());
@ -51,7 +51,7 @@ TEST(UnwrappingIteratorTest, PostincrementOperator) {
TEST(UnwrappingIteratorTest, StdFind) {
std::list<std::unique_ptr<int>> l;
for (int i = 0; i < 3; ++i) {
l.push_back(MakeUnique<int>(i));
l.push_back(absl::make_unique<int>(i));
}
EXPECT_EQ(l.begin()->get(),
*std::find(MakeUnwrappingIterator(l.begin()),

View File

@ -22,6 +22,7 @@ limitations under the License.
#include <numeric>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -134,7 +135,7 @@ void Literal::SetPiece(const Shape& shape, Piece* piece, bool allocate_arrays) {
Literal::Literal(const Shape& shape, bool allocate_arrays)
: MutableLiteralBase() {
shape_ = MakeUnique<Shape>(shape);
shape_ = absl::make_unique<Shape>(shape);
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
@ -175,7 +176,7 @@ Literal& Literal::operator=(Literal&& other) {
}
std::unique_ptr<Literal> LiteralBase::CreateFromShape(const Shape& shape) {
auto literal = MakeUnique<Literal>(shape);
auto literal = absl::make_unique<Literal>(shape);
literal->root_piece_->ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
if (ShapeUtil::IsArray(piece->subshape())) {
@ -289,7 +290,7 @@ MutableLiteralBase::CreateFromProto(const LiteralProto& proto) {
return InvalidArgument("LiteralProto has no layout");
}
auto literal = MakeUnique<Literal>(proto.shape());
auto literal = absl::make_unique<Literal>(proto.shape());
TF_RETURN_IF_ERROR(literal->root_piece_->ForEachMutableSubpieceWithStatus(
[&](const ShapeIndex& index, Piece* piece) {
@ -479,7 +480,7 @@ Status Literal::MoveFrom(Literal&& src_literal,
dest_piece.set_sparse_indices(src_piece.sparse_indices());
});
src_literal.shape_ = MakeUnique<Shape>(ShapeUtil::MakeNil());
src_literal.shape_ = absl::make_unique<Shape>(ShapeUtil::MakeNil());
delete src_literal.root_piece_;
src_literal.root_piece_ = new LiteralBase::Piece();
src_literal.root_piece_->set_subshape(src_literal.shape_.get());
@ -566,7 +567,7 @@ std::unique_ptr<Literal> LiteralBase::Relayout(
Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);
TF_CHECK_OK(LayoutUtil::ValidateLayoutForShape(new_layout, *subshape));
*subshape->mutable_layout() = new_layout;
auto result = MakeUnique<Literal>(new_shape);
auto result = absl::make_unique<Literal>(new_shape);
TF_CHECK_OK(result->CopyFrom(*this));
return result;
}
@ -602,7 +603,7 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::Broadcast(
result_shape.dimensions(dimensions[i]));
}
std::unique_ptr<Literal> result = MakeUnique<Literal>(result_shape);
std::unique_ptr<Literal> result = absl::make_unique<Literal>(result_shape);
// scratch_source_index is temporary storage space for the computed index into
// the input literal. We put it here to avoid allocating an std::vector in
@ -691,7 +692,7 @@ std::unique_ptr<Literal> LiteralBase::Transpose(
for (auto index : LayoutUtil::MinorToMajor(shape())) {
layout->add_minor_to_major(inverse_permutation[index]);
}
auto new_literal = MakeUnique<Literal>(permuted_shape);
auto new_literal = absl::make_unique<Literal>(permuted_shape);
DCHECK_EQ(ShapeUtil::ByteSizeOf(new_literal->shape()),
ShapeUtil::ByteSizeOf(shape()));
std::memcpy(new_literal->untyped_data(), untyped_data(), size_bytes());
@ -702,7 +703,7 @@ template <typename NativeT>
std::unique_ptr<Literal> LiteralBase::SliceInternal(
const Shape& result_shape,
tensorflow::gtl::ArraySlice<int64> start_indices) const {
auto result_literal = MakeUnique<Literal>(result_shape);
auto result_literal = absl::make_unique<Literal>(result_shape);
DimensionVector new_indices(ShapeUtil::Rank(result_shape));
result_literal->EachCell<NativeT>(
[&](tensorflow::gtl::ArraySlice<int64> indices, NativeT /*value*/) {
@ -756,7 +757,7 @@ Literal LiteralBase::Clone() const {
}
std::unique_ptr<Literal> LiteralBase::CloneToUnique() const {
auto result = MakeUnique<Literal>(shape());
auto result = absl::make_unique<Literal>(shape());
TF_CHECK_OK(result->CopyFrom(*this));
return result;
}
@ -1203,7 +1204,7 @@ template <typename NativeSrcT, typename NativeDestT, typename ConverterType>
std::unique_ptr<Literal> ConvertBetweenNativeTypesWithConverter(
const LiteralBase& src_literal, const ConverterType& converter) {
CHECK(ShapeUtil::IsArray(src_literal.shape()));
auto result_literal = MakeUnique<Literal>(ShapeUtil::ChangeElementType(
auto result_literal = absl::make_unique<Literal>(ShapeUtil::ChangeElementType(
src_literal.shape(),
primitive_util::NativeToPrimitiveType<NativeDestT>()));
auto src_data = src_literal.data<NativeSrcT>();
@ -1249,7 +1250,7 @@ BitcastBetweenNativeTypes(const LiteralBase& src_literal) {
template <PrimitiveType primitive_src_type>
std::unique_ptr<Literal> ConvertToC64(const LiteralBase& src_literal) {
CHECK(ShapeUtil::IsArray(src_literal.shape()));
auto result_literal = MakeUnique<Literal>(
auto result_literal = absl::make_unique<Literal>(
ShapeUtil::ChangeElementType(src_literal.shape(), C64));
using NativeSrcT =
typename primitive_util::PrimitiveTypeToNative<primitive_src_type>::type;
@ -1396,7 +1397,7 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::ConvertToShape(
element.ConvertToShape(ShapeUtil::GetSubshape(dest_shape, {i})));
elements.push_back(std::move(*new_element));
}
auto converted = MakeUnique<Literal>();
auto converted = absl::make_unique<Literal>();
*converted = MutableLiteralBase::MoveIntoTuple(&elements);
return std::move(converted);
}
@ -1956,7 +1957,7 @@ MutableLiteralBase::~MutableLiteralBase() {}
MutableBorrowingLiteral::MutableBorrowingLiteral(
const MutableBorrowingLiteral& literal)
: MutableLiteralBase() {
shape_ = MakeUnique<Shape>(literal.shape());
shape_ = absl::make_unique<Shape>(literal.shape());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
@ -1967,7 +1968,7 @@ MutableBorrowingLiteral::MutableBorrowingLiteral(
MutableBorrowingLiteral& MutableBorrowingLiteral::operator=(
const MutableBorrowingLiteral& literal) {
shape_ = MakeUnique<Shape>(literal.shape());
shape_ = absl::make_unique<Shape>(literal.shape());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
@ -1981,7 +1982,7 @@ MutableBorrowingLiteral& MutableBorrowingLiteral::operator=(
MutableBorrowingLiteral::MutableBorrowingLiteral(
const MutableLiteralBase& literal)
: MutableLiteralBase() {
shape_ = MakeUnique<Shape>(literal.shape());
shape_ = absl::make_unique<Shape>(literal.shape());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
@ -1992,7 +1993,7 @@ MutableBorrowingLiteral::MutableBorrowingLiteral(
MutableBorrowingLiteral::MutableBorrowingLiteral(MutableLiteralBase* literal)
: MutableLiteralBase() {
shape_ = MakeUnique<Shape>(literal->shape());
shape_ = absl::make_unique<Shape>(literal->shape());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
@ -2004,7 +2005,7 @@ MutableBorrowingLiteral::MutableBorrowingLiteral(MutableLiteralBase* literal)
MutableBorrowingLiteral::MutableBorrowingLiteral(
MutableBorrowingLiteral literal, const ShapeIndex& view_root)
: MutableLiteralBase() {
shape_ = MakeUnique<Shape>(literal.piece(view_root).subshape());
shape_ = absl::make_unique<Shape>(literal.piece(view_root).subshape());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
@ -2016,7 +2017,7 @@ MutableBorrowingLiteral::MutableBorrowingLiteral(
MutableBorrowingLiteral::MutableBorrowingLiteral(const char* src_buf_ptr,
const Shape& shape)
: MutableLiteralBase() {
shape_ = MakeUnique<Shape>(shape);
shape_ = absl::make_unique<Shape>(shape);
CHECK(LayoutUtil::HasLayout(*shape_));
CHECK(!ShapeUtil::IsTuple(*shape_));
@ -2061,7 +2062,7 @@ void BorrowingLiteral::BuildPieceSubtree(const Shape& shape, Piece* piece) {
}
BorrowingLiteral::BorrowingLiteral(const char* src_buf_ptr, const Shape& shape)
: LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
: LiteralBase(), shape_(absl::make_unique<Shape>(shape)) {
CHECK(ShapeUtil::IsArray(*shape_));
CHECK(LayoutUtil::HasLayout(*shape_));
@ -2072,7 +2073,7 @@ BorrowingLiteral::BorrowingLiteral(const char* src_buf_ptr, const Shape& shape)
BorrowingLiteral::BorrowingLiteral(
tensorflow::gtl::ArraySlice<const char*> src_buf_ptrs, const Shape& shape)
: LiteralBase(), shape_(MakeUnique<Shape>(shape)) {
: LiteralBase(), shape_(absl::make_unique<Shape>(shape)) {
CHECK(ShapeUtil::IsTuple(*shape_));
CHECK(!ShapeUtil::IsNestedTuple(*shape_));
CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));

View File

@ -25,13 +25,13 @@ limitations under the License.
#include <type_traits>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/sparse_index_array.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -312,7 +312,7 @@ class LiteralBase {
// Note: It's an antipattern to use this method then immediately call
// MutableLiteralBase::Populate on the result (since that results in zero
// initialization, then reinitialization). Consider if a call to
// MakeUnique<Literal>(shape), followed by the call to
// absl::make_unique<Literal>(shape), followed by the call to
// MutableLiteralBase::Populate can be used instead.
static std::unique_ptr<Literal> CreateFromShape(const Shape& shape);
@ -1154,8 +1154,8 @@ std::unique_ptr<Literal> LiteralBase::Replicate(int64 times) const {
for (int64 bound : shape().dimensions()) {
bounds.push_back(bound);
}
auto literal =
MakeUnique<Literal>(ShapeUtil::MakeShape(shape().element_type(), bounds));
auto literal = absl::make_unique<Literal>(
ShapeUtil::MakeShape(shape().element_type(), bounds));
int64 elements = ShapeUtil::ElementsIn(literal->shape());
if (elements == 0) {
return literal;

View File

@ -17,6 +17,7 @@ limitations under the License.
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
@ -355,15 +356,15 @@ TEST_F(LiteralUtilTest, TokenEquality) {
TEST_F(LiteralUtilTest, DifferentLayoutEquality) {
// Test equality with literals which have different layouts.
auto colmajor =
MakeUnique<Literal>(ShapeUtil::MakeShapeWithLayout(F32, {2, 2}, {0, 1}));
auto colmajor = absl::make_unique<Literal>(
ShapeUtil::MakeShapeWithLayout(F32, {2, 2}, {0, 1}));
colmajor->Set<float>({0, 0}, 1.0);
colmajor->Set<float>({0, 1}, 2.0);
colmajor->Set<float>({1, 0}, 3.0);
colmajor->Set<float>({1, 1}, 4.0);
auto rowmajor =
MakeUnique<Literal>(ShapeUtil::MakeShapeWithLayout(F32, {2, 2}, {1, 0}));
auto rowmajor = absl::make_unique<Literal>(
ShapeUtil::MakeShapeWithLayout(F32, {2, 2}, {1, 0}));
rowmajor->Set<float>({0, 0}, 1.0);
rowmajor->Set<float>({0, 1}, 2.0);
rowmajor->Set<float>({1, 0}, 3.0);
@ -1089,7 +1090,7 @@ TEST_F(LiteralUtilTest, Populate) {
Shape shape = ShapeUtil::MakeShapeWithLayout(
primitive_util::NativeToPrimitiveType<uint32>(), data.dimensions,
data.layout);
auto literal = MakeUnique<Literal>(shape);
auto literal = absl::make_unique<Literal>(shape);
auto generator = [&](ArraySlice<int64> indexes) -> uint32 {
// Offsets from linear index just to avoid R0 literals to be initialized
// with zero.
@ -1131,7 +1132,7 @@ TEST_F(LiteralUtilTest, PopulateParallel) {
Shape shape = ShapeUtil::MakeShapeWithLayout(
primitive_util::NativeToPrimitiveType<uint32>(), data.dimensions,
data.layout);
auto literal = MakeUnique<Literal>(shape);
auto literal = absl::make_unique<Literal>(shape);
auto generator = [&](ArraySlice<int64> indexes) -> uint32 {
// Offsets from linear index just to avoid R0 literals to be initialized
// with zero.

View File

@ -22,6 +22,7 @@ limitations under the License.
#include <numeric>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -57,7 +58,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
primitive_util::NativeToPrimitiveType<ToNativeT>());
}
});
auto result = MakeUnique<Literal>(result_shape);
auto result = absl::make_unique<Literal>(result_shape);
// Then copy over the data from 'literal' converting FromNativeT values to
// ToNativeT values as necessary.
@ -102,7 +103,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
}
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateToken() {
return MakeUnique<Literal>(ShapeUtil::MakeTokenShape());
return absl::make_unique<Literal>(ShapeUtil::MakeTokenShape());
}
/* static */ Literal LiteralUtil::Zero(PrimitiveType primitive_type) {
@ -279,7 +280,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR1(
const tensorflow::core::Bitmap& values) {
auto literal = MakeUnique<Literal>(
auto literal = absl::make_unique<Literal>(
ShapeUtil::MakeShape(PRED, {static_cast<int64>(values.bits())}));
literal->PopulateR1(values);
return literal;
@ -287,7 +288,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR1U8(
tensorflow::StringPiece value) {
auto literal = MakeUnique<Literal>(
auto literal = absl::make_unique<Literal>(
ShapeUtil::MakeShape(U8, {static_cast<int64>(value.size())}));
for (int i = 0; i < value.size(); ++i) {
literal->Set<uint8>({i}, value[i]);
@ -312,7 +313,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
CHECK_EQ(ShapeUtil::ElementsIn(literal.shape()), new_num_elements);
CHECK_EQ(new_dimensions.size(), minor_to_major.size());
auto new_literal = MakeUnique<Literal>(
auto new_literal = absl::make_unique<Literal>(
ShapeUtil::MakeShape(literal.shape().element_type(), new_dimensions));
// Create a new shape with the given minor-to-major layout. This shape is used
@ -436,7 +437,8 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
for (const auto* element : elements) {
element_shapes.push_back(element->shape());
}
auto literal = MakeUnique<Literal>(ShapeUtil::MakeTupleShape(element_shapes));
auto literal =
absl::make_unique<Literal>(ShapeUtil::MakeTupleShape(element_shapes));
for (int i = 0; i < elements.size(); ++i) {
TF_CHECK_OK(literal->CopyFrom(*elements[i], /*dest_shape_index=*/{i}));
}
@ -449,7 +451,8 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
for (const auto& element : elements) {
element_shapes.push_back(element.shape());
}
auto literal = MakeUnique<Literal>(ShapeUtil::MakeTupleShape(element_shapes));
auto literal =
absl::make_unique<Literal>(ShapeUtil::MakeTupleShape(element_shapes));
for (int i = 0; i < elements.size(); ++i) {
TF_CHECK_OK(literal->CopyFrom(elements[i], /*dest_shape_index=*/{i}));
}
@ -463,7 +466,8 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
for (const auto& element : elements) {
element_shapes.push_back(element->shape());
}
auto literal = MakeUnique<Literal>(ShapeUtil::MakeTupleShape(element_shapes));
auto literal =
absl::make_unique<Literal>(ShapeUtil::MakeTupleShape(element_shapes));
for (int64 i = 0; i < elements.size(); ++i) {
TF_CHECK_OK(
literal->MoveFrom(std::move(*elements[i]), /*dest_shape_index=*/{i}));

View File

@ -27,6 +27,7 @@ limitations under the License.
#include <type_traits>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
@ -34,7 +35,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/sparse_index_array.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -327,7 +327,7 @@ std::ostream& operator<<(std::ostream& out, const Literal& literal);
template <typename NativeT>
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR0(NativeT value) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShape(
auto literal = absl::make_unique<Literal>(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<NativeT>(), {}));
literal->Set({}, value);
return literal;
@ -336,7 +336,7 @@ template <typename NativeT>
template <typename NativeT>
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR1(
tensorflow::gtl::ArraySlice<NativeT> values) {
auto literal = MakeUnique<Literal>(
auto literal = absl::make_unique<Literal>(
ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<NativeT>(),
{static_cast<int64>(values.size())}));
literal->PopulateR1(values);
@ -347,7 +347,7 @@ template <typename NativeT>
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR2WithLayout(
std::initializer_list<std::initializer_list<NativeT>> values,
const Layout& layout) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShapeWithLayout(
auto literal = absl::make_unique<Literal>(ShapeUtil::MakeShapeWithLayout(
primitive_util::NativeToPrimitiveType<NativeT>(),
{static_cast<int64>(values.size()),
static_cast<int64>(values.begin()->size())},
@ -433,9 +433,10 @@ template <typename NativeT>
int64 rank = dimensions.size();
CHECK_EQ(num_elements, indices.index_count());
CHECK_EQ(rank, indices.rank());
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShapeWithSparseLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), dimensions,
indices.max_indices()));
auto literal =
absl::make_unique<Literal>(ShapeUtil::MakeShapeWithSparseLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), dimensions,
indices.max_indices()));
literal->PopulateSparse(indices, values, sort);
return literal;
}
@ -451,7 +452,7 @@ template <typename NativeT>
template <typename NativeT>
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateFromArrayWithLayout(
const Array<NativeT>& values, const Layout& layout) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShapeWithLayout(
auto literal = absl::make_unique<Literal>(ShapeUtil::MakeShapeWithLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), values.dimensions(),
AsInt64Slice(layout.minor_to_major())));
literal->PopulateFromArray(values);
@ -571,8 +572,9 @@ template <typename NativeT>
/* static */ std::unique_ptr<Literal>
LiteralUtil::CreateFullWithDescendingLayout(
tensorflow::gtl::ArraySlice<int64> dimensions, NativeT value) {
auto literal = MakeUnique<Literal>(ShapeUtil::MakeShapeWithDescendingLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), dimensions));
auto literal =
absl::make_unique<Literal>(ShapeUtil::MakeShapeWithDescendingLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), dimensions));
literal->PopulateWithValue(value);
return literal;
}
@ -584,7 +586,7 @@ LiteralUtil::CreateRandomLiteral(
const std::function<T(tensorflow::gtl::ArraySlice<int64>)>& generator) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<type>::type;
TF_RET_CHECK(shape.element_type() == type);
auto literal = MakeUnique<Literal>(shape);
auto literal = absl::make_unique<Literal>(shape);
TF_RETURN_IF_ERROR(literal.get()->Populate<NativeT>(
[&](tensorflow::gtl::ArraySlice<int64> indexes) {
return generator(indexes);

View File

@ -19,9 +19,9 @@ limitations under the License.
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
@ -57,7 +57,7 @@ StatusOr<std::unique_ptr<Literal>> PackedLiteralReader::Read(
PrimitiveType_Name(shape.element_type()).c_str());
}
auto result = MakeUnique<Literal>(literal_shape);
auto result = absl::make_unique<Literal>(literal_shape);
result->PopulateWithValue(std::numeric_limits<float>::quiet_NaN());
int64 elements = ShapeUtil::ElementsIn(shape);

View File

@ -1,35 +0,0 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_PTR_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_PTR_UTIL_H_
// As this was moved to tensorflow/core/util, provide indirections here to
// maintain current functionality of the library.
#include <stddef.h>
#include <memory>
#include <type_traits>
#include <utility>
#include "tensorflow/core/util/ptr_util.h"
namespace xla {
using tensorflow::MakeUnique;
using tensorflow::WrapUnique;
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_PTR_UTIL_H_
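The header removed above only aliased tensorflow::MakeUnique and tensorflow::WrapUnique into the xla namespace, so the caller-side change amounts to dropping that indirection. A hedged sketch of a hypothetical caller (Buffer and NewBuffer are illustrative names, not from the diff):

// Before (hypothetical caller):
//   #include "tensorflow/compiler/xla/ptr_util.h"
//   auto buf = xla::MakeUnique<Buffer>(1024);
// After this commit, the caller depends on absl directly:
#include <cstddef>
#include <memory>
#include <vector>

#include "absl/memory/memory.h"

struct Buffer {
  explicit Buffer(std::size_t size) : bytes(size) {}
  std::vector<char> bytes;
};

std::unique_ptr<Buffer> NewBuffer(std::size_t size) {
  return absl::make_unique<Buffer>(size);
}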

View File

@ -59,6 +59,7 @@ cc_library(
"//tensorflow/compiler/xla/service:shaped_buffer",
"//tensorflow/core:framework_lite",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)

View File

@ -14,10 +14,10 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/python/local_computation_builder.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/client/lib/math.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/executable_run_options.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/thread_annotations.h"

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <array>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h"
@ -43,7 +44,7 @@ std::unique_ptr<Array2D<T>> MatmulArray2DImpl(
int m = lhs.height();
int n = rhs.width();
int k = lhs.width();
auto result = MakeUnique<Array2D<T>>(m, n);
auto result = absl::make_unique<Array2D<T>>(m, n);
// Because Eigen is a header-oriented library, make sure that the Eigen code
// is the same as the code used by the CPU backend (otherwise the linker will
// randomly pick *some* definition).
@ -77,7 +78,8 @@ std::unique_ptr<Array2D<T>> MatmulArray2DImpl(
/* static */ std::unique_ptr<Array2D<double>> ReferenceUtil::Array2DF32ToF64(
const Array2D<float>& input) {
auto result = MakeUnique<Array2D<double>>(input.height(), input.width());
auto result =
absl::make_unique<Array2D<double>>(input.height(), input.width());
for (int64 rowno = 0; rowno < input.height(); ++rowno) {
for (int64 colno = 0; colno < input.height(); ++colno) {
(*result)(rowno, colno) = input(rowno, colno);
@ -126,8 +128,8 @@ ReferenceUtil::ConvArray3DGeneralDimensionsDilated(
a4dlhs, a4drhs, {kernel_stride, 1}, padding, {lhs_dilation, 1},
{rhs_dilation, 1}, dnums2d);
auto convr3 = MakeUnique<Array3D<float>>(convr4->planes(), convr4->depth(),
convr4->height());
auto convr3 = absl::make_unique<Array3D<float>>(
convr4->planes(), convr4->depth(), convr4->height());
convr4->Each(
[&](tensorflow::gtl::ArraySlice<int64> indices, float* value_ptr) {
CHECK_EQ(indices[3], 0);
@ -201,7 +203,7 @@ ReferenceUtil::ReduceWindow1DGeneric(
window_util::StridedBound(padded_width, window[i], stride[i]);
pad_low[i] = padding[i].first;
}
auto result = MakeUnique<std::vector<float>>(window_counts[0]);
auto result = absl::make_unique<std::vector<float>>(window_counts[0]);
// Do a full 1D reduce window.
for (int64 i0 = 0; i0 < window_counts[0]; ++i0) {
@ -247,7 +249,8 @@ ReferenceUtil::ReduceWindow2DGeneric(
window_util::StridedBound(padded_width, window[i], stride[i]);
pad_low[i] = padding[i].first;
}
auto result = MakeUnique<Array2D<float>>(window_counts[0], window_counts[1]);
auto result =
absl::make_unique<Array2D<float>>(window_counts[0], window_counts[1]);
// Do a full 2D reduce window.
for (int64 i0 = 0; i0 < window_counts[0]; ++i0) {
@ -296,8 +299,8 @@ ReferenceUtil::ReduceWindow2DGeneric(
WindowCount(dim_lengths[i], window[i], stride[i], padding);
pad_low[i] = padding_both[i].first;
}
auto result = MakeUnique<Array3D<float>>(window_counts[0], window_counts[1],
window_counts[2]);
auto result = absl::make_unique<Array3D<float>>(
window_counts[0], window_counts[1], window_counts[2]);
for (int64 i0 = 0; i0 < window_counts[0]; ++i0) {
for (int64 i1 = 0; i1 < window_counts[1]; ++i1) {
@ -358,8 +361,8 @@ ReferenceUtil::ReduceWindow4DGeneric(
window_util::StridedBound(padded_width, window[i], stride[i]);
pad_low[i] = padding[i].first;
}
auto result = MakeUnique<Array4D<float>>(window_counts[0], window_counts[1],
window_counts[2], window_counts[3]);
auto result = absl::make_unique<Array4D<float>>(
window_counts[0], window_counts[1], window_counts[2], window_counts[3]);
// Do a full 4D reduce window.
for (int64 i0 = 0; i0 < window_counts[0]; ++i0) {
for (int64 i1 = 0; i1 < window_counts[1]; ++i1) {
@ -426,8 +429,8 @@ ReferenceUtil::SelectAndScatter4DGePlus(
const tensorflow::gtl::ArraySlice<int64>& window,
const tensorflow::gtl::ArraySlice<int64>& stride, bool same_padding) {
Padding padding = same_padding ? Padding::kSame : Padding::kValid;
auto result = MakeUnique<Array4D<float>>(operand.n1(), operand.n2(),
operand.n3(), operand.n4());
auto result = absl::make_unique<Array4D<float>>(operand.n1(), operand.n2(),
operand.n3(), operand.n4());
std::vector<int64> dim_lengths{operand.n1(), operand.n2(), operand.n3(),
operand.n4()};
auto padding_both = xla::MakePadding(dim_lengths, window, stride, padding);
@ -583,10 +586,10 @@ ReferenceUtil::ConvArray4DGeneralDimensionsDilated(
CHECK_EQ(ShapeUtil::Rank(result_literal->shape()), 4);
auto result =
MakeUnique<Array4D<float>>(result_literal->shape().dimensions(0),
result_literal->shape().dimensions(1),
result_literal->shape().dimensions(2),
result_literal->shape().dimensions(3));
absl::make_unique<Array4D<float>>(result_literal->shape().dimensions(0),
result_literal->shape().dimensions(1),
result_literal->shape().dimensions(2),
result_literal->shape().dimensions(3));
result->Each([&](tensorflow::gtl::ArraySlice<int64> indices, float* value) {
*value = result_literal->Get<float>(indices);
@ -601,7 +604,7 @@ ReferenceUtil::ReduceToColArray2D(
const std::function<float(float, float)>& reduce_function) {
int64 rows = matrix.height();
int64 cols = matrix.width();
auto result = MakeUnique<std::vector<float>>();
auto result = absl::make_unique<std::vector<float>>();
for (int64 i = 0; i < rows; ++i) {
float acc = init;
for (int64 j = 0; j < cols; ++j) {
@ -618,7 +621,7 @@ ReferenceUtil::ReduceToRowArray2D(
const std::function<float(float, float)>& reduce_function) {
int64 rows = matrix.height();
int64 cols = matrix.width();
auto result = MakeUnique<std::vector<float>>();
auto result = absl::make_unique<std::vector<float>>();
for (int64 i = 0; i < cols; ++i) {
float acc = init;
for (int64 j = 0; j < rows; ++j) {
@ -674,8 +677,8 @@ ReferenceUtil::ReduceToRowArray2D(
/* static */ std::unique_ptr<Array4D<float>> ReferenceUtil::Broadcast1DTo4D(
const std::vector<float>& array, const std::vector<int64>& bounds,
int64 broadcast_from_dim) {
auto result =
MakeUnique<Array4D<float>>(bounds[0], bounds[1], bounds[2], bounds[3]);
auto result = absl::make_unique<Array4D<float>>(bounds[0], bounds[1],
bounds[2], bounds[3]);
for (int64 i = 0; i < result->n1(); ++i) {
for (int64 j = 0; j < result->n2(); ++j) {
for (int64 k = 0; k < result->n3(); ++k) {
@ -710,7 +713,7 @@ ReferenceUtil::ReduceToRowArray2D(
CHECK_EQ(dims.size(), 1);
int64 rows = dims[0] == 0 ? array.n2() : array.n1();
int64 cols = dims[0] == 2 ? array.n2() : array.n3();
auto result = MakeUnique<Array2D<float>>(rows, cols);
auto result = absl::make_unique<Array2D<float>>(rows, cols);
result->Fill(init);
for (int i0 = 0; i0 < array.n1(); ++i0) {
for (int i1 = 0; i1 < array.n2(); ++i1) {
@ -730,7 +733,7 @@ ReferenceUtil::ReduceToRowArray2D(
const std::function<float(float)>& map_function) {
int64 rows = matrix.height();
int64 cols = matrix.width();
auto result = MakeUnique<Array2D<float>>(rows, cols);
auto result = absl::make_unique<Array2D<float>>(rows, cols);
for (int64 i = 0; i < rows; ++i) {
for (int64 j = 0; j < cols; ++j) {
(*result)(i, j) = map_function(matrix(i, j));
@ -746,7 +749,7 @@ ReferenceUtil::ReduceToRowArray2D(
CHECK_EQ(lhs.width(), rhs.width());
int64 rows = lhs.height();
int64 cols = rhs.width();
auto result = MakeUnique<Array2D<float>>(rows, cols);
auto result = absl::make_unique<Array2D<float>>(rows, cols);
for (int64 i = 0; i < rows; ++i) {
for (int64 j = 0; j < cols; ++j) {
(*result)(i, j) = map_function(lhs(i, j), rhs(i, j));
@ -760,7 +763,7 @@ ReferenceUtil::ReduceToRowArray2D(
const std::function<float(float, int64, int64)>& map_function) {
int64 rows = matrix.height();
int64 cols = matrix.width();
auto result = MakeUnique<Array2D<float>>(rows, cols);
auto result = absl::make_unique<Array2D<float>>(rows, cols);
for (int64 i = 0; i < rows; ++i) {
for (int64 j = 0; j < cols; ++j) {
(*result)(i, j) = map_function(matrix(i, j), i, j);

View File

@ -22,11 +22,11 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/padding.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
@ -42,7 +42,8 @@ class ReferenceUtil {
template <typename T>
static std::unique_ptr<Array2D<T>> TransposeArray2D(
const Array2D<T>& operand) {
auto result = MakeUnique<Array2D<T>>(operand.width(), operand.height());
auto result =
absl::make_unique<Array2D<T>>(operand.width(), operand.height());
for (int64 w = 0; w < operand.width(); ++w) {
for (int64 h = 0; h < operand.height(); ++h) {
(*result)(w, h) = operand(h, w);
@ -242,7 +243,7 @@ class ReferenceUtil {
const Array2D<T>& rhs,
int concatenate_dimension) {
CHECK(0 <= concatenate_dimension && concatenate_dimension < 2);
auto result = MakeUnique<Array2D<T>>(
auto result = absl::make_unique<Array2D<T>>(
concatenate_dimension == 0 ? lhs.n1() + rhs.n1() : lhs.n1(),
concatenate_dimension == 1 ? lhs.n2() + rhs.n2() : lhs.n2());
for (int64 i0 = 0; i0 < result->n1(); ++i0) {
@ -276,7 +277,8 @@ class ReferenceUtil {
out_dims[i] = lhs_dims[i] + rhs_dims[i];
}
}
auto result = MakeUnique<Array3D<T>>(out_dims[0], out_dims[1], out_dims[2]);
auto result =
absl::make_unique<Array3D<T>>(out_dims[0], out_dims[1], out_dims[2]);
for (int64 i0 = 0; i0 < result->n1(); ++i0) {
for (int64 i1 = 0; i1 < result->n2(); ++i1) {
for (int64 i2 = 0; i2 < result->n3(); ++i2) {
@ -310,8 +312,8 @@ class ReferenceUtil {
out_dims[i] = lhs_dims[i] + rhs_dims[i];
}
}
auto result = MakeUnique<Array4D<T>>(out_dims[0], out_dims[1], out_dims[2],
out_dims[3]);
auto result = absl::make_unique<Array4D<T>>(out_dims[0], out_dims[1],
out_dims[2], out_dims[3]);
for (int64 i0 = 0; i0 < result->n1(); ++i0) {
for (int64 i1 = 0; i1 < result->n2(); ++i1) {
for (int64 i2 = 0; i2 < result->n3(); ++i2) {
@ -355,9 +357,9 @@ class ReferenceUtil {
CHECK_LE(limits[1], input.n2());
CHECK_GE(strides[0], 1);
CHECK_GE(strides[1], 1);
auto result =
MakeUnique<Array2D<T>>(CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]));
auto result = absl::make_unique<Array2D<T>>(
CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]));
for (int64 i0 = 0; i0 < result->n1(); ++i0) {
for (int64 i1 = 0; i1 < result->n2(); ++i1) {
(*result)(i0, i1) =
@ -381,10 +383,10 @@ class ReferenceUtil {
CHECK_GE(strides[0], 1);
CHECK_GE(strides[1], 1);
CHECK_GE(strides[2], 1);
auto result =
MakeUnique<Array3D<T>>(CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]),
CeilOfRatio(limits[2] - starts[2], strides[2]));
auto result = absl::make_unique<Array3D<T>>(
CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]),
CeilOfRatio(limits[2] - starts[2], strides[2]));
for (int64 i0 = 0; i0 < result->n1(); ++i0) {
for (int64 i1 = 0; i1 < result->n2(); ++i1) {
@ -415,11 +417,11 @@ class ReferenceUtil {
CHECK_GE(strides[1], 1);
CHECK_GE(strides[2], 1);
CHECK_GE(strides[3], 1);
auto result =
MakeUnique<Array4D<T>>(CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]),
CeilOfRatio(limits[2] - starts[2], strides[2]),
CeilOfRatio(limits[3] - starts[3], strides[3]));
auto result = absl::make_unique<Array4D<T>>(
CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]),
CeilOfRatio(limits[2] - starts[2], strides[2]),
CeilOfRatio(limits[3] - starts[3], strides[3]));
for (int64 i0 = 0; i0 < result->n1(); ++i0) {
for (int64 i1 = 0; i1 < result->n2(); ++i1) {
for (int64 i2 = 0; i2 < result->n3(); ++i2) {
@ -460,8 +462,8 @@ class ReferenceUtil {
template <typename F>
static std::unique_ptr<Array4D<float>> MapWithIndexArray4D(
const Array4D<float>& input, F&& map_function) {
auto result = MakeUnique<Array4D<float>>(input.planes(), input.depth(),
input.height(), input.width());
auto result = absl::make_unique<Array4D<float>>(
input.planes(), input.depth(), input.height(), input.width());
for (int64 plane = 0; plane < input.planes(); ++plane) {
for (int64 depth = 0; depth < input.depth(); ++depth) {
for (int64 height = 0; height < input.height(); ++height) {
@ -495,8 +497,8 @@ class ReferenceUtil {
template <typename F>
static std::unique_ptr<Array4D<float>> MapWithIndexArray4D(
const Array4D<float>& lhs, const Array4D<float>& rhs, F&& map_function) {
auto result = MakeUnique<Array4D<float>>(lhs.planes(), lhs.depth(),
lhs.height(), lhs.width());
auto result = absl::make_unique<Array4D<float>>(lhs.planes(), lhs.depth(),
lhs.height(), lhs.width());
for (int64 plane = 0; plane < lhs.planes(); ++plane) {
for (int64 depth = 0; depth < lhs.depth(); ++depth) {
for (int64 height = 0; height < lhs.height(); ++height) {
@ -530,7 +532,7 @@ class ReferenceUtil {
int64 out1 =
in1 + low_padding1 + high_padding1 + (in1 - 1) * interior_padding1;
auto result = MakeUnique<Array2D<NativeT>>(out0, out1);
auto result = absl::make_unique<Array2D<NativeT>>(out0, out1);
result->Fill(pad);
int64 o0 = low_padding0;
for (int64 i0 = 0; i0 < in0; ++i0) {
@ -669,7 +671,7 @@ class ReferenceUtil {
static std::unique_ptr<Array2D<T1>> ApplyElementwise2D(
F&& f, const Array2D<T1>& array1, const Array2D<Ts>&... arrays) {
AssertSameSize2D(array1, arrays...);
auto result = MakeUnique<Array2D<T1>>(array1.n1(), array1.n2());
auto result = absl::make_unique<Array2D<T1>>(array1.n1(), array1.n2());
for (int64 i = 0; i < array1.n1(); ++i) {
for (int64 j = 0; j < array1.n2(); ++j) {
(*result)(i, j) = f(array1(i, j), arrays(i, j)...);

View File

@ -18,12 +18,12 @@ limitations under the License.
#include <cmath>
#include <memory>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/array3d.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/padding.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
@ -36,7 +36,7 @@ namespace {
class ReferenceUtilTest : public ::testing::Test {
protected:
ReferenceUtilTest() {
matrix_ = MakeUnique<Array2D<float>>(rows_, cols_);
matrix_ = absl::make_unique<Array2D<float>>(rows_, cols_);
// [1.f 2.f 3.f]
// [4.f 5.f 6.f]
for (int64 i = 0; i < rows_; ++i) {
@ -112,8 +112,8 @@ TEST_F(ReferenceUtilTest, MapWithIndexArray2D) {
}
TEST_F(ReferenceUtilTest, MapArray4D) {
auto input = MakeUnique<Array4D<float>>(/*planes=*/2, /*depth=*/3,
/*height=*/4, /*width=*/5);
auto input = absl::make_unique<Array4D<float>>(/*planes=*/2, /*depth=*/3,
/*height=*/4, /*width=*/5);
input->FillWithMultiples(1.0f);
auto multiply_by_two = [](float value) { return 2 * value; };
auto result = ReferenceUtil::MapArray4D(*input, multiply_by_two);
@ -126,8 +126,8 @@ TEST_F(ReferenceUtilTest, MapArray4D) {
}
TEST_F(ReferenceUtilTest, MapWithIndexArray4D) {
auto input = MakeUnique<Array4D<float>>(/*planes=*/2, /*depth=*/3,
/*height=*/4, /*width=*/5);
auto input = absl::make_unique<Array4D<float>>(/*planes=*/2, /*depth=*/3,
/*height=*/4, /*width=*/5);
input->FillWithMultiples(1.0f);
auto subtract_index = [](float value, int64 plane, int64 depth, int64 height,
int64 width) {

View File

@ -239,6 +239,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)
@ -265,6 +266,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main", # fixdeps: keep
"//tensorflow/core:lib",
"//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@ -314,6 +316,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)
@ -452,6 +455,7 @@ cc_library(
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -520,6 +524,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@ -577,6 +582,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"//third_party/eigen3",
"@com_google_absl//absl/memory",
],
)
@ -618,6 +624,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:ptr_util",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
alwayslink = 1,
)
@ -650,6 +657,7 @@ cc_library(
"//tensorflow/compiler/xla/client:xla_computation",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -722,6 +730,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -739,6 +748,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:ptr_util",
"//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@ -769,6 +779,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"//tensorflow/core:stream_executor_no_cuda",
"//tensorflow/stream_executor",
"@com_google_absl//absl/memory",
],
)
@ -816,6 +827,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -834,6 +846,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -850,6 +863,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -867,6 +881,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
],
)
@ -926,6 +941,7 @@ tf_cc_test(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"@com_google_absl//absl/memory",
],
)
@ -953,6 +969,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
],
)
@ -980,6 +997,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1034,6 +1052,7 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1052,6 +1071,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1068,6 +1088,7 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1085,6 +1106,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1186,6 +1208,7 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)
@ -1203,6 +1226,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@ -1274,6 +1298,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1297,6 +1322,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)
@ -1320,6 +1346,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main", # fixdeps: keep
"//tensorflow/core:lib",
"//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@ -1421,6 +1448,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1610,6 +1638,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_verified_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1643,6 +1672,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:literal_test_util",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"@com_google_absl//absl/memory",
],
)
@ -1662,6 +1692,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
alwayslink = True, # Contains per-platform computation placer registration
)
@ -1753,6 +1784,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)
@ -1798,6 +1830,7 @@ tf_cc_binary(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1814,6 +1847,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1873,6 +1907,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1891,6 +1926,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -1932,6 +1968,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2025,6 +2062,7 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
],
)
@ -2037,7 +2075,6 @@ cc_library(
":hlo_dataflow_analysis",
":logical_buffer",
":logical_buffer_analysis",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:statusor",
@ -2045,6 +2082,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2095,6 +2133,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2185,6 +2224,7 @@ cc_library(
":shape_inference",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2267,6 +2307,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@ -2348,6 +2389,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2385,6 +2427,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:literal_test_util",
"//tensorflow/compiler/xla/tests:test_utils",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2401,6 +2444,7 @@ cc_library(
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2432,6 +2476,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2446,6 +2491,7 @@ cc_library(
"//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2506,6 +2552,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_verified_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@ -2606,10 +2653,10 @@ cc_library(
":computation_layout",
"//tensorflow/compiler/xla:shape_layout",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla:xla_proto",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -2790,9 +2837,9 @@ cc_library(
hdrs = ["stream_pool.h"],
deps = [
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -2890,6 +2937,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"//third_party/eigen3",
"@com_google_absl//absl/memory",
],
)
@ -3085,6 +3133,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
],
)

View File

@ -23,6 +23,7 @@ limitations under the License.
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
@ -541,7 +542,7 @@ Status AlgebraicSimplifierVisitor::HandleConstant(HloInstruction* constant) {
// If a literal is all the same element replace it with a scalar broadcast.
if (ShapeUtil::ElementsIn(constant->shape()) > 1 &&
constant->literal().IsAllFirst()) {
std::unique_ptr<Literal> unique_scalar = MakeUnique<Literal>(
std::unique_ptr<Literal> unique_scalar = absl::make_unique<Literal>(
LiteralUtil::GetFirstScalarLiteral(constant->literal()));
HloInstruction* scalar = computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(unique_scalar)));

View File

@ -18,9 +18,9 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"

View File

@ -17,8 +17,8 @@ limitations under the License.
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
#include "tensorflow/compiler/xla/service/transfer_manager.h"
#include "tensorflow/compiler/xla/shape_util.h"
@ -91,8 +91,9 @@ StatusOr<GlobalDataHandle> AllocationTracker::RegisterInternal(
// If ShapedBufferTy is ScopedShapedBuffer, release the ScopedShapedBuffer
// into a regular ShapedBuffer, which is stored in
// handle_to_shaped_buffers_.
handle_to_shaped_buffers_[handle].emplace_back(MakeUnique<ShapedBuffer>(
ReleaseIfScopedShapedBuffer(std::move(shaped_buffer))));
handle_to_shaped_buffers_[handle].emplace_back(
absl::make_unique<ShapedBuffer>(
ReleaseIfScopedShapedBuffer(std::move(shaped_buffer))));
}
GlobalDataHandle result;
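
In the RegisterInternal change above, the freshly constructed buffer is moved straight into emplace_back, so the vector held by the map becomes the owner. A small hedged sketch of that container hand-off, with a hypothetical Buffer type standing in for ShapedBuffer:

#include <map>
#include <memory>
#include <vector>

#include "absl/memory/memory.h"

// Hypothetical stand-in for ShapedBuffer; the real type is not shown here.
struct Buffer {
  explicit Buffer(int size_in) : size(size_in) {}
  int size;
};

int main() {
  std::map<int, std::vector<std::unique_ptr<Buffer>>> handle_to_buffers;
  const int handle = 1;
  // emplace_back receives the freshly created unique_ptr by move, so the
  // vector stored in the map owns the Buffer afterwards.
  handle_to_buffers[handle].emplace_back(absl::make_unique<Buffer>(/*size=*/64));
  return 0;
}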

View File

@ -21,6 +21,7 @@ limitations under the License.
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/compiler/xla/service/compiler.h"
#include "tensorflow/compiler/xla/service/platform_util.h"
@ -127,8 +128,8 @@ Backend::Backend(
}
}
// Create a memory allocator for the valid stream executors.
memory_allocator_ =
MakeUnique<StreamExecutorMemoryAllocator>(platform, stream_executors);
memory_allocator_ = absl::make_unique<StreamExecutorMemoryAllocator>(
platform, stream_executors);
CHECK(!stream_executors_.empty())
<< "Service found no devices for backend " << platform_->Name() << '.';

View File

@ -18,9 +18,9 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"

View File

@ -22,8 +22,8 @@ limitations under the License.
#include <ostream>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/buffer_value_containers.h"
#include "tensorflow/compiler/xla/service/heap_simulator.h"
#include "tensorflow/compiler/xla/service/hlo.pb.h"
@ -1100,8 +1100,8 @@ Status BufferAssigner::AssignBuffersWithSequentialOrdering(
options.buffers_to_assign = &buffer_value_set;
TF_ASSIGN_OR_RETURN(
const HeapSimulator::Result result,
HeapSimulator::Run(MakeUnique<DecreasingSizeRunsHeap>(
MakeUnique<LazyBestFitHeap>(alignment)),
HeapSimulator::Run(absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<LazyBestFitHeap>(alignment)),
assignment->module(), module_sequence,
assignment->points_to_analysis(),
assignment->buffer_size_, options));
@ -1130,11 +1130,12 @@ Status BufferAssigner::AssignBuffersWithSequentialOrdering(
options.buffers_to_assign = &buffer_value_set;
TF_ASSIGN_OR_RETURN(
const HeapSimulator::Result result,
HeapSimulator::Run(MakeUnique<DecreasingSizeRunsHeap>(
MakeUnique<LazyBestFitHeap>(alignment)),
*computation, *instruction_sequence,
assignment->points_to_analysis(),
assignment->buffer_size_, options));
HeapSimulator::Run(
absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<LazyBestFitHeap>(alignment)),
*computation, *instruction_sequence,
assignment->points_to_analysis(), assignment->buffer_size_,
options));
AssignBuffersFromHeapSimulator(result, assignment,
single_colored_set.first);
}
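
The two HeapSimulator::Run hunks above nest the calls: the inner absl::make_unique result is consumed directly by the outer wrapper's constructor, so ownership passes without a named temporary. A sketch of that hand-off under assumed, simplified Heap types, not the real LazyBestFitHeap/DecreasingSizeRunsHeap interfaces:

#include <cstdint>
#include <memory>
#include <utility>

#include "absl/memory/memory.h"

// Hypothetical minimal heap interface; not the real HeapSimulator types.
class Heap {
 public:
  virtual ~Heap() = default;
  virtual void Alloc(std::int64_t size) = 0;
};

class BasicHeap : public Heap {
 public:
  explicit BasicHeap(std::int64_t alignment) : alignment_(alignment) {}
  void Alloc(std::int64_t size) override {
    // Round each request up to the alignment before accounting for it.
    total_ += (size + alignment_ - 1) / alignment_ * alignment_;
  }

 private:
  std::int64_t alignment_;
  std::int64_t total_ = 0;
};

// Wrapper that owns another heap, mirroring the decorator shape above.
class SortingHeap : public Heap {
 public:
  explicit SortingHeap(std::unique_ptr<Heap> inner) : inner_(std::move(inner)) {}
  void Alloc(std::int64_t size) override { inner_->Alloc(size); }

 private:
  std::unique_ptr<Heap> inner_;
};

int main() {
  // The inner make_unique result is consumed directly by the outer
  // constructor, matching the nesting in the BufferAssigner hunks above.
  auto heap = absl::make_unique<SortingHeap>(
      absl::make_unique<BasicHeap>(/*alignment=*/64));
  heap->Alloc(128);
  return 0;
}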
@ -1646,7 +1647,8 @@ StatusOr<std::unique_ptr<BufferAssignment>> BufferAssigner::CreateAssignment(
XLA_VLOG_LINES(3, liveness->ToString());
XLA_VLOG_LINES(3, liveness->points_to_analysis().ToString());
// Can't use MakeUnique because BufferAssignment constructor is private.
// Can't use absl::make_unique because BufferAssignment constructor is
// private.
std::unique_ptr<BufferAssignment> assignment(
new BufferAssignment(module, std::move(liveness), std::move(buffer_size),
std::move(color_alignment)));
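
The comment above records the one case the substitution cannot cover: BufferAssignment keeps its constructor private, so the code spells out a unique_ptr around a plain new instead of calling absl::make_unique. A hedged illustration with hypothetical Plan/Planner classes, not the real types:

#include <memory>
#include <string>
#include <utility>

// Hypothetical Plan/Planner pair; stand-ins for BufferAssignment/BufferAssigner.
class Plan {
 private:
  explicit Plan(std::string name) : name_(std::move(name)) {}
  std::string name_;
  friend class Planner;  // Only Planner may construct a Plan.
};

class Planner {
 public:
  static std::unique_ptr<Plan> CreatePlan(std::string name) {
    // absl::make_unique<Plan>(...) would not compile: the constructor is
    // private and make_unique is not a friend, so the code falls back to a
    // plain new wrapped immediately in a unique_ptr.
    std::unique_ptr<Plan> plan(new Plan(std::move(name)));
    return plan;
  }
};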

View File

@ -21,8 +21,8 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/buffer_value.h"
#include "tensorflow/compiler/xla/service/call_graph.h"
#include "tensorflow/compiler/xla/service/copy_insertion.h"
@ -87,7 +87,7 @@ class BufferAssignmentTest : public HloTestBase {
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64 alignment = 1) {
return BufferAssigner::Run(
module, xla::MakeUnique<DependencyHloOrdering>(module),
module, absl::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@ -98,7 +98,7 @@ class BufferAssignmentTest : public HloTestBase {
std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersForConstants(
HloModule* module, int64 alignment = 1) {
return BufferAssigner::Run(
module, xla::MakeUnique<DependencyHloOrdering>(module),
module, absl::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@ -109,7 +109,7 @@ class BufferAssignmentTest : public HloTestBase {
std::unique_ptr<BufferAssignment> RunColoredBufferAssignment(
HloModule* module, BufferLiveness::Colorer colorer, int64 alignment = 1) {
return BufferAssigner::Run(
module, xla::MakeUnique<DependencyHloOrdering>(module),
module, absl::make_unique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@ -127,7 +127,8 @@ class BufferAssignmentTest : public HloTestBase {
instruction_sequence.end());
return BufferAssigner::Run(
module,
xla::MakeUnique<SequentialHloOrdering>(module, module_sequence),
absl::make_unique<SequentialHloOrdering>(module,
module_sequence),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@ -1769,7 +1770,8 @@ class WhileBufferAssignmentTest : public HloTestBase {
auto sequence =
ScheduleComputationsInModule(*module, ByteSizeOf).ConsumeValueOrDie();
return BufferAssigner::Run(
module, xla::MakeUnique<SequentialHloOrdering>(module, sequence),
module,
absl::make_unique<SequentialHloOrdering>(module, sequence),
ByteSizeOf,
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@ -2083,7 +2085,7 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
auto assignment,
BufferAssigner::Run(
module.get(),
xla::MakeUnique<SequentialHloOrdering>(module.get(), sequence),
absl::make_unique<SequentialHloOrdering>(module.get(), sequence),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return 1; },
/*allow_input_output_aliasing=*/false,
@ -2340,7 +2342,7 @@ TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
auto assignment =
BufferAssigner::Run(
module.get(),
xla::MakeUnique<SequentialHloOrdering>(module.get(), sequence),
absl::make_unique<SequentialHloOrdering>(module.get(), sequence),
ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
/*allow_input_output_aliasing=*/false,
/*allocate_buffers_for_constants=*/true)

View File

@ -18,7 +18,7 @@ limitations under the License.
#include <memory>
#include <string>
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@ -119,8 +119,8 @@ TEST_F(BufferLivenessTest, ElementwiseChain) {
module->AddEntryComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
EXPECT_FALSE(InstructionsMayInterfere(*liveness, param, negate));
@ -167,10 +167,10 @@ TEST_F(BufferLivenessTest, MultipleEntryParameters_Sequential) {
SequentialHloOrdering::HloModuleSequence sequence;
sequence.insert({entry, {param0, negate, param1, exp, add}});
auto liveness =
BufferLiveness::Run(module.get(), xla::MakeUnique<SequentialHloOrdering>(
module.get(), sequence))
.ConsumeValueOrDie();
auto liveness = BufferLiveness::Run(module.get(),
absl::make_unique<SequentialHloOrdering>(
module.get(), sequence))
.ConsumeValueOrDie();
// Entry parameters interfere as if they are defined simultaneously at
// the very beginning.
@ -215,8 +215,8 @@ TEST_F(BufferLivenessTest, NonElementwiseOperand) {
module->AddEntryComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
EXPECT_FALSE(InstructionsMayInterfere(*liveness, param, exp));
@ -249,8 +249,8 @@ TEST_F(BufferLivenessTest, OverlappedBuffers) {
module->AddEntryComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
EXPECT_TRUE(InstructionsMayInterfere(*liveness, param, negate));
@ -293,10 +293,10 @@ TEST_F(BufferLivenessTest, OverlappedBuffersSequentialOrder) {
SequentialHloOrdering::HloModuleSequence module_sequence;
std::vector<const HloInstruction*> order = {param, negate, exp, add};
module_sequence.emplace(computation, order);
auto liveness =
BufferLiveness::Run(module.get(), xla::MakeUnique<SequentialHloOrdering>(
module.get(), module_sequence))
.ConsumeValueOrDie();
auto liveness = BufferLiveness::Run(module.get(),
absl::make_unique<SequentialHloOrdering>(
module.get(), module_sequence))
.ConsumeValueOrDie();
EXPECT_TRUE(InstructionsMayInterfere(*liveness, param, negate));
EXPECT_FALSE(InstructionsMayInterfere(*liveness, param, exp));
@ -342,10 +342,10 @@ TEST_F(BufferLivenessTest, RootInstructionIsNotLastInSequentialOrder) {
std::vector<const HloInstruction*> order = {param, add, recv,
recv_done, send, send_done};
module_sequence.emplace(computation, order);
auto liveness =
BufferLiveness::Run(module.get(), xla::MakeUnique<SequentialHloOrdering>(
module.get(), module_sequence))
.ConsumeValueOrDie();
auto liveness = BufferLiveness::Run(module.get(),
absl::make_unique<SequentialHloOrdering>(
module.get(), module_sequence))
.ConsumeValueOrDie();
EXPECT_FALSE(InstructionsMayInterfere(*liveness, param, add));
// Check the root instruction (add) buffer interferes with the recv buffer.
@ -376,8 +376,8 @@ TEST_F(BufferLivenessTest, TupleLiveOut) {
module->AddEntryComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// All buffers should be live out except the param
@ -412,8 +412,8 @@ TEST_F(BufferLivenessTest, EmbeddedComputation) {
module->AddEntryComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// Buffers in different computations should always interfere.
@ -453,8 +453,8 @@ TEST_F(BufferLivenessTest, TupleConstantLiveOut) {
module->AddEntryComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// Only the element buffers of the tuple constant which are pointed to by
@ -518,8 +518,8 @@ TEST_F(BufferLivenessTest, IndependentTupleElements) {
module->AddEmbeddedComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// We compare tuple element pairs that are input/output to the computation:
@ -580,8 +580,8 @@ TEST_F(BufferLivenessTest, DependentTupleElements) {
module->AddEmbeddedComputation(builder.Build());
auto liveness =
BufferLiveness::Run(module.get(),
xla::MakeUnique<DependencyHloOrdering>(module.get()))
BufferLiveness::Run(
module.get(), absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// We compare tuple element pairs that are input/output to the computation:
@ -668,10 +668,10 @@ class FusedDynamicUpdateSliceLivenessTest : public BufferLivenessTest {
}
// Run BufferLiveness on 'module'.
auto liveness =
BufferLiveness::Run(
module.get(), xla::MakeUnique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
auto liveness = BufferLiveness::Run(
module.get(),
absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// Return whether or not buffers interference is detected between
// 'tuple_param0' and 'tuple_root' at shape index '{1}'.
return TupleElementsMayInterfere(*liveness, tuple_param0, tuple_root, {1});
@ -780,10 +780,10 @@ class DynamicUpdateSliceLivenessTest : public BufferLivenessTest {
module->AddEntryComputation(BuildDummyComputation());
module->AddEmbeddedComputation(builder.Build());
// Run BufferLiveness on 'module'.
auto liveness =
BufferLiveness::Run(
module.get(), xla::MakeUnique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
auto liveness = BufferLiveness::Run(
module.get(),
absl::make_unique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// Return whether or not buffers interference is detected between
// 'tuple_param0' and 'tuple_root' at shape index '{1}'.
return TupleElementsMayInterfere(*liveness, tuple_param0, tuple_root, {1});
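
Throughout this test file the ordering object is created inline in the call to BufferLiveness::Run, so the temporary unique_ptr's ownership moves straight into the callee's parameter. A minimal sketch of that call-site pattern, with an assumed RunAnalysis function and Ordering struct in place of the real API:

#include <memory>
#include <string>
#include <utility>

#include "absl/memory/memory.h"

// Hypothetical stand-in for an HloOrdering; illustration only.
struct Ordering {
  explicit Ordering(std::string label) : label(std::move(label)) {}
  std::string label;
};

// A callee that takes ownership through a by-value unique_ptr parameter.
std::string RunAnalysis(std::unique_ptr<Ordering> ordering) {
  return "analyzed with " + ordering->label;
}

int main() {
  // The temporary unique_ptr binds to the parameter, so ownership moves into
  // RunAnalysis with no named variable at the call site.
  std::string result =
      RunAnalysis(absl::make_unique<Ordering>("dependency ordering"));
  (void)result;
  return 0;
}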

View File

@ -17,8 +17,8 @@ limitations under the License.
#include <queue>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
@ -237,8 +237,8 @@ void CallGraph::SetCallContexts() {
/* static */
std::unique_ptr<CallGraph> CallGraph::Build(const HloModule* module) {
// Constructor for CallGraph is private so MakeUnique can't be used.
auto call_graph = WrapUnique<CallGraph>(new CallGraph(module));
// Constructor for CallGraph is private so absl::make_unique can't be used.
auto call_graph = absl::WrapUnique<CallGraph>(new CallGraph(module));
VLOG(2) << "Building call graph for:";
XLA_VLOG_LINES(2, module->ToString());
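
As with BufferAssignment earlier, CallGraph's constructor is private, so absl::make_unique cannot be used; here the already-constructed raw pointer is adopted with absl::WrapUnique instead of spelling out the unique_ptr type. A hedged sketch with a hypothetical Registry class, not code from this repository:

#include <memory>
#include <string>
#include <utility>

#include "absl/memory/memory.h"

// Hypothetical class with a private constructor, mirroring CallGraph's shape.
class Registry {
 public:
  static std::unique_ptr<Registry> Build(const std::string& name) {
    // absl::make_unique<Registry>(name) would not compile here: the
    // constructor is private and make_unique is not a friend. WrapUnique
    // simply takes ownership of an already-constructed raw pointer.
    return absl::WrapUnique(new Registry(name));
  }
  const std::string& name() const { return name_; }

 private:
  explicit Registry(std::string name) : name_(std::move(name)) {}
  std::string name_;
};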

View File

@ -18,9 +18,9 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"

View File

@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/channel_tracker.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/status.h"

View File

@ -19,8 +19,8 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/status_macros.h"
@ -60,8 +60,8 @@ DeviceAssignment::Deserialize(const DeviceAssignmentProto& proto) {
"computation_count=%d",
proto.replica_count(), proto.computation_count());
}
auto assignment = MakeUnique<DeviceAssignment>(proto.replica_count(),
proto.computation_count());
auto assignment = absl::make_unique<DeviceAssignment>(
proto.replica_count(), proto.computation_count());
for (int computation = 0; computation < proto.computation_count();
++computation) {
const auto& computation_device = proto.computation_devices(computation);
@ -156,7 +156,7 @@ ComputationPlacer::GetPlatformComputationPlacers() {
} // namespace xla
static std::unique_ptr<xla::ComputationPlacer> CreateComputationPlacer() {
return xla::MakeUnique<xla::ComputationPlacer>();
return absl::make_unique<xla::ComputationPlacer>();
}
static bool InitModule() {

View File

@ -18,9 +18,9 @@ limitations under the License.
#include <memory>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@ -214,7 +214,7 @@ Status ConvolutionVisitor::HandleConvolution(HloInstruction* convolution) {
expanded_filter = add(HloInstruction::CreateConcatenate(
expanded_filter_shape, concat_operands, input_feature_dim));
}
auto zero = add(HloInstruction::CreateConstant(MakeUnique<Literal>(
auto zero = add(HloInstruction::CreateConstant(absl::make_unique<Literal>(
LiteralUtil::Zero(expanded_filter_shape.element_type()))));
auto zero_filter =
add(HloInstruction::CreateBroadcast(expanded_filter_shape, zero, {}));

View File

@ -50,6 +50,7 @@ cc_library(
"//tensorflow/compiler/xla/service/cpu:cpu_runtime",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
alwayslink = True, # Contains per-platform transfer manager registration
)
@ -85,6 +86,7 @@ cc_library(
":ir_emitter",
":parallel_task_assignment",
":simple_orc_jit",
"@com_google_absl//absl/memory",
"//tensorflow/compiler/tf2xla:cpu_function_runtime",
"//tensorflow/compiler/xla/service:scatter_expander",
"//tensorflow/compiler/xla:literal",
@ -178,6 +180,7 @@ cc_library(
":runtime_single_threaded_conv2d",
":runtime_single_threaded_fft",
":runtime_single_threaded_matmul",
"@com_google_absl//absl/memory",
"@llvm//:execution_engine",
"@llvm//:core",
"@llvm//:mc", # fixdeps: keep
@ -418,6 +421,7 @@ cc_library(
"//tensorflow/compiler/xla/service:llvm_compiler",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@llvm//:analysis",
"@llvm//:core",
"@llvm//:ipo",
@ -634,6 +638,7 @@ tf_cc_test(
"//tensorflow/core:lib",
"//tensorflow/core:test",
"//third_party/eigen3",
"@com_google_absl//absl/memory",
],
)
@ -810,6 +815,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_cost_analysis",
"//tensorflow/compiler/xla/service:hlo_pass",
"@com_google_absl//absl/memory",
],
)

View File

@ -22,6 +22,7 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@ -35,7 +36,6 @@ limitations under the License.
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h"
#include "tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
@ -205,7 +205,7 @@ void CompilerFunctor::AddTargetInfoPasses(
llvm::legacy::PassManagerBase* passes) const {
llvm::Triple target_triple(target_machine_->getTargetTriple());
auto target_library_info_impl =
MakeUnique<llvm::TargetLibraryInfoImpl>(target_triple);
absl::make_unique<llvm::TargetLibraryInfoImpl>(target_triple);
target_library_info_impl->addVectorizableFunctions(
VectorFunctionsForTargetLibraryInfoImpl());
passes->add(

View File

@ -26,6 +26,7 @@ limitations under the License.
// IWYU pragma: no_include "llvm/Config/Disassemblers.def.inc"
// IWYU pragma: no_include "llvm/Config/Targets.def.inc"
#include "absl/memory/memory.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Function.h"
@ -42,7 +43,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/algebraic_simplifier.h"
#include "tensorflow/compiler/xla/service/batch_dot_simplification.h"
#include "tensorflow/compiler/xla/service/batchnorm_expander.h"
@ -453,7 +453,7 @@ Status CreateHloProfilingArtifacts(
computation_to_profile_idx,
std::unique_ptr<HloProfileIndexMap>* hlo_profile_index_map,
std::unique_ptr<HloProfilePrinterData>* hlo_profile_printer_data) {
*hlo_profile_index_map = MakeUnique<HloProfileIndexMap>(module);
*hlo_profile_index_map = absl::make_unique<HloProfileIndexMap>(module);
const HloComputation& entry_computation = *module.entry_computation();
TF_ASSIGN_OR_RETURN(
@ -520,11 +520,11 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
&pre_optimization_ir_hook, &post_optimization_ir_hook));
// Compile must be thread-safe so create a new LLVM context for the module.
auto llvm_context = xla::MakeUnique<llvm::LLVMContext>();
auto llvm_context = absl::make_unique<llvm::LLVMContext>();
auto llvm_module =
xla::MakeUnique<llvm::Module>("__compute_module", *llvm_context);
absl::make_unique<llvm::Module>("__compute_module", *llvm_context);
auto jit = xla::MakeUnique<SimpleOrcJIT>(
auto jit = absl::make_unique<SimpleOrcJIT>(
CompilerTargetOptions(module->config()),
CodeGenOptLevel(module->config()),
options::OptimizeForSizeRequested(module->config()),
@ -566,12 +566,12 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
// temporary buffers are required to run the computation.
TF_ASSIGN_OR_RETURN(
std::unique_ptr<BufferAssignment> assignment,
BufferAssigner::Run(
module.get(),
xla::MakeUnique<SequentialHloOrdering>(module.get(), module_sequence),
BufferSizeBytesFunction(), memory_alignment,
/*allow_input_output_aliasing=*/false,
/*allocate_buffers_for_constants=*/true));
BufferAssigner::Run(module.get(),
absl::make_unique<SequentialHloOrdering>(
module.get(), module_sequence),
BufferSizeBytesFunction(), memory_alignment,
/*allow_input_output_aliasing=*/false,
/*allocate_buffers_for_constants=*/true));
// BufferAssignment::ToString() includes a header, so no need for us to
// print one ourselves.
XLA_VLOG_LINES(2, assignment->ToString());
@ -716,7 +716,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
llvm::StringRef cpu_name = llvm_ir::AsStringRef(options.cpu_name());
llvm::StringRef features = llvm_ir::AsStringRef(options.features());
llvm::CodeGenOpt::Level opt_level = CodeGenOptLevel(modules[0]->config());
std::unique_ptr<llvm::TargetMachine> target_machine = WrapUnique(
std::unique_ptr<llvm::TargetMachine> target_machine = absl::WrapUnique(
target->createTargetMachine(triple.getTriple(), cpu_name, features,
CompilerTargetOptions(modules[0]->config()),
reloc_model, llvm::None, opt_level));
@ -757,7 +757,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
std::unique_ptr<BufferAssignment> assignment,
BufferAssigner::Run(
module,
xla::MakeUnique<SequentialHloOrdering>(module, module_sequence),
absl::make_unique<SequentialHloOrdering>(module, module_sequence),
BufferSizeBytesFunction(), memory_alignment,
/*allow_input_output_aliasing=*/false,
/*allocate_buffers_for_constants=*/true));
@ -851,7 +851,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice result_slice,
assignment->GetUniqueTopLevelOutputSlice());
results.emplace_back(MakeUnique<CpuAotCompilationResult>(
results.emplace_back(absl::make_unique<CpuAotCompilationResult>(
std::move(object_file_data), std::move(buffer_infos),
result_slice.index(), std::move(hlo_profile_printer_data)));
}
@ -874,7 +874,7 @@ HloCostAnalysis::ShapeSizeFunction CpuCompiler::ShapeSizeBytesFunction() const {
static bool InitModule() {
xla::Compiler::RegisterCompilerFactory(
stream_executor::host::kHostPlatformId,
[]() { return xla::MakeUnique<xla::cpu::CpuCompiler>(); });
[]() { return absl::make_unique<xla::cpu::CpuCompiler>(); });
return true;
}
static bool module_initialized = InitModule();

View File

@ -19,10 +19,10 @@ limitations under the License.
#include <string>
#include <tuple>
#include "absl/memory/memory.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_matmul.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h"
@ -46,7 +46,7 @@ std::unique_ptr<Array2D<float>> MaybeTransposeArray2D(const Array2D<T>& array,
if (transpose) {
std::swap(output_width, output_height);
}
auto output = MakeUnique<Array2D<float>>(output_height, output_width);
auto output = absl::make_unique<Array2D<float>>(output_height, output_width);
for (int y = 0; y < array.height(); y++) {
for (int x = 0; x < array.width(); x++) {
if (transpose) {
@ -93,7 +93,7 @@ std::unique_ptr<Array2D<float>> EigenMatrixMultiply(const Array2D<float>& a,
// Since we're going to transpose c before returning it. Swap the order of the
// dimension sizes to ensure the returned array is properly dimensioned.
auto c_transpose = MakeUnique<Array2D<float>>(n, m);
auto c_transpose = absl::make_unique<Array2D<float>>(n, m);
if (single_threaded) {
__xla_cpu_runtime_EigenSingleThreadedMatMulF32(
nullptr, c_transpose->data(), a_transpose->data(), b_transpose->data(),
@ -204,7 +204,7 @@ std::unique_ptr<Array2D<float>> MKLMatrixMultiply(const Array2D<float>& a,
// Since we're going to transpose c before returning it, swap the order of the
// dimension sizes to ensure the returned array is properly dimensioned.
auto c_transpose = MakeUnique<Array2D<float>>(n, m);
auto c_transpose = absl::make_unique<Array2D<float>>(n, m);
if (single_threaded) {
__xla_cpu_runtime_MKLSingleThreadedMatMulF32(
nullptr, c_transpose->data(), a_transpose->data(), b_transpose->data(),

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h"
@ -256,7 +257,7 @@ StatusOr<Shape> CpuTransferManager::TransferBuffersFromOutfeedInternal(
VLOG(2)
<< "Enqueueing outfeed buffer (for the device to populate) of length "
<< size_32 << "B";
buffers.emplace_back(MakeUnique<CpuOutfeedBuffer>(b.first, size_32));
buffers.emplace_back(absl::make_unique<CpuOutfeedBuffer>(b.first, size_32));
}
std::vector<cpu::runtime::XfeedBuffer*> buffer_pointers;
@ -283,7 +284,7 @@ StatusOr<Shape> CpuTransferManager::TransferBuffersFromOutfeedInternal(
} // namespace xla
static std::unique_ptr<xla::TransferManager> CreateCpuTransferManager() {
return xla::MakeUnique<xla::CpuTransferManager>();
return absl::make_unique<xla::CpuTransferManager>();
}
static bool InitModule() {
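
Several files in this change end with the same registration idiom: a file-local factory returning absl::make_unique<T>() is handed to a registry from a static initializer. The sketch below mimics that shape with a deliberately simplified registry; Widget, WidgetFactory, and Registry() are assumptions for illustration, not the real TransferManager registration API:

#include <functional>
#include <memory>
#include <vector>

#include "absl/memory/memory.h"

// Hypothetical product and registry; stand-ins for illustration only.
class Widget {};

using WidgetFactory = std::function<std::unique_ptr<Widget>()>;

std::vector<WidgetFactory>& Registry() {
  static std::vector<WidgetFactory> factories;
  return factories;
}

static std::unique_ptr<Widget> CreateWidget() {
  return absl::make_unique<Widget>();
}

// Registration happens as a side effect of initializing a file-scope static,
// mirroring the InitModule pattern above.
static bool InitModule() {
  Registry().push_back(&CreateWidget);
  return true;
}
static bool module_initialized = InitModule();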

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/cpu/parallel_task_assignment.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/cpu/dot_op_emitter.h"
#include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/cpu/shape_partition.h"
@ -109,7 +110,7 @@ ParallelTaskAssignment::ParallelTaskAssignment(
: target_machine_features_(*target_machine_features) {
VLOG(1) << "ParallelTaskAssignment max_parallelism: " << max_parallelism;
// Run cost analysis on 'module'.
auto cost_analysis = MakeUnique<HloCostAnalysis>(shape_size);
auto cost_analysis = absl::make_unique<HloCostAnalysis>(shape_size);
HloComputation* computation = module->entry_computation();
Status status = computation->root_instruction()->Accept(cost_analysis.get());
if (status.ok()) {

View File

@ -20,13 +20,13 @@ limitations under the License.
#include <list>
#include <utility>
#include "absl/memory/memory.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Host.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h"
#include "tensorflow/compiler/xla/service/cpu/custom_call_target_registry.h"
#include "tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h"

View File

@ -51,6 +51,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:literal_test_util",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)
@ -94,6 +95,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:filecheck",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
"@llvm//:core",
],
)

View File

@ -17,8 +17,8 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

View File

@ -16,9 +16,9 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "llvm/IR/Module.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/cpu/tests/cpu_codegen_test.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@ -62,7 +62,8 @@ TEST_F(CpuNoAliasTest, Concat) {
// Now that we have an HLO module, build an llvm_ir::AliasAnalysis for it.
auto status_or_buffer_assn = BufferAssigner::Run(
hlo_module.get(), MakeUnique<DependencyHloOrdering>(hlo_module.get()),
hlo_module.get(),
absl::make_unique<DependencyHloOrdering>(hlo_module.get()),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return /*alignment=*/1; });
ASSERT_EQ(status_or_buffer_assn.status(), Status::OK());

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/executable.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/status.h"
@ -76,8 +77,8 @@ StatusOr<ScopedShapedBuffer> Executable::ExecuteOnStreamWrapper(
std::unique_ptr<HloExecutionProfile> profile_ptr =
module_config().debug_options().xla_hlo_profile() &&
hlo_profiling_enabled()
? MakeUnique<HloExecutionProfile>(&hlo_profile_printer_data(),
&hlo_profile_index_map())
? absl::make_unique<HloExecutionProfile>(&hlo_profile_printer_data(),
&hlo_profile_index_map())
: nullptr;
StatusOr<ScopedShapedBuffer> return_value =

View File

@ -17,7 +17,7 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@ -53,8 +53,8 @@ ExecutionHandle ExecutionTracker::Register(Backend* backend,
tensorflow::mutex_lock lock(execution_mutex_);
int64 handle = next_handle_++;
auto inserted = handle_to_execution_.emplace(
handle,
MakeUnique<AsyncExecution>(backend, std::move(streams), profile, result));
handle, absl::make_unique<AsyncExecution>(backend, std::move(streams),
profile, result));
CHECK(inserted.second);
ExecutionHandle execution_handle;

View File

@ -56,6 +56,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -91,6 +92,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_reachability",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -107,6 +109,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -181,6 +184,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
"@llvm//:core",
"@llvm//:support",
],
@ -244,6 +248,7 @@ cc_library(
"//tensorflow/compiler/xla/service:device_memory_allocator",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -258,6 +263,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:ptr_util",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -338,6 +344,7 @@ cc_library(
"//tensorflow/core/platform/default/build_config:cufft_plugin",
"//tensorflow/core/platform/default/build_config:stream_executor_cuda", # build_cleaner: keep
"//tensorflow/stream_executor",
"@com_google_absl//absl/memory",
],
)
@ -547,6 +554,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo_creation_utils",
"//tensorflow/compiler/xla/service:hlo_pass",
"//tensorflow/compiler/xla/service:shape_inference",
"@com_google_absl//absl/memory",
],
)
@ -603,6 +611,7 @@ cc_library(
"//tensorflow/compiler/xla/service/gpu:infeed_manager",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@llvm//:core",
],
alwayslink = True, # Contains per-platform transfer manager registration
@ -673,6 +682,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"//tensorflow/core:regexp_internal",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@llvm//:core",
],
alwayslink = True, # Contains compiler registration
@ -705,8 +715,8 @@ cc_library(
":xfeed_queue",
"//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
],
)
@ -721,6 +731,7 @@ cc_library(
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -770,12 +781,12 @@ cc_library(
":stream_assignment",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla/service:buffer_value",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_ordering",
"//tensorflow/compiler/xla/service:hlo_reachability",
"//tensorflow/compiler/xla/service:hlo_scheduling",
"@com_google_absl//absl/memory",
],
)
@ -792,6 +803,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"@com_google_absl//absl/memory",
],
)

View File

@ -17,8 +17,8 @@ limitations under the License.
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_constants.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
@ -40,7 +40,7 @@ StatusOr<std::unique_ptr<BufferAllocations>> BufferAllocations::Builder::Build(
const BufferAssignment* buffer_assignment, int device_ordinal,
DeviceMemoryAllocator* memory_allocator) {
const int64 num_buffers = buffer_assignment->Allocations().size();
auto buffer_allocations = WrapUnique(new BufferAllocations(
auto buffer_allocations = absl::WrapUnique(new BufferAllocations(
num_buffers, device_ordinal, memory_allocator, buffer_assignment));
for (BufferAllocation::Index i = 0; i < num_buffers; ++i) {

View File

@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/conditional_thunk.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"

View File

@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/for_thunk.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
@ -28,7 +28,7 @@ ForThunk::ForThunk(const int64 loop_limit,
const HloInstruction* hlo)
: Thunk(Kind::kWhile, hlo),
loop_limit_(loop_limit),
body_thunk_sequence_(MakeUnique<SequentialThunk>(
body_thunk_sequence_(absl::make_unique<SequentialThunk>(
// Pass nullptr as the HloInstruction* to the body_thunk_sequence_
// constructor because this SequentialThunk is logically "part of"
// this ForThunk, and shouldn't be profiled separately from it.

View File

@ -19,8 +19,8 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@ -144,7 +144,7 @@ Status GpuExecutable::ExecuteThunks(
TF_RETURN_IF_ERROR(
thunk->ExecuteOnStream(buffer_allocations, stream, &profiler));
if (thunk_schedule_->Depended(thunk)) {
auto finish_event = MakeUnique<se::Event>(main_stream->parent());
auto finish_event = absl::make_unique<se::Event>(main_stream->parent());
finish_event->Init();
stream->ThenRecordEvent(finish_event.get());
thunk_to_finish_event[thunk] = std::move(finish_event);

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "llvm/IR/DataLayout.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
@ -160,9 +161,10 @@ Status GpuTransferManager::TransferLiteralFromOutfeed(
if (ShapeUtil::IsTuple(shape)) {
return;
}
*buffer = MakeUnique<gpu::OutfeedBuffer>(GetByteSizeRequirement(shape));
*buffer = absl::make_unique<gpu::OutfeedBuffer>(
GetByteSizeRequirement(shape));
(*buffer)->set_destination(
MakeUnique<MutableBorrowingLiteral>(literal, index));
absl::make_unique<MutableBorrowingLiteral>(literal, index));
});
// Give the tree of buffers to the outfeed manager. The device will fill it
@ -179,7 +181,7 @@ Status GpuTransferManager::TransferLiteralFromOutfeed(
} // namespace xla
static std::unique_ptr<xla::TransferManager> CreateNVPTXTransferManager() {
return xla::MakeUnique<xla::gpu::GpuTransferManager>(
return absl::make_unique<xla::gpu::GpuTransferManager>(
/*id=*/stream_executor::cuda::kCudaPlatformId,
/*pointer_size=*/llvm::DataLayout(xla::gpu::NVPTXCompiler::kDataLayout)
.getPointerSize(0 /* default address space */));

View File

@ -20,6 +20,7 @@ limitations under the License.
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_execution_profile.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@ -33,7 +34,7 @@ namespace gpu {
namespace {
void InitAndStartTimer(std::stack<std::unique_ptr<se::Timer>>* timers,
se::Stream* stream) {
timers->push(MakeUnique<se::Timer>(stream->parent()));
timers->push(absl::make_unique<se::Timer>(stream->parent()));
stream->InitTimer(timers->top().get()).ThenStartTimer(timers->top().get());
}
@ -115,7 +116,7 @@ HloExecutionProfiler::MakeScopedInstructionProfiler(
CHECK(hlo_instructions_.insert(hlo_instruction).second)
<< hlo_instruction->name();
}
return MakeUnique<ScopedInstructionProfiler>(this, hlo_instruction);
return absl::make_unique<ScopedInstructionProfiler>(this, hlo_instruction);
}
} // namespace gpu

View File

@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/hlo_schedule.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/buffer_value.h"
#include "tensorflow/compiler/xla/service/hlo_reachability.h"
#include "tensorflow/compiler/xla/service/hlo_scheduling.h"
@ -59,8 +59,8 @@ GpuHloOrdering::GpuHloOrdering(
: PredecessorHloOrdering(module) {
// The entry computation has a total order when there's only one stream.
if (stream_assignment.StreamCount() == 1) {
entry_sequence_ =
MakeUnique<std::vector<const HloInstruction*>>(thunk_launch_order);
entry_sequence_ = absl::make_unique<std::vector<const HloInstruction*>>(
thunk_launch_order);
}
// The ordering of instructions for the entry computation is determined by the
@ -75,7 +75,7 @@ GpuHloOrdering::GpuHloOrdering(
// same-stream predecessors of each instruction.
// Compute the set of all instructions we will want to set reachability on.
auto predecessor_map = MakeUnique<HloReachabilityMap>(
auto predecessor_map = absl::make_unique<HloReachabilityMap>(
module->entry_computation()->MakeInstructionPostOrder());
// The most recently visited instruction per stream.
@ -208,7 +208,7 @@ StatusOr<std::unique_ptr<HloSchedule>> HloSchedule::Build(
BFSLaunchOrder(entry_computation, &schedule->thunk_launch_order_);
}
schedule->hlo_ordering_ = MakeUnique<GpuHloOrdering>(
schedule->hlo_ordering_ = absl::make_unique<GpuHloOrdering>(
&module, stream_assignment, schedule->thunk_launch_order_);
return std::move(schedule);

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <algorithm>
#include <unordered_set>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@ -47,7 +48,7 @@ class HloScheduleTest : public HloTestBase {
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_disable_multi_streaming(false);
config.set_debug_options(debug_options);
return MakeUnique<HloModule>("test_module", config);
return absl::make_unique<HloModule>("test_module", config);
}
HloVec RemoveHlo(const HloVec& input,

View File

@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
namespace xla {
namespace gpu {
@ -24,7 +24,7 @@ se::Stream* InfeedManager::GetStream(se::StreamExecutor* executor) {
tensorflow::mutex_lock l(host_to_device_stream_mu_);
if (host_to_device_executor_ == nullptr) {
host_to_device_executor_ = executor;
host_to_device_stream_ = MakeUnique<se::Stream>(executor);
host_to_device_stream_ = absl::make_unique<se::Stream>(executor);
host_to_device_stream_->Init();
}

View File

@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h"
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
@ -30,7 +31,6 @@ limitations under the License.
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor.h"
#include "tensorflow/compiler/xla/service/gpu/backend_configs.pb.h"
@ -384,7 +384,7 @@ Status IrEmitterUnnested::HandleCustomCall(HloInstruction* custom_call) {
int64 feature_index_value = feature_index->literal().Get<int64>({});
thunk_sequence_->emplace_back(
MakeUnique<CudnnBatchNormForwardInferenceThunk>(
absl::make_unique<CudnnBatchNormForwardInferenceThunk>(
/*operand=*/GetAllocationSlice(*custom_call->operand(0)),
/*scale=*/GetAllocationSlice(*custom_call->operand(1)),
/*offset=*/GetAllocationSlice(*custom_call->operand(2)),
@ -414,7 +414,7 @@ Status IrEmitterUnnested::HandleCustomCall(HloInstruction* custom_call) {
auto output_mean = assn.GetUniqueSlice(custom_call, {1}).ValueOrDie();
auto output_inv_stddev = assn.GetUniqueSlice(custom_call, {2}).ValueOrDie();
thunk_sequence_->emplace_back(
MakeUnique<CudnnBatchNormForwardTrainingThunk>(
absl::make_unique<CudnnBatchNormForwardTrainingThunk>(
/*operand=*/GetAllocationSlice(*custom_call->operand(0)),
/*scale=*/GetAllocationSlice(*custom_call->operand(1)),
/*offset=*/GetAllocationSlice(*custom_call->operand(2)),
@ -444,19 +444,20 @@ Status IrEmitterUnnested::HandleCustomCall(HloInstruction* custom_call) {
auto output_grad_scale = assn.GetUniqueSlice(custom_call, {1}).ValueOrDie();
auto output_grad_offset =
assn.GetUniqueSlice(custom_call, {2}).ValueOrDie();
thunk_sequence_->emplace_back(MakeUnique<CudnnBatchNormBackwardThunk>(
/*operand=*/GetAllocationSlice(*custom_call->operand(0)),
/*scale=*/GetAllocationSlice(*custom_call->operand(1)),
/*mean=*/GetAllocationSlice(*custom_call->operand(2)),
/*inv_stddev=*/GetAllocationSlice(*custom_call->operand(3)),
/*grad_output=*/GetAllocationSlice(*custom_call->operand(4)),
/*epsilon=*/epsilon_value,
/*feature_index=*/feature_index_value,
/*output_grad_data=*/output_grad_data,
/*output_grad_scale=*/output_grad_scale,
/*output_grad_offset=*/output_grad_offset,
/*output_tuple=*/GetAllocationSlice(*custom_call),
/*hlo=*/custom_call));
thunk_sequence_->emplace_back(
absl::make_unique<CudnnBatchNormBackwardThunk>(
/*operand=*/GetAllocationSlice(*custom_call->operand(0)),
/*scale=*/GetAllocationSlice(*custom_call->operand(1)),
/*mean=*/GetAllocationSlice(*custom_call->operand(2)),
/*inv_stddev=*/GetAllocationSlice(*custom_call->operand(3)),
/*grad_output=*/GetAllocationSlice(*custom_call->operand(4)),
/*epsilon=*/epsilon_value,
/*feature_index=*/feature_index_value,
/*output_grad_data=*/output_grad_data,
/*output_grad_scale=*/output_grad_scale,
/*output_grad_offset=*/output_grad_offset,
/*output_tuple=*/GetAllocationSlice(*custom_call),
/*hlo=*/custom_call));
return Status::OK();
}
@ -476,7 +477,7 @@ Status IrEmitterUnnested::HandleCustomCall(HloInstruction* custom_call) {
const auto& target = custom_call->custom_call_target();
std::unique_ptr<ConvolutionThunk> thunk;
if (target == kCudnnConvForwardCallTarget) {
thunk = MakeUnique<ConvolutionThunk>(
thunk = absl::make_unique<ConvolutionThunk>(
CudnnConvKind::kForward,
/*input_buffer=*/lhs_slice,
/*filter_buffer=*/rhs_slice,
@ -490,7 +491,7 @@ Status IrEmitterUnnested::HandleCustomCall(HloInstruction* custom_call) {
backend_config.algorithm(), backend_config.tensor_ops_enabled(),
custom_call);
} else if (target == kCudnnConvBackwardInputCallTarget) {
thunk = MakeUnique<ConvolutionThunk>(
thunk = absl::make_unique<ConvolutionThunk>(
CudnnConvKind::kBackwardInput,
/*input_buffer=*/conv_result_slice,
/*filter_buffer=*/rhs_slice,
@ -504,7 +505,7 @@ Status IrEmitterUnnested::HandleCustomCall(HloInstruction* custom_call) {
backend_config.algorithm(), backend_config.tensor_ops_enabled(),
custom_call);
} else if (target == kCudnnConvBackwardFilterCallTarget) {
thunk = MakeUnique<ConvolutionThunk>(
thunk = absl::make_unique<ConvolutionThunk>(
CudnnConvKind::kBackwardFilter,
/*input_buffer=*/lhs_slice,
/*filter_buffer=*/conv_result_slice,
@ -577,7 +578,7 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
thunks.push_back(
BuildKernelThunk(fusion, /*implements_whole_instruction=*/false));
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), fusion));
absl::make_unique<SequentialThunk>(std::move(thunks), fusion));
std::vector<IrArray> parameter_arrays;
for (HloInstruction* operand : fusion->operands()) {
parameter_arrays.push_back(GetIrArray(*operand, *fusion));
@ -1719,7 +1720,7 @@ Status IrEmitterUnnested::HandleReduce(HloInstruction* reduce) {
thunks.push_back(
BuildKernelThunk(reduce, /*implements_whole_instruction=*/false));
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), reduce));
absl::make_unique<SequentialThunk>(std::move(thunks), reduce));
return EmitReductionToVector(
reduce, input->shape(), {[&](const IrArray::Index& index) {
@ -1761,7 +1762,7 @@ Status IrEmitterUnnested::HandleTuple(HloInstruction* tuple) {
for (const HloInstruction* tuple_element : tuple->operands()) {
tuple_element_buffers.push_back(GetAllocationSlice(*tuple_element));
}
thunk_sequence_->emplace_back(MakeUnique<TupleThunk>(
thunk_sequence_->emplace_back(absl::make_unique<TupleThunk>(
tuple_element_buffers, GetAllocationSlice(*tuple), tuple));
return Status::OK();
}
@ -1793,8 +1794,8 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
thunks.push_back(std::move(initializer_thunk));
thunks.push_back(BuildKernelThunk(select_and_scatter,
/*implements_whole_instruction=*/false));
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), select_and_scatter));
thunk_sequence_->emplace_back(absl::make_unique<SequentialThunk>(
std::move(thunks), select_and_scatter));
// TODO(b/31410564): Implement dilation rate for select-and-scatter.
if (window_util::HasDilation(window)) {
@ -2019,7 +2020,7 @@ Status IrEmitterUnnested::HandleRng(HloInstruction* rng) {
thunks.push_back(std::move(rng_thunk));
thunks.push_back(std::move(increment_seed_thunk));
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), rng));
absl::make_unique<SequentialThunk>(std::move(thunks), rng));
return Status::OK();
}
@ -2044,7 +2045,7 @@ Status IrEmitterUnnested::HandleSort(HloInstruction* sort) {
auto values_destination = GetAllocationSlice(*sort, values_shape_index);
if (keys_destination != GetAllocationSlice(*keys)) {
thunks.push_back(MakeUnique<DeviceToDeviceCopyThunk>(
thunks.push_back(absl::make_unique<DeviceToDeviceCopyThunk>(
/*source_address=*/GetAllocationSlice(*keys),
/*destination_buffer=*/keys_destination,
/*mem_size=*/ShapeUtil::ByteSizeOf(keys->shape()), nullptr));
@ -2052,7 +2053,7 @@ Status IrEmitterUnnested::HandleSort(HloInstruction* sort) {
if (values != nullptr && values_destination != GetAllocationSlice(*values)) {
// TODO(b/26783907): Figure out why we never seem to share buffers for
// key/value sort.
thunks.push_back(MakeUnique<DeviceToDeviceCopyThunk>(
thunks.push_back(absl::make_unique<DeviceToDeviceCopyThunk>(
/*source_address=*/GetAllocationSlice(*values),
/*destination_buffer=*/values_destination,
/*mem_size=*/ShapeUtil::ByteSizeOf(values->shape()), nullptr));
@ -2104,7 +2105,7 @@ Status IrEmitterUnnested::HandleSort(HloInstruction* sort) {
}
thunk_sequence_->emplace_back(
MakeUnique<SequentialThunk>(std::move(thunks), sort));
absl::make_unique<SequentialThunk>(std::move(thunks), sort));
return Status::OK();
}
@ -2131,7 +2132,7 @@ Status IrEmitterUnnested::HandleCrossReplicaSum(HloInstruction* crs) {
if (crs->operand_count() == 1) {
CHECK(ShapeUtil::IsArray(crs->operand(0)->shape()))
<< "Operands to cross-replica-sum must be arrays: " << crs->ToString();
thunk_sequence_->push_back(MakeUnique<DeviceToDeviceCopyThunk>(
thunk_sequence_->push_back(absl::make_unique<DeviceToDeviceCopyThunk>(
/*source_address=*/GetAllocationSlice(*crs->operand(0)),
/*destination_buffer=*/GetAllocationSlice(*crs),
/*mem_size=*/ShapeUtil::ByteSizeOf(crs->shape()), crs));
@ -2146,17 +2147,17 @@ Status IrEmitterUnnested::HandleCrossReplicaSum(HloInstruction* crs) {
tuple_element_buffers.push_back(ir_emitter_context_->buffer_assignment()
.GetUniqueSlice(crs, {i})
.ValueOrDie());
thunks.push_back(MakeUnique<DeviceToDeviceCopyThunk>(
thunks.push_back(absl::make_unique<DeviceToDeviceCopyThunk>(
/*source_address=*/GetAllocationSlice(*crs->operand(i)),
/*destination_buffer=*/tuple_element_buffers.back(),
/*mem_size=*/ShapeUtil::ByteSizeOf(crs->operand(i)->shape()), nullptr));
}
// Output a tuple of the buffers above.
thunks.push_back(MakeUnique<TupleThunk>(tuple_element_buffers,
GetAllocationSlice(*crs), nullptr));
thunks.push_back(absl::make_unique<TupleThunk>(
tuple_element_buffers, GetAllocationSlice(*crs), nullptr));
thunk_sequence_->push_back(
MakeUnique<SequentialThunk>(std::move(thunks), crs));
absl::make_unique<SequentialThunk>(std::move(thunks), crs));
return Status::OK();
}
@ -2390,7 +2391,7 @@ std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
llvm::ConstantPointerNull::get(b_.getInt8PtrTy()));
}
return MakeUnique<KernelThunk>(
return absl::make_unique<KernelThunk>(
non_constant_buffers, llvm_ir::AsString(kernel->getName()),
implements_whole_instruction ? inst : nullptr, unroll_factor);
}
@ -2399,7 +2400,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildHostToDeviceCopyThunk(
const HloInstruction* inst) {
const HloInstruction* operand = inst->operand(0);
CHECK_EQ(HloOpcode::kConstant, operand->opcode());
return MakeUnique<HostToDeviceCopyThunk>(
return absl::make_unique<HostToDeviceCopyThunk>(
/*source_address=*/operand->literal().untyped_data(),
/*destination_buffer=*/GetAllocationSlice(*inst),
/*mem_size=*/
@ -2411,7 +2412,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildHostToDeviceCopyThunk(
std::unique_ptr<Thunk> IrEmitterUnnested::BuildDeviceToDeviceCopyThunk(
const HloInstruction* inst) {
const HloInstruction* operand = inst->operand(0);
return MakeUnique<DeviceToDeviceCopyThunk>(
return absl::make_unique<DeviceToDeviceCopyThunk>(
/*source_address=*/GetAllocationSlice(*operand),
/*destination_buffer=*/GetAllocationSlice(*inst),
/*mem_size=*/
@ -2431,7 +2432,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildInfeedThunk(
.GetUniqueSlice(inst, index)
.ConsumeValueOrDie();
});
return MakeUnique<InfeedThunk>(slices, inst);
return absl::make_unique<InfeedThunk>(slices, inst);
}
std::unique_ptr<Thunk> IrEmitterUnnested::BuildOutfeedThunk(
@ -2448,7 +2449,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildOutfeedThunk(
*slice = status_or_slice.ConsumeValueOrDie();
}
});
return MakeUnique<OutfeedThunk>(std::move(slices), inst);
return absl::make_unique<OutfeedThunk>(std::move(slices), inst);
}
namespace {
@ -2471,7 +2472,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildGemmThunk(
if (inst->opcode() == HloOpcode::kDot) {
const HloInstruction* lhs = inst->operand(0);
const HloInstruction* rhs = inst->operand(1);
return MakeUnique<GemmThunk>(
return absl::make_unique<GemmThunk>(
GetAllocationSlice(*lhs), // The buffer assigned to LHS.
GetAllocationSlice(*rhs), // The buffer assigned to RHS.
GetAllocationSlice(*inst), // The output buffer.
@ -2513,7 +2514,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildGemmThunk(
const HloInstruction* rhs =
inst->operand(rhs_parameter->parameter_number());
return MakeUnique<GemmThunk>(
return absl::make_unique<GemmThunk>(
GetAllocationSlice(*lhs), // The buffer assigned to LHS.
GetAllocationSlice(*rhs), // The buffer assigned to RHS.
GetAllocationSlice(*inst), // The output buffer.
@ -2530,11 +2531,12 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildGemmThunk(
std::unique_ptr<Thunk> IrEmitterUnnested::BuildFftThunk(
const HloInstruction* inst) {
const HloInstruction* operand = inst->operand(0);
return MakeUnique<FftThunk>(inst->fft_type(), inst->fft_length(),
/*input_buffer=*/GetAllocationSlice(*operand),
/*output_buffer=*/GetAllocationSlice(*inst),
/*input_shape=*/operand->shape(),
/*output_shape=*/inst->shape(), inst);
return absl::make_unique<FftThunk>(
inst->fft_type(), inst->fft_length(),
/*input_buffer=*/GetAllocationSlice(*operand),
/*output_buffer=*/GetAllocationSlice(*inst),
/*input_shape=*/operand->shape(),
/*output_shape=*/inst->shape(), inst);
}
StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
@ -2584,8 +2586,8 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
ArraySlice<uint8> literal_bytes(
reinterpret_cast<const uint8*>(literal.untyped_data()), num_bytes);
if (absl::c_all_of(literal_bytes, [](uint8 byte) { return byte == 0; })) {
return {
MakeUnique<MemzeroThunk>(GetAllocationSlice(*hlo, index), nullptr)};
return {absl::make_unique<MemzeroThunk>(GetAllocationSlice(*hlo, index),
nullptr)};
}
// If the literal is 8 or 16 bits wide, we can emit a 32-bit memset by
@ -2602,7 +2604,7 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
memcpy(&pattern16, literal_bytes.data(), sizeof(pattern16));
}
uint32 pattern32 = uint32{pattern16} | (uint32{pattern16} << 16);
return {MakeUnique<Memset32BitValueThunk>(
return {absl::make_unique<Memset32BitValueThunk>(
pattern32, GetAllocationSlice(*hlo, index), nullptr)};
}
@ -2613,7 +2615,7 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
literal_bytes.size() - 4) == 0) {
uint32 word;
memcpy(&word, literal_bytes.data(), sizeof(word));
return {MakeUnique<Memset32BitValueThunk>(
return {absl::make_unique<Memset32BitValueThunk>(
word, GetAllocationSlice(*hlo, index), nullptr)};
}
}
@ -2765,7 +2767,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildWhileThunk(
ir_emitter_context_);
TF_CHECK_OK(body->Accept(&ir_emitter_body));
return MakeUnique<WhileThunk>(
return absl::make_unique<WhileThunk>(
GetAllocationSlice(*condition->root_instruction()), // cond result
ir_emitter_condition.ConsumeThunkSequence(),
ir_emitter_body.ConsumeThunkSequence(), hlo);
@ -2783,8 +2785,8 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildForThunk(
ir_emitter_context_);
TF_CHECK_OK(body->Accept(&ir_emitter_body));
return MakeUnique<ForThunk>(loop_limit,
ir_emitter_body.ConsumeThunkSequence(), hlo);
return absl::make_unique<ForThunk>(
loop_limit, ir_emitter_body.ConsumeThunkSequence(), hlo);
}
std::unique_ptr<Thunk> IrEmitterUnnested::BuildConditionalThunk(
@ -2804,7 +2806,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildConditionalThunk(
ir_emitter_context_);
TF_CHECK_OK(false_computation->Accept(&ir_emitter_false));
return MakeUnique<ConditionalThunk>(
return absl::make_unique<ConditionalThunk>(
GetAllocationSlice(*hlo->operand(0)),
GetAllocationSlice(*hlo->operand(1)),
GetAllocationSlice(*hlo->operand(2)),

View File

@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/kernel_thunk.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
@ -95,7 +95,7 @@ Status KernelThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
VLOG(3) << "Launching " << kernel->name();
// Launch the kernel with potentially multiple blocks and threads.
static constexpr int kKernelArgsLimit = 1024;
auto kernel_args = MakeUnique<se::KernelArgsArray<kKernelArgsLimit>>();
auto kernel_args = absl::make_unique<se::KernelArgsArray<kKernelArgsLimit>>();
for (const BufferAllocation* arg : args_) {
const auto& buf = buffer_allocations.GetDeviceAddress(arg->index());
kernel_args->add_device_memory_argument(buf);

View File

@ -34,6 +34,7 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
"@llvm//:amdgpu_code_gen",
"@llvm//:analysis",
"@llvm//:bit_reader",

View File

@ -20,7 +20,7 @@ limitations under the License.
#include <string>
#include <utility>
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.h"
#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/utils.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
@ -205,7 +205,7 @@ std::unique_ptr<llvm::TargetMachine> GetTargetMachine(
default:
codegen_opt_level = CodeGenOpt::None;
}
return WrapUnique(target->createTargetMachine(
return absl::WrapUnique(target->createTargetMachine(
triple.str(), llvm_ir::AsStringRef(cpu_name), "+ptx60", target_options,
Optional<Reloc::Model>(RelocModel), Optional<CodeModel::Model>(CMModel),
codegen_opt_level));

View File

@ -21,13 +21,13 @@ limitations under the License.
#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
#include <utility>
#include "absl/memory/memory.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/algebraic_simplifier.h"
#include "tensorflow/compiler/xla/service/batchnorm_expander.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
@ -690,7 +690,7 @@ StatusOr<std::unique_ptr<Executable>> NVPTXCompiler::RunBackend(
const std::vector<uint8> cubin =
CompilePtxOrGetCachedResult(ptx, cc_major, cc_minor);
auto thunk_schedule = MakeUnique<ThunkSchedule>(
auto thunk_schedule = absl::make_unique<ThunkSchedule>(
ir_emitter.ConsumeThunkSequence(), std::move(stream_assignment),
hlo_schedule->ThunkLaunchOrder());
VLOG(2) << "Printing the thunk schedule...";
@ -704,7 +704,7 @@ StatusOr<std::unique_ptr<Executable>> NVPTXCompiler::RunBackend(
cost_analysis.set_bytes_per_second(
stream_exec->GetDeviceDescription().memory_bandwidth());
TF_RETURN_IF_ERROR(module->entry_computation()->Accept(&cost_analysis));
profile_index_map = MakeUnique<HloProfileIndexMap>(*module);
profile_index_map = absl::make_unique<HloProfileIndexMap>(*module);
profile_printer =
CreateHloProfilePrinterData(*profile_index_map, cost_analysis);
}
@ -813,7 +813,7 @@ se::Platform::Id NVPTXCompiler::PlatformId() const {
static bool InitModule() {
xla::Compiler::RegisterCompilerFactory(
stream_executor::cuda::kCudaPlatformId,
[]() { return xla::MakeUnique<xla::gpu::NVPTXCompiler>(); });
[]() { return absl::make_unique<xla::gpu::NVPTXCompiler>(); });
return true;
}
static bool module_initialized = InitModule();

View File

@ -15,8 +15,8 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/outfeed_manager.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/platform/logging.h"

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/pad_insertion.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
@ -69,7 +70,7 @@ HloInstruction* MaybePaddedAndSlicedInput(
PrimitiveType element_type = input->shape().element_type();
HloInstruction* padding =
computation->AddInstruction(HloInstruction::CreateConstant(
MakeUnique<Literal>(LiteralUtil::Zero(element_type))));
absl::make_unique<Literal>(LiteralUtil::Zero(element_type))));
input = MakePadHlo(input, padding, padding_config).ValueOrDie();
}
@ -126,7 +127,7 @@ HloInstruction* MaybePaddedKernel(const Window& conv_window,
PrimitiveType element_type = kernel->shape().element_type();
HloInstruction* padding =
computation->AddInstruction(HloInstruction::CreateConstant(
MakeUnique<Literal>(LiteralUtil::Zero(element_type))));
absl::make_unique<Literal>(LiteralUtil::Zero(element_type))));
return MakePadHlo(kernel, padding, padding_config).ValueOrDie();
}
} // namespace
@ -236,7 +237,7 @@ bool PadInsertion::CanonicalizeBackwardFilterConvolution(
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(1);
HloInstruction* padding = computation->AddInstruction(
HloInstruction::CreateConstant(MakeUnique<Literal>(
HloInstruction::CreateConstant(absl::make_unique<Literal>(
LiteralUtil::Zero(input->shape().element_type()))));
HloInstruction* padded_input =
MakePadHlo(input, padding, input_padding_config).ValueOrDie();

View File

@ -18,8 +18,8 @@ limitations under the License.
#include <ostream>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/shape_util.h"

View File

@ -15,8 +15,8 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_reachability.h"
@ -119,7 +119,7 @@ int ComputeStreamToAssign(
} // namespace
std::unique_ptr<StreamAssignment> AssignStreams(const HloModule& module) {
auto stream_assignment = MakeUnique<StreamAssignment>();
auto stream_assignment = absl::make_unique<StreamAssignment>();
const HloComputation& computation = *module.entry_computation();
std::unique_ptr<HloReachabilityMap> reachability =
computation.ComputeReachability();

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@ -33,7 +34,7 @@ class StreamAssignmentTest : public HloTestBase {
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_disable_multi_streaming(false);
config.set_debug_options(debug_options);
return MakeUnique<HloModule>("test_module", config);
return absl::make_unique<HloModule>("test_module", config);
}
// Pre-canned shapes.

View File

@ -35,13 +35,13 @@ cc_library(
"requires-gpu-sm35",
],
deps = [
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla/legacy_flags:debug_options_flags",
"//tensorflow/compiler/xla/service:gpu_plugin",
"//tensorflow/compiler/xla/service/gpu:gpu_executable",
"//tensorflow/compiler/xla/tests:filecheck",
"//tensorflow/compiler/xla/tests:llvm_irgen_test_base",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
)
@ -60,6 +60,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)
@ -94,6 +95,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)
@ -150,6 +152,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)
@ -168,6 +171,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/memory",
],
)

View File

@ -14,8 +14,8 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
#include "tensorflow/compiler/xla/tests/filecheck.h"
#include "tensorflow/core/platform/logging.h"
@ -32,7 +32,7 @@ std::unique_ptr<HloModule> GpuCodegenTest::CreateNewModuleWithFTZ(bool ftz) {
debug_options.add_xla_disable_hlo_passes("constant_folding");
config.set_debug_options(debug_options);
return MakeUnique<HloModule>(TestName(), config);
return absl::make_unique<HloModule>(TestName(), config);
}
void GpuCodegenTest::CompileAndVerifyPtx(std::unique_ptr<HloModule> hlo_module,

View File

@ -16,9 +16,9 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

View File

@ -16,8 +16,8 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

View File

@ -20,8 +20,8 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

View File

@ -16,8 +16,8 @@ limitations under the License.
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/tuple_thunk.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
@ -25,7 +26,7 @@ Status TupleThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
se::Stream* stream,
HloExecutionProfiler* profiler) {
auto size = tuple_element_buffers_.size();
auto tuple_element_buffer_addresses = MakeUnique<void*[]>(size);
auto tuple_element_buffer_addresses = absl::make_unique<void*[]>(size);
for (int i = 0; i != size; ++i) {
tuple_element_buffer_addresses[i] =
buffer_allocations.GetDeviceAddress(tuple_element_buffers_[i]).opaque();

View File

@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/while_thunk.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
@ -34,9 +34,9 @@ WhileThunk::WhileThunk(
// and body_thunk_sequence_ constructors because these SequentialThunks
// are logically "part of" this WhileThunk, and shouldn't be profiled
// separately from it.
condition_thunk_sequence_(MakeUnique<SequentialThunk>(
condition_thunk_sequence_(absl::make_unique<SequentialThunk>(
std::move(*condition_thunk_sequence), nullptr)),
body_thunk_sequence_(MakeUnique<SequentialThunk>(
body_thunk_sequence_(absl::make_unique<SequentialThunk>(
std::move(*body_thunk_sequence), nullptr)) {}
Status WhileThunk::Initialize(const GpuExecutable& executable,

View File

@ -22,9 +22,9 @@ limitations under the License.
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@ -84,7 +84,7 @@ HloComputation* CallForwardingComputation(HloComputation* computation,
// the module.
std::unique_ptr<HloModule> MakeBigGraph() {
HloModuleConfig config;
auto module = MakeUnique<HloModule>("BigGraph", config);
auto module = absl::make_unique<HloModule>("BigGraph", config);
auto builder = HloComputation::Builder("TestBigGraphvizGraph");

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <algorithm>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/util.h"
@ -45,7 +46,7 @@ StatusOr<int64> HeapSimulator::MinimumMemoryForModule(
// bound, by minimizing the liveness of sub-computations.
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result result,
HeapSimulator::Run(MakeUnique<NoFragmentationStatsHeap>(), *module,
HeapSimulator::Run(absl::make_unique<NoFragmentationStatsHeap>(), *module,
module_sequence, *points_to_analysis, size_function));
return result.heap_size;
}
@ -60,9 +61,10 @@ StatusOr<int64> HeapSimulator::MinimumMemoryForComputation(
memory_by_computation) {
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result result,
HeapSimulator::Run(MakeUnique<NoFragmentationStatsHeap>(), computation,
sequence, points_to_analysis, size_function,
HeapSimulator::Options(), memory_by_computation));
HeapSimulator::Run(absl::make_unique<NoFragmentationStatsHeap>(),
computation, sequence, points_to_analysis,
size_function, HeapSimulator::Options(),
memory_by_computation));
return result.heap_size;
}
@ -344,7 +346,7 @@ HeapSimulator::HeapSimulator(
const SequentialHloOrdering::HloModuleSequence* module_sequence,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
memory_by_computation)
: no_fragmentation_stats_(MakeUnique<NoFragmentationStatsHeap>()),
: no_fragmentation_stats_(absl::make_unique<NoFragmentationStatsHeap>()),
algorithm_(std::move(algorithm)),
size_fn_(size_fn),
options_(options),

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/buffer_value.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@ -137,7 +138,7 @@ class HeapSimulatorTracker {
const string& name, std::unique_ptr<HloComputation> computation,
const std::vector<const HloInstruction*>& instruction_sequence) {
HloModuleConfig config;
module_ = MakeUnique<HloModule>(name, config);
module_ = absl::make_unique<HloModule>(name, config);
module_->AddEntryComputation(std::move(computation));
points_to_analysis_ =
TuplePointsToAnalysis::Run(module_.get()).ConsumeValueOrDie();
@ -146,8 +147,8 @@ class HeapSimulatorTracker {
// the secondary sorting criteria of DecreasingSizeRunsHeap to sort calls by
// buffer id, for determinism in the tests.
auto zero_size = [](const BufferValue& buffer) { return 0; };
auto algorithm = MakeUnique<DecreasingSizeRunsHeap>(
MakeUnique<HeapCallRecorder>(&actual_calls_));
auto algorithm = absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<HeapCallRecorder>(&actual_calls_));
result_ = HeapSimulator::Run(
std::move(algorithm), *module_->entry_computation(),
instruction_sequence, *points_to_analysis_, zero_size)
@ -156,7 +157,7 @@ class HeapSimulatorTracker {
explicit HeapSimulatorTracker(const string& name) {
HloModuleConfig config;
module_ = MakeUnique<HloModule>(name, config);
module_ = absl::make_unique<HloModule>(name, config);
}
// Similar to the single entry computation constructor above, but runs the
@ -182,8 +183,8 @@ class HeapSimulatorTracker {
auto size_fn = [&reverse_position](const BufferValue& buffer) {
return reverse_position[buffer.instruction()];
};
auto algorithm = MakeUnique<DecreasingSizeRunsHeap>(
MakeUnique<HeapCallRecorder>(&actual_calls_));
auto algorithm = absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<HeapCallRecorder>(&actual_calls_));
result_ = HeapSimulator::Run(std::move(algorithm), *module_,
module_sequence, *points_to_analysis_, size_fn)
.ConsumeValueOrDie();
@ -675,7 +676,8 @@ class HeapAlgorithmTestBase : public ::testing::Test {
const BufferValue::Id id = buffers_.size();
auto const0 = builder_.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
buffers_.emplace_back(MakeUnique<HloValue>(id, const0, ShapeIndex{}));
buffers_.emplace_back(
absl::make_unique<HloValue>(id, const0, ShapeIndex{}));
return buffers_.back().get();
}
@ -724,7 +726,8 @@ class DecreasingSizeRunsHeapTest : public HeapAlgorithmTestBase {};
TEST_F(DecreasingSizeRunsHeapTest, Empty) {
CallSequence call_sequence;
DecreasingSizeRunsHeap heap(MakeUnique<HeapCallRecorder>(&call_sequence));
DecreasingSizeRunsHeap heap(
absl::make_unique<HeapCallRecorder>(&call_sequence));
heap.Finish();
EXPECT_EQ(call_sequence, CallSequence({
{kFinish, nullptr},
@ -733,7 +736,8 @@ TEST_F(DecreasingSizeRunsHeapTest, Empty) {
TEST_F(DecreasingSizeRunsHeapTest, Simple) {
CallSequence call_sequence;
DecreasingSizeRunsHeap heap(MakeUnique<HeapCallRecorder>(&call_sequence));
DecreasingSizeRunsHeap heap(
absl::make_unique<HeapCallRecorder>(&call_sequence));
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 30);
@ -760,7 +764,8 @@ TEST_F(DecreasingSizeRunsHeapTest, Simple) {
TEST_F(DecreasingSizeRunsHeapTest, Mixed) {
CallSequence call_sequence;
DecreasingSizeRunsHeap heap(MakeUnique<HeapCallRecorder>(&call_sequence));
DecreasingSizeRunsHeap heap(
absl::make_unique<HeapCallRecorder>(&call_sequence));
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Free(buffer_b_, 20);

View File

@ -457,7 +457,7 @@ StatusOr<std::unique_ptr<HloAliasAnalysis>> HloAliasAnalysis::Run(
VLOG(2) << "HloAliasAnalysis::Run on module " << module->name();
XLA_VLOG_LINES(2, module->ToString());
auto alias_analysis = WrapUnique(new HloAliasAnalysis(module));
auto alias_analysis = absl::WrapUnique(new HloAliasAnalysis(module));
TF_ASSIGN_OR_RETURN(alias_analysis->dataflow_analysis_,
HloDataflowAnalysis::Run(*module, /*ssa_form=*/true,
/*bitcast_defines_value=*/false,

View File

@ -24,9 +24,9 @@ limitations under the License.
#include <sstream>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@ -57,8 +57,8 @@ std::unique_ptr<HloComputation> HloComputation::Builder::Build(
HloInstruction* root =
root_instruction ? root_instruction : last_added_instruction_;
CHECK_NE(nullptr, root);
return WrapUnique(new HloComputation(name_, parameter_count, &instructions_,
root, fusion_instruction_));
return absl::WrapUnique(new HloComputation(
name_, parameter_count, &instructions_, root, fusion_instruction_));
}
HloComputation::HloComputation(
@ -494,9 +494,9 @@ HloComputation::CreateFromProto(
return to_proto_id[a.get()] < to_proto_id[b.get()];
});
return WrapUnique(new HloComputation(proto.name(), parameter_count,
&instructions, root,
/*fusion_instruction=*/nullptr));
return absl::WrapUnique(new HloComputation(proto.name(), parameter_count,
&instructions, root,
/*fusion_instruction=*/nullptr));
}
void HloComputation::FuseInstructionsInto(
@ -675,7 +675,7 @@ Status HloComputation::ReplaceInstruction(HloInstruction* old_instruction,
std::unique_ptr<HloReachabilityMap> HloComputation::ComputeReachability()
const {
const auto& all = MakeInstructionPostOrder();
auto result = MakeUnique<HloReachabilityMap>(all);
auto result = absl::make_unique<HloReachabilityMap>(all);
std::vector<HloInstruction*> inputs;
for (const HloInstruction* hlo : all) {
@ -830,7 +830,7 @@ std::unique_ptr<HloComputation> HloComputation::CloneWithReplacements(
HloCloneContext* context, const string& suffix) {
std::unique_ptr<HloCloneContext> context_ptr;
if (context == nullptr) {
context_ptr = MakeUnique<HloCloneContext>(parent(), suffix);
context_ptr = absl::make_unique<HloCloneContext>(parent(), suffix);
context = context_ptr.get();
}

View File

@ -20,6 +20,7 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
@ -38,7 +39,7 @@ StatusOr<bool> HloConstantFolding::Run(HloModule* module) {
// Limit the constant folding to 0 iterations to skip folding loops. This
// retains the behavior from before while loop support in HloEvaluator and may
// be revised.
auto evaluator = MakeUnique<HloEvaluator>(/*max_loop_iterations=*/0);
auto evaluator = absl::make_unique<HloEvaluator>(/*max_loop_iterations=*/0);
XLA_VLOG_LINES(2,
"HloConstantFolding::Run(), before:\n" + module->ToString());

View File

@ -15,9 +15,9 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/shape_inference.h"
#include "tensorflow/compiler/xla/util.h"
@ -319,7 +319,7 @@ StatusOr<HloInstruction*> PadVectorWithZeros(HloInstruction* operand,
*padding_config.add_dimensions() = padding_config_dim;
HloInstruction* zero = computation->AddInstruction(
HloInstruction::CreateConstant(MakeUnique<Literal>(
HloInstruction::CreateConstant(absl::make_unique<Literal>(
LiteralUtil::Zero(operand->shape().element_type()))));
return MakePadHlo(operand, zero, padding_config);
}
@ -329,7 +329,7 @@ StatusOr<HloInstruction*> BroadcastZeros(
ArraySlice<int64> broadcast_dimensions) {
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
MakeUnique<Literal>(LiteralUtil::Zero(element_type))));
absl::make_unique<Literal>(LiteralUtil::Zero(element_type))));
return MakeBroadcastHlo(zero, /*broadcast_dimensions=*/{},
/*result_shape_bounds=*/broadcast_dimensions);
}

View File

@ -14,7 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/shape_util.h"

View File

@ -20,9 +20,9 @@ limitations under the License.
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"

View File

@ -19,8 +19,8 @@ limitations under the License.
#include <queue>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@ -886,7 +886,7 @@ StatusOr<std::unique_ptr<HloDataflowAnalysis>> HloDataflowAnalysis::Run(
VLOG(1) << "HloDataflowAnalysis::Run on module " << module.name();
XLA_VLOG_LINES(2, module.ToString());
auto dataflow_analysis = WrapUnique(new HloDataflowAnalysis(
auto dataflow_analysis = absl::WrapUnique(new HloDataflowAnalysis(
module, ssa_form, bitcast_defines_value, fusion_can_share_buffer));
TF_RETURN_IF_ERROR(dataflow_analysis->InitializeInstructionValueSets());

Some files were not shown because too many files have changed in this diff.