[XLA] Switch to absl::StrFormat.

Unlike Printf, StrFormat does not require type-length qualifiers, e.g. %z and %ll, and it does not require calling c_str() to print strings. Those call sites are fixed up here as well.

PiperOrigin-RevId: 210435915
This commit is contained in:
parent 91f33732cb
commit d57f5a8202
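For readers unfamiliar with the migration, here is a minimal standalone sketch (not code from this commit; the shape string and element count are made up for illustration) of the two mechanical changes the diff below applies everywhere: exact-width format qualifiers like %zu/%lld collapse to plain %d/%u because absl::StrFormat checks argument types against the format string, and std::string arguments bind to %s directly, with no .c_str().

```cpp
#include <cstdint>
#include <iostream>
#include <string>

#include "absl/strings/str_format.h"

int main() {
  const std::string shape = "f32[16,128]";
  const int64_t num_elements = 2048;

  // Before: tensorflow::strings::Printf needed exact length qualifiers
  // (%lld for int64, %zu for size_t) and a NUL-terminated char* for %s:
  //   Printf("shape %s has %lld elements", shape.c_str(), num_elements);

  // After: absl::StrFormat type-checks arguments against the format string
  // at compile time, so %d works for any integral width and std::string
  // binds to %s directly.
  const std::string msg =
      absl::StrFormat("shape %s has %d elements", shape, num_elements);
  std::cout << msg << "\n";  // prints: shape f32[16,128] has 2048 elements

  // The commit likewise replaces tensorflow::strings::Appendf with
  // absl::StrAppendFormat, which formats onto an existing string:
  std::string report;
  absl::StrAppendFormat(&report, "mismatches: %d (%s)\n", 3, "0.1465%");
  std::cout << report;
  return 0;
}
```

The XLA error helpers (InvalidArgument, NotFound, Unimplemented, FailedPrecondition) appear to forward to StrFormat after this change, which is why the call sites in the diff can drop .c_str() and the length qualifiers.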
@@ -175,6 +175,7 @@ cc_library(
         "@com_google_absl//absl/algorithm:container",
         "@com_google_absl//absl/container:inlined_vector",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -305,6 +306,7 @@ cc_library(
         "//tensorflow/core:lib",
         "@com_google_absl//absl/memory",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -366,6 +368,7 @@ cc_library(
         ":util",
         "//tensorflow/core:lib",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -378,6 +381,7 @@ cc_library(
         ":util",
         "//tensorflow/core:lib",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -31,7 +31,6 @@ limitations under the License.
 #include "tensorflow/compiler/xla/array2d.h"
 #include "tensorflow/compiler/xla/types.h"
 #include "tensorflow/core/lib/gtl/array_slice.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/macros.h"
 #include "tensorflow/core/platform/types.h"
 
@@ -92,6 +92,7 @@ cc_library(
         "//tensorflow/compiler/xla/service:device_memory_allocator",
         "//tensorflow/core:lib",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
         "@com_google_absl//absl/types:optional",
     ],
 )
 
@@ -15,8 +15,8 @@ limitations under the License.
 
 #include "tensorflow/compiler/xla/client/executable_build_options.h"
 
+#include "absl/strings/str_format.h"
 #include "tensorflow/compiler/xla/shape_util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 
 namespace xla {
 
@@ -59,10 +59,10 @@ string ExecutableBuildOptions::ToString() const {
   if (generate_hlo_graph_.has_value()) {
     generate_hlo_graph = generate_hlo_graph_.value();
   }
-  return tensorflow::strings::Printf(
+  return absl::StrFormat(
       "ExecutableBuildOptions{device_ordinal=%d, result_layout=%s, "
       "generate_hlo_graph=%s}",
-      device_ordinal_, result_layout.c_str(), generate_hlo_graph.c_str());
+      device_ordinal_, result_layout, generate_hlo_graph);
 }
 
 ExecutableBuildOptions& ExecutableBuildOptions::set_generate_hlo_graph(
 
@@ -56,7 +56,7 @@ XlaOp Epsilon(XlaBuilder* builder, PrimitiveType type) {
           std::numeric_limits<double>::epsilon());
     default:
       return builder->ReportError(InvalidArgument(
-          "Invalid type for Epsilon (%s).", PrimitiveType_Name(type).c_str()));
+          "Invalid type for Epsilon (%s).", PrimitiveType_Name(type)));
   }
 }
@@ -37,13 +37,13 @@ XlaOp ConstantR0WithType(XlaBuilder* builder, PrimitiveType type, T value) {
         primitive_util::IsComplexType(type))) {
     return builder->ReportError(InvalidArgument(
         "Invalid cast from floating point type to %s in ConstantR0WithType.",
-        PrimitiveType_Name(type).c_str()));
+        PrimitiveType_Name(type)));
   }
   if (std::is_same<T, complex64>::value &&
       !primitive_util::IsComplexType(type)) {
     return builder->ReportError(InvalidArgument(
         "Invalid cast from complex type to %s in ConstantR0WithType.",
-        PrimitiveType_Name(type).c_str()));
+        PrimitiveType_Name(type)));
   }
   switch (type) {
     case F16:
@@ -71,7 +71,7 @@ XlaOp ConstantR0WithType(XlaBuilder* builder, PrimitiveType type, T value) {
     default:
       return builder->ReportError(
           InvalidArgument("Invalid type for ConstantR0WithType (%s).",
-                          PrimitiveType_Name(type).c_str()));
+                          PrimitiveType_Name(type)));
   }
 }
 
@@ -65,9 +65,8 @@ XlaOp Iota(XlaBuilder* builder, PrimitiveType type, int64 size) {
     case C64:
       return MakeIota<complex64>(builder, size);
     default:
-      return builder->ReportError(
-          InvalidArgument("Unimplemented type for Iota: %s.",
-                          PrimitiveType_Name(type).c_str()));
+      return builder->ReportError(InvalidArgument(
+          "Unimplemented type for Iota: %s.", PrimitiveType_Name(type)));
   }
 }
@@ -59,7 +59,7 @@ Status LocalExecutable::ValidateExecutionOptions(
   // Check argument number, shapes, and layouts.
   if (arguments.size() != computation_layout.parameter_count()) {
     return InvalidArgument(
-        "invalid number of arguments for computation: expected %d, got %zu",
+        "invalid number of arguments for computation: expected %d, got %u",
         computation_layout.parameter_count(), arguments.size());
   }
   for (int i = 0; i < arguments.size(); ++i) {
@@ -71,9 +71,9 @@ Status LocalExecutable::ValidateExecutionOptions(
           "parameter "
           "%d: want %s, got %s",
           i,
-          ShapeUtil::HumanString(computation_layout.parameter_layout(i).shape())
-              .c_str(),
-          ShapeUtil::HumanString(arguments[i]->on_host_shape()).c_str());
+          ShapeUtil::HumanString(
+              computation_layout.parameter_layout(i).shape()),
+          ShapeUtil::HumanString(arguments[i]->on_host_shape()));
     }
   }
 
@@ -88,8 +88,7 @@ Status LocalExecutable::ValidateExecutionOptions(
     if (stream_platform != backend_->platform()) {
       return InvalidArgument(
           "stream is for platform %s, but service targets platform %s",
-          stream_platform->Name().c_str(),
-          backend_->platform()->Name().c_str());
+          stream_platform->Name(), backend_->platform()->Name());
     }
 
     // Cannot specify device_ordinal with a stream. The stream determines these
@@ -120,10 +119,10 @@ Status LocalExecutable::ValidateExecutionOptions(
       return InvalidArgument(
           "executable is built for device %s of type \"%s\"; cannot run it on "
           "device %s of type \"%s\"",
-          backend_->device_name(build_device_ordinal()).c_str(),
-          build_executor->GetDeviceDescription().name().c_str(),
-          backend_->device_name(run_device_ordinal).c_str(),
-          run_executor->GetDeviceDescription().name().c_str());
+          backend_->device_name(build_device_ordinal()),
+          build_executor->GetDeviceDescription().name(),
+          backend_->device_name(run_device_ordinal),
+          run_executor->GetDeviceDescription().name());
     }
 
   if (!run_options.allocator()) {
@@ -133,8 +132,8 @@ Status LocalExecutable::ValidateExecutionOptions(
   if (run_options.allocator()->platform() != backend.platform()) {
     return InvalidArgument(
         "allocator platform (%s) does not match service platform (%s)",
-        run_options.allocator()->platform()->Name().c_str(),
-        backend.platform()->Name().c_str());
+        run_options.allocator()->platform()->Name(),
+        backend.platform()->Name());
   }
 
   return Status::OK();
@@ -31,8 +31,8 @@ Status ValidatePaddingValues(
       input_dimensions.size() == window_strides.size();
   if (!ok) {
     return InvalidArgument(
-        "Want input dimensions size %zu = window dimensions size %zu = window "
-        "strides size %zu",
+        "Want input dimensions size %u = window dimensions size %u = window "
+        "strides size %u",
         input_dimensions.size(), window_dimensions.size(),
         window_strides.size());
   }
 
@@ -72,7 +72,7 @@ XlaOp operator>>(const XlaOp& x, const XlaOp& y) {
     if (!ShapeUtil::ElementIsIntegral(shape)) {
       return InvalidArgument(
           "Argument to >> operator does not have an integral type (%s).",
-          ShapeUtil::HumanString(shape).c_str());
+          ShapeUtil::HumanString(shape));
     }
     if (ShapeUtil::ElementIsSigned(shape)) {
       return ShiftRightArithmetic(x, y);
@@ -492,7 +492,7 @@ XlaOp XlaBuilder::Parameter(int64 parameter_number, const Shape& shape,
   return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
     HloInstructionProto instr;
     if (!parameter_numbers_.insert(parameter_number).second) {
-      return InvalidArgument("parameter %lld already registered",
+      return InvalidArgument("parameter %d already registered",
                              parameter_number);
     }
     instr.set_parameter_number(parameter_number);
@@ -766,7 +766,7 @@ XlaOp XlaBuilder::GetTupleElement(const XlaOp& tuple_data, int64 index) {
     if (!ShapeUtil::IsTuple(tuple_shape)) {
       return InvalidArgument(
           "Operand to GetTupleElement() is not a tuple; got %s",
-          ShapeUtil::HumanString(tuple_shape).c_str());
+          ShapeUtil::HumanString(tuple_shape));
     }
     *instr.mutable_shape() =
         ShapeUtil::GetTupleElementShape(tuple_shape, index);
@@ -847,16 +847,14 @@ Status XlaBuilder::VerifyConvolution(
     return InvalidArgument(
         "Convolution arguments must have same number of "
         "dimensions. Got: %s and %s",
-        ShapeUtil::HumanString(lhs_shape).c_str(),
-        ShapeUtil::HumanString(rhs_shape).c_str());
+        ShapeUtil::HumanString(lhs_shape), ShapeUtil::HumanString(rhs_shape));
   }
   int num_dims = ShapeUtil::Rank(lhs_shape);
   if (num_dims < 2) {
     return InvalidArgument(
         "Convolution expects argument arrays with >= 3 dimensions. "
         "Got: %s and %s",
-        ShapeUtil::HumanString(lhs_shape).c_str(),
-        ShapeUtil::HumanString(rhs_shape).c_str());
+        ShapeUtil::HumanString(lhs_shape), ShapeUtil::HumanString(rhs_shape));
   }
   int num_spatial_dims = num_dims - 2;
 
@@ -870,7 +868,7 @@ Status XlaBuilder::VerifyConvolution(
   }
   for (int i = 0; i < numbers.size(); ++i) {
     if (numbers.Get(i) < 0 || numbers.Get(i) >= num_dims) {
-      return InvalidArgument("Convolution %s[%d] is out of bounds: %lld",
+      return InvalidArgument("Convolution %s[%d] is out of bounds: %d",
                              field_name, i, numbers.Get(i));
     }
   }
@@ -1016,8 +1014,7 @@ StatusOr<Window> XlaBuilder::MakeWindow(
           "Window has different number of window dimensions than of ",
           x_name,
           "\nNumber of window dimensions: ", window_dimensions.size(),
-          "\nNumber of ", x_name, ": ", x, "\n")
-              .c_str());
+          "\nNumber of ", x_name, ": ", x, "\n"));
     }
   };
   TF_RETURN_IF_ERROR(verify_size(window_strides.size(), "window strides"));
@@ -1193,8 +1190,8 @@ void XlaBuilder::Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
     if (!ShapeUtil::Compatible(operand_shape, shape_with_layout)) {
       return InvalidArgument(
           "Outfeed shape %s must be compatible with operand shape %s",
-          ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str(),
-          ShapeUtil::HumanStringWithLayout(operand_shape).c_str());
+          ShapeUtil::HumanStringWithLayout(shape_with_layout),
+          ShapeUtil::HumanStringWithLayout(operand_shape));
     }
     *instr.mutable_outfeed_shape() = shape_with_layout;
 
@@ -1246,8 +1243,8 @@ XlaOp XlaBuilder::OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
     if (!ShapeUtil::Compatible(operand_shape, shape_with_layout)) {
       return InvalidArgument(
           "Outfeed shape %s must be compatible with operand shape %s",
-          ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str(),
-          ShapeUtil::HumanStringWithLayout(operand_shape).c_str());
+          ShapeUtil::HumanStringWithLayout(shape_with_layout),
+          ShapeUtil::HumanStringWithLayout(operand_shape));
     }
     *instr.mutable_outfeed_shape() = shape_with_layout;
 
@@ -1286,7 +1283,7 @@ XlaOp XlaBuilder::CustomCall(const string& call_target_name,
       return InvalidArgument(
           "Invalid custom_call_target \"%s\": Call targets that start with '$' "
           "are reserved for internal use.",
-          call_target_name.c_str());
+          call_target_name);
     }
     *instr.mutable_shape() = shape;
     instr.set_custom_call_target(call_target_name);
@@ -1590,7 +1587,7 @@ XlaOp XlaBuilder::RngOp(RandomDistribution distribution,
       if (parameters.size() != 2) {
         return InvalidArgument(
             "RNG distribution (%s) expects 2 parameters, but got %ld",
-            RandomDistribution_Name(distribution).c_str(), parameters.size());
+            RandomDistribution_Name(distribution), parameters.size());
       }
       break;
     default:
@@ -2140,13 +2137,13 @@ XlaOp XlaBuilder::SendToHost(const XlaOp& operand, const XlaOp& token,
     if (!ShapeUtil::Compatible(operand_shape, shape_with_layout)) {
       return InvalidArgument(
           "SendToHost shape %s must be compatible with operand shape %s",
-          ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str(),
-          ShapeUtil::HumanStringWithLayout(operand_shape).c_str());
+          ShapeUtil::HumanStringWithLayout(shape_with_layout),
+          ShapeUtil::HumanStringWithLayout(operand_shape));
     }
     // TODO(b/111544877): Support tuple shapes.
     if (!ShapeUtil::IsArray(operand_shape)) {
       return InvalidArgument("SendToHost only supports array shapes, shape: %s",
-                             ShapeUtil::HumanString(operand_shape).c_str());
+                             ShapeUtil::HumanString(operand_shape));
     }
 
     if (handle.type() != ChannelHandle::DEVICE_TO_HOST) {
@@ -2185,7 +2182,7 @@ XlaOp XlaBuilder::RecvFromHost(const XlaOp& token, const Shape& shape,
     if (!ShapeUtil::IsArray(shape)) {
       return InvalidArgument(
           "RecvFromHost only supports array shapes, shape: %s",
-          ShapeUtil::HumanString(shape).c_str());
+          ShapeUtil::HumanString(shape));
     }
 
     if (handle.type() != ChannelHandle::HOST_TO_DEVICE) {
@@ -2240,7 +2237,7 @@ StatusOr<XlaComputation> XlaBuilder::BuildConstantSubGraph(
         "of being evaluated at XLA compile time.\n\n"
         "Please file a usability bug with the framework being used (e.g. "
         "TensorFlow).",
-        op_string.c_str());
+        op_string);
   }
 
   TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
@@ -2348,8 +2345,8 @@ XlaBuilder::CreateDefaultConvDimensionNumbers(int num_spatial_dims) {
           dnum.input_spatial_dimensions(0), dnum.input_spatial_dimensions(1)})
           .size() != 4) {
     return FailedPrecondition(
-        "dimension numbers for the input are not unique: (%lld, %lld, %lld, "
-        "%lld)",
+        "dimension numbers for the input are not unique: (%d, %d, %d, "
+        "%d)",
         dnum.input_batch_dimension(), dnum.input_feature_dimension(),
         dnum.input_spatial_dimensions(0), dnum.input_spatial_dimensions(1));
   }
@@ -2359,8 +2356,8 @@ XlaBuilder::CreateDefaultConvDimensionNumbers(int num_spatial_dims) {
           dnum.kernel_spatial_dimensions(1)})
           .size() != 4) {
     return FailedPrecondition(
-        "dimension numbers for the weight are not unique: (%lld, %lld, %lld, "
-        "%lld)",
+        "dimension numbers for the weight are not unique: (%d, %d, %d, "
+        "%d)",
         dnum.kernel_output_feature_dimension(),
         dnum.kernel_input_feature_dimension(),
         dnum.kernel_spatial_dimensions(0), dnum.kernel_spatial_dimensions(1));
@@ -2371,8 +2368,8 @@ XlaBuilder::CreateDefaultConvDimensionNumbers(int num_spatial_dims) {
           dnum.output_spatial_dimensions(1)})
           .size() != 4) {
     return FailedPrecondition(
-        "dimension numbers for the output are not unique: (%lld, %lld, %lld, "
-        "%lld)",
+        "dimension numbers for the output are not unique: (%d, %d, %d, "
+        "%d)",
         dnum.output_batch_dimension(), dnum.output_feature_dimension(),
         dnum.output_spatial_dimensions(0), dnum.output_spatial_dimensions(1));
   }
@@ -2392,13 +2389,11 @@ StatusOr<XlaOp> XlaBuilder::AddInstruction(
   }
   for (const auto& operand : operands) {
     if (operand.builder_ == nullptr) {
-      return InvalidArgument("invalid XlaOp with handle %lld",
-                             operand.handle());
+      return InvalidArgument("invalid XlaOp with handle %d", operand.handle());
     }
     if (operand.builder_ != this) {
       return InvalidArgument("Do not add XlaOp from builder %s to builder %s",
-                             operand.builder_->name().c_str(),
-                             this->name().c_str());
+                             operand.builder_->name(), this->name());
     }
     instr.add_operand_ids(operand.handle());
   }
@@ -2428,18 +2423,18 @@ StatusOr<const HloInstructionProto*> XlaBuilder::LookUpInstruction(
 
   if (op.builder_ == nullptr) {
     return InvalidArgument(
-        "invalid XlaOp with handle %lld; the builder of this op is freed",
+        "invalid XlaOp with handle %d; the builder of this op is freed",
         op.handle());
   }
   if (op.builder_ != this) {
     return InvalidArgument(
-        "XlaOp with handle %lld is built by builder '%s', but is trying to use "
+        "XlaOp with handle %d is built by builder '%s', but is trying to use "
         "it in builder '%s'",
-        op.handle(), op.builder_->name().c_str(), this->name().c_str());
+        op.handle(), op.builder_->name(), this->name());
   }
 
   if (op.handle() >= instructions_.size() || op.handle() < 0) {
-    return InvalidArgument("no XlaOp value %lld", op.handle());
+    return InvalidArgument("no XlaOp value %d", op.handle());
   }
   return &instructions_[op.handle()];
 }
@@ -169,7 +169,7 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
   } else if (ShapeUtil::IsArray(shape)) {
     if (!shape.has_layout()) {
       return InvalidArgument("shape %s does not have a layout",
-                             ShapeUtil::HumanString(shape).c_str());
+                             ShapeUtil::HumanString(shape));
     }
     return ValidateLayoutForShape(shape.layout(), shape);
   } else {
@@ -177,7 +177,7 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
     if (shape.has_layout()) {
       return InvalidArgument(
           "shape of primitive type %s should not have a layout",
-          PrimitiveType_Name(shape.element_type()).c_str());
+          PrimitiveType_Name(shape.element_type()));
     }
     return Status::OK();
   }
@@ -194,7 +194,7 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
       layout.padded_dimensions_size() != 0) {
     return InvalidArgument(
         "shape of primitive type %s should not have a non-trivial layout",
-        PrimitiveType_Name(shape.element_type()).c_str());
+        PrimitiveType_Name(shape.element_type()));
   }
   return Status::OK();
 }
@@ -202,17 +202,17 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
   if (layout.format() == INVALID_FORMAT) {
     return InvalidArgument(
         "Layout does not have a valid format: layout {%s}, shape {%s}",
-        layout.ShortDebugString().c_str(), shape.ShortDebugString().c_str());
+        layout.ShortDebugString(), shape.ShortDebugString());
   }
 
   if (layout.format() == DENSE) {
     if (layout.minor_to_major_size() != ShapeUtil::Rank(shape)) {
       return InvalidArgument(
           "layout minor_to_major field contains %d elements, "
-          "but shape is rank %lld: {%s}; shape: %s",
+          "but shape is rank %d: {%s}; shape: %s",
           layout.minor_to_major_size(), ShapeUtil::Rank(shape),
-          absl::StrJoin(layout.minor_to_major(), ", ").c_str(),
-          shape.ShortDebugString().c_str());
+          absl::StrJoin(layout.minor_to_major(), ", "),
+          shape.ShortDebugString());
     }
 
     std::vector<bool> dimensions_in_layout(ShapeUtil::Rank(shape), false);
@@ -221,12 +221,12 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
       if (dim < 0 || dim >= ShapeUtil::Rank(shape)) {
         return InvalidArgument(
             "layout minor_to_major field has out-of-bounds value: %s",
-            HumanString(layout).c_str());
+            HumanString(layout));
       }
       if (dimensions_in_layout[dim]) {
         return InvalidArgument(
             "layout minor_to_major field has duplicate values: {%s}",
-            HumanString(layout).c_str());
+            HumanString(layout));
       }
       dimensions_in_layout[dim] = true;
     }
@@ -234,14 +234,14 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
   if (layout.padded_dimensions_size() > 0) {
     if (layout.padded_dimensions_size() != ShapeUtil::Rank(shape)) {
       return InvalidArgument(
-          "layout has %d padded dimensions, but shape is rank %lld",
+          "layout has %d padded dimensions, but shape is rank %d",
          layout.padded_dimensions_size(), ShapeUtil::Rank(shape));
     }
     for (int i = 0; i < layout.padded_dimensions_size(); ++i) {
       if (layout.padded_dimensions(i) < shape.dimensions(i)) {
         return InvalidArgument(
-            "for dimension %d, dimension padding (%lld) is smaller than "
-            "the dimension size (%lld) of the shape",
+            "for dimension %d, dimension padding (%d) is smaller than "
+            "the dimension size (%d) of the shape",
             i, layout.padded_dimensions(i), shape.dimensions(i));
       }
     }
@@ -26,6 +26,7 @@ cc_library(
         "//tensorflow/compiler/xla:types",
         "//tensorflow/core:framework_internal",
         "//tensorflow/core:lib",
+        "@com_google_absl//absl/strings",
     ],
 )
 
@@ -39,6 +40,7 @@ tf_cc_test(
         "//tensorflow/core:framework_internal",
         "//tensorflow/core:lib",
         "//tensorflow/core:test",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -75,5 +77,6 @@ tf_cc_test(
         "//tensorflow/core:lib",
         "//tensorflow/core:test",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -21,7 +21,6 @@ limitations under the License.
 #include "absl/strings/str_split.h"
 #include "tensorflow/compiler/xla/service/hlo_opcode.h"
 #include "tensorflow/compiler/xla/xla.pb.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 
 namespace xla {
 namespace legacy_flags {
 
@@ -21,8 +21,8 @@ limitations under the License.
 #include <stdlib.h>
 #include <vector>
 
+#include "absl/strings/str_format.h"
 #include "tensorflow/compiler/xla/types.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/subprocess.h"
 #include "tensorflow/core/platform/test.h"
@@ -106,8 +106,8 @@ TEST(ParseFlagsFromEnv, File) {
   if (tmp_dir == nullptr) {
     tmp_dir = kTempDir;
   }
-  string tmp_file = tensorflow::strings::Printf("%s/parse_flags_from_env.%d",
-                                                tmp_dir, getpid());
+  string tmp_file =
+      absl::StrFormat("%s/parse_flags_from_env.%d", tmp_dir, getpid());
   FILE* fp = fopen(tmp_file.c_str(), "w");
   CHECK_NE(fp, nullptr) << "can't write to " << tmp_file;
   for (int i = 0; kTestFlagString[i] != '\0'; i++) {
@@ -24,6 +24,7 @@ limitations under the License.
 
 #include "absl/memory/memory.h"
 #include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
 #include "absl/strings/str_join.h"
 #include "tensorflow/compiler/xla/index_util.h"
 #include "tensorflow/compiler/xla/shape_util.h"
@@ -33,7 +34,6 @@ limitations under the License.
 #include "tensorflow/core/lib/core/casts.h"
 #include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/hash/hash.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/types.h"
 
@@ -41,7 +41,7 @@ namespace xla {
 namespace {
 
 using absl::StrCat;
-using tensorflow::strings::Printf;
+using absl::StrFormat;
 
 constexpr bool kLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__;
 
@@ -303,7 +303,7 @@ MutableLiteralBase::CreateFromProto(const LiteralProto& proto) {
         if (proto_element->tuple_literals_size() !=
             ShapeUtil::TupleElementCount(piece->subshape())) {
           return InvalidArgument(
-              "Expected %lld tuple elements in LiteralProto, has %d",
+              "Expected %d tuple elements in LiteralProto, has %d",
              ShapeUtil::TupleElementCount(piece->subshape()),
              proto_element->tuple_literals_size());
         }
@@ -404,7 +404,7 @@ Status LiteralBase::Piece::CopyFrom(const LiteralBase::Piece& src) {
     default:
       return Unimplemented(
           "Copying a Literal object with element type %s is not implemented.",
-          PrimitiveType_Name(subshape().element_type()).c_str());
+          PrimitiveType_Name(subshape().element_type()));
     }
   }
   return Status::OK();
@@ -420,8 +420,8 @@ Status MutableLiteralBase::CopyFrom(const LiteralSlice& src_literal,
   if (!ShapeUtil::Compatible(dest_subshape, src_subshape)) {
     return InvalidArgument(
         "Destination subshape incompatible with source subshape: %s vs %s",
-        ShapeUtil::HumanString(dest_subshape).c_str(),
-        ShapeUtil::HumanString(src_subshape).c_str());
+        ShapeUtil::HumanString(dest_subshape),
+        ShapeUtil::HumanString(src_subshape));
   }
   return root_piece_->ForEachMutableSubpieceWithStatus(
       [&](const ShapeIndex& index, Piece* piece) {
@@ -458,8 +458,8 @@ Status Literal::MoveFrom(Literal&& src_literal,
   if (!ShapeUtil::Equal(dest_subshape, src_literal.shape())) {
     return InvalidArgument(
         "Destination subshape not equal to source shape: %s vs %s",
-        ShapeUtil::HumanString(dest_subshape).c_str(),
-        ShapeUtil::HumanString(src_literal.shape()).c_str());
+        ShapeUtil::HumanString(dest_subshape),
+        ShapeUtil::HumanString(src_literal.shape()));
   }
 
   src_literal.root_piece_->ForEachSubpiece(
@@ -654,8 +654,8 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::Reshape(
     return InvalidArgument(
         "Shapes before and after Literal::Reshape have different numbers "
         "of elements: %s vs %s.",
-        ShapeUtil::HumanString(shape()).c_str(),
-        ShapeUtil::HumanString(output->shape()).c_str());
+        ShapeUtil::HumanString(shape()),
+        ShapeUtil::HumanString(output->shape()));
   }
   return std::move(output);
 }
@@ -874,9 +874,8 @@ StatusOr<int64> LiteralBase::GetIntegralAsS64(
     case U64:
       return Get<uint64>(multi_index);
     default:
-      return FailedPrecondition(
-          "Array element type is not integral: %s",
-          PrimitiveType_Name(shape().element_type()).c_str());
+      return FailedPrecondition("Array element type is not integral: %s",
+                                PrimitiveType_Name(shape().element_type()));
  }
 }
 
@@ -924,9 +923,8 @@ Status MutableLiteralBase::SetIntegralAsS64(
       Set<uint64>(multi_index, value);
       break;
    default:
-      return FailedPrecondition(
-          "Array element type is not integral: %s",
-          PrimitiveType_Name(shape().element_type()).c_str());
+      return FailedPrecondition("Array element type is not integral: %s",
+                                PrimitiveType_Name(shape().element_type()));
  }
  return Status::OK();
 }
@@ -1116,9 +1114,9 @@ void ToStringHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
     pieces->push_back(shape_to_string(subshape));
     pieces->push_back(" {\n");
     for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
-      pieces->push_back(Printf(" { /*i0=%lld*/\n", i0));
+      pieces->push_back(StrFormat(" { /*i0=%d*/\n", i0));
       for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
-        pieces->push_back(Printf(" { /*i1=%lld*/\n", i1));
+        pieces->push_back(StrFormat(" { /*i1=%d*/\n", i1));
         for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
           pieces->push_back(" {");
           for (int64 i3 = 0; i3 < subshape.dimensions(3); ++i3) {
@@ -1136,11 +1134,11 @@ void ToStringHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
     pieces->push_back(shape_to_string(subshape));
     pieces->push_back(" {\n");
     for (int64 i0 = 0; i0 < subshape.dimensions(0); ++i0) {
-      pieces->push_back(Printf(" { /*i0=%lld*/\n", i0));
+      pieces->push_back(StrFormat(" { /*i0=%d*/\n", i0));
       for (int64 i1 = 0; i1 < subshape.dimensions(1); ++i1) {
-        pieces->push_back(Printf(" { /*i1=%lld*/\n", i1));
+        pieces->push_back(StrFormat(" { /*i1=%d*/\n", i1));
         for (int64 i2 = 0; i2 < subshape.dimensions(2); ++i2) {
-          pieces->push_back(Printf(" { /*i2=%lld*/\n", i2));
+          pieces->push_back(StrFormat(" { /*i2=%d*/\n", i2));
           for (int64 i3 = 0; i3 < subshape.dimensions(3); ++i3) {
             pieces->push_back(" {");
             for (int64 i4 = 0; i4 < subshape.dimensions(4); ++i4) {
@@ -1312,10 +1310,9 @@ StatusOr<std::unique_ptr<Literal>> ConvertIfDestTypeMatches(
     default:
       break;
  }
-  return Unimplemented(
-      "Converting from type %s to type %s is not implemented.",
-      PrimitiveType_Name(src_literal.shape().element_type()).c_str(),
-      PrimitiveType_Name(primitive_dest_type).c_str());
+  return Unimplemented("Converting from type %s to type %s is not implemented.",
+                       PrimitiveType_Name(src_literal.shape().element_type()),
+                       PrimitiveType_Name(primitive_dest_type));
 }
 
 StatusOr<std::unique_ptr<Literal>> ConvertSwitch(
@@ -1344,11 +1341,10 @@ StatusOr<std::unique_ptr<Literal>> ConvertSwitch(
 #undef CONVERT_IF_DEST_TYPE_MATCHES
       // Other types are not yet supported.
     default:
-      return Unimplemented(
-          "%s from type %s to type %s is not implemented.",
-          (bitcast ? "Bitcast converting" : "Converting"),
-          PrimitiveType_Name(literal.shape().element_type()).c_str(),
-          PrimitiveType_Name(primitive_dest_type).c_str());
+      return Unimplemented("%s from type %s to type %s is not implemented.",
+                           (bitcast ? "Bitcast converting" : "Converting"),
+                           PrimitiveType_Name(literal.shape().element_type()),
+                           PrimitiveType_Name(primitive_dest_type));
  }
 }
 
@@ -1366,8 +1362,8 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::BitcastConvert(
     return InvalidArgument(
         "Cannot bitcast convert from %s to %s, bit widths are different: %d != "
         "%d",
-        PrimitiveType_Name(shape().element_type()).c_str(),
-        PrimitiveType_Name(primitive_dest_type).c_str(),
+        PrimitiveType_Name(shape().element_type()),
+        PrimitiveType_Name(primitive_dest_type),
         primitive_util::BitWidth(shape().element_type()),
         primitive_util::BitWidth(primitive_dest_type));
   }
@@ -20,15 +20,15 @@ limitations under the License.
 #include <vector>
 
 #include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
 #include "tensorflow/compiler/xla/literal_util.h"
 #include "tensorflow/compiler/xla/util.h"
 #include "tensorflow/core/lib/core/casts.h"
 #include "tensorflow/core/platform/env.h"
 
 using absl::StrAppend;
+using absl::StrAppendFormat;
 using absl::StrCat;
-using tensorflow::strings::Appendf;
-using tensorflow::strings::Printf;
 
 namespace xla {
 namespace literal_comparison {
@@ -48,9 +48,9 @@ Status CompareFloatsBitwiseEqual(
     return InvalidArgument(
         "floating values are not bitwise-equal; and equality testing "
         "was requested: %s=%g=%a vs %s=%g=%a at array index %s",
-        StrCat(absl::Hex(ulhs)).c_str(), lhs_double, lhs_double,
-        StrCat(absl::Hex(urhs)).c_str(), rhs_double, rhs_double,
-        LiteralUtil::MultiIndexAsString(multi_index).c_str());
+        StrCat(absl::Hex(ulhs)), lhs_double, lhs_double,
+        StrCat(absl::Hex(urhs)), rhs_double, rhs_double,
+        LiteralUtil::MultiIndexAsString(multi_index));
   }
   return Status::OK();
 }
@@ -67,8 +67,7 @@ Status CompareEqual(NativeT lhs, NativeT rhs,
   return InvalidArgument(
       "first mismatch at array index %s:\n expected value: %s\n actual "
       "value: %s",
-      LiteralUtil::MultiIndexAsString(multi_index).c_str(), StrCat(lhs).c_str(),
-      StrCat(rhs).c_str());
+      LiteralUtil::MultiIndexAsString(multi_index), StrCat(lhs), StrCat(rhs));
 }
 
 // Specializations for floating types that do bitwise comparisons when equality
@@ -168,12 +167,12 @@ bool NanMismatch<half>(half expected, half actual, bool relaxed_nans) {
 // Converts the given floating-point value to a string.
 template <typename NativeT>
 string FpValueToString(NativeT value) {
-  return Printf("%8.4g", static_cast<double>(value));
+  return absl::StrFormat("%8.4g", static_cast<double>(value));
 }
 
 template <>
 string FpValueToString<complex64>(complex64 value) {
-  return Printf("%8.4g + %8.4fi", value.real(), value.imag());
+  return absl::StrFormat("%8.4g + %8.4fi", value.real(), value.imag());
 }
 
 // Returns the absolute value of the given floating point value. This function
@@ -228,13 +227,12 @@ class NearComparator {
   }
 
   string ToString(const Shape& shape) const {
-    return Printf(
+    return absl::StrFormat(
         "actual %s, expected %s, index %s, rel error %8.3g, abs error %8.3g",
-        FpValueToString(actual).c_str(), FpValueToString(expected).c_str(),
+        FpValueToString(actual), FpValueToString(expected),
         LiteralUtil::MultiIndexAsString(
             IndexUtil::LinearIndexToMultidimensionalIndex(shape,
-                                                          linear_index))
-            .c_str(),
+                                                          linear_index)),
         rel_error, abs_error);
   }
 };
@@ -258,7 +256,7 @@ class NearComparator {
     TF_RETURN_IF_ERROR(EqualShapes(expected_.shape(), actual_.shape()));
     if (!ShapeUtil::IsArray(expected_.shape())) {
       return InvalidArgument("Expected array shape; got %s.",
-                             ShapeUtil::HumanString(expected_.shape()).c_str());
+                             ShapeUtil::HumanString(expected_.shape()));
     }
 
     mismatches_ = Literal(ShapeUtil::ChangeElementType(actual_.shape(), PRED));
@@ -271,7 +269,7 @@ class NearComparator {
     } else if (!VLOG_IS_ON(1) && miscompare_callback_ != nullptr) {
       miscompare_callback_(expected_, actual_, mismatches_);
     }
-    return InvalidArgument("%s", ErrorMessage().c_str());
+    return InvalidArgument("%s", ErrorMessage());
   }
 
   // Insert the given absolute value into the absolute value bucket vector. The
@@ -410,23 +408,23 @@ class NearComparator {
 
     auto percent_string = [](float a, float b) {
       float pct = b == 0.0 ? 0.0 : 100.0 * a / b;
-      return Printf("%0.4f%%", pct);
+      return absl::StrFormat("%0.4f%%", pct);
     };
 
-    Appendf(&out,
-            "\nMismatch count %lld (%s) in shape %s (%lld elements), abs bound "
-            "%g, rel bound %g\n",
-            num_mismatches_,
-            percent_string(num_mismatches_, element_count).c_str(),
-            ShapeUtil::HumanString(actual_.shape()).c_str(),
-            ShapeUtil::ElementsIn(actual_.shape()), error_.abs, error_.rel);
+    StrAppendFormat(
+        &out,
+        "\nMismatch count %d (%s) in shape %s (%d elements), abs bound "
+        "%g, rel bound %g\n",
+        num_mismatches_, percent_string(num_mismatches_, element_count),
+        ShapeUtil::HumanString(actual_.shape()),
+        ShapeUtil::ElementsIn(actual_.shape()), error_.abs, error_.rel);
     if (num_nan_mismatches_ > 0) {
       StrAppend(&out, "nan mismatches ", num_nan_mismatches_, "\n");
    }
-    Appendf(&out, "Top relative error mismatches:\n");
+    StrAppendFormat(&out, "Top relative error mismatches:\n");
    for (auto it = top_rel_mismatches_.rbegin();
         it != top_rel_mismatches_.rend(); ++it) {
-      StrAppend(&out, " ", it->ToString(actual_.shape()).c_str(), "\n");
+      StrAppend(&out, " ", it->ToString(actual_.shape()), "\n");
    }
 
    if (!detailed_message_) {
@@ -438,36 +436,37 @@ class NearComparator {
    for (int i = 0; i < abs_value_buckets_.size(); ++i) {
      const int64 bucket_size = abs_value_buckets_[i].first;
      const int64 bucket_mismatches = abs_value_buckets_[i].second;
-      string mismatch_str = bucket_mismatches > 0
-                                ? Printf(", mismatches %lld", bucket_mismatches)
-                                : "";
-      Appendf(&out, " %-6g <= x < %-6g : %7lld (%9s)%s\n",
-              kAbsValueBucketBounds[i], kAbsValueBucketBounds[i + 1],
-              bucket_size, percent_string(bucket_size, element_count).c_str(),
-              mismatch_str.c_str());
+      string mismatch_str =
+          bucket_mismatches > 0
+              ? absl::StrFormat(", mismatches %d", bucket_mismatches)
+              : "";
+      StrAppendFormat(&out, " %-6g <= x < %-6g : %7d (%9s)%s\n",
+                      kAbsValueBucketBounds[i], kAbsValueBucketBounds[i + 1],
+                      bucket_size, percent_string(bucket_size, element_count),
+                      mismatch_str);
    }
 
    auto print_accum_buckets = [&](const string& header, int64 total,
                                   tensorflow::gtl::ArraySlice<int64> buckets) {
      StrAppend(&out, header, ":\n");
-      Appendf(&out, " < %-6g : %7lld (%s)\n", kErrorBucketBounds[0],
-              total - buckets[0],
-              percent_string(total - buckets[0], total).c_str());
+      StrAppendFormat(&out, " < %-6g : %7d (%s)\n", kErrorBucketBounds[0],
+                      total - buckets[0],
+                      percent_string(total - buckets[0], total));
      CHECK_EQ(buckets.size(), kErrorBucketBounds.size());
      for (int i = 0; i < kErrorBucketBounds.size(); ++i) {
-        Appendf(&out, " >= %-6g : %7lld (%s)\n", kErrorBucketBounds[i],
-                buckets[i], percent_string(buckets[i], total).c_str());
+        StrAppendFormat(&out, " >= %-6g : %7d (%s)\n", kErrorBucketBounds[i],
+                        buckets[i], percent_string(buckets[i], total));
      }
    };
-    Appendf(&out, "Elements exceeding abs error bound %g: %lld (%s)\n",
-            error_.abs, num_abs_mismatches_,
-            percent_string(num_abs_mismatches_, element_count).c_str());
+    StrAppendFormat(&out, "Elements exceeding abs error bound %g: %d (%s)\n",
+                    error_.abs, num_abs_mismatches_,
+                    percent_string(num_abs_mismatches_, element_count));
    print_accum_buckets(
        "Relative error breakdown of elements exceeding abs error bound",
        num_abs_mismatches_, rel_error_buckets_);
-    Appendf(&out, "Elements exceeding rel error bound %g: %lld (%s)\n",
-            error_.rel, num_rel_mismatches_,
-            percent_string(num_rel_mismatches_, element_count).c_str());
+    StrAppendFormat(&out, "Elements exceeding rel error bound %g: %d (%s)\n",
+                    error_.rel, num_rel_mismatches_,
+                    percent_string(num_rel_mismatches_, element_count));
    print_accum_buckets(
        "Absolute error breakdown of elements exceeding rel error bound",
        num_rel_mismatches_, abs_error_buckets_);
@@ -612,9 +611,9 @@ Status NearHelper(const LiteralSlice& expected, const LiteralSlice& actual,
           NearHelper(expected_element, actual_element, error, detailed_message,
                      miscompare_callback, element_index);
       if (!element_result.ok()) {
-        element_result = InvalidArgument(
-            "Array at shape index %s, %s", element_index.ToString().c_str(),
-            element_result.error_message().c_str());
+        element_result = InvalidArgument("Array at shape index %s, %s",
+                                         element_index.ToString(),
+                                         element_result.error_message());
        if (return_status.ok()) {
          return_status = element_result;
        } else {
@@ -627,10 +626,10 @@ Status NearHelper(const LiteralSlice& expected, const LiteralSlice& actual,
      // Emit a top-level error message containing the top-level shape in case
      // of mismatch.
      int64 total_elements = RecursiveElementCount(actual.shape());
-      return_status = InvalidArgument(
-          "\nMismatches in shape %s (%lld elements):\n%s",
-          ShapeUtil::HumanString(actual.shape()).c_str(), total_elements,
-          return_status.error_message().c_str());
+      return_status =
+          InvalidArgument("\nMismatches in shape %s (%d elements):\n%s",
+                          ShapeUtil::HumanString(actual.shape()),
+                          total_elements, return_status.error_message());
    }
    return return_status;
  }
@@ -674,14 +673,14 @@ Status NearHelper(const LiteralSlice& expected, const LiteralSlice& actual,
 Status EqualShapes(const Shape& expected, const Shape& actual) {
   if (expected.element_type() != actual.element_type()) {
     return InvalidArgument("element type mismatch, want: %s got %s",
-                           ShapeUtil::HumanString(expected).c_str(),
-                           ShapeUtil::HumanString(actual).c_str());
+                           ShapeUtil::HumanString(expected),
+                           ShapeUtil::HumanString(actual));
   }
   if (ShapeUtil::IsTuple(expected)) {
     if (ShapeUtil::TupleElementCount(expected) !=
         ShapeUtil::TupleElementCount(actual)) {
       return InvalidArgument(
-          "want tuple element count: %lld got tuple element count: %lld",
+          "want tuple element count: %d got tuple element count: %d",
          ShapeUtil::TupleElementCount(expected),
          ShapeUtil::TupleElementCount(actual));
     }
@@ -695,14 +694,13 @@ Status EqualShapes(const Shape& expected, const Shape& actual) {
   } else if (ShapeUtil::IsArray(expected)) {
     if (ShapeUtil::Rank(expected) != ShapeUtil::Rank(actual)) {
       return InvalidArgument("want rank of %s got rank of %s",
-                             ShapeUtil::HumanString(expected).c_str(),
-                             ShapeUtil::HumanString(actual).c_str());
+                             ShapeUtil::HumanString(expected),
+                             ShapeUtil::HumanString(actual));
     }
     if (expected.element_type() != actual.element_type()) {
-      return InvalidArgument(
-          "mismatch in primitive type %s vs %s",
-          PrimitiveType_Name(expected.element_type()).c_str(),
-          PrimitiveType_Name(actual.element_type()).c_str());
+      return InvalidArgument("mismatch in primitive type %s vs %s",
+                             PrimitiveType_Name(expected.element_type()),
+                             PrimitiveType_Name(actual.element_type()));
     }
     if (expected.dimensions_size() != actual.dimensions_size()) {
       return InvalidArgument("want dimensions_size %d got dimensions_size %d",
@@ -713,8 +711,7 @@ Status EqualShapes(const Shape& expected, const Shape& actual) {
       if (expected.dimensions(i) != actual.dimensions(i)) {
         return InvalidArgument(
             "mismatch in dimension #%d expected: %s actual: %s", i,
-            ShapeUtil::HumanString(expected).c_str(),
-            ShapeUtil::HumanString(actual).c_str());
+            ShapeUtil::HumanString(expected), ShapeUtil::HumanString(actual));
       }
     }
   }
@@ -733,9 +730,8 @@ Status EmitLiteralsInErrorMessage(const Status& result,
     return result;
  }
  return InvalidArgument("%s\n\nExpected literal:\n%s\n\nActual literal:\n%s",
-                         result.error_message().c_str(),
-                         ToStringTruncated(expected).c_str(),
-                         ToStringTruncated(actual).c_str());
+                         result.error_message(), ToStringTruncated(expected),
+                         ToStringTruncated(actual));
 }
 
 }  // namespace
@@ -33,7 +33,6 @@ limitations under the License.
 #include "tensorflow/core/lib/core/casts.h"
 #include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/hash/hash.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/mem.h"
 #include "tensorflow/core/platform/types.h"
 
@@ -60,7 +60,7 @@ MaybeFind(const Collection& collection,
   if (it == collection.end()) {
     std::ostringstream os;
     os << key;
-    return NotFound("key not found: %s", os.str().c_str());
+    return NotFound("key not found: %s", os.str());
   }
   return {it->second};
 }
@@ -19,7 +19,7 @@ limitations under the License.
 #include <unordered_map>
 
 #include "absl/strings/str_cat.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
+#include "absl/strings/str_format.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/types.h"
 
@@ -264,8 +264,7 @@ string MetricTableReport::MetricString(double metric) {
 }
 
 string MetricTableReport::MetricPercent(double metric) {
-  return tensorflow::strings::Printf("%5.2f%%",
-                                     metric / expected_metric_sum_ * 100.0);
+  return absl::StrFormat("%5.2f%%", metric / expected_metric_sum_ * 100.0);
 }
 
 }  // namespace xla
@@ -54,7 +54,7 @@ StatusOr<std::unique_ptr<Literal>> PackedLiteralReader::Read(
   if (shape.element_type() != F32) {
     return Unimplemented(
         "not yet implemented element type for packed literal reading: %s",
-        PrimitiveType_Name(shape.element_type()).c_str());
+        PrimitiveType_Name(shape.element_type()));
   }
 
   auto result = absl::make_unique<Literal>(literal_shape);
@@ -40,6 +40,7 @@ cc_library(
         "//tensorflow/core:lib",
         "//tensorflow/python:numpy_lib",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -251,7 +251,7 @@ StatusOr<std::unique_ptr<Literal>> CompiledLocalComputation::Execute(
         return InternalError(
             "Failed running replica %d (other replicas may have failed as well): "
             "%s.",
-            replica, statusor.status().ToString().c_str());
+            replica, statusor.status().ToString());
       }
     }
 
@@ -696,8 +696,7 @@ StatusOr<LocalShapedBufferTuple*> DestructureLocalShapedBufferTuple(
         "Attemped to destructure a LocalShapedBuffer that did not have a tuple "
        "shape; shape: %s",
        ShapeUtil::HumanString(
-            local_shaped_buffer->shaped_buffer()->on_device_shape())
-            .c_str());
+            local_shaped_buffer->shaped_buffer()->on_device_shape()));
   }
 
   DeviceMemoryAllocator* allocator =
@@ -110,6 +110,7 @@ limitations under the License.
 #include "tensorflow/python/lib/core/numpy.h"
 
 #include "third_party/absl/strings/str_cat.h"
+#include "third_party/absl/strings/str_format.h"
 #include "tensorflow/compiler/xla/literal.h"
 #include "tensorflow/compiler/xla/shape_util.h"
 #include "tensorflow/compiler/xla/xla_data.pb.h"
@@ -155,8 +156,8 @@ bool HandleStringAttribute(PyObject* o,
     return true;  // The attribute is None, which we consider ok.
   }
   if (!PyString_Check(attr)) {
-    string message = tensorflow::strings::Printf("%s must be a string or none; got %s",
-                                                 attr_name, numpy::PyObjectCppRepr(attr).c_str());
+    string message = absl::StrFormat("%s must be a string or none; got %s",
+                                     attr_name, numpy::PyObjectCppRepr(attr));
     PyErr_SetString(PyExc_TypeError, message.c_str());
     Py_DECREF(attr);
     return false;  // Type error, not ok.
@@ -15,6 +15,7 @@ limitations under the License.
 
 #include "tensorflow/compiler/xla/python/numpy_bridge.h"
 #include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
 #include "tensorflow/compiler/xla/literal_util.h"
 #include "tensorflow/compiler/xla/shape_util.h"
 #include "tensorflow/core/platform/logging.h"
@@ -150,9 +151,7 @@ static int NumpyTypenum(PyObject* o) {
 //
 // NOTE: this is an internal helper for conversion to a C++, and so decrefs r.
 static string ExtractStringAndDecref(PyObject* r) {
-  auto error = [r] {
-    return tensorflow::strings::Printf("<failed conversion of %p>", r);
-  };
+  auto error = [r] { return absl::StrFormat("<failed conversion of %p>", r); };
   if (r == nullptr) {
     return error();
   }
@@ -43,6 +43,7 @@ tf_cc_binary(
         "//tensorflow/compiler/xla/service:cpu_plugin",
         "//tensorflow/core:framework_internal",
         "//tensorflow/core:lib",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -62,6 +63,7 @@ tf_cc_test(
         "//tensorflow/core:lib",
         "//tensorflow/core:test",
         "//tensorflow/core:test_main",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -23,12 +23,12 @@ limitations under the License.
 #include "grpcpp/create_channel.h"
 #include "grpcpp/security/credentials.h"
 
+#include "absl/strings/str_format.h"
 #include "tensorflow/compiler/xla/client/client.h"
 #include "tensorflow/compiler/xla/client/xla_builder.h"
 #include "tensorflow/compiler/xla/rpc/grpc_stub.h"
 #include "tensorflow/compiler/xla/tests/literal_test_util.h"
 #include "tensorflow/core/lib/io/path.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/net.h"
 #include "tensorflow/core/platform/subprocess.h"
@@ -46,7 +46,7 @@ class GRPCClientTestBase : public ::testing::Test {
     int port = tensorflow::internal::PickUnusedPortOrDie();
     subprocess_.SetProgram(
         service_main_path,
-        {service_main_path, tensorflow::strings::Printf("--port=%d", port)});
+        {service_main_path, absl::StrFormat("--port=%d", port)});
     subprocess_.SetChannelAction(tensorflow::CHAN_STDOUT,
                                  tensorflow::ACTION_DUPPARENT);
     subprocess_.SetChannelAction(tensorflow::CHAN_STDERR,
@@ -54,9 +54,8 @@ class GRPCClientTestBase : public ::testing::Test {
     CHECK(subprocess_.Start());
     LOG(INFO) << "Launched subprocess";
 
-    auto channel =
-        ::grpc::CreateChannel(tensorflow::strings::Printf("localhost:%d", port),
-                              ::grpc::InsecureChannelCredentials());
+    auto channel = ::grpc::CreateChannel(absl::StrFormat("localhost:%d", port),
+                                         ::grpc::InsecureChannelCredentials());
     channel->WaitForConnected(gpr_time_add(
         gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)));
     LOG(INFO) << "Channel to server is connected on port " << port;
@@ -18,8 +18,8 @@ limitations under the License.
 #include "grpcpp/security/server_credentials.h"
 #include "grpcpp/server.h"
 #include "grpcpp/server_builder.h"
+#include "absl/strings/str_format.h"
 #include "tensorflow/compiler/xla/rpc/grpc_service.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/platform/init_main.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/util/command_line_flags.h"
@@ -44,7 +44,7 @@ int RealMain(int argc, char** argv) {
       xla::GRPCService::NewService().ConsumeValueOrDie();
 
   ::grpc::ServerBuilder builder;
-  string server_address(tensorflow::strings::Printf("localhost:%d", port));
+  string server_address(absl::StrFormat("localhost:%d", port));
 
   builder.AddListeningPort(server_address, ::grpc::InsecureServerCredentials());
   builder.RegisterService(service.get());
@@ -178,6 +178,7 @@ cc_library(
         "//tensorflow/core:lib",
         "@com_google_absl//absl/algorithm:container",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -465,6 +466,7 @@ cc_library(
         "//tensorflow/core:lib",
         "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -637,6 +639,7 @@ cc_library(
        "//tensorflow/core:stream_executor_no_cuda",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
     alwayslink = 1,
 )
@@ -671,6 +674,7 @@ cc_library(
        "//tensorflow/core:stream_executor_no_cuda",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -746,6 +750,7 @@ cc_library(
        "//tensorflow/core:stream_executor_no_cuda",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -795,6 +800,7 @@ cc_library(
        "//tensorflow/core:stream_executor_no_cuda",
        "//tensorflow/stream_executor",
        "@com_google_absl//absl/memory",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -946,6 +952,7 @@ cc_library(
        "//tensorflow/compiler/xla:util",
        "//tensorflow/core:lib",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -992,6 +999,7 @@ cc_library(
        "//tensorflow/core:lib_internal",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -1040,6 +1048,7 @@ cc_library(
        "//tensorflow/compiler/xla:util",
        "//tensorflow/core:lib",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -1746,6 +1755,7 @@ cc_library(
        "//tensorflow/compiler/xla:util",
        "//tensorflow/core:lib",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -2135,6 +2145,7 @@ cc_library(
        "@com_google_absl//absl/container:inlined_vector",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -2187,6 +2198,7 @@ cc_library(
        "//tensorflow/core:lib",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -2325,6 +2337,7 @@ cc_library(
        "//tensorflow/core:lib_internal",
        "@com_google_absl//absl/container:inlined_vector",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -2448,6 +2461,7 @@ cc_library(
        "//tensorflow/core:lib",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -2803,6 +2817,7 @@ cc_library(
        "//tensorflow/core:lib_internal",
        "//tensorflow/core:regexp_internal",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
        "@com_google_absl//absl/types:optional",
     ],
     alwayslink = 1,
@@ -3143,13 +3158,13 @@ cc_library(
 
 cc_library(
     name = "source_map_util",
-    srcs = ["source_map_util.cc"],
+    srcs = [],
     hdrs = ["source_map_util.h"],
     deps = [
         ":executable",
         "//tensorflow/compiler/xla:status",
         "//tensorflow/compiler/xla:util",
         "//tensorflow/core:lib",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
 
@@ -3199,11 +3214,11 @@ cc_library(
        "//tensorflow/compiler/xla:statusor",
        "//tensorflow/compiler/xla:util",
        "//tensorflow/compiler/xla:xla_data_proto",
        "//tensorflow/core:lib",
        "//tensorflow/core:lib_internal",
        "@com_google_absl//absl/algorithm:container",
        "@com_google_absl//absl/memory",
        "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/strings:str_format",
     ],
 )
@@ -69,8 +69,7 @@ StatusOr<GlobalDataHandle> AllocationTracker::RegisterInternal(
       return InvalidArgument(
           "AllocationTracker for platform %s cannot register buffer from "
           "platform %s",
-          backend_->platform()->Name().c_str(),
-          shaped_buffer.platform()->Name().c_str());
+          backend_->platform()->Name(), shaped_buffer.platform()->Name());
     }
   }
 
@@ -125,7 +124,7 @@ Status AllocationTracker::Unregister(const GlobalDataHandle& data) {
   // "handle does not exist".
   auto it = handle_to_shaped_buffers_.find(data.handle());
   if (it == handle_to_shaped_buffers_.end()) {
-    return NotFound("no allocation record for global data handle: %lld",
+    return NotFound("no allocation record for global data handle: %d",
                     data.handle());
   }
   for (auto& shaped_buffer : it->second) {
@@ -144,7 +143,7 @@ StatusOr<std::vector<GlobalDataHandle>> AllocationTracker::DeconstructTuple(
   // the same for all buffers across replicas.
   const ShapedBuffer* shaped_buffer = replicated_buffers[0];
   if (!ShapeUtil::IsTuple(shaped_buffer->on_host_shape())) {
-    return InvalidArgument("global data handle %lld is not a tuple",
+    return InvalidArgument("global data handle %d is not a tuple",
                            data.handle());
  }
  // If the on-host representation is a tuple, then the on-device one should be
@@ -201,14 +200,14 @@ StatusOr<std::vector<const ShapedBuffer*>> AllocationTracker::ResolveInternal(
   VLOG(2) << "resolve:" << data.handle();
   auto it = handle_to_shaped_buffers_.find(data.handle());
   if (it == handle_to_shaped_buffers_.end()) {
-    return NotFound("no allocation record for global data handle: %lld",
+    return NotFound("no allocation record for global data handle: %d",
                     data.handle());
  }
  std::vector<const ShapedBuffer*> replicated_buffers;
  for (const auto& shaped_buffer : it->second) {
    if (shaped_buffer == nullptr) {
-      return InvalidArgument(
-          "global data handle %lld was previously deallocated", data.handle());
+      return InvalidArgument("global data handle %d was previously deallocated",
+                             data.handle());
    }
    replicated_buffers.push_back(shaped_buffer.get());
  }
@ -177,7 +177,7 @@ StatusOr<se::StreamExecutor*> Backend::stream_executor(
|
||||
}
|
||||
}
|
||||
return InvalidArgument("device %s not supported by XLA service",
|
||||
device_name(device_ordinal).c_str());
|
||||
device_name(device_ordinal));
|
||||
}
|
||||
|
||||
StatusOr<bool> Backend::devices_equivalent(int device_ordinal_a,
|
||||
|
||||
@ -24,6 +24,7 @@ limitations under the License.

#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/buffer_value_containers.h"
#include "tensorflow/compiler/xla/service/heap_simulator.h"
@ -37,17 +38,15 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/stringprintf.h"

namespace xla {
namespace {

using absl::StrAppend;
using absl::StrAppendFormat;
using ::tensorflow::gtl::FlatMap;
using ::tensorflow::gtl::FlatSet;
using ::tensorflow::strings::Appendf;
using ::tensorflow::strings::HumanReadableNumBytes;
using ::tensorflow::strings::Printf;

template <typename T>
string ColocatedBufferSetsToString(const T& container, const char* title) {
@ -105,7 +104,7 @@ Status GatherComputationsByAllocationType(
return InvalidArgument(
"computation %s has conflicting allocation requirements (global "
"and thread-local)",
computation->name().c_str());
computation->name());
}

if (is_thread_local) {
@ -128,7 +127,7 @@ Status GatherComputationsByAllocationType(
return InvalidArgument(
"computation %s cannot contain call/while op because it "
"requires thread-local buffer allocations",
computation->name().c_str());
computation->name());
}
worklist.push_back(std::make_pair(subcomputation,
false)); // Not thread local.
@ -145,9 +144,8 @@ Status GatherComputationsByAllocationType(
true)); // Thread local.
break;
default:
return InternalError(
"Unexpected calling opcode: %s",
HloOpcodeString(instruction->opcode()).c_str());
return InternalError("Unexpected calling opcode: %s",
HloOpcodeString(instruction->opcode()));
}
}
}
@ -296,7 +294,7 @@ BufferAllocationProto BufferAllocation::ToProto() const {

string BufferAllocation::ToString() const {
string output;
Appendf(&output, "allocation %lld: %p, size %lld", index_, this, size());
StrAppendFormat(&output, "allocation %d: %p, size %d", index_, this, size());
if (color().value() != 0) {
StrAppend(&output, ", color ", color().value());
}
@ -328,11 +326,10 @@ string BufferAllocation::ToString() const {
});
for (const LogicalBuffer* buffer : sorted_buffers) {
const OffsetSize& offset_size = FindOrDie(assigned_buffers_, buffer);
StrAppend(&output,
tensorflow::strings::Printf(
" %s [%lld,%lld]: %s\n", buffer->ToString().c_str(),
offset_size.offset, offset_size.size,
ShapeUtil::HumanStringWithLayout(buffer->shape()).c_str()));
StrAppend(&output, absl::StrFormat(
" %s [%d,%d]: %s\n", buffer->ToString(),
offset_size.offset, offset_size.size,
ShapeUtil::HumanStringWithLayout(buffer->shape())));
}
return output;
}
@ -425,7 +422,7 @@ StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueSlice(
return FailedPrecondition(
"BufferAllocation::Slice for instruction %s at index %s cannot "
"be determined at compile-time.",
instruction->name().c_str(), index.ToString().c_str());
instruction->name(), index.ToString());
}
} else {
VLOG(3) << "No allocation";
@ -434,7 +431,7 @@ StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueSlice(
if (result.allocation() == nullptr) {
return FailedPrecondition(
"BufferAllocation::Slice not assigned for instruction %s at index %s",
instruction->name().c_str(), index.ToString().c_str());
instruction->name(), index.ToString());
}
return result;
}
@ -646,30 +643,29 @@ Status BufferAssignment::ComputeSummaryStats() {

string BufferAssignment::Stats::ToString() const {
string s;
Appendf(&s, "BufferAssignment stats:\n");
Appendf(&s, " parameter allocation: %10s\n",
HumanReadableNumBytes(parameter_allocation_bytes).c_str());
Appendf(&s, " constant allocation: %10s\n",
HumanReadableNumBytes(constant_allocation_bytes).c_str());
Appendf(&s, " maybe_live_out allocation: %10s\n",
HumanReadableNumBytes(maybe_live_out_allocation_bytes).c_str());
Appendf(&s, " preallocated temp allocation: %10s\n",
HumanReadableNumBytes(preallocated_temp_allocation_bytes).c_str());
StrAppendFormat(&s, "BufferAssignment stats:\n");
StrAppendFormat(&s, " parameter allocation: %10s\n",
HumanReadableNumBytes(parameter_allocation_bytes));
StrAppendFormat(&s, " constant allocation: %10s\n",
HumanReadableNumBytes(constant_allocation_bytes));
StrAppendFormat(&s, " maybe_live_out allocation: %10s\n",
HumanReadableNumBytes(maybe_live_out_allocation_bytes));
StrAppendFormat(&s, " preallocated temp allocation: %10s\n",
HumanReadableNumBytes(preallocated_temp_allocation_bytes));
if (preallocated_temp_fragmentation_bytes >= 0) {
const double percent = 100. * preallocated_temp_fragmentation_bytes /
preallocated_temp_allocation_bytes;
Appendf(
StrAppendFormat(
&s, " preallocated temp fragmentation: %10s (%.2f%%)\n",
HumanReadableNumBytes(preallocated_temp_fragmentation_bytes).c_str(),
percent);
HumanReadableNumBytes(preallocated_temp_fragmentation_bytes), percent);
}
Appendf(&s, " total allocation: %10s\n",
HumanReadableNumBytes(total_allocation_bytes).c_str());
StrAppendFormat(&s, " total allocation: %10s\n",
HumanReadableNumBytes(total_allocation_bytes));
if (total_fragmentation_bytes >= 0) {
const double percent =
100. * total_fragmentation_bytes / total_allocation_bytes;
Appendf(&s, " total fragmentation: %10s (%.2f%%)\n",
HumanReadableNumBytes(total_fragmentation_bytes).c_str(), percent);
StrAppendFormat(&s, " total fragmentation: %10s (%.2f%%)\n",
HumanReadableNumBytes(total_fragmentation_bytes), percent);
}
return s;
}

@ -20,6 +20,7 @@ limitations under the License.
#include <utility>
#include <vector>

#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/logical_buffer.h"
@ -29,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {
@ -75,19 +75,17 @@ Status BufferLiveness::Analyze() {

string BufferLiveness::ToString() const {
std::vector<string> pieces;
pieces.push_back(tensorflow::strings::Printf("BufferLiveness(module=%s):",
module_->name().c_str()));
pieces.push_back(
absl::StrFormat("BufferLiveness(module=%s):", module_->name()));
pieces.push_back("HloOrdering:");
pieces.push_back(hlo_ordering_->ToString());
pieces.push_back(tensorflow::strings::Printf("Aliased buffers:"));
pieces.push_back("Aliased buffers:");
for (const LogicalBuffer* buffer : aliased_buffers_) {
pieces.push_back(
tensorflow::strings::Printf(" %s", buffer->ToString().c_str()));
pieces.push_back(absl::StrFormat(" %s", buffer->ToString()));
}
pieces.push_back(tensorflow::strings::Printf("Live out buffers:"));
pieces.push_back("Live out buffers:");
for (const LogicalBuffer* buffer : maybe_live_out_buffers_) {
pieces.push_back(
tensorflow::strings::Printf(" %s", buffer->ToString().c_str()));
pieces.push_back(absl::StrFormat(" %s", buffer->ToString()));
}
return absl::StrJoin(pieces, "\n");
}

@ -19,19 +19,19 @@ limitations under the License.

#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/types.h"

namespace xla {

using absl::StrAppendFormat;
using absl::StrCat;
using ::tensorflow::strings::Appendf;

string CallContextToString(CallContext context) {
switch (context) {
@ -356,20 +356,20 @@ CallGraph::NearestAncestorsInSameComputation(HloInstruction* a,

string CallGraph::ToString() const {
string out;
Appendf(&out, "Call graph for module %s:\n", module_->name().c_str());
StrAppendFormat(&out, "Call graph for module %s:\n", module_->name());
for (const CallGraphNode& node : nodes()) {
Appendf(&out, "Computation %s:\n", node.computation()->name().c_str());
Appendf(&out, " calls:\n");
StrAppendFormat(&out, "Computation %s:\n", node.computation()->name());
StrAppendFormat(&out, " calls:\n");
for (const HloComputation* callee : node.callees()) {
Appendf(&out, " %s\n", callee->name().c_str());
StrAppendFormat(&out, " %s\n", callee->name());
}
Appendf(&out, " called by:\n");
StrAppendFormat(&out, " called by:\n");
for (const HloComputation* caller : node.callers()) {
Appendf(&out, " %s\n", caller->name().c_str());
StrAppendFormat(&out, " %s\n", caller->name());
}
Appendf(&out, " callsites:\n");
StrAppendFormat(&out, " callsites:\n");
for (const CallSite& callsite : node.callsites()) {
Appendf(&out, " %s\n", callsite.ToString().c_str());
StrAppendFormat(&out, " %s\n", callsite.ToString());
}
}
return out;

@ -96,7 +96,7 @@ class SubcomputationInsertionVisitor : public DfsHloVisitorWithDefault {
if (it == subcomputation_hlo_to_new_hlo_.end()) {
return NotFound(
"Could not find mapping from subcomputation HLO %s to a cloned HLO.",
subcomputation_hlo->ToString().c_str());
subcomputation_hlo->ToString());
}
return it->second;
}

@ -73,20 +73,20 @@ ChannelHandle ChannelTracker::AllocateHandle(ChannelHandle::ChannelType type) {

Status ChannelTracker::RegisterSendInternal(const ChannelHandle& handle) {
if (opaque_to_channel_.count(handle.handle()) == 0) {
return NotFound("channel handle not found: %lld", handle.handle());
return NotFound("channel handle not found: %d", handle.handle());
}
Channel& channel = opaque_to_channel_[handle.handle()];
if (channel.type == ChannelHandle::HOST_TO_DEVICE) {
return FailedPrecondition(
"host-to-device channels cannot be used with a Send operation; "
"channel handle: %lld",
"channel handle: %d",
handle.handle());
}

if (channel.has_sender) {
return FailedPrecondition(
"when registering send, passed a channel handle that is already used "
"by a sender: %lld",
"by a sender: %d",
handle.handle());
}
channel.has_sender = true;
@ -95,13 +95,13 @@ Status ChannelTracker::RegisterSendInternal(const ChannelHandle& handle) {

Status ChannelTracker::RegisterRecvInternal(const ChannelHandle& handle) {
if (opaque_to_channel_.count(handle.handle()) == 0) {
return NotFound("channel handle not found: %lld", handle.handle());
return NotFound("channel handle not found: %d", handle.handle());
}
Channel& channel = opaque_to_channel_[handle.handle()];
if (channel.type == ChannelHandle::DEVICE_TO_HOST) {
return FailedPrecondition(
"device-to-host channels cannot be used with a Recv operation; "
"channel handle: %lld",
"channel handle: %d",
handle.handle());
}

@ -109,7 +109,7 @@ Status ChannelTracker::RegisterRecvInternal(const ChannelHandle& handle) {
if (channel.receiver_count >= 1) {
return FailedPrecondition(
"when registering recv, passed a channel handle that is already used "
"by a receiver: %lld",
"by a receiver: %d",
handle.handle());
}
channel.receiver_count += 1;

@ -101,7 +101,7 @@ Compiler::GetPlatformCompilers() {
return NotFound(
"could not find registered compiler for platform %s -- check "
"target linkage",
platform->Name().c_str());
platform->Name());
}

// And then we invoke the factory, placing the result into the mapping.

@ -132,7 +132,7 @@ StatusOr<DeviceAssignment> ComputationPlacer::AssignDevices(
return NotFound(
"could not find registered computation placer for platform %s -- check "
"target linkage",
platform->Name().c_str());
platform->Name());
}

if (it->second.placer == nullptr) {

@ -235,6 +235,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
"@llvm//:orc_jit",
],
)
@ -283,6 +284,7 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:tuple_ops",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
"@llvm//:code_gen",
"@llvm//:core",
"@llvm//:support",
@ -338,12 +340,12 @@ cc_library(
hdrs = ["parallel_loop_emitter.h"],
deps = [
":ir_emission_utils",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service/llvm_ir:ir_array",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_loop",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/compiler/xla/service/llvm_ir:loop_emitter",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings:str_format",
"@llvm//:core",
],
)
@ -391,6 +393,7 @@ tf_cc_binary(
"//tensorflow/compiler/xla/client:xla_builder",
"//tensorflow/compiler/xla/client:xla_computation",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings:str_format",
],
)

@ -404,6 +407,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings:str_format",
"@llvm//:mc",
"@llvm//:mc_disassembler",
"@llvm//:object",
@ -645,6 +649,7 @@ tf_cc_test(
"//tensorflow/core:test",
"//third_party/eigen3",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
],
)

@ -705,8 +705,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
const llvm::Target* target =
llvm::TargetRegistry::lookupTarget(triple.getTriple(), error);
if (target == nullptr) {
return InternalError("TargetRegistry::lookupTarget failed: %s",
error.c_str());
return InternalError("TargetRegistry::lookupTarget failed: %s", error);
}

llvm::Reloc::Model reloc_model = llvm::Reloc::Static;

@ -23,6 +23,7 @@ limitations under the License.
#include <vector>

#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
@ -37,7 +38,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
@ -171,20 +171,18 @@ Status CpuExecutable::ExecuteComputeFunction(
void* result_buffer = buffer_pointers[result_slice.index()];
if (VLOG_IS_ON(3)) {
VLOG(3) << "Executing compute function:";
VLOG(3) << tensorflow::strings::Printf(
" func(void* result, void* params[null], void* temps[%zu], "
"uint64 profile_counters[%zu])",
VLOG(3) << absl::StrFormat(
" func(void* result, void* params[null], void* temps[%u], "
"uint64 profile_counters[%u])",
buffer_pointers.size(), profile_counters_size);
VLOG(3) << tensorflow::strings::Printf(" result = %p", result_buffer);
VLOG(3) << absl::StrFormat(" result = %p", result_buffer);
auto ptr_printer = [](string* out, const void* p) {
absl::StrAppend(out, tensorflow::strings::Printf("%p", p));
absl::StrAppend(out, absl::StrFormat("%p", p));
};
VLOG(3) << " params = nullptr";
VLOG(3) << tensorflow::strings::Printf(
" temps = [%s]",
absl::StrJoin(buffer_pointers, ", ", ptr_printer).c_str());
VLOG(3) << tensorflow::strings::Printf(" profile_counters = %p",
profile_counters);
VLOG(3) << absl::StrFormat(
" temps = [%s]", absl::StrJoin(buffer_pointers, ", ", ptr_printer));
VLOG(3) << absl::StrFormat(" profile_counters = %p", profile_counters);
}

compute_function_(result_buffer, run_options, nullptr, buffer_pointers.data(),

@ -34,9 +34,8 @@ StatusOr<bool> CpuHloSupportChecker::Run(HloModule* module) {
return xla::Unimplemented(
"CPU backend does not support HLO instruction %s with shape "
"containing a sparse layout: %s",
instruction->ToString().c_str(),
ShapeUtil::HumanStringWithLayout(instruction->shape())
.c_str());
instruction->ToString(),
ShapeUtil::HumanStringWithLayout(instruction->shape()));
}
return Status::OK();
}));

@ -20,6 +20,7 @@ limitations under the License.
#include <tuple>

#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/client/local_client.h"
@ -28,7 +29,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/common_runtime/eigen_thread_pool.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
@ -142,10 +142,10 @@ class EigenMatMulTest : public CpuRuntimeTest,
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);

return tensorflow::strings::Printf(
"EigenMatMul_%lld_%lld_%lld_%s%s%s_threaded", shape.m, shape.k, shape.n,
transpose_lhs ? "Tlhs_" : "", transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
return absl::StrFormat("EigenMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
}
};

@ -178,10 +178,10 @@ class MKLMatMulTest : public CpuRuntimeTest,
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);

return tensorflow::strings::Printf(
"MKLMatMul_%lld_%lld_%lld_%s%s%s_threaded", shape.m, shape.k, shape.n,
transpose_lhs ? "Tlhs_" : "", transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
return absl::StrFormat("MKLMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
}
};

@ -104,7 +104,7 @@ Status CpuTransferManager::TransferLiteralToInfeed(
if (ShapeUtil::IsNestedTuple(shape)) {
return Unimplemented(
"Infeed with a nested tuple shape is not supported: %s",
ShapeUtil::HumanString(literal.shape()).c_str());
ShapeUtil::HumanString(literal.shape()));
}

// For a tuple, we transfer each of its elements to the device and
@ -152,11 +152,11 @@ CpuTransferManager::TransferBufferToInfeedInternal(se::StreamExecutor* executor,
int64 size,
const void* source) {
if (size > std::numeric_limits<int32>::max()) {
return InvalidArgument("Infeed shape is too large: needs %lld bytes", size);
return InvalidArgument("Infeed shape is too large: needs %d bytes", size);
}

if (size <= 0) {
return InvalidArgument("Infeed shape must have positive size; got %lld",
return InvalidArgument("Infeed shape must have positive size; got %d",
size);
}

@ -244,12 +244,12 @@ StatusOr<Shape> CpuTransferManager::TransferBuffersFromOutfeedInternal(
for (auto b : buffer_data) {
int64 size = b.second;
if (size > std::numeric_limits<int32>::max()) {
return InvalidArgument("Outfeed shape is too large: needs %lld bytes",
return InvalidArgument("Outfeed shape is too large: needs %d bytes",
size);
}

if (size <= 0) {
return InvalidArgument("Outfeed shape must have positive size; got %lld",
return InvalidArgument("Outfeed shape must have positive size; got %d",
size);
}


@ -21,13 +21,13 @@ limitations under the License.
#include <type_traits>
#include <vector>

#include "absl/strings/str_format.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"

@ -151,7 +151,7 @@ StatusOr<DisassemblerResult> Disassembler::DisassembleObjectFile(
size = 1;
}

ostream << tensorflow::strings::Printf("0x%08lx", index) << " ";
ostream << absl::StrFormat("0x%08lx", index) << " ";

if (decode_status == llvm::MCDisassembler::Success) {
// For branches, try to determine the actual address and emit it as an
@ -163,7 +163,7 @@ StatusOr<DisassemblerResult> Disassembler::DisassembleObjectFile(
uint64_t target;
if (inst_analysis_->evaluateBranch(
instruction, section_address + index, size, target)) {
annotation = tensorflow::strings::Printf("[0x%08lx]", target);
annotation = absl::StrFormat("[0x%08lx]", target);
}
}
inst_printer_->printInst(&instruction, ostream, annotation.c_str(),

@ -1467,7 +1467,7 @@ Status DotOpEmitter::EmitCallToRuntime() {
break;
default:
return Unimplemented("Invalid type %s for dot operation",
PrimitiveType_Name(type).c_str());
PrimitiveType_Name(type));
}

llvm::Type* float_ptr_type = float_type->getPointerTo();

@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
// IWYU pragma: no_include "llvm/IR/Intrinsics.gen.inc"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
@ -68,7 +69,6 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/strings/stringprintf.h"

namespace xla {

@ -230,9 +230,8 @@ Status IrEmitter::HandleCopy(HloInstruction* copy) {
// Use the elemental emitter for array shapes.
return DefaultAction(copy);
}
return Unimplemented(
"unsupported operand type %s for copy instruction",
PrimitiveType_Name(copy->shape().element_type()).c_str());
return Unimplemented("unsupported operand type %s for copy instruction",
PrimitiveType_Name(copy->shape().element_type()));
}

// Calculate the alignment of a buffer allocated for a given primitive type.
@ -389,7 +388,7 @@ Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,
int64 length = ByteSizeOf(shape);
if (length <= 0 || length > std::numeric_limits<int32>::max()) {
return InvalidArgument(
"xfeed (infeed or outfeed) buffer length %lld is outside the valid "
"xfeed (infeed or outfeed) buffer length %d is outside the valid "
"size range",
length);
}
@ -1620,9 +1619,8 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
int64 dimension = LayoutUtil::Minor(reduce->shape().layout(), i);
int64 start_index = 0;
int64 end_index = reduce->shape().dimensions(dimension);
std::unique_ptr<llvm_ir::ForLoop> loop =
loop_nest.AddLoop(start_index, end_index,
tensorflow::strings::Printf("dim.%lld", dimension));
std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
start_index, end_index, absl::StrFormat("dim.%d", dimension));
array_index[dimension] = loop->GetIndVarValue();
}

@ -1641,9 +1639,9 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
int64 start_index = 0;
int64 end_index = (innermost_dimension_size / vectorization_factor) *
vectorization_factor;
std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
start_index, end_index, vectorization_factor,
tensorflow::strings::Printf("dim.%lld", innermost_dimension));
std::unique_ptr<llvm_ir::ForLoop> loop =
loop_nest.AddLoop(start_index, end_index, vectorization_factor,
absl::StrFormat("dim.%d", innermost_dimension));
array_index[innermost_dimension] = loop->GetIndVarValue();

SetToFirstInsertPoint(loop->GetBodyBasicBlock(), &b_);
@ -2170,8 +2168,8 @@ Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
return InternalError(
"instruction %s %s does not share slice with "
"instruction %s %s",
a->ToString().c_str(), slice_a.ToString().c_str(),
b->ToString().c_str(), slice_b.ToString().c_str());
a->ToString(), slice_a.ToString(), b->ToString(),
slice_b.ToString());
}
return Status::OK();
};
@ -2826,8 +2824,8 @@ Status IrEmitter::ElementTypesSameAndSupported(
if (std::find(supported_types.begin(), supported_types.end(),
primitive_type) == supported_types.end()) {
return Unimplemented("unsupported operand type %s in op %s",
PrimitiveType_Name(primitive_type).c_str(),
HloOpcodeString(instruction.opcode()).c_str());
PrimitiveType_Name(primitive_type),
HloOpcodeString(instruction.opcode()));
}
return Status::OK();
}

@ -15,9 +15,9 @@ limitations under the License.

#include "tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.h"

#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"

namespace xla {
namespace cpu {
@ -52,15 +52,15 @@ ParallelLoopEmitter::EmitIndexAndSetExitBasicBlock(absl::string_view loop_name,
llvm::Value* end_index = (*dynamic_loop_bounds_)[bounds_index].second;

std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
/*suffix=*/tensorflow::strings::Printf("dim.%lld", dimension),
start_index, end_index);
/*suffix=*/absl::StrFormat("dim.%d", dimension), start_index,
end_index);
array_index[dimension] = loop->GetIndVarValue();
} else {
// Emit static loop bounds for this dimension.
std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
/*start_index=*/0,
/*end_index=*/shape_.dimensions(dimension),
/*suffix=*/tensorflow::strings::Printf("dim.%lld", dimension));
/*suffix=*/absl::StrFormat("dim.%d", dimension));
array_index[dimension] = loop->GetIndVarValue();
}
}

@ -16,6 +16,7 @@ limitations under the License.
#include <memory>
#include <string>

#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/client.h"
#include "tensorflow/compiler/xla/client/client_library.h"
@ -27,7 +28,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"

@ -67,8 +67,8 @@ int main(int argc, char** argv) {
/*execution_profile=*/&profile);
std::unique_ptr<xla::Literal> actual = result.ConsumeValueOrDie();

LOG(INFO) << tensorflow::strings::Printf("computation took %lldns",
profile.compute_time_ns());
LOG(INFO) << absl::StrFormat("computation took %dns",
profile.compute_time_ns());
LOG(INFO) << actual->ToString();

return 0;

@ -65,8 +65,8 @@ class CpuUnaryIntrinsicTest
features = "";
}

return absl::StrCat(opcode.c_str(), "_On_", triple.c_str(),
features.empty() ? "" : "_With", features.c_str());
return absl::StrCat(opcode, "_On_", triple,
(features.empty() ? "" : "_With"), features);
}
};

@ -36,9 +36,8 @@ StatusOr<OwningDeviceMemory> StreamExecutorMemoryAllocator::Allocate(
se::DeviceMemoryBase result = stream_executor->AllocateArray<uint8>(size);
if (size > 0 && result == nullptr) {
return ResourceExhausted(
"Failed to allocate request for %s (%lluB) on device ordinal %d",
tensorflow::strings::HumanReadableNumBytes(size).c_str(), size,
device_ordinal);
"Failed to allocate request for %s (%uB) on device ordinal %d",
tensorflow::strings::HumanReadableNumBytes(size), size, device_ordinal);
}
return OwningDeviceMemory(result, device_ordinal, this);
}
@ -61,12 +60,12 @@ StatusOr<se::StreamExecutor*> StreamExecutorMemoryAllocator::GetStreamExecutor(
}
if (device_ordinal >= stream_executors_.size()) {
return InvalidArgument(
"device ordinal value (%d) >= number of devices (%zu)", device_ordinal,
"device ordinal value (%d) >= number of devices (%u)", device_ordinal,
stream_executors_.size());
}
if (stream_executors_[device_ordinal] == nullptr) {
return NotFound("Device %s:%d present but not supported",
platform()->Name().c_str(), device_ordinal);
platform()->Name(), device_ordinal);
}
return stream_executors_[device_ordinal];
}

@ -28,14 +28,14 @@ template <typename HloInstructionPtr>
Status DfsHloVisitorBase<HloInstructionPtr>::HandleElementwiseUnary(
HloInstructionPtr hlo) {
return Unimplemented("DfsHloVisitor::HandleElementwiseUnary: %s",
HloOpcodeString(hlo->opcode()).c_str());
HloOpcodeString(hlo->opcode()));
}

template <typename HloInstructionPtr>
Status DfsHloVisitorBase<HloInstructionPtr>::HandleElementwiseBinary(
HloInstructionPtr hlo) {
return Unimplemented("DfsHloVisitor::HandleElementwiseBinary: %s",
HloOpcodeString(hlo->opcode()).c_str());
HloOpcodeString(hlo->opcode()));
}

template <typename HloInstructionPtr>

@ -264,8 +264,8 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
}
}
return Unimplemented("conversion from primitive type %s to %s",
PrimitiveType_Name(from_type).c_str(),
PrimitiveType_Name(to_type).c_str());
PrimitiveType_Name(from_type),
PrimitiveType_Name(to_type));
}
case HloOpcode::kBitcastConvert: {
PrimitiveType from_type = op->operand(0)->shape().element_type();
@ -282,8 +282,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
return InvalidArgument(
"bitcast conversion from primitive type %s to %s with unequal "
"bit-widths (%u versus %u) ",
PrimitiveType_Name(from_type).c_str(),
PrimitiveType_Name(to_type).c_str(),
PrimitiveType_Name(from_type), PrimitiveType_Name(to_type),
primitive_util::BitWidth(from_type),
primitive_util::BitWidth(to_type));
}
@ -332,7 +331,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
}
default:
return Unimplemented("unary integer op '%s'",
HloOpcodeString(op->opcode()).c_str());
HloOpcodeString(op->opcode()));
}
}

@ -389,8 +388,8 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_));
}
return Unimplemented("unhandled conversion operation: %s => %s",
PrimitiveType_Name(from_type).c_str(),
PrimitiveType_Name(to_type).c_str());
PrimitiveType_Name(from_type),
PrimitiveType_Name(to_type));
}
case HloOpcode::kBitcastConvert: {
PrimitiveType from_type = op->operand(0)->shape().element_type();
@ -407,8 +406,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
return InvalidArgument(
"bitcast conversion from primitive type %s to %s with unequal "
"bit-widths (%u versus %u) ",
PrimitiveType_Name(from_type).c_str(),
PrimitiveType_Name(to_type).c_str(),
PrimitiveType_Name(from_type), PrimitiveType_Name(to_type),
primitive_util::BitWidth(from_type),
primitive_util::BitWidth(to_type));
}
@ -471,7 +469,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
return llvm::ConstantFP::get(operand_value->getType(), 0.0);
default:
return Unimplemented("unary floating-point op '%s'",
HloOpcodeString(op->opcode()).c_str());
HloOpcodeString(op->opcode()));
}
}

@ -683,7 +681,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
return EmitExtractImag(operand_value);
default:
return Unimplemented("unary complex op '%s'",
HloOpcodeString(op->opcode()).c_str());
HloOpcodeString(op->opcode()));
}
}

@ -755,7 +753,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatBinaryOp(
return EmitAtan2(op->shape().element_type(), lhs_value, rhs_value);
default:
return Unimplemented("binary floating point op '%s'",
HloOpcodeString(op->opcode()).c_str());
HloOpcodeString(op->opcode()));
}
}

@ -873,7 +871,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexBinaryOp(
}
default:
return Unimplemented("binary complex op '%s'",
HloOpcodeString(op->opcode()).c_str());
HloOpcodeString(op->opcode()));
}
}

@ -1247,7 +1245,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerBinaryOp(
/*saturate_to_sign_bit=*/false);
default:
return Unimplemented("binary integer op '%s'",
HloOpcodeString(op->opcode()).c_str());
HloOpcodeString(op->opcode()));
}
}

@ -1378,7 +1376,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::ConvertValueForDistribution(
default:
return InvalidArgument(
"unhandled distribution %s",
RandomDistribution_Name(hlo->random_distribution()).c_str());
RandomDistribution_Name(hlo->random_distribution()));
}
}

@ -1610,7 +1608,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalClamp(
max_value, EmitIntegralMax(min_value, arg_value, is_signed), is_signed);
} else {
return Unimplemented("Clamp unimplemented for %s",
PrimitiveType_Name(prim_type).c_str());
PrimitiveType_Name(prim_type));
}
}

@ -2232,7 +2230,7 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeElementGenerator(
default:
return [hlo](const IrArray::Index& index) {
return Unimplemented("Unhandled opcode for elemental IR emission: %s",
HloOpcodeString(hlo->opcode()).c_str());
HloOpcodeString(hlo->opcode()));
};
}
}

@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/executable.h"

#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/status.h"
@ -23,7 +24,6 @@ limitations under the License.
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"

using tensorflow::gtl::ArraySlice;
@ -155,9 +155,9 @@ Status Executable::DumpHloSnapshot() {
const string& directory_path =
module_config().debug_options().xla_dump_executions_to();
const auto& module = hlo_snapshot_->hlo().hlo_module();
string filename = tensorflow::strings::Printf(
"computation_%lld__%s__execution_%lld", module.id(),
module.entry_computation_name().c_str(), ++execution_count_);
string filename =
absl::StrFormat("computation_%d__%s__execution_%d", module.id(),
module.entry_computation_name(), ++execution_count_);
return Executable::DumpToDirectory(directory_path, filename, *hlo_snapshot_);
}


@ -66,7 +66,7 @@ Status ExecutionTracker::Unregister(const ExecutionHandle& handle) {
tensorflow::mutex_lock lock(execution_mutex_);
auto it = handle_to_execution_.find(handle.handle());
if (it == handle_to_execution_.end()) {
return NotFound("no execution record for execution handle: %lld",
return NotFound("no execution record for execution handle: %d",
handle.handle());
}
handle_to_execution_.erase(handle.handle());
@ -78,7 +78,7 @@ StatusOr<const AsyncExecution*> ExecutionTracker::Resolve(
tensorflow::mutex_lock lock(execution_mutex_);
auto it = handle_to_execution_.find(handle.handle());
if (it == handle_to_execution_.end()) {
return NotFound("no execution record for execution handle: %lld",
return NotFound("no execution record for execution handle: %d",
handle.handle());
}
return it->second.get();

@ -323,7 +323,7 @@ StatusOr<HloInstruction*> GatherExpander::ExpandGather(
return Unimplemented(
"Gather operations with more than 2147483647 gather indices are not "
"supported. This error occurred for %s.",
gather_instr->ToString().c_str());
gather_instr->ToString());
}

TF_ASSIGN_OR_RETURN(

@ -57,6 +57,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
],
)

@ -110,6 +111,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
],
)

@ -351,6 +353,7 @@ cc_library(
"//tensorflow/stream_executor",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:optional",
],
)
@ -389,6 +392,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:optional",
],
)
@ -819,6 +823,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
],
)

@ -62,7 +62,7 @@ StatusOr<std::unique_ptr<BufferAllocations>> BufferAllocations::Builder::Build(
if (reinterpret_cast<uintptr_t>(address.opaque()) % expected_alignment !=
0) {
return InternalError(
"Address of registered buffer %lld must be a multiple of %llx, but "
"Address of registered buffer %d must be a multiple of %x, but "
"was %p",
i, kEntryParameterAlignBytes, address.opaque());
}
@ -83,7 +83,7 @@ StatusOr<std::unique_ptr<BufferAllocations>> BufferAllocations::Builder::Build(
0) {
return InternalError(
"Address returned by memory_allocator->Allocate must be a "
"multiple of %llx, but was %p",
"multiple of %x, but was %p",
kXlaAllocatedBufferAlignBytes, buffer.opaque());
}
// We do manual memory management within BufferAllocations. Be sure not

@ -124,7 +124,7 @@ StatusOr<F16BufferComparator> F16BufferComparator::Create(
StatusOr<bool> F16BufferComparator::CompareEqualImpl(
se::DeviceMemory<Eigen::half> test_buffer) {
if (ref_buffer_.root_buffer().size() != test_buffer.size()) {
return InternalError("Mismatched buffer size: %lld vs %lld",
return InternalError("Mismatched buffer size: %d vs %d",
ref_buffer_.root_buffer().size(), test_buffer.size());
}


@ -59,7 +59,7 @@ Status ConditionalThunk::ExecuteOnStream(
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to retrieve predicate value on stream %p: %s.",
stream, block_status.error_message().c_str());
stream, block_status.error_message());
}

// Execute the true or the false computation depending on the value of the

@ -22,7 +22,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"


@ -22,7 +22,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

@ -15,6 +15,7 @@ limitations under the License.

#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/backend_configs.pb.h"
@ -59,8 +60,8 @@ StatusOr<se::DeviceMemory<uint8>> ScratchAllocator::AllocateBytes(
if (byte_size > GetMemoryLimitInBytes(stream)) {
return se::port::Status(
se::port::error::RESOURCE_EXHAUSTED,
tensorflow::strings::Printf(
"Allocating %lld bytes exceeds the memory limit of %lld bytes.",
absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.",
byte_size, GetMemoryLimitInBytes(stream)));
}

@ -361,7 +362,7 @@ CudnnConvolutionAlgorithmPicker::PickBestAlgorithm(
return InternalError(
"All algorithms tried for convolution %s failed. Falling back to "
"default algorithm.",
instr->ToString().c_str());
instr->ToString());
}

StatusOr<bool> CudnnConvolutionAlgorithmPicker::RunOnInstruction(

@ -197,8 +197,8 @@ Status RunCudnnConvolution(

if (!stream->ok()) {
return InternalError(
"Unable to launch convolution with type %s and algorithm (%lld, %lld)",
CudnnConvKindToString(kind).c_str(), algorithm.algorithm().algo_id(),
"Unable to launch convolution with type %s and algorithm (%d, %d)",
CudnnConvKindToString(kind), algorithm.algorithm().algo_id(),
algorithm.algorithm_no_scratch().algo_id());
}
return Status::OK();

@ -107,7 +107,7 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLibdeviceMathCall(
break;
default:
return Unimplemented("Bad type for libdevice math call: %s",
PrimitiveType_Name(output_type).c_str());
PrimitiveType_Name(output_type));
}
llvm::Value* result = EmitMathCall(munged_callee, converted_operands,
converted_input_types, output_type)
@ -138,7 +138,7 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLlvmIntrinsicMathCall(
break;
default:
return Unimplemented("Bad type for llvm intrinsic math call: %s",
PrimitiveType_Name(output_type).c_str());
PrimitiveType_Name(output_type));
}
return EmitMathCall(munged_callee, operands, input_types, output_type);
}
@ -152,8 +152,8 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitMathCall(
for (PrimitiveType input_type : input_types) {
if (output_type != input_type) {
return Unimplemented("Input type ≠ output type: %s ≠ %s",
PrimitiveType_Name(input_type).c_str(),
PrimitiveType_Name(output_type).c_str());
PrimitiveType_Name(input_type),
PrimitiveType_Name(output_type));
}
}

@ -18,10 +18,10 @@ limitations under the License.
#include <string>

#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

@ -43,8 +43,8 @@ StatusOr<se::DeviceMemory<uint8>> FftScratchAllocator::AllocateBytes(
if (byte_size > GetMemoryLimitInBytes(stream)) {
return se::port::Status(
se::port::error::RESOURCE_EXHAUSTED,
tensorflow::strings::Printf(
"Allocating %lld bytes exceeds the memory limit of %lld bytes.",
absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.",
byte_size, GetMemoryLimitInBytes(stream)));
}

@ -213,7 +213,7 @@ Status FftThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
return Status::OK();
}
return InternalError("Unable to launch fft for thunk %p with type %s", this,
FftTypeToString(fft_type_).c_str());
FftTypeToString(fft_type_));
}

} // namespace gpu

@ -186,7 +186,7 @@ StatusOr<se::blas::AlgorithmType> DoGemmAutotune(
}

return InternalError(
"Unable to autotune cuBLAS gemm on stream %p; none of the %zu algorithms "
"Unable to autotune cuBLAS gemm on stream %p; none of the %u algorithms "
"ran successfully",
stream, algorithms.size());
}

@ -160,7 +160,7 @@ Status GpuExecutable::ExecuteThunks(
if (!block_status.ok()) {
return InternalError(
"Failed to complete all kernels launched on stream %p: %s",
main_stream, block_status.error_message().c_str());
main_stream, block_status.error_message());
}
}

@ -260,10 +260,9 @@ StatusOr<ScopedShapedBuffer> GpuExecutable::ExecuteOnStream(
if (buffer.is_null() && buffer.size() > 0) {
return FailedPrecondition(
"Cannot run XLA computation because pointer to (sub-)buffer at "
"index %s of parameter %lld was null. All pointers to "
"(sub-)buffers must not be null, unless the (sub-)buffer has zero "
"elements.",
allocation.param_shape_index().ToString().c_str(), param_no);
"index %s of parameter %d was null. All pointers to (sub-)buffers "
"must not be null, unless the (sub-)buffer has zero elements.",
allocation.param_shape_index().ToString(), param_no);
}

buffer_allocations_builder.RegisterBuffer(i, buffer);

@ -34,9 +34,8 @@ StatusOr<bool> GpuHloSupportChecker::Run(HloModule* module) {
return xla::Unimplemented(
"GPU backend does not support HLO instruction %s with shape "
"containing a sparse layout: %s",
instruction->ToString().c_str(),
ShapeUtil::HumanStringWithLayout(instruction->shape())
.c_str());
instruction->ToString(),
ShapeUtil::HumanStringWithLayout(instruction->shape()));
}
return Status::OK();
}));

@ -84,7 +84,7 @@ Status GpuTransferManager::EnqueueBuffersToInfeed(
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to complete data transfer on stream %p: %s",
stream, block_status.error_message().c_str());
stream, block_status.error_message());
}

infeed_manager->EnqueueDestination(std::move(buffers));
@ -97,7 +97,7 @@ Status GpuTransferManager::EnqueueBuffersToInfeed(
StatusOr<InfeedBuffer> GpuTransferManager::TransferBufferToInfeedInternal(
se::StreamExecutor* executor, int64 size, const void* source) {
if (size > std::numeric_limits<int32>::max()) {
return InvalidArgument("Infeed shape is too large: needs %lld bytes", size);
return InvalidArgument("Infeed shape is too large: needs %d bytes", size);
}

if (size == 0) {

@ -19,6 +19,7 @@ limitations under the License.
#include <unordered_set>

#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"

@ -266,7 +267,7 @@ TEST_F(HloScheduleTest, LatticeMatMul) {
params.reserve(6);
for (int i = 0; i < 6; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
i, f32_2x2_, /*name=*/tensorflow::strings::Printf("param%d", i))));
i, f32_2x2_, /*name=*/absl::StrFormat("param%d", i))));
}
HloInstruction* d00 = builder.AddInstruction(
HloInstruction::CreateCanonicalDot(f32_2x2_, params[2], params[3]));

@ -96,7 +96,7 @@ Status InfeedThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to complete data transfer on stream %p: %s",
stream, block_status.error_message().c_str());
stream, block_status.error_message());
}

VLOG(2) << "Infeeding to GPU complete";

@ -365,7 +365,7 @@ static StatusOr<const HloInstruction*> FindHloInstruction(
}
return NotFound(
"Computation '%s' does not contain an instruction with op code '%s'.",
computation.name().c_str(), HloOpcodeString(op).c_str());
computation.name(), HloOpcodeString(op));
}

TEST_F(InstructionFusionTest, MultiOutputFusion) {

@ -384,8 +384,8 @@ Status IrEmitter::EmitAtomicOperationForNestedComputation(
// TODO(b/30258929): We only accept binary computations so far.
return Unimplemented(
"We only support atomic functions with exactly two parameters, but "
"computation %s has %lld.",
computation.name().c_str(), computation.num_parameters());
"computation %s has %d.",
computation.name(), computation.num_parameters());
}

if (MaybeEmitDirectAtomicOperation(computation, output_address,
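The dropped `.c_str()` calls above follow from StrFormat's `%s`, which binds directly to std::string, absl::string_view, and const char*. A hedged standalone sketch, not taken from this commit:

#include <iostream>
#include <string>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"

int main() {
  std::string computation = "entry";      // std::string: no c_str() needed
  absl::string_view opcode = "kFusion";   // string_view works as well
  std::cout << absl::StrFormat(
                   "Computation '%s' does not contain an instruction with op "
                   "code '%s'.",
                   computation, opcode)
            << std::endl;
}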
@ -2674,8 +2674,7 @@ Status CheckHloBuffersShareAllocation(
if (slice_a != slice_b) {
return InternalError(
"instruction %s %s does not share allocation with instruction %s %s",
a->ToString().c_str(), slice_a.ToString().c_str(),
b->ToString().c_str(), slice_b.ToString().c_str());
a->ToString(), slice_a.ToString(), b->ToString(), slice_b.ToString());
}
return Status::OK();
}

@ -63,7 +63,7 @@ Status KernelThunk::Initialize(const GpuExecutable& executable,
if (kernel_cache_.end() == it) {
it = kernel_cache_.emplace(executor, se::KernelBase(executor)).first;
if (!executor->GetKernel(*loader_spec_, &it->second)) {
return InternalError("Unable to load kernel %s", kernel_name_.c_str());
return InternalError("Unable to load kernel %s", kernel_name_);
}
}

@ -107,7 +107,7 @@ Status KernelThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
stream, se::ThreadDim(launch_dimensions.threads_per_block()),
se::BlockDim(launch_dimensions.block_count()), *kernel,
*kernel_args)) {
return InternalError("Unable to launch kernel %s", kernel_name_.c_str());
return InternalError("Unable to launch kernel %s", kernel_name_);
}
return Status::OK();
}

@ -36,6 +36,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
"@llvm//:amdgpu_code_gen",
"@llvm//:analysis",
"@llvm//:bit_reader",

@ -15,6 +15,7 @@ limitations under the License.

#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.h"

#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"

@ -22,7 +23,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/utils.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {

@ -87,9 +87,10 @@ void IrDumpingPassManager::run(llvm::Module &module) {
llvm::PassRegistry::getPassRegistry()->getPassInfo(P->getPassID());
const string basename = ReplaceFilenameExtension(
absl::string_view(tensorflow::io::Basename(input_filename_)),
tensorflow::strings::Printf(
absl::StrFormat(
"pass-%02d.before.%s.ll", i,
(PI == nullptr ? "unknown" : PI->getPassArgument().data())));
absl::string_view(PI == nullptr ? "unknown"
: PI->getPassArgument().data())));
llvm::legacy::PassManager::add(
new DumpIrPass(tensorflow::io::JoinPath(output_dir_, basename)));
}
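Note the `%02d` kept in the new StrFormat call above: printf flags, widths, and precision carry over unchanged; only the length qualifiers (`%z`, `%ll`) go away. A small standalone sketch reusing the filename scheme from that hunk:

#include <iostream>
#include "absl/strings/str_format.h"

int main() {
  for (int i = 0; i < 3; ++i) {
    // Zero-padding behaves exactly as in printf: 1 -> "01".
    std::cout << absl::StrFormat("pass-%02d.before.%s.ll", i, "inline")
              << std::endl;
  }
}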
@ -57,7 +57,6 @@ limitations under the License.
#include "llvm/Transforms/Scalar.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tracing.h"

@ -96,7 +96,7 @@ Status OutfeedThunk::ExecuteOnStream(
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to complete data transfer on stream %p: %s",
stream, block_status.error_message().c_str());
stream, block_status.error_message());
}

VLOG(2) << "Outfeeding from GPU complete";

@ -19,6 +19,7 @@ limitations under the License.
#include <string>

#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"

@ -26,7 +27,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {

@ -34,9 +34,8 @@ namespace gpu {

std::ostream& operator<<(std::ostream& out,
const LaunchDimensions& launch_dims) {
out << tensorflow::strings::Printf("[block: %lld, thread: %lld]",
launch_dims.block_count(),
launch_dims.threads_per_block());
out << absl::StrFormat("[block: %d, thread: %d]", launch_dims.block_count(),
launch_dims.threads_per_block());
return out;
}

@ -91,9 +90,9 @@ LaunchDimensions CalculateLaunchDimensions(
}

int64 block_count = CeilOfRatio(num_elements, threads_per_block);
VLOG(2) << tensorflow::strings::Printf(
VLOG(2) << absl::StrFormat(
"Initialized the block count to ceil(# of elements / threads per "
"block) = ceil(%lld/%lld) = %lld",
"block) = ceil(%d/%d) = %d",
num_elements, threads_per_block, block_count);

return LaunchDimensions(block_count, threads_per_block);
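StrFormat returns a std::string, so it streams into std::ostream and VLOG exactly as the old Printf result did. A minimal sketch of the operator<< pattern above, with a hypothetical stand-in struct in place of gpu::LaunchDimensions:

#include <cstdint>
#include <iostream>
#include "absl/strings/str_format.h"

struct LaunchDims {  // stand-in for gpu::LaunchDimensions
  int64_t blocks;
  int64_t threads;
};

std::ostream& operator<<(std::ostream& out, const LaunchDims& d) {
  // StrFormat yields std::string, so the result streams directly.
  return out << absl::StrFormat("[block: %d, thread: %d]", d.blocks,
                                d.threads);
}

int main() {
  std::cout << LaunchDims{256, 1024} << std::endl;
}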
@ -16,13 +16,13 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"

#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/test_helpers.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/strings/stringprintf.h"

namespace xla {
namespace gpu {

@ -98,7 +98,7 @@ TEST_F(StreamAssignmentTest, LatticeMatMul) {
params.reserve(6);
for (int i = 0; i < 6; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
i, f32_2x2_, /*name=*/tensorflow::strings::Printf("param%d", i))));
i, f32_2x2_, /*name=*/absl::StrFormat("param%d", i))));
}
HloInstruction* d00 = builder.AddInstruction(
HloInstruction::CreateCanonicalDot(f32_2x2_, params[2], params[3]));

@ -70,7 +70,7 @@ Status WhileThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
if (!block_status.ok()) {
return InternalError(
"Failed to complete all kernels launched on stream %p: %s", stream,
block_status.error_message().c_str());
block_status.error_message());
}

if (!condition_result) {

@ -625,16 +625,15 @@ StatusOr<HloInstruction*> HloComputation::DeepCopyInstruction(
if (instruction->parent() != this) {
return FailedPrecondition(
"Can't deep copy instruction %s: instruction is not in computation %s",
instruction->name().c_str(), name().c_str());
instruction->name(), name());
}
if (indices_to_copy != nullptr &&
!ShapeUtil::Compatible(instruction->shape(), indices_to_copy->shape())) {
return FailedPrecondition(
"Can't deep copy instruction %s: given shape tree of indices to copy "
"has incompatible shapes: %s vs. %s",
instruction->name().c_str(),
ShapeUtil::HumanString(instruction->shape()).c_str(),
ShapeUtil::HumanString(indices_to_copy->shape()).c_str());
instruction->name(), ShapeUtil::HumanString(instruction->shape()),
ShapeUtil::HumanString(indices_to_copy->shape()));
}

ShapeIndex index;

@ -664,7 +663,7 @@ StatusOr<HloInstruction*> HloComputation::DeepCopyInstructionWithCustomCopier(
if (instruction->parent() != this) {
return FailedPrecondition(
"Can't deep copy instruction %s: instruction is not in computation %s",
instruction->name().c_str(), name().c_str());
instruction->name(), name());
}
ShapeIndex index;
return DeepCopyHelper(instruction, &index, copy_leaf);

@ -837,7 +837,7 @@ Status HloDataflowAnalysis::InitializeInstructionValueSets() {
return Unimplemented(
"Computation %s is called in both a parallel (eg, kMap) and "
"sequential (eg, kCall) context",
computation->name().c_str());
computation->name());
}
if (call_graph_node.caller_callsites().empty() ||
call_graph_node.context() == CallContext::kParallel) {

@ -435,7 +435,7 @@ Status HloEvaluator::HandleIsFinite(HloInstruction* is_finite) {
if (!ShapeUtil::ElementIsFloating(operand->shape())) {
return InvalidArgument(
"expected element type in shape to be float for IsFinite op, got: %s",
PrimitiveType_Name(operand->shape().element_type()).c_str());
PrimitiveType_Name(operand->shape().element_type()));
}

switch (operand->shape().element_type()) {

@ -476,9 +476,9 @@ Status HloEvaluator::HandleCompare(HloInstruction* compare) {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s vs %s",
ShapeUtil::HumanString(compare->shape()).c_str(),
ShapeUtil::HumanString(lhs->shape()).c_str(),
ShapeUtil::HumanString(rhs->shape()).c_str());
ShapeUtil::HumanString(compare->shape()),
ShapeUtil::HumanString(lhs->shape()),
ShapeUtil::HumanString(rhs->shape()));
}

TF_RET_CHECK(lhs->shape().element_type() == rhs->shape().element_type());

@ -1105,8 +1105,8 @@ Status HloEvaluator::HandleWhile(HloInstruction* while_hlo) {
HloEvaluator loop_body_evaluator(max_loop_iterations_);
while (keep_going) {
if (max_loop_iterations_ >= 0 && iteration_count++ > max_loop_iterations_) {
return InvalidArgument("Loop %s exceeded loop iteration limit (%lld).",
while_hlo->name().c_str(), max_loop_iterations_);
return InvalidArgument("Loop %s exceeded loop iteration limit (%d).",
while_hlo->name(), max_loop_iterations_);
}
TF_ASSIGN_OR_RETURN(auto cond_val, cond_evaluator.Evaluate<Literal*>(
*cond_comp, {lcv.get()}));

@ -1262,7 +1262,7 @@ Status HloEvaluator::HandleSort(HloInstruction* sort) {
const int64 rank = ShapeUtil::Rank(sort->operand(0)->shape());
if (sort_dim != rank - 1) {
return Unimplemented(
"Trying to support along dimension %lld, which is not the last "
"Trying to support along dimension %d, which is not the last "
"dimension",
sort_dim);
}

@ -222,8 +222,8 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s",
ShapeUtil::HumanString(shape).c_str(),
ShapeUtil::HumanString(operand->shape()).c_str());
ShapeUtil::HumanString(shape),
ShapeUtil::HumanString(operand->shape()));
}

auto result = absl::make_unique<Literal>(shape);

@ -143,7 +143,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {

Status DefaultAction(HloInstruction* hlo_instruction) override {
return Unimplemented("unhandled HLO ops for HloEvaluator: %s.",
HloOpcodeString(hlo_instruction->opcode()).c_str());
HloOpcodeString(hlo_instruction->opcode()));
}

// TODO(b/35950897): many of the stl functions used in the handlers are not

@ -2654,9 +2654,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s vs %s: ",
ShapeUtil::HumanString(shape).c_str(),
ShapeUtil::HumanString(lhs->shape()).c_str(),
ShapeUtil::HumanString(rhs->shape()).c_str());
ShapeUtil::HumanString(shape), ShapeUtil::HumanString(lhs->shape()),
ShapeUtil::HumanString(rhs->shape()));
}

const Literal& lhs_literal = parent_->GetEvaluatedLiteralFor(lhs);

@ -2690,10 +2689,9 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s vs %s vs %s: ",
ShapeUtil::HumanString(shape).c_str(),
ShapeUtil::HumanString(lhs->shape()).c_str(),
ShapeUtil::HumanString(rhs->shape()).c_str(),
ShapeUtil::HumanString(ehs->shape()).c_str());
ShapeUtil::HumanString(shape), ShapeUtil::HumanString(lhs->shape()),
ShapeUtil::HumanString(rhs->shape()),
ShapeUtil::HumanString(ehs->shape()));
}

const Literal& lhs_literal = parent_->GetEvaluatedLiteralFor(lhs);

@ -28,6 +28,7 @@ limitations under the License.

#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/types/optional.h"

@ -44,7 +45,6 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/regexp.h"

@ -57,32 +57,12 @@ using absl::nullopt;
using absl::optional;
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using tensorflow::Env;
using tensorflow::WriteStringToFile;
using tensorflow::io::JoinPath;

// Helpers for Printf and Appendf.
template <typename T>
struct PrintfConvert {
const T& operator()(const T& t) const { return t; }
};
template <>
struct PrintfConvert<string> {
const char* operator()(const string& s) const { return s.c_str(); }
};

// Like tensorflow::strings::Printf/Appendf, but you don't need to call c_str()
// on strings.
template <typename... Ts>
string Printf(const char* fmt, const Ts&... ts) {
return tensorflow::strings::Printf(fmt, PrintfConvert<Ts>()(ts)...);
}
template <typename... Ts>
void Appendf(string* s, const char* fmt, const Ts&... ts) {
tensorflow::strings::Appendf(s, fmt, PrintfConvert<Ts>()(ts)...);
}

// Used to indicate how we should treat a given HLOInstruction in the graph.
// should we treat it like normal, hide it, and so on?
enum NodeFilterResult {

@ -210,10 +190,9 @@ NodeColors NodeColorsForScheme(ColorScheme color) {
string NodeColorAttributes(ColorScheme color) {
NodeColors node_colors = NodeColorsForScheme(color);

return Printf(
R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color, node_colors.stroke_color,
node_colors.fill_color);
return StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color,
node_colors.stroke_color, node_colors.fill_color);
}

// Replaces <> with &lt;&gt;, so that this string is safe(er) for use in a
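The hunk above deletes hlo_graph_dumper's local Printf/Appendf wrappers: their whole purpose was to spare callers the c_str() dance, and absl::StrFormat provides that natively, with absl::StrAppendFormat covering the Appendf case. A hedged standalone sketch:

#include <iostream>
#include <string>
#include "absl/strings/str_format.h"

int main() {
  std::string label = "digraph:";
  // StrAppendFormat appends formatted text in place, like the old Appendf,
  // and %s takes std::string directly.
  absl::StrAppendFormat(&label, " %d nodes, root = %s", 42,
                        std::string("add.7"));
  std::cout << label << std::endl;
}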
@ -448,7 +427,7 @@ string HloDotDumper::Dump() {
}

string HloDotDumper::Header() {
const char* fmt = R"(digraph G {
constexpr char fmt[] = R"(digraph G {
rankdir = TB;
compound = true;
label = <<b>%s</b>>;

@ -481,8 +460,8 @@ stylesheet=<
}
if (profile_ != nullptr) {
auto cycles = profile_->total_cycles_executed(*computation_);
Appendf(&graph_label, "<br/>total cycles = %lld (%s)", cycles,
tensorflow::strings::HumanReadableNum(cycles));
absl::StrAppendFormat(&graph_label, "<br/>total cycles = %d (%s)", cycles,
tensorflow::strings::HumanReadableNum(cycles));
}

// Create CSS rules that say, when you hover over the given node or cluster,

@ -509,14 +488,14 @@ stylesheet=<
// One could imagine other ways of writing this CSS rule that involve
// less duplication, but this way seems to be relatively performant.
edge_css_rules.push_back(
Printf(" #%s%d:hover ~ #edge%lld text { fill: %s; }\n"
" #%s%d:hover ~ #edge%lld path { "
"stroke: %s; stroke-width: .2em; }\n"
" #%s%d:hover ~ #edge%lld polygon { "
"fill: %s; stroke: %s; stroke-width: .2em; }\n",
elem_type, elem_id, edge_id, color, //
elem_type, elem_id, edge_id, color, //
elem_type, elem_id, edge_id, color, color));
StrFormat(" #%s%d:hover ~ #edge%d text { fill: %s; }\n"
" #%s%d:hover ~ #edge%d path { "
"stroke: %s; stroke-width: .2em; }\n"
" #%s%d:hover ~ #edge%d polygon { "
"fill: %s; stroke: %s; stroke-width: .2em; }\n",
elem_type, elem_id, edge_id, color, //
elem_type, elem_id, edge_id, color, //
elem_type, elem_id, edge_id, color, color));
};

// The "to_node" value may be a NULL, indicating that this points to the

@ -559,7 +538,7 @@ stylesheet=<
}
}

return Printf(fmt, graph_label, StrJoin(edge_css_rules, "\n"));
return StrFormat(fmt, graph_label, StrJoin(edge_css_rules, "\n"));
}

string HloDotDumper::Footer() { return StrCat(StrJoin(edges_, "\n"), "\n}"); }
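The `const char* fmt` -> `constexpr char fmt[]` changes in this file appear to be driven by StrFormat's compile-time format checking: the format argument should be a string literal or constexpr char array so the %-specifiers can be verified against the argument types when the call is compiled. A minimal sketch of the pattern:

#include <iostream>
#include "absl/strings/str_format.h"

// A constexpr array (rather than a const char* variable) keeps the format
// string statically checkable by absl::StrFormat.
constexpr char kHeaderFmt[] = "digraph %s { label = \"%d nodes\"; }";

int main() {
  std::cout << absl::StrFormat(kHeaderFmt, "G", 3) << std::endl;
}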
@ -600,9 +579,9 @@ string HloDotDumper::DumpSubcomputation(const HloComputation* subcomp,
VLOG(2) << "Edge: from " << from->name() << " to " << parent_instr->name()
<< " as " << next_edge_id_;
edge_ids_.insert({{from, parent_instr}, next_edge_id_++});
const char* edge_fmt =
constexpr char edge_fmt[] =
R"(%s -> %s [ltail="%s", style="dashed" tooltip="%s -> %s"];)";
edges_.push_back(Printf(
edges_.push_back(StrFormat(
edge_fmt, InstructionId(from), InstructionId(parent_instr),
SubcomputationId(subcomp), subcomp->name(), parent_instr->name()));
}

@ -619,9 +598,10 @@ string HloDotDumper::DumpSubcomputation(const HloComputation* subcomp,

string subcomp_label, style;
if (parent_instr->opcode() == HloOpcode::kFusion) {
subcomp_label = Printf("Fused expression for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(parent_instr->ToCategory()));
subcomp_label =
StrFormat("Fused expression for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(parent_instr->ToCategory()));
string extra_info = GetInstructionNodeExtraInfo(parent_instr);
if (!extra_info.empty()) {
StrAppend(&subcomp_label, "<br/>", extra_info);

@ -647,18 +627,18 @@ string HloDotDumper::DumpSubcomputation(const HloComputation* subcomp,
strokecolor = highlight ? "#b71c1c" : "#c2c2c2";
}
style =
Printf(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s;")",
fillcolor, strokecolor);
StrFormat(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s;")",
fillcolor, strokecolor);
} else {
subcomp_label = Printf("Subcomputation for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(subcomp->name()));
subcomp_label = StrFormat("Subcomputation for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(subcomp->name()));
style = "style=rounded; color=black;";
}

string comp_body = DumpComputation(subcomp);

const char* computation_fmt = R"(subgraph %s {
constexpr char computation_fmt[] = R"(subgraph %s {
%s
label = <%s>;
labelloc = t;

@ -667,7 +647,7 @@ tooltip = " ";
} // %s

)";
return Printf(computation_fmt, id, style, subcomp_label, comp_body, id);
return StrFormat(computation_fmt, id, style, subcomp_label, comp_body, id);
}

string HloDotDumper::DumpComputation(const HloComputation* comp) {

@ -718,11 +698,11 @@ string HloDotDumper::DumpRootTag() {
VLOG(2) << "Adding edge from " << from->name() << " to root tag as "
<< next_edge_id_;
edge_ids_.insert({{from, to}, next_edge_id_++});
edges_.push_back(Printf(R"(%s -> %s [tooltip=" "];)", from_id, to_id));
edges_.push_back(StrFormat(R"(%s -> %s [tooltip=" "];)", from_id, to_id));

return Printf(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)"
"\n",
to_id, node_body, node_shape, NodeColorAttributes(color));
return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)"
"\n",
to_id, node_body, node_shape, NodeColorAttributes(color));
}

static const HloConstantInstruction* TryGetFusionParameterConstant(

@ -817,10 +797,10 @@ string HloDotDumper::DumpInstruction(const HloInstruction* instr) {
}
}

return Printf(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)"
"\n",
InstructionId(instr), node_body, node_shape, node_metadata,
NodeColorAttributes(color));
return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)"
"\n",
InstructionId(instr), node_body, node_shape, node_metadata,
NodeColorAttributes(color));
}

string HloDotDumper::GetInstructionNodeInlinedOperands(

@ -833,7 +813,7 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
// enumerates all of its empty dimensions (e.g. "{ { {}, {} }, ..."), which
// is just noise.
if (ShapeUtil::IsZeroElementArray(shape)) {
return Printf("{} (%s)", ShapeUtil::HumanString(constant->shape()));
return StrFormat("{} (%s)", ShapeUtil::HumanString(constant->shape()));
}

// Print the literal value of constants with <= K elements.

@ -848,8 +828,8 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
// collected from profiling tools. Those constants may not have a valid
// literal.
if (elem_count.has_value() && *elem_count <= 8 && constant->HasLiteral()) {
return Printf("%s (%s)", constant->literal().ToString(),
ShapeUtil::HumanString(constant->shape()));
return StrFormat("%s (%s)", constant->literal().ToString(),
ShapeUtil::HumanString(constant->shape()));
}

// Otherwise, print e.g. "%constant.42 (s32[100])".

@ -859,8 +839,8 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
} else {
constant_name = StrCat("constant ", constant->name());
}
return Printf("%s %s", constant_name,
ShapeUtil::HumanString(constant->shape()));
return StrFormat("%s %s", constant_name,
ShapeUtil::HumanString(constant->shape()));
};

std::vector<string> lines;

@ -881,7 +861,7 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
TryGetFusionParameterConstant(operand)) {
operand_str = stringify_constant(constant);
} else {
operand_str = Printf("Parameter %lld", operand->parameter_number());
operand_str = StrFormat("Parameter %d", operand->parameter_number());
}
} else {
operand_str = operand->name();

@ -890,9 +870,9 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(

if (operand_str) {
if (instr->operand_count() > 1) {
lines.push_back(Printf("<b>operand %lld</b> = %s", i, *operand_str));
lines.push_back(StrFormat("<b>operand %d</b> = %s", i, *operand_str));
} else {
lines.push_back(Printf("<b>operand</b> = %s", *operand_str));
lines.push_back(StrFormat("<b>operand</b> = %s", *operand_str));
}
}
}

@ -1079,13 +1059,13 @@ string HloDotDumper::GetInstructionNodeShape(const HloInstruction* instr) {
string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) {
// If we have a parameter, put the param number in the name.
if (instr->opcode() == HloOpcode::kParameter) {
return Printf("<b>Parameter %lld</b>", instr->parameter_number());
return StrFormat("<b>Parameter %d</b>", instr->parameter_number());
}

// The HLO instruction name contains usually the opcode, e.g. "%add.42" is
// an add instruction. In this case we render just the name.
if (absl::StartsWith(instr->name(), HloOpcodeString(instr->opcode()))) {
return Printf("<b>%s</b>", HtmlLikeStringSanitize(instr->name()));
return StrFormat("<b>%s</b>", HtmlLikeStringSanitize(instr->name()));
}
string extended_opcode =
StrCat(HloOpcodeString(instr->opcode()),

@ -1093,8 +1073,8 @@ string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) {
? ""
: StrCat(":", xla::ToString(instr->fusion_kind())));
// If the name does not contain the opcode, render both.
return Printf("<b>%s</b><br/>%s", HtmlLikeStringSanitize(extended_opcode),
HtmlLikeStringSanitize(instr->name()));
return StrFormat("<b>%s</b><br/>%s", HtmlLikeStringSanitize(extended_opcode),
HtmlLikeStringSanitize(instr->name()));
}

string HloDotDumper::GetInstructionNodeMetadata(const HloInstruction* instr) {

@ -1103,13 +1083,13 @@ string HloDotDumper::GetInstructionNodeMetadata(const HloInstruction* instr) {
lines.push_back(HtmlLikeStringSanitize(instr->metadata().op_name()));
}
if (!instr->metadata().op_type().empty()) {
lines.push_back(Printf(
lines.push_back(StrFormat(
"op_type: %s", HtmlLikeStringSanitize(instr->metadata().op_type())));
}
if (!instr->metadata().source_file().empty() &&
instr->metadata().source_line() != 0) {
lines.push_back(Printf("op_type: %s", instr->metadata().source_file(),
instr->metadata().source_line()));
lines.push_back(StrFormat("op_type: %s:%d", instr->metadata().source_file(),
instr->metadata().source_line()));
}

return StrJoin(lines, "<br/>");

@ -1164,7 +1144,7 @@ string HloDotDumper::GetInstructionNodeExtraInfo(const HloInstruction* instr) {
lines.push_back(instr_shape);
}
if (debug_options_.xla_hlo_graph_addresses()) {
lines.push_back(Printf("[%p]", instr));
lines.push_back(StrFormat("[%p]", instr));
}
if (profile_ != nullptr) {
double hlo_cycles_executed = profile_->GetCyclesTakenBy(*instr);

@ -1172,8 +1152,8 @@ string HloDotDumper::GetInstructionNodeExtraInfo(const HloInstruction* instr) {
profile_->total_cycles_executed(*instr->parent());
if (hlo_cycles_executed > 0 && total_cycles_executed > 0) {
lines.push_back(
Printf("%% of cycles executed=%.2f",
100 * hlo_cycles_executed / total_cycles_executed));
StrFormat("%% of cycles executed=%.2f",
100 * hlo_cycles_executed / total_cycles_executed));
}
}
return StrJoin(lines, "<br/>");

@ -1208,7 +1188,8 @@ void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) {

string edge_label;
if (instr->operand_count() > 1 && !control_edge) {
edge_label = Printf(R"( headlabel="%lld", labeldistance=2)", operand_num);
edge_label =
StrFormat(R"( headlabel="%d", labeldistance=2)", operand_num);
} else if (control_edge) {
edge_label = "style=\"dotted\" color=\"gray\" label=\"ctrl\"";
}

@ -1218,10 +1199,11 @@ void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) {
// means.
bool is_big_array = TotalElementsInShape(from->shape()) >= 4096;

const char* kEdgeFmt = R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)";
edges_.push_back(Printf(kEdgeFmt, InstructionId(from), InstructionId(to),
(is_big_array ? "normal" : "empty"), from->name(),
to->name(), edge_label));
constexpr char kEdgeFmt[] =
R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)";
edges_.push_back(StrFormat(kEdgeFmt, InstructionId(from), InstructionId(to),
(is_big_array ? "normal" : "empty"),
from->name(), to->name(), edge_label));
};

// Add edges from instr's operands to instr. Parameters within fusion

@ -1262,11 +1244,11 @@ string HloDotDumper::GetInstructionTrivialComputationStr(
continue;
}
if (instr->called_computations().size() == 1) {
lines.push_back(Printf("Subcomputation: <b>%s</b>",
HtmlLikeStringSanitize(*computation_type)));
lines.push_back(StrFormat("Subcomputation: <b>%s</b>",
HtmlLikeStringSanitize(*computation_type)));
} else {
lines.push_back(Printf("Subcomputation %lld: <b>%s</b>", i,
HtmlLikeStringSanitize(*computation_type)));
lines.push_back(StrFormat("Subcomputation %d: <b>%s</b>", i,
HtmlLikeStringSanitize(*computation_type)));
}
}
return StrJoin(lines, "<br/>");

@ -2381,7 +2381,7 @@ Status HloInstruction::Visit(DfsHloVisitorBase<HloInstructionPtr>* visitor) {
return InternalError(
"Unhandled HloOpcode for DfsHloVisitor: %s. This should not happen - "
"please file a bug for XLA.",
HloOpcodeString(opcode_).c_str());
HloOpcodeString(opcode_));
}

// Explicit instantiations.

@ -2464,7 +2464,7 @@ static Status PostOrderDFS(HloInstruction* root, Visitor* visitor,
if (!TF_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s",
current_node->ToString().c_str());
current_node->ToString());
}
}

@ -2473,7 +2473,7 @@ static Status PostOrderDFS(HloInstruction* root, Visitor* visitor,
if (!TF_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s",
current_node->ToString().c_str());
current_node->ToString());
}
}
}

@ -2789,7 +2789,7 @@ StatusOr<HloInstruction::FusionKind> StringToFusionKind(
if (kind_name == "kCustom") {
return HloInstruction::FusionKind::kCustom;
}
return InvalidArgument("Unknown fusion kind: %s", kind_name.c_str());
return InvalidArgument("Unknown fusion kind: %s", kind_name);
}

string PaddingConfigToString(const PaddingConfig& padding) {

@ -53,7 +53,7 @@ class OpAndUserCollectingVisitor : public DfsHloVisitorWithDefault {
public:
Status DefaultAction(HloInstruction* hlo_instruction) override {
return Unimplemented("not implemented %s",
HloOpcodeString(hlo_instruction->opcode()).c_str());
HloOpcodeString(hlo_instruction->opcode()));
}

Status HandleParameter(HloInstruction* parameter) override {

@ -306,8 +306,7 @@ TokKind HloLexer::LexNumberOrPattern() {
R"([-]?((\d+|\d+[.]\d*|\d*[.]\d+)([eE][+-]?\d+))|[-]?(\d+[.]\d*|\d*[.]\d+))"};
if (RE2::Consume(&consumable, *float_pattern)) {
current_ptr_ = consumable.begin();
CHECK(absl::SimpleAtod(string(token_start_, current_ptr_).c_str(),
&decimal_val_));
CHECK(absl::SimpleAtod(string(token_start_, current_ptr_), &decimal_val_));
return TokKind::kDecimal;
}
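The hlo_lexer hunk drops a c_str() for the same reason as the format changes: absl::SimpleAtod takes an absl::string_view, so a std::string converts implicitly. A hedged standalone sketch:

#include <iostream>
#include <string>
#include "absl/strings/numbers.h"

int main() {
  std::string token = "6.02e23";
  double value = 0.0;
  // SimpleAtod accepts absl::string_view; std::string converts implicitly.
  if (absl::SimpleAtod(token, &value)) {
    std::cout << value << std::endl;
  }
}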
@ -163,7 +163,7 @@ Status HloModuleGroupMetadata::VerifyCompanionSets() const {
ss << " " << hlo->name() << std::endl;
}
ss << "has multiple instructions on the same device";
return FailedPrecondition("%s", ss.str().c_str());
return FailedPrecondition("%s", ss.str());
}
}
}

@ -411,16 +411,16 @@ Status HloModuleGroupMetadata::AddCompanion(HloInstruction* instruction1,
Status HloModuleGroupMetadata::VerifyChannelInstructions() {
for (const Channel& channel : channels_) {
if (channel.send == nullptr) {
return FailedPrecondition("missing send for id : %lld", channel.id);
return FailedPrecondition("missing send for id : %d", channel.id);
}
if (channel.recv == nullptr) {
return FailedPrecondition("missing recv for id : %lld", channel.id);
return FailedPrecondition("missing recv for id : %d", channel.id);
}
if (channel.send_done == nullptr) {
return FailedPrecondition("missing send-done for id : %lld", channel.id);
return FailedPrecondition("missing send-done for id : %d", channel.id);
}
if (channel.recv_done == nullptr) {
return FailedPrecondition("missing recv-done for id : %lld", channel.id);
return FailedPrecondition("missing recv-done for id : %d", channel.id);
}
}

@ -436,33 +436,33 @@ Status HloModuleGroupMetadata::VerifyChannelInstructions() {
auto send_done_device = GetInstructionDevice(*channel.send_done);
if (!send_device) {
return FailedPrecondition("send instruction must have a device: %s",
channel.send->ToString().c_str());
channel.send->ToString());
}
if (!send_done_device) {
return FailedPrecondition("send_done instruction must have a device: %s",
channel.send_done->ToString().c_str());
channel.send_done->ToString());
}
if (*send_device != *send_done_device) {
return FailedPrecondition(
"send and send-done (channel=%lld) must be on the same device: %lld "
"vs. %lld",
"send and send-done (channel=%d) must be on the same device: %d "
"vs. %d",
channel.id, *send_device, *send_done_device);
}
auto recv_device = GetInstructionDevice(*channel.recv);
auto recv_done_device = GetInstructionDevice(*channel.recv_done);
if (!recv_done_device) {
return FailedPrecondition("recv_done instruction must have a device: %s",
channel.recv_done->ToString().c_str());
channel.recv_done->ToString());
}
if (*recv_device != *recv_done_device) {
return FailedPrecondition(
"recv and recv-done (channel=%lld) must be on the same device: %lld "
"vs. %lld",
"recv and recv-done (channel=%d) must be on the same device: %d "
"vs. %d",
channel.id, *recv_device, *recv_done_device);
}
if (*send_device == *recv_device) {
return FailedPrecondition(
"send and recv (channel=%lld) must be on different devices: %lld",
"send and recv (channel=%d) must be on different devices: %d",
channel.id, *send_device);
}
}

@ -483,7 +483,7 @@ Status HloModuleGroupMetadata::VerifyChannelInstructions() {
!CheckCompanionPathsCompatibility(
path, GetCompanionsPath(channel.recv_done))) {
return FailedPrecondition(
"Nest companion paths do not match for channel %lld", channel.id);
"Nest companion paths do not match for channel %d", channel.id);
}
}
return Status::OK();

@ -282,7 +282,7 @@ Status HloModuleGroupUtil::VisitTopologicalOrder(
"following nodes. Note that the order of the nodes is arbitrary "
"and that the list may include nodes that are not part of the "
"cycle.\n%s",
predecessor->ToString().c_str(), cyclic_instructions.c_str());
predecessor->ToString(), cyclic_instructions);
}
stack.push(predecessor);
}

@ -39,7 +39,7 @@ StatusOr<HloOpcode> StringToHloOpcode(const string& opcode_name) {
});
auto it = opcode_map->find(opcode_name);
if (it == opcode_map->end()) {
return InvalidArgument("Unknown opcode: %s", opcode_name.c_str());
return InvalidArgument("Unknown opcode: %s", opcode_name);
}
return it->second;
}

@ -18,6 +18,7 @@ limitations under the License.
#include <utility>
#include <vector>

#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/shape_util.h"

@ -26,7 +27,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {

@ -306,17 +306,15 @@ string PredecessorHloOrdering::ToStringHelper(const string& name) const {
std::vector<string> pieces;
pieces.push_back(name);
for (auto* computation : module_->MakeNonfusionComputations()) {
pieces.push_back(tensorflow::strings::Printf("computation %s:",
computation->name().c_str()));
pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
const auto all = computation->MakeInstructionPostOrder();
for (auto instruction : all) {
pieces.push_back(tensorflow::strings::Printf(
" %s predecessors:", instruction->name().c_str()));
pieces.push_back(
absl::StrFormat(" %s predecessors:", instruction->name()));
for (auto predecessor : all) {
if (predecessors_.at(computation)
->IsReachable(predecessor, instruction)) {
pieces.push_back(
tensorflow::strings::Printf(" %s", predecessor->name().c_str()));
pieces.push_back(absl::StrFormat(" %s", predecessor->name()));
}
}
}

@ -372,8 +370,8 @@ string SequentialHloOrdering::ToString() const {
std::vector<string> pieces;
pieces.push_back("SequentialHloOrdering");
for (auto* computation : module_->computations()) {
pieces.push_back(tensorflow::strings::Printf("computation %s order:",
computation->name().c_str()));
pieces.push_back(
absl::StrFormat("computation %s order:", computation->name()));
// Gather all instructions in the module sequence for this computation and
// sort them by their position.
std::vector<const HloInstruction*> instructions;

@ -388,8 +386,7 @@ string SequentialHloOrdering::ToString() const {
return order_position_.at(a) < order_position_.at(b);
});
for (auto instruction : instructions) {
pieces.push_back(
tensorflow::strings::Printf(" %s", instruction->name().c_str()));
pieces.push_back(absl::StrFormat(" %s", instruction->name()));
}
}
return absl::StrJoin(pieces, "\n");
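The hlo_ordering rewrite above shows the common shape of this migration: build each line with absl::StrFormat, collect the lines in a vector, then absl::StrJoin the pieces. A hedged standalone sketch of that pattern:

#include <iostream>
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"

int main() {
  std::vector<std::string> pieces;
  pieces.push_back("SequentialHloOrdering");
  for (const char* name : {"entry", "body"}) {
    pieces.push_back(absl::StrFormat("computation %s order:", name));
  }
  // Join the formatted pieces, as hlo_ordering.cc does.
  std::cout << absl::StrJoin(pieces, "\n") << std::endl;
}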
@ -18,6 +18,7 @@ limitations under the License.
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "tensorflow/compiler/xla/literal.h"

@ -29,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"

namespace xla {

@ -39,8 +39,8 @@ using absl::nullopt;
using absl::optional;
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using ::tensorflow::strings::Printf;

const double kF16max = 65504;

@ -1586,8 +1586,7 @@ bool HloParser::ParseInstructionNames(
}
std::pair<HloInstruction*, LocTy>* instr = FindInstruction(name);
if (!instr) {
return TokenError(
Printf("instruction '%s' is not defined", name.c_str()));
return TokenError(StrFormat("instruction '%s' is not defined", name));
}
instructions->push_back(instr->first);
} while (EatIfPresent(TokKind::kComma));

@ -1829,17 +1828,17 @@ bool HloParser::ParseDenseLiteral(std::unique_ptr<Literal>* literal,
case TokKind::kLbrace: {
nest_level++;
if (nest_level > rank) {
return TokenError(Printf(
"expects nested array in rank %lld, but sees larger", rank));
return TokenError(absl::StrFormat(
"expects nested array in rank %d, but sees larger", rank));
}
if (nest_level > 1) {
elems_seen_per_dim[nest_level - 2]++;
if (elems_seen_per_dim[nest_level - 2] >
shape.dimensions(nest_level - 2)) {
return TokenError(Printf(
"expects %lld elements in the %sth element, but sees more",
return TokenError(absl::StrFormat(
"expects %d elements in the %sth element, but sees more",
shape.dimensions(nest_level - 2),
get_index_str(nest_level - 2).c_str()));
get_index_str(nest_level - 2)));
}
}
lexer_.Lex();

@ -1848,9 +1847,9 @@ bool HloParser::ParseDenseLiteral(std::unique_ptr<Literal>* literal,
case TokKind::kRbrace: {
nest_level--;
if (elems_seen_per_dim[nest_level] != shape.dimensions(nest_level)) {
return TokenError(Printf(
"expects %lld elements in the %sth element, but sees %lld",
shape.dimensions(nest_level), get_index_str(nest_level).c_str(),
return TokenError(absl::StrFormat(
"expects %d elements in the %sth element, but sees %d",
shape.dimensions(nest_level), get_index_str(nest_level),
elems_seen_per_dim[nest_level]));
}
elems_seen_per_dim[nest_level] = 0;

@ -1871,15 +1870,15 @@ bool HloParser::ParseDenseLiteral(std::unique_ptr<Literal>* literal,
if (rank > 0) {
if (nest_level != rank) {
return TokenError(
Printf("expects nested array in rank %lld, but sees %lld", rank,
nest_level));
absl::StrFormat("expects nested array in rank %d, but sees %d",
rank, nest_level));
}
elems_seen_per_dim[rank - 1]++;
if (elems_seen_per_dim[rank - 1] > shape.dimensions(rank - 1)) {
return TokenError(
Printf("expects %lld elements on the minor-most dimension, but "
"sees more",
shape.dimensions(rank - 1)));
return TokenError(absl::StrFormat(
"expects %d elements on the minor-most dimension, but "
"sees more",
shape.dimensions(rank - 1)));
}
}
if (lexer_.GetKind() == TokKind::kw_true ||

@ -2135,8 +2134,8 @@ bool HloParser::ParseSubAttributes(
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
return Error(loc, Printf("sub-attribute %s is expected but not seen",
attr_it.first.c_str()));
return Error(loc, StrFormat("sub-attribute %s is expected but not seen",
attr_it.first));
}
}
return ParseToken(TokKind::kRbrace, "expects '}' to end sub attributes");

@ -2156,8 +2155,8 @@ bool HloParser::ParseAttributes(
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
return Error(loc, Printf("attribute %s is expected but not seen",
attr_it.first.c_str()));
return Error(loc, StrFormat("attribute %s is expected but not seen",
attr_it.first));
}
}
return true;

@ -2173,7 +2172,7 @@ bool HloParser::ParseAttributeHelper(
}
VLOG(1) << "Parsing attribute " << name;
if (!seen_attrs->insert(name).second) {
return Error(loc, Printf("attribute %s already exists", name.c_str()));
return Error(loc, StrFormat("attribute %s already exists", name));
}
auto attr_it = attrs.find(name);
if (attr_it == attrs.end()) {

@ -2188,8 +2187,8 @@ bool HloParser::ParseAttributeHelper(
StrAppend(out, kv.first);
}));
}
return Error(loc, Printf("unexpected attribute \"%s\". %s", name.c_str(),
allowed_attrs.c_str()));
return Error(loc, StrFormat("unexpected attribute \"%s\". %s", name,
allowed_attrs));
}
AttrTy attr_type = attr_it->second.attr_type;
void* attr_out_ptr = attr_it->second.result;

@ -2384,7 +2383,7 @@ bool HloParser::ParseAttributeHelper(
}
}();
if (!success) {
return Error(loc, Printf("error parsing attribute %s", name.c_str()));
return Error(loc, StrFormat("error parsing attribute %s", name));
}
return true;
}

@ -2548,7 +2547,7 @@ bool HloParser::ParseConvolutionDimensionNumbers(
dnums->set_input_spatial_dimensions(c - '0', i);
} else {
return TokenError(
Printf("expects [0-%lldbf] in lhs dimension numbers", rank - 1));
StrFormat("expects [0-%dbf] in lhs dimension numbers", rank - 1));
}
}
}

@ -2571,7 +2570,7 @@ bool HloParser::ParseConvolutionDimensionNumbers(
dnums->set_kernel_spatial_dimensions(c - '0', i);
} else {
return TokenError(
Printf("expects [0-%lldio] in rhs dimension numbers", rank - 1));
StrFormat("expects [0-%dio] in rhs dimension numbers", rank - 1));
}
}
}

@ -2593,8 +2592,8 @@ bool HloParser::ParseConvolutionDimensionNumbers(
} else if (c < '0' + rank && c >= '0') {
dnums->set_output_spatial_dimensions(c - '0', i);
} else {
return TokenError(
Printf("expects [0-%lldbf] in output dimension numbers", rank - 1));
return TokenError(StrFormat(
"expects [0-%dbf] in output dimension numbers", rank - 1));
}
}
}

@ -2640,9 +2639,10 @@ bool HloParser::ParseSliceRanges(SliceRanges* result) {
}
const auto& range = ranges.back();
if (range.size() != 2 && range.size() != 3) {
return Error(loc, Printf("expects [start:limit:step] or [start:limit], "
"but sees %ld elements.",
range.size()));
return Error(loc,
StrFormat("expects [start:limit:step] or [start:limit], "
"but sees %d elements.",
range.size()));
}
} while (EatIfPresent(TokKind::kComma));
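In ParseSliceRanges above, `%ld` for `range.size()` becomes plain `%d`: since StrFormat infers width and signedness from the argument, size_t, int64, and int all format with the same specifier. A minimal standalone sketch:

#include <iostream>
#include <vector>
#include "absl/strings/str_format.h"

int main() {
  std::vector<int> range = {0, 10, 2};
  // %d formats range.size() (a size_t) with no %zu/%ld qualifier.
  std::cout << absl::StrFormat(
                   "expects [start:limit:step] or [start:limit], "
                   "but sees %d elements.",
                   range.size())
            << std::endl;
}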
@ -2828,14 +2828,13 @@ bool HloParser::ParseDxD(const string& name,
|
||||
std::vector<tensorflow::int64>* result) {
|
||||
LocTy loc = lexer_.GetLoc();
|
||||
if (!result->empty()) {
|
||||
return Error(loc,
|
||||
Printf("sub-attribute '%s=' already exists", name.c_str()));
|
||||
return Error(loc, StrFormat("sub-attribute '%s=' already exists", name));
|
||||
}
|
||||
// 1D
|
||||
if (lexer_.GetKind() == TokKind::kInt) {
|
||||
tensorflow::int64 number;
|
||||
if (!ParseInt64(&number)) {
|
||||
return Error(loc, Printf("expects sub-attribute '%s=i'", name.c_str()));
|
||||
return Error(loc, StrFormat("expects sub-attribute '%s=i'", name));
|
||||
}
|
||||
result->push_back(number);
|
||||
return true;
|
||||
@ -2844,8 +2843,7 @@ bool HloParser::ParseDxD(const string& name,
|
||||
if (lexer_.GetKind() == TokKind::kDxD) {
|
||||
string str = lexer_.GetStrVal();
|
||||
if (!SplitToInt64s(str, 'x', result)) {
|
||||
return Error(loc,
|
||||
Printf("expects sub-attribute '%s=ixj...'", name.c_str()));
|
||||
return Error(loc, StrFormat("expects sub-attribute '%s=ixj...'", name));
|
||||
}
|
||||
lexer_.Lex();
|
||||
return true;
|
||||
@ -2940,9 +2938,8 @@ bool HloParser::ParseOpcode(HloOpcode* result) {
|
||||
string val = lexer_.GetStrVal();
|
||||
auto status_or_result = StringToHloOpcode(val);
|
||||
if (!status_or_result.ok()) {
|
||||
return TokenError(
|
||||
Printf("expects opcode but sees: %s, error: %s", val.c_str(),
|
||||
status_or_result.status().error_message().c_str()));
|
||||
return TokenError(StrFormat("expects opcode but sees: %s, error: %s", val,
|
||||
status_or_result.status().error_message()));
|
||||
}
|
||||
*result = status_or_result.ValueOrDie();
|
||||
lexer_.Lex();
|
||||
@ -2956,7 +2953,7 @@ bool HloParser::ParseFftType(FftType* result) {
|
||||
}
|
||||
string val = lexer_.GetStrVal();
|
||||
if (!FftType_Parse(val, result) || !FftType_IsValid(*result)) {
|
||||
return TokenError(Printf("expects fft type but sees: %s", val.c_str()));
|
||||
return TokenError(StrFormat("expects fft type but sees: %s", val));
|
||||
}
|
||||
lexer_.Lex();
|
||||
return true;
|
||||
@ -2970,9 +2967,9 @@ bool HloParser::ParseFusionKind(HloInstruction::FusionKind* result) {
|
||||
string val = lexer_.GetStrVal();
|
||||
auto status_or_result = StringToFusionKind(val);
|
||||
if (!status_or_result.ok()) {
|
||||
return TokenError(
|
||||
Printf("expects fusion kind but sees: %s, error: %s", val.c_str(),
|
||||
status_or_result.status().error_message().c_str()));
|
||||
return TokenError(StrFormat("expects fusion kind but sees: %s, error: %s",
|
||||
val,
|
||||
status_or_result.status().error_message()));
|
||||
}
|
||||
*result = status_or_result.ValueOrDie();
|
||||
lexer_.Lex();
|
||||
@ -2988,8 +2985,8 @@ bool HloParser::ParseRandomDistribution(RandomDistribution* result) {
|
||||
auto status_or_result = StringToRandomDistribution(val);
|
||||
if (!status_or_result.ok()) {
|
||||
return TokenError(
|
||||
Printf("expects random distribution but sees: %s, error: %s",
|
||||
val.c_str(), status_or_result.status().error_message().c_str()));
|
||||
StrFormat("expects random distribution but sees: %s, error: %s", val,
|
||||
status_or_result.status().error_message()));
|
||||
}
|
||||
*result = status_or_result.ValueOrDie();
|
||||
lexer_.Lex();
|
||||
@ -3004,9 +3001,9 @@ bool HloParser::ParsePrecision(PrecisionConfigProto::Precision* result) {
|
||||
string val = lexer_.GetStrVal();
|
||||
auto status_or_result = StringToPrecision(val);
|
||||
if (!status_or_result.ok()) {
|
||||
return TokenError(
|
||||
Printf("expects precision but sees: %s, error: %s", val.c_str(),
|
||||
status_or_result.status().error_message().c_str()));
|
||||
return TokenError(StrFormat("expects precision but sees: %s, error: %s",
|
||||
val,
|
||||
status_or_result.status().error_message()));
|
||||
}
|
||||
*result = status_or_result.ValueOrDie();
|
||||
lexer_.Lex();
|
||||
@ -3100,7 +3097,7 @@ StatusOr<HloSharding> HloParser::ParseShardingOnly() {
|
||||
lexer_.Lex();
|
||||
OpSharding op_sharding;
|
||||
if (!ParseSharding(&op_sharding)) {
|
||||
return InvalidArgument("Syntax error:\n%s", GetError().c_str());
|
||||
return InvalidArgument("Syntax error:\n%s", GetError());
|
||||
}
|
||||
if (lexer_.GetKind() != TokKind::kEof) {
|
||||
return InvalidArgument("Syntax error:\nExtra content after sharding");
|
||||
@ -3112,7 +3109,7 @@ StatusOr<Window> HloParser::ParseWindowOnly() {
|
||||
lexer_.Lex();
|
||||
Window window;
|
||||
if (!ParseWindow(&window, /*expect_outer_curlies=*/false)) {
|
||||
return InvalidArgument("Syntax error:\n%s", GetError().c_str());
|
||||
return InvalidArgument("Syntax error:\n%s", GetError());
|
||||
}
|
||||
if (lexer_.GetKind() != TokKind::kEof) {
|
||||
return InvalidArgument("Syntax error:\nExtra content after window");
|
||||
@ -3125,7 +3122,7 @@ HloParser::ParseConvolutionDimensionNumbersOnly() {
|
||||
lexer_.Lex();
|
||||
ConvolutionDimensionNumbers dnums;
|
||||
if (!ParseConvolutionDimensionNumbers(&dnums)) {
|
||||
return InvalidArgument("Syntax error:\n%s", GetError().c_str());
|
||||
return InvalidArgument("Syntax error:\n%s", GetError());
|
||||
}
|
||||
if (lexer_.GetKind() != TokKind::kEof) {
|
||||
return InvalidArgument(
|
||||
@ -3163,7 +3160,7 @@ Status HloParser::ParseSingleInstruction(HloComputation::Builder* builder,
|
||||
|
||||
// Parse the instruction with the registered hook.
|
||||
if (!ParseInstruction(builder, root_name)) {
|
||||
return InvalidArgument("Syntax error:\n%s", GetError().c_str());
|
||||
return InvalidArgument("Syntax error:\n%s", GetError());
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
@ -3174,7 +3171,7 @@ StatusOr<std::unique_ptr<HloModule>> ParseHloString(
|
||||
absl::string_view str, const HloModuleConfig& config) {
|
||||
HloParser parser(str, config);
|
||||
if (!parser.Run()) {
|
||||
return InvalidArgument("Syntax error:\n%s", parser.GetError().c_str());
|
||||
return InvalidArgument("Syntax error:\n%s", parser.GetError());
|
||||
}
|
||||
return parser.ConsumeHloModule();
|
||||
}
|
||||
|
||||
@ -18,6 +18,7 @@ limitations under the License.
|
||||
#include <functional>
|
||||
|
||||
#include "absl/strings/str_cat.h"
|
||||
#include "absl/strings/str_format.h"
|
||||
#include "absl/strings/str_join.h"
|
||||
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
|
||||
#include "tensorflow/compiler/xla/service/hlo_proto_util.h"
|
||||
@ -48,9 +49,9 @@ void DumpModuleProto(const HloModule& module, const string& dump_to,
|
||||
tensorflow::mutex_lock lock(mu);
|
||||
const int64 pass_number = (*module_id_to_pass_number)[module.unique_id()]++;
|
||||
|
||||
const string mod_name = SanitizeFileName(tensorflow::strings::Printf(
|
||||
"module_%04d.%04lld.%s.after_%s", module.unique_id(), pass_number,
|
||||
pipeline_name.c_str(), pass_name.c_str()));
|
||||
const string mod_name = SanitizeFileName(
|
||||
absl::StrFormat("module_%04d.%04d.%s.after_%s", module.unique_id(),
|
||||
pass_number, pipeline_name, pass_name));
|
||||
|
||||
TF_QCHECK_OK(protobuf_util::DumpProtoToDirectory(MakeHloProto(module),
|
||||
dump_to, mod_name));
|
||||
|
||||
@ -22,6 +22,7 @@ limitations under the License.
|
||||
|
||||
#include "absl/container/inlined_vector.h"
|
||||
#include "absl/strings/str_cat.h"
|
||||
#include "absl/strings/str_format.h"
|
||||
#include "absl/strings/str_join.h"
|
||||
#include "tensorflow/compiler/xla/map_util.h"
|
||||
#include "tensorflow/compiler/xla/primitive_util.h"
|
||||
@ -40,7 +41,6 @@ limitations under the License.
|
||||
#include "tensorflow/compiler/xla/statusor.h"
|
||||
#include "tensorflow/compiler/xla/types.h"
|
||||
#include "tensorflow/compiler/xla/util.h"
|
||||
#include "tensorflow/core/lib/strings/stringprintf.h"
|
||||
#include "tensorflow/core/platform/logging.h"
|
||||
|
||||
namespace xla {
|
||||
@ -1353,12 +1353,11 @@ StatusOr<bool> HloRematerialization::Run(
|
||||
XLA_VLOG_LINES(3, "After HloRematerialization:\n" + module->ToString());
|
||||
|
||||
if (current_peak_memory > memory_limit_bytes) {
|
||||
LOG(WARNING) << tensorflow::strings::Printf(
|
||||
"Can't reduce memory use below %s (%lld bytes) by rematerialization; "
|
||||
"only reduced to %s (%lld bytes)",
|
||||
HumanReadableNumBytes(memory_limit_bytes).c_str(), memory_limit_bytes,
|
||||
HumanReadableNumBytes(current_peak_memory).c_str(),
|
||||
current_peak_memory);
|
||||
LOG(WARNING) << absl::StrFormat(
|
||||
"Can't reduce memory use below %s (%d bytes) by rematerialization; "
|
||||
"only reduced to %s (%d bytes)",
|
||||
HumanReadableNumBytes(memory_limit_bytes), memory_limit_bytes,
|
||||
HumanReadableNumBytes(current_peak_memory), current_peak_memory);
|
||||
}
|
||||
|
||||
return changed;
|
||||
|
||||
@ -30,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {

@ -128,10 +128,9 @@ Status ShapeVerifier::CheckIsTokenOperand(const HloInstruction* instruction,
const HloInstruction* token = instruction->operand(operand_no);
if (!ShapeUtil::Equal(token->shape(), ShapeUtil::MakeTokenShape())) {
return InternalError(
"Expected operand %lld to be token-shaped, actual shape is "
"Expected operand %d to be token-shaped, actual shape is "
"%s:\n%s",
operand_no, StringifyShape(token->shape()).c_str(),
instruction->ToString().c_str());
operand_no, StringifyShape(token->shape()), instruction->ToString());
}
return Status::OK();
}
@ -144,9 +143,8 @@ Status ShapeVerifier::CheckOperandAndParameter(
computation->parameter_instruction(parameter_number);
if (!ShapesSame(operand->shape(), parameter->shape())) {
return InternalError("Operand %s shape does not match parameter's %s in %s",
operand->ToString().c_str(),
parameter->ToString().c_str(),
instruction->ToString().c_str());
operand->ToString(), parameter->ToString(),
instruction->ToString());
}
return Status::OK();
}
@ -171,9 +169,8 @@ Status ShapeVerifier::HandleOutfeed(HloInstruction* instruction) {
return InternalError(
"Expected outfeed shape to be equal to operand's shape %s, "
"actual shape is %s:\n%s",
StringifyShape(outfeed->operand(0)->shape()).c_str(),
StringifyShape(outfeed->outfeed_shape()).c_str(),
outfeed->ToString().c_str());
StringifyShape(outfeed->operand(0)->shape()),
StringifyShape(outfeed->outfeed_shape()), outfeed->ToString());
}
return CheckShape(outfeed, ShapeUtil::MakeTokenShape());
}
@ -191,7 +188,7 @@ bool ShapeVerifier::HasCompatibleElementTypes(const Shape& shape_0,
Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
if (instruction->operand_count() != 2) {
return InternalError("Expected two operands for Rng instruction: %s",
instruction->ToString().c_str());
instruction->ToString());
}

const Shape& shape_0 = instruction->operand(0)->shape();
@ -199,14 +196,14 @@ Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
if (!ShapeUtil::IsScalar(shape_0) || !ShapeUtil::IsScalar(shape_1)) {
return InternalError(
"Expected scalar types for the two operands of Rng instruction: %s",
instruction->ToString().c_str());
instruction->ToString());
}

if (!HasCompatibleElementTypes(shape_0, shape_1, instruction->shape())) {
return InternalError(
"Expected compatible element types for the result and the two operands"
" of Rng instruction: %s",
instruction->ToString().c_str());
instruction->ToString());
}

PrimitiveType element_type = shape_0.element_type();
@ -219,7 +216,7 @@ Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
"Element type not supported."
" Expected element to be of floating point type, integral type or"
" predicate type for RngUniform: %s",
instruction->ToString().c_str());
instruction->ToString());
}
break;

@ -228,13 +225,13 @@ Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
return InternalError(
"Element type not supported."
" Expected element to be FloatingPointType for RngNormal: %s",
instruction->ToString().c_str());
instruction->ToString());
}
break;
default:
return InternalError(
"Invalid Rng distribution %s",
RandomDistribution_Name(instruction->random_distribution()).c_str());
RandomDistribution_Name(instruction->random_distribution()));
}

return Status::OK();
@ -253,8 +250,8 @@ Status ShapeVerifier::HandleSort(HloInstruction* sort) {
return InternalError(
"Expected sort to have to have the same dimensions for the keys and "
"the values. Keys shape is: %s\n, Values shape is: %s",
StringifyShape(sort->operand(0)->shape()).c_str(),
StringifyShape(sort->operand(1)->shape()).c_str());
StringifyShape(sort->operand(0)->shape()),
StringifyShape(sort->operand(1)->shape()));
}
return CheckVariadicShape(sort);
}
@ -333,7 +330,7 @@ Status ShapeVerifier::HandleFusion(HloInstruction* fusion) {
int64 param_no = fused_param->parameter_number();
if (!ShapesSame(fused_param->shape(), fusion->operand(param_no)->shape())) {
return InternalError(
"Shape mismatch between parameter number %lld and its operand in "
"Shape mismatch between parameter number %d and its operand in "
"%s.",
param_no, fusion->ToString().c_str());
}
@ -425,7 +422,7 @@ Status ShapeVerifier::HandleWhile(HloInstruction* xla_while) {
return InternalError(
"Conditional computation shape does not lead to a scalar predicate "
"shape: %s",
StringifyShape(conditional_shape).c_str());
StringifyShape(conditional_shape));
}
// The shape of kWhile should match the shape of the body computation it
// calls.
@ -556,7 +553,7 @@ Status CheckMixedPrecisionOperands(const HloInstruction* instruction) {
return InternalError(
"Seen floating point types of different precisions in "
"%s, but mixed precision is disallowed.",
instruction->ToString().c_str());
instruction->ToString());
}
return Status::OK();
}));
@ -646,9 +643,8 @@ Status ShapeVerifier::CheckShape(const HloInstruction* instruction,
return InternalError(
"Expected instruction to have shape equal to %s, actual "
"shape is %s:\n%s",
StringifyShape(inferred_shape).c_str(),
StringifyShape(instruction->shape()).c_str(),
instruction->ToString().c_str());
StringifyShape(inferred_shape), StringifyShape(instruction->shape()),
instruction->ToString());
}
return Status::OK();
}
@ -713,23 +709,23 @@ Status VerifyHloStructure(HloModule* module) {
for (const HloComputation* computation : module->computations()) {
if (computation->parent() == nullptr) {
return InternalError("Computation %s has a null parent pointer",
computation->name().c_str());
computation->name());
}
if (computation->parent() != module) {
return InternalError(
"Computation %s parent() does not point to parent module",
computation->name().c_str());
computation->name());
}

for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->parent() == nullptr) {
return InternalError("Instruction %s has a null parent pointer",
instruction->name().c_str());
instruction->name());
}
if (instruction->parent() != computation) {
return InternalError(
"Instruction %s parent() does not point to parent computation",
instruction->name().c_str());
instruction->name());
}
}
}
@ -746,9 +742,8 @@ Status VerifyHloStructure(HloModule* module) {
return InternalError(
"Operand %d (%s) of instruction %s is in a different "
"computation: %s vs %s",
i, operand->name().c_str(), instruction->name().c_str(),
operand->parent()->name().c_str(),
instruction->parent()->name().c_str());
i, operand->name(), instruction->name(),
operand->parent()->name(), instruction->parent()->name());
}
}
}
@ -764,7 +759,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
"Instruction of fused computation does not match expected "
"instruction "
"%s.",
fusion->ToString().c_str());
fusion->ToString());
}

// Fused root instruction and fused parameters must all be owned by the
@ -778,7 +773,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
if (fused_root == instruction) {
if (root_owned) {
return InternalError("Root appears more than once in %s.",
fusion->ToString().c_str());
fusion->ToString());
}
root_owned = true;
}
@ -786,7 +781,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
if (fused_parameters[i] == instruction) {
if (parameter_owned[i]) {
return InternalError("Parameter appears more than once in %s.",
fusion->ToString().c_str());
fusion->ToString());
}
parameter_owned[i] = true;
}
@ -794,20 +789,19 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
}
if (!root_owned) {
return InternalError("Root not found in computation of %s.",
fusion->ToString().c_str());
fusion->ToString());
}
// Make sure all the parameter_owned entries are set
for (int i = 0; i < parameter_owned.size(); i++) {
if (!parameter_owned[i]) {
return InternalError("Parameter %d not found in computation of %s.", i,
fusion->ToString().c_str());
fusion->ToString());
}
}

// Fused root must have no users.
if (fused_root->user_count() != 0) {
return InternalError("Root of %s may not have users.",
fusion->ToString().c_str());
return InternalError("Root of %s may not have users.", fusion->ToString());
}

// All uses of fused instructions must be in the fusion computation, and
@ -817,14 +811,13 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
if (instruction != fused_root) {
if (instruction->user_count() == 0) {
return InternalError("Non-root instruction %s in %s must have users.",
instruction->ToString().c_str(),
fusion->ToString().c_str());
instruction->ToString(), fusion->ToString());
}
for (auto& user : instruction->users()) {
if (fused_computation != user->parent()) {
return InternalError(
"Non-root instruction %s in %s may not have external users.",
instruction->ToString().c_str(), fusion->ToString().c_str());
instruction->ToString(), fusion->ToString());
}
}
}
@ -837,19 +830,19 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
for (auto fused_param : fused_parameters) {
int64 param_no = fused_param->parameter_number();
if (param_no < 0) {
return InternalError("Unexpected negative parameter number %lld in %s.",
param_no, fusion->ToString().c_str());
return InternalError("Unexpected negative parameter number %d in %s.",
param_no, fusion->ToString());
}
if (param_no >= fused_parameters.size()) {
return InternalError(
"Unexpected parameter number %lld in %s: higher then number of "
"Unexpected parameter number %d in %s: higher then number of "
"parameters %lu.",
param_no, fusion->ToString().c_str(), fused_parameters.size());
param_no, fusion->ToString(), fused_parameters.size());
}
if (parameter_numbers[param_no]) {
return InternalError(
"Did not expect parameter number %lld more than once in %s.",
param_no, fusion->ToString().c_str());
"Did not expect parameter number %d more than once in %s.", param_no,
fusion->ToString());
}
parameter_numbers[param_no] = true;
}
@ -857,7 +850,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
for (int i = 0; i < parameter_numbers.size(); i++) {
if (!parameter_numbers[i]) {
return InternalError("Did not see parameter number %d in %s.", i,
fusion->ToString().c_str());
fusion->ToString());
}
}

@ -872,18 +865,18 @@ Status HloVerifier::CheckWhileInstruction(HloInstruction* instruction) {
auto* while_body = instruction->while_body();
if (while_cond->num_parameters() != 1) {
return FailedPrecondition(
"While condition must have exactly 1 parameter; had %lld : %s",
while_cond->num_parameters(), while_cond->ToString().c_str());
"While condition must have exactly 1 parameter; had %d : %s",
while_cond->num_parameters(), while_cond->ToString());
}
if (while_body->num_parameters() != 1) {
return FailedPrecondition(
"While body must have exactly 1 parameter; had %lld : %s",
while_body->num_parameters(), while_body->ToString().c_str());
"While body must have exactly 1 parameter; had %d : %s",
while_body->num_parameters(), while_body->ToString());
}
if (instruction->operand_count() != 1) {
return FailedPrecondition(
"While loop must have exactly one operand; had %lld : %s",
instruction->operand_count(), instruction->ToString().c_str());
"While loop must have exactly one operand; had %d : %s",
instruction->operand_count(), instruction->ToString());
}
return Status::OK();
}
@ -891,16 +884,14 @@ Status HloVerifier::CheckWhileInstruction(HloInstruction* instruction) {
Status HloVerifier::CheckConditionalInstruction(HloInstruction* instruction) {
if (instruction->true_computation()->num_parameters() != 1) {
return FailedPrecondition(
"True computation %s of %s must have 1 parameter insted of %lld",
instruction->true_computation()->name().c_str(),
instruction->ToString().c_str(),
"True computation %s of %s must have 1 parameter insted of %d",
instruction->true_computation()->name(), instruction->ToString(),
instruction->true_computation()->num_parameters());
}
if (instruction->false_computation()->num_parameters() != 1) {
return FailedPrecondition(
"False computation %s of %s must have 1 parameter insted of %lld",
instruction->false_computation()->name().c_str(),
instruction->ToString().c_str(),
"False computation %s of %s must have 1 parameter insted of %d",
instruction->false_computation()->name(), instruction->ToString(),
instruction->false_computation()->num_parameters());
}
return Status::OK();
@ -915,9 +906,9 @@ Status HloVerifier::CheckElementwiseInstruction(HloInstruction* instruction) {
"Implicit broadcast is not allowed in HLO."
"Found different shapes for instruction %s.\n"
"output: %s\noperand: %s\n",
HloOpcodeString(instruction->opcode()).c_str(),
ShapeUtil::HumanString(out_shape).c_str(),
ShapeUtil::HumanString(operand_shape).c_str());
HloOpcodeString(instruction->opcode()),
ShapeUtil::HumanString(out_shape),
ShapeUtil::HumanString(operand_shape));
}
}
return Status::OK();
@ -948,7 +939,7 @@ Status VerifyEntryAndExitShapes(const HloModule& module) {
if (ShapeContainsToken(param->shape())) {
return InternalError(
"Entry parameter %d is or contains a token shape: %s", i,
ShapeUtil::HumanString(param->shape()).c_str());
ShapeUtil::HumanString(param->shape()));
}
}
return Status::OK();
@ -960,9 +951,9 @@ Status CheckSameChannel(const HloInstruction* instr1,
if (instr1->channel_id() != instr2->channel_id()) {
return InternalError(
"Expected to have the same channel id, actual channel ids are: %s "
"(%lld), %s (%lld)",
instr1->ToString().c_str(), instr1->channel_id(),
instr2->ToString().c_str(), instr2->channel_id());
"(%d), %s (%d)",
instr1->ToString(), instr1->channel_id(), instr2->ToString(),
instr2->channel_id());
}
return Status::OK();
}
@ -983,7 +974,7 @@ Status CheckSameIsHostTransfer(const HloInstruction* instr1,
"Expected instructions to have the same is-host-transfer property: "
"%s, "
"%s ",
instr1->ToString().c_str(), instr2->ToString().c_str());
instr1->ToString(), instr2->ToString());
}
return Status::OK();
}
@ -1000,12 +991,12 @@ Status VerifySendsAndRecvs(const HloModule& module) {
host_channels.insert({sendrecv->channel_id(), sendrecv});
if (!it_inserted.second) {
return FailedPrecondition(
"Channel %lld is used for multiple host send/recv instructions: "
"Channel %d is used for multiple host send/recv instructions: "
"%s "
"and "
"%s",
sendrecv->channel_id(), sendrecv->ToString().c_str(),
it_inserted.first->second->ToString().c_str());
sendrecv->channel_id(), sendrecv->ToString(),
it_inserted.first->second->ToString());
}
}
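Every hlo_verifier.cc change above is one of the same two mechanical rewrites: %lld becomes %d for int64 values, and .c_str() disappears from string arguments. The payoff is that the rewrites are verified: StrFormat matches the format string against the argument types at compile time, so a specifier/argument mismatch fails the build instead of corrupting an error message at runtime. A small sketch (names hypothetical):

#include <cstdint>
#include <string>
#include "absl/strings/str_format.h"

std::string ChannelError(const std::string& instr, int64_t channel_id) {
  // %s binds the std::string by reference and %d the int64 by value;
  // swapping the two arguments would be a compile-time error.
  return absl::StrFormat("Unexpected channel for %s (%d)", instr, channel_id);
}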