Merge pull request #41752 from tg-at-google:wsign-compare-semi-final-xla
PiperOrigin-RevId: 323916787
Change-Id: Idf822bd906e69f9491741881c6c86912f067158d
commit bfacfb7779
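
The recurring fix in this diff addresses -Wsign-compare: `size()` on standard containers and absl::Span returns an unsigned size_type, while XLA's loop indices are signed `int`/`int64`, so the loop condition compares mixed signedness. A minimal standalone sketch of the before/after shape (illustrative names, not code from this commit):

    #include <cstdint>
    #include <vector>

    int64_t SumBefore(const std::vector<int64_t>& v) {
      int64_t sum = 0;
      // Warns under -Wsign-compare: signed int64_t vs. unsigned size_t.
      for (int64_t i = 0; i < v.size(); ++i) sum += v[i];
      return sum;
    }

    int64_t SumAfter(const std::vector<int64_t>& v) {
      int64_t sum = 0;
      // Hoisting size() into a signed local makes the comparison
      // signed-vs-signed, and evaluates size() only once.
      for (int64_t i = 0, end = v.size(); i < end; ++i) sum += v[i];
      return sum;
    }

Where the unsigned operand is an element value rather than a container size (see the IsR1Iota hunk below), the change casts the unsigned side to the signed index type instead.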
@@ -403,7 +403,8 @@ class Array {
   // Returns the size of the dimension at the given index.
   int64 dim(int64 n) const {
-    CHECK(n < sizes_.size());
+    const int64 sizes_size = sizes_.size();
+    CHECK(n < sizes_size);
     return sizes_[n];
   }
@@ -427,7 +427,7 @@ class Array {
     if (sizes_.size() != other.sizes_.size()) {
       return false;
     }
-    for (int64 i = 0; i < sizes_.size(); ++i) {
+    for (int64 i = 0, end = sizes_.size(); i < end; ++i) {
       if (sizes_[i] != other.sizes_[i]) {
         return false;
       }
@@ -312,7 +312,7 @@ StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
   // device 0.
   //
   // TODO(b/118493728): Allow Execute to return one result per computation.
-  for (int64 i = 0; i < results.size(); i++) {
+  for (int64 i = 0, end = results.size(); i < end; i++) {
     TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(*results[i]));
     if (!ShapeUtil::IsEmptyTuple(shape)) {
       VLOG(3) << "Fetching result from device " << i << ": "
@@ -350,7 +350,7 @@ StatusOr<std::vector<std::unique_ptr<GlobalData>>> Client::ExecuteParallel(
   }

   std::vector<std::unique_ptr<GlobalData>> outputs;
-  for (size_t i = 0; i < response.responses_size(); ++i) {
+  for (size_t i = 0, end = response.responses_size(); i < end; ++i) {
     outputs.push_back(
         absl::make_unique<GlobalData>(stub_, response.responses(i).output()));
     if (i < computations.size() &&
@@ -511,7 +511,7 @@ XlaOp Lgamma(XlaOp input) {
   XlaOp z = Select(need_to_reflect, -input, input - one);

   XlaOp x = base_lanczos_coeff;
-  for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+  for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
     XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
     XlaOp index = ScalarLike(input, i);
     x = x + lanczos_coefficient / (z + index + one);
@@ -647,7 +647,7 @@ XlaOp Digamma(XlaOp input) {

   XlaOp num = zero;
   XlaOp denom = base_lanczos_coeff;
-  for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+  for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
     XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
     XlaOp index = ScalarLike(input, i);
     num = num - lanczos_coefficient / ((z + index + one) * (z + index + one));
@@ -198,15 +198,17 @@ XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span<const int64> gradients_size,
   XlaBuilder* b = out_backprop.builder();
   return b->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
     const int num_dims = kernel_size.size();

-    if (gradients_size.size() != num_dims) {
+    const int num_gradients = gradients_size.size();
+    if (num_gradients != num_dims) {
       return tensorflow::errors::InvalidArgument("gradients must be ", num_dims,
                                                  "-dimensional");
     }

     TF_ASSIGN_OR_RETURN(Shape out_backprop_xla_shape,
                         b->GetShape(out_backprop));
-    if (out_backprop_xla_shape.dimensions().size() != num_dims) {
+    const int backprop_xla_num_dims =
+        out_backprop_xla_shape.dimensions().size();
+    if (backprop_xla_num_dims != num_dims) {
       return tensorflow::errors::InvalidArgument("out_backprop must be ",
                                                  num_dims, "-dimensional");
     }
@@ -74,12 +74,13 @@ XlaOp UpdateSlice(XlaOp x, XlaOp update, absl::Span<const int64> start) {
   return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
     TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
     const int64 n_dims = shape.rank();
-    TF_RET_CHECK(start.size() == n_dims);
+    const int64 start_size = start.size();
+    TF_RET_CHECK(start_size == n_dims);

     // TODO(phawkins): make int64 work on all backends, remove the int32 cast.
     std::vector<int32> start_as_int32(start.begin(), start.end());
     std::vector<XlaOp> start_ops(start.size());
-    for (int i = 0; i < start.size(); ++i) {
+    for (int i = 0, end = start.size(); i < end; ++i) {
       start_ops[i] = ConstantR0(builder, start_as_int32[i]);
     }
     return DynamicUpdateSlice(x, update, start_ops);
@@ -122,12 +122,13 @@ LocalExecutable::RunHelper(const absl::Span<const Shape* const> argument_shapes,
       executable_->module_config().entry_computation_layout();

   // Check argument number, shapes, and layouts.
-  if (argument_shapes.size() != computation_layout.parameter_count()) {
+  const int argument_shapes_size = argument_shapes.size();
+  if (argument_shapes_size != computation_layout.parameter_count()) {
     return InvalidArgument(
         "invalid number of arguments for computation: expected %d, got %u",
         computation_layout.parameter_count(), argument_shapes.size());
   }
-  for (int i = 0; i < argument_shapes.size(); ++i) {
+  for (int i = 0, end = argument_shapes.size(); i < end; ++i) {
     if (!computation_layout.parameter_layout(i).MatchesLayoutInShape(
             *argument_shapes[i])) {
       return InvalidParameterArgument(
@@ -766,15 +766,16 @@ XlaOp XlaBuilder::BroadcastInDim(
     TF_ASSIGN_OR_RETURN(auto output_shape,
                         ShapeUtil::MakeValidatedShape(
                             operand_shape->element_type(), out_dim_size));
-    if (operand_shape->rank() != broadcast_dimensions.size()) {
+    tensorflow::int64 broadcast_rank = broadcast_dimensions.size();
+    if (operand_shape->rank() != broadcast_rank) {
       return InvalidArgument(
           "Size of broadcast_dimensions has to match operand's rank; operand "
           "rank: %lld, size of broadcast_dimensions %u.",
           operand_shape->rank(), broadcast_dimensions.size());
     }
-    for (int i = 0; i < broadcast_dimensions.size(); i++) {
-      if (broadcast_dimensions[i] < 0 ||
-          broadcast_dimensions[i] > out_dim_size.size()) {
+    for (int i = 0; i < broadcast_rank; i++) {
+      const tensorflow::int64 num_dims = out_dim_size.size();
+      if (broadcast_dimensions[i] < 0 || broadcast_dimensions[i] > num_dims) {
        return InvalidArgument("Broadcast dimension %lld is out of bound",
                               broadcast_dimensions[i]);
      }
@@ -786,7 +787,7 @@ XlaOp XlaBuilder::BroadcastInDim(
                             *operand_shape, output_shape, broadcast_dimensions)
                             .status());
     std::vector<int64> in_dim_size(out_dim_size.begin(), out_dim_size.end());
-    for (int i = 0; i < broadcast_dimensions.size(); i++) {
+    for (int i = 0; i < broadcast_rank; i++) {
       in_dim_size[broadcast_dimensions[i]] = operand_shape->dimensions(i);
     }
     const auto& in_dim_shape =
@@ -835,7 +836,7 @@ StatusOr<XlaOp> XlaBuilder::SliceInternal(const Shape& shape, XlaOp operand,
                                           absl::Span<const int64> strides) {
   HloInstructionProto instr;
   *instr.mutable_shape() = shape.ToProto();
-  for (int i = 0; i < start_indices.size(); i++) {
+  for (int i = 0, end = start_indices.size(); i < end; i++) {
     auto* slice_config = instr.add_slice_dimensions();
     slice_config->set_start(start_indices[i]);
     slice_config->set_limit(limit_indices[i]);
@@ -1543,7 +1544,7 @@ XlaOp XlaBuilder::AfterAll(absl::Span<const XlaOp> tokens) {
     if (tokens.empty()) {
       return InvalidArgument("AfterAll requires at least one operand");
     }
-    for (int i = 0; i < tokens.size(); ++i) {
+    for (int i = 0, end = tokens.size(); i < end; ++i) {
       XlaOp operand = tokens[i];
       TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
       if (!operand_shape->IsToken()) {
@@ -2007,7 +2008,7 @@ XlaOp XlaBuilder::ConditionalImpl(
     std::vector<Shape> branch_operand_shapes(branch_operands.size());
     std::vector<ProgramShape> branch_computation_shapes(
         branch_computations.size());
-    for (int j = 0; j < branch_operands.size(); ++j) {
+    for (int j = 0, end = branch_operands.size(); j < end; ++j) {
       TF_ASSIGN_OR_RETURN(branch_operand_shapes[j],
                           GetShape(branch_operands[j]));
       TF_ASSIGN_OR_RETURN(branch_computation_shapes[j],
@@ -2416,7 +2417,9 @@ XlaOp XlaBuilder::AllToAll(XlaOp operand, int64 split_dimension,
     if (layout) {
       TF_RET_CHECK(shape.IsTuple() && !ShapeUtil::IsNestedTuple(shape));
       for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
-        if (layout->minor_to_major().size() != shape.tuple_shapes(i).rank()) {
+        const int64 layout_minor_to_major_size =
+            layout->minor_to_major().size();
+        if (layout_minor_to_major_size != shape.tuple_shapes(i).rank()) {
          return InvalidArgument(
              "Provided layout must be compatible with the operand shape: %s "
              "vs %s",
@@ -143,7 +143,8 @@ namespace xla {
 /* static */ bool IndexUtil::IndexInBounds(const Shape& shape,
                                            absl::Span<const int64> index) {
   int64 rank = shape.rank();
-  if (rank != index.size()) {
+  const int64 index_size = index.size();
+  if (rank != index_size) {
     return false;
   }
   for (int64 d = 0; d < rank; ++d) {
@@ -157,7 +158,8 @@ namespace xla {
 /* static */ int IndexUtil::CompareIndices(absl::Span<const int64> lhs,
                                            absl::Span<const int64> rhs) {
   int64 rank = lhs.size();
-  CHECK_EQ(rhs.size(), rank);
+  const int64 rhs_rank = rhs.size();
+  CHECK_EQ(rhs_rank, rank);
   for (int64 dim = 0; dim < rank; ++dim) {
     if (lhs[dim] < rhs[dim]) {
       return -1;
@@ -342,7 +342,8 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
 /* static */ std::vector<int64> LayoutUtil::MakeLogicalToPhysical(
     const Layout& layout) {
   std::vector<int64> logical_to_physical(layout.minor_to_major_size());
-  for (int64 physical = 0; physical < logical_to_physical.size(); ++physical) {
+  for (int64 physical = 0, end = logical_to_physical.size(); physical < end;
+       ++physical) {
     const int64 logical = Major(layout, physical);
     logical_to_physical[logical] = physical;
   }
@@ -58,7 +58,7 @@ constexpr int kMinimumAlignment = 64;
 // Precondition: size % 2 == 0 (elements in the array are 16 bits long)
 void ConvertEndianShort(string* bytes) {
   CHECK_EQ(bytes->size() / 2, 0);
-  for (int64 i = 0; i < bytes->size(); i += 2) {
+  for (int64 i = 0, end = bytes->size(); i < end; i += 2) {
     std::swap((*bytes)[i], (*bytes)[i + 1]);
   }
 }
@@ -249,8 +249,10 @@ template <typename NativeT>
 Status MutableLiteralBase::CopySliceFromInternal(
     const LiteralBase& src_literal, absl::Span<const int64> src_base,
     absl::Span<const int64> dest_base, absl::Span<const int64> copy_size) {
-  TF_RET_CHECK(src_literal.shape().rank() == src_base.size());
-  TF_RET_CHECK(shape().rank() == dest_base.size());
+  const int64 src_base_size = src_base.size();
+  const int64 dest_base_size = dest_base.size();
+  TF_RET_CHECK(src_literal.shape().rank() == src_base_size);
+  TF_RET_CHECK(shape().rank() == dest_base_size);

   auto linear_index = [](const Shape& shape,
                          absl::Span<const int64> multi_index) {
@@ -564,7 +566,8 @@ Status MutableLiteralBase::CopyFrom(const LiteralSlice& src_literal,
     }
     // Construct the index of the corresponding piece in the source literal.
     ShapeIndex src_piece_index = src_shape_index;
-    for (int64 i = dest_shape_index.size(); i < index.size(); ++i) {
+    for (int64 i = dest_shape_index.size(), end = index.size(); i < end;
+         ++i) {
      src_piece_index.push_back(index[i]);
    }
    TF_RETURN_IF_ERROR(
@@ -755,7 +758,7 @@ StatusOr<Literal> LiteralBase::Broadcast(
     return InvalidArgument("Broadcast only supports arrays.");
   }

-  for (int64 i = 0; i < dimensions.size(); i++) {
+  for (int64 i = 0, end = dimensions.size(); i < end; i++) {
     TF_RET_CHECK(shape().dimensions(i) ==
                  result_shape.dimensions(dimensions[i]));
   }
@@ -779,7 +782,7 @@ StatusOr<Literal> LiteralBase::Broadcast(

   ShapeUtil::ForEachIndex(
       result_shape, [&](absl::Span<const int64> output_index) {
-        for (int64 i = 0; i < dimensions.size(); ++i) {
+        for (int64 i = 0, end = dimensions.size(); i < end; ++i) {
          scratch_source_index[i] = output_index[dimensions[i]];
        }
        int64 dest_index = IndexUtil::MultidimensionalIndexToLinearIndex(
@@ -1185,8 +1188,9 @@ void DenseArrayToStringHelper(const LiteralBase& literal,
     }
     // Handle the non-innermost tensors of a 2D+ tensor.
     if (brace == "{") {
+      const int64 accum_indices_size = accum_indices->size();
       if (rank > 3 && !accum_indices->empty() &&
-          accum_indices->size() < rank) {
+          accum_indices_size < rank) {
        int index = accum_indices->size() - 1;
        int value = accum_indices->back();
        return StrCat(brace, " /*i", index, "=", value, "*/\n");
@@ -1520,7 +1524,7 @@ StatusOr<Literal> LiteralBase::ConvertToShape(const Shape& dest_shape) const {
   }
   Literal literal(ShapeUtil::MakeTupleShape(element_shapes),
                   /*allocate_arrays=*/false);
-  for (int i = 0; i < elements.size(); ++i) {
+  for (int i = 0, end = elements.size(); i < end; ++i) {
     TF_CHECK_OK(
         literal.MoveFrom(std::move(elements[i]), /*dest_shape_index=*/{i}));
   }
@@ -1893,13 +1897,13 @@ bool LiteralBase::IsR1Iota() const {
   auto is_iota_at_idx = [&](const int64 idx) {
     switch (shape().element_type()) {
       case U8:
-        return Get<uint8>({idx}) == idx;
+        return static_cast<int64>(Get<uint8>({idx})) == idx;
       case U16:
-        return Get<uint16>({idx}) == idx;
+        return static_cast<int64>(Get<uint16>({idx})) == idx;
       case U32:
-        return Get<uint32>({idx}) == idx;
+        return static_cast<int64>(Get<uint32>({idx})) == idx;
       case U64:
-        return Get<uint64>({idx}) == idx;
+        return static_cast<int64>(Get<uint64>({idx})) == idx;
       case S8:
         return Get<int8>({idx}) == idx;
       case S16:
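
The IsR1Iota hunk above uses the other fix in this change: the values being compared are unsigned element loads rather than container sizes, so instead of hoisting a size into a local, the unsigned side is cast to the signed index type. A standalone sketch of the idea (illustrative name, not code from this commit):

    #include <cstdint>

    // Casting the narrower unsigned element up to int64_t makes the
    // comparison signed-vs-signed; the cast is value-preserving for
    // uint8/uint16/uint32. For uint64 values above INT64_MAX it wraps to a
    // negative number, which still cannot equal a non-negative index.
    bool MatchesIndex(uint32_t element, int64_t idx) {
      return static_cast<int64_t>(element) == idx;
    }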
@@ -2176,8 +2180,9 @@ Status LiteralBase::Piece::CopyFromProto(const LiteralProto& proto) {
     }
     case C128: {
       auto complex_data = data<complex128>();
-      TF_RET_CHECK(proto.c128s_size() == complex_data.size() * 2);
-      for (int64 i = 0; i < complex_data.size(); ++i) {
+      const int64 complex_data_size_doubled = complex_data.size() * 2;
+      TF_RET_CHECK(proto.c128s_size() == complex_data_size_doubled);
+      for (int64 i = 0, end = complex_data.size(); i < end; ++i) {
        complex_data[i] =
            complex128{proto.c128s(i * 2), proto.c128s(i * 2 + 1)};
      }
@@ -2396,7 +2401,7 @@ BorrowingLiteral::BorrowingLiteral(absl::Span<const char* const> src_buf_ptrs,
   root_piece_.set_subshape(shape_.get());
   BuildPieceSubtree(*shape_, &root_piece_);

-  for (int i = 0; i < src_buf_ptrs.size(); ++i) {
+  for (int i = 0, end = src_buf_ptrs.size(); i < end; ++i) {
     const auto& src_shape = shape_->tuple_shapes(i);
     CHECK(src_shape.IsArray());
     root_piece_.child(i).set_buffer(const_cast<char*>(src_buf_ptrs[i]));
@@ -67,7 +67,7 @@ Literal ConvertType(LiteralSlice literal) {
             primitive_util::NativeToPrimitiveType<FromNativeT>()) {
           auto src = literal.data<FromNativeT>(shape_index);
           auto dest = result.data<ToNativeT>(shape_index);
-          for (int64 i = 0; i < src.size(); ++i) {
+          for (int64 i = 0, end = src.size(); i < end; ++i) {
            dest[i] = static_cast<ToNativeT>(src[i]);
          }
        } else {
@@ -329,7 +329,7 @@ Literal ConvertType(LiteralSlice literal) {

 /* static */ Literal LiteralUtil::CreateR1U8(absl::string_view value) {
   Literal literal(ShapeUtil::MakeShape(U8, {static_cast<int64>(value.size())}));
-  for (int i = 0; i < value.size(); ++i) {
+  for (int i = 0, end = value.size(); i < end; ++i) {
     literal.Set<uint8>({i}, value[i]);
   }
   return literal;
@@ -345,7 +345,7 @@ Literal ConvertType(LiteralSlice literal) {
     absl::Span<const int64> new_dimensions,
     absl::Span<const int64> minor_to_major, const LiteralSlice& literal) {
   int64 new_num_elements = 1;
-  for (int64 i = 0; i < new_dimensions.size(); ++i) {
+  for (int64 i = 0, end = new_dimensions.size(); i < end; ++i) {
     new_num_elements *= new_dimensions[i];
   }
   CHECK_EQ(ShapeUtil::ElementsIn(literal.shape()), new_num_elements);
@@ -472,7 +472,7 @@ Literal ConvertType(LiteralSlice literal) {
     element_shapes.push_back(element->shape());
   }
   Literal literal(ShapeUtil::MakeTupleShape(element_shapes));
-  for (int i = 0; i < elements.size(); ++i) {
+  for (int i = 0, end = elements.size(); i < end; ++i) {
     TF_CHECK_OK(literal.CopyFrom(*elements[i], /*dest_shape_index=*/{i}));
   }
   return literal;
@@ -485,7 +485,7 @@ Literal ConvertType(LiteralSlice literal) {
     element_shapes.push_back(element.shape());
   }
   Literal literal(ShapeUtil::MakeTupleShape(element_shapes));
-  for (int i = 0; i < elements.size(); ++i) {
+  for (int i = 0, end = elements.size(); i < end; ++i) {
     TF_CHECK_OK(literal.CopyFrom(elements[i], /*dest_shape_index=*/{i}));
   }
   return literal;
@@ -499,7 +499,7 @@ Literal ConvertType(LiteralSlice literal) {
     element_shapes.push_back(element.shape());
   }
   Literal literal(ShapeUtil::MakeTupleShape(element_shapes));
-  for (int64 i = 0; i < elements.size(); ++i) {
+  for (int64 i = 0, end = elements.size(); i < end; ++i) {
     TF_CHECK_OK(
         literal.MoveFrom(std::move(elements[i]), /*dest_shape_index=*/{i}));
   }
@@ -80,9 +80,11 @@ void MetricTableReport::WriteReportToInfoLog(double expected_metric_sum) {

   int64 pos = 0;
   const string report = MakeReport(expected_metric_sum);
-  while (pos < report.size()) {
+  const int report_size = report.size();
+  while (pos < report_size) {
     int64 end_of_line = report.find('\n', pos);
-    if (end_of_line == string::npos) {
+    const int64 _npos = string::npos;
+    if (end_of_line == _npos) {
       end_of_line = report.size();
     }
     absl::string_view line(report.data() + pos, end_of_line - pos);
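
The `_npos` local above is a third variant: `string::npos` is an unsigned `size_t` constant, so comparing it directly against the signed `int64 end_of_line` mixes signedness, and materializing npos in a signed local first keeps the comparison signed-vs-signed. A standalone sketch under that assumption (illustrative name, not code from this commit):

    #include <cstdint>
    #include <string>

    // find() returns size_t, with std::string::npos on failure. Storing the
    // result and npos in signed locals avoids -Wsign-compare; on common
    // two's-complement platforms npos converts to -1, which cannot collide
    // with a valid non-negative find() position.
    bool FoundNewline(const std::string& s, int64_t pos) {
      const int64_t end_of_line = s.find('\n', pos);
      const int64_t npos = std::string::npos;
      return end_of_line != npos;
    }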
@@ -161,7 +163,8 @@ void MetricTableReport::AppendCategoryTable() {
   const char* const kIndentPrefix = " * ";
   int64 entries_to_show = std::min<int64>(max_entries_per_category_to_show_,
                                           category.entries.size());
-  if (category.entries.size() == entries_to_show + 1) {
+  const int64 category_entries_size = category.entries.size();
+  if (category_entries_size == entries_to_show + 1) {
     // May as well show the last entry on the line that would otherwise say
     // that there is a single entry not shown.
     ++entries_to_show;
@@ -224,7 +227,8 @@ void MetricTableReport::AppendTableRow(const string& text, const double metric,
   // Don't try to make a gigantic string and crash if expected_metric_sum_ is
   // wrong somehow.
   int64 padding_len = 1;
-  if (max_metric_string_size >= metric_string.size()) {
+  const int64 metric_string_size = metric_string.size();
+  if (max_metric_string_size >= metric_string_size) {
     padding_len += max_metric_string_size - metric_string.size();
   }
   string padding(padding_len, ' ');
@@ -254,7 +258,7 @@ string MetricTableReport::MetricString(double metric) {
     sp1.remove_prefix(1);
   }
   // Copy rest of input characters.
-  for (int64 i = 0; i < sp1.size(); ++i) {
+  for (int64 i = 0, end = sp1.size(); i < end; ++i) {
     if (i > 0 && (sp1.size() - i) % 3 == 0) {
       output.push_back(',');
     }