From 939db02ff575c90fecdba9022dda5fb13012e16f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tar=C3=A9=20Gaskin?= Date: Sun, 26 Jul 2020 22:14:33 +0000 Subject: [PATCH 1/8] xla directory resolutions --- tensorflow/compiler/xla/array.h | 5 +-- tensorflow/compiler/xla/client/client.cc | 4 +-- tensorflow/compiler/xla/client/lib/math.cc | 4 +-- tensorflow/compiler/xla/client/lib/pooling.cc | 7 ++-- tensorflow/compiler/xla/client/lib/slicing.cc | 5 +-- .../compiler/xla/client/local_client.cc | 5 +-- tensorflow/compiler/xla/client/xla_builder.cc | 26 +++++++++----- tensorflow/compiler/xla/index_util.cc | 6 ++-- tensorflow/compiler/xla/layout_util.cc | 3 +- tensorflow/compiler/xla/literal.cc | 34 +++++++++++-------- tensorflow/compiler/xla/literal_util.cc | 12 +++---- .../compiler/xla/metric_table_report.cc | 14 +++++--- 12 files changed, 74 insertions(+), 51 deletions(-) diff --git a/tensorflow/compiler/xla/array.h b/tensorflow/compiler/xla/array.h index 67bad0f8af7..4654ec2d53d 100644 --- a/tensorflow/compiler/xla/array.h +++ b/tensorflow/compiler/xla/array.h @@ -403,7 +403,8 @@ class Array { // Returns the size of the dimension at the given index. int64 dim(int64 n) const { - CHECK(n < sizes_.size()); + const int64 sizes_size = sizes_.size(); + CHECK(n < sizes_size); return sizes_[n]; } @@ -427,7 +428,7 @@ class Array { if (sizes_.size() != other.sizes_.size()) { return false; } - for (int64 i = 0; i < sizes_.size(); ++i) { + for (int64 i = 0, end = sizes_.size(); i < end; ++i) { if (sizes_[i] != other.sizes_[i]) { return false; } diff --git a/tensorflow/compiler/xla/client/client.cc b/tensorflow/compiler/xla/client/client.cc index 4f020bcec27..09449aeb8b8 100644 --- a/tensorflow/compiler/xla/client/client.cc +++ b/tensorflow/compiler/xla/client/client.cc @@ -312,7 +312,7 @@ StatusOr> Client::Execute( // device 0. // // TODO(b/118493728): Allow Execute to return one result per computation. 
- for (int64 i = 0; i < results.size(); i++) { + for (int64 i = 0, end = results.size(); i < end; i++) { TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(*results[i])); if (!ShapeUtil::IsEmptyTuple(shape)) { VLOG(3) << "Fetching result from device " << i << ": " @@ -350,7 +350,7 @@ StatusOr<std::vector<std::unique_ptr<GlobalData>>> Client::ExecuteParallel( } std::vector<std::unique_ptr<GlobalData>> outputs; - for (size_t i = 0; i < response.responses_size(); ++i) { + for (size_t i = 0, end = response.responses_size(); i < end; ++i) { outputs.push_back( absl::make_unique<GlobalData>(stub_, response.responses(i).output())); if (i < computations.size() && diff --git a/tensorflow/compiler/xla/client/lib/math.cc b/tensorflow/compiler/xla/client/lib/math.cc index baafd7d705b..6fdaab58686 100644 --- a/tensorflow/compiler/xla/client/lib/math.cc +++ b/tensorflow/compiler/xla/client/lib/math.cc @@ -511,7 +511,7 @@ XlaOp Lgamma(XlaOp input) { XlaOp z = Select(need_to_reflect, -input, input - one); XlaOp x = base_lanczos_coeff; - for (int i = 0; i < kLanczosCoefficients.size(); ++i) { + for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) { XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]); XlaOp index = ScalarLike(input, i); x = x + lanczos_coefficient / (z + index + one); @@ -647,7 +647,7 @@ XlaOp Digamma(XlaOp input) { XlaOp num = zero; XlaOp denom = base_lanczos_coeff; - for (int i = 0; i < kLanczosCoefficients.size(); ++i) { + for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) { XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]); XlaOp index = ScalarLike(input, i); num = num - lanczos_coefficient / ((z + index + one) * (z + index + one)); diff --git a/tensorflow/compiler/xla/client/lib/pooling.cc b/tensorflow/compiler/xla/client/lib/pooling.cc index 45033ec07e7..6a0db64b834 100644 --- a/tensorflow/compiler/xla/client/lib/pooling.cc +++ b/tensorflow/compiler/xla/client/lib/pooling.cc @@ -198,15 +198,16 @@ XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span<const int64> gradients_size, XlaBuilder* b = out_backprop.builder(); return b->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> { const int num_dims = kernel_size.size(); - - if (gradients_size.size() != num_dims) { + const int gradients_size_size = gradients_size.size(); + if (gradients_size_size != num_dims) { return tensorflow::errors::InvalidArgument("gradients must be ", num_dims, "-dimensional"); } TF_ASSIGN_OR_RETURN(Shape out_backprop_xla_shape, b->GetShape(out_backprop)); - if (out_backprop_xla_shape.dimensions().size() != num_dims) { + const int obxsd_size = out_backprop_xla_shape.dimensions().size(); + if (obxsd_size != num_dims) { return tensorflow::errors::InvalidArgument("out_backprop must be ", num_dims, "-dimensional"); } diff --git a/tensorflow/compiler/xla/client/lib/slicing.cc b/tensorflow/compiler/xla/client/lib/slicing.cc index 1ea713467f8..ebb35c5df82 100644 --- a/tensorflow/compiler/xla/client/lib/slicing.cc +++ b/tensorflow/compiler/xla/client/lib/slicing.cc @@ -74,12 +74,13 @@ XlaOp UpdateSlice(XlaOp x, XlaOp update, absl::Span<const int64> start) { return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x)); const int64 n_dims = shape.rank(); - TF_RET_CHECK(start.size() == n_dims); + const int64 start_size = start.size(); + TF_RET_CHECK(start_size == n_dims); // TODO(phawkins): make int64 work on all backends, remove the int32 cast.
std::vector<int32> start_as_int32(start.begin(), start.end()); std::vector<XlaOp> start_ops(start.size()); - for (int i = 0; i < start.size(); ++i) { + for (int i = 0, end = start.size(); i < end; ++i) { start_ops[i] = ConstantR0<int32>(builder, start_as_int32[i]); } return DynamicUpdateSlice(x, update, start_ops); diff --git a/tensorflow/compiler/xla/client/local_client.cc b/tensorflow/compiler/xla/client/local_client.cc index fee92957096..1389f548c5d 100644 --- a/tensorflow/compiler/xla/client/local_client.cc +++ b/tensorflow/compiler/xla/client/local_client.cc @@ -122,12 +122,13 @@ LocalExecutable::RunHelper(const absl::Span<const Shape* const> argument_shapes, executable_->module_config().entry_computation_layout(); // Check argument number, shapes, and layouts. - if (argument_shapes.size() != computation_layout.parameter_count()) { + const int argument_shapes_size = argument_shapes.size(); + if (argument_shapes_size != computation_layout.parameter_count()) { return InvalidArgument( "invalid number of arguments for computation: expected %d, got %u", computation_layout.parameter_count(), argument_shapes.size()); } - for (int i = 0; i < argument_shapes.size(); ++i) { + for (int i = 0, end = argument_shapes.size(); i < end; ++i) { if (!computation_layout.parameter_layout(i).MatchesLayoutInShape( *argument_shapes[i])) { return InvalidParameterArgument( diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc index cc6a680c4e9..c84d2b519dc 100644 --- a/tensorflow/compiler/xla/client/xla_builder.cc +++ b/tensorflow/compiler/xla/client/xla_builder.cc @@ -766,15 +766,17 @@ XlaOp XlaBuilder::BroadcastInDim( TF_ASSIGN_OR_RETURN(auto output_shape, ShapeUtil::MakeValidatedShape( operand_shape->element_type(), out_dim_size)); - if (operand_shape->rank() != broadcast_dimensions.size()) { + tensorflow::int64 broadcast_dimensions_size = broadcast_dimensions.size(); + if (operand_shape->rank() != broadcast_dimensions_size) { return InvalidArgument( "Size of broadcast_dimensions has to match operand's rank; operand " "rank: %lld, size of broadcast_dimensions %u.", operand_shape->rank(), broadcast_dimensions.size()); } - for (int i = 0; i < broadcast_dimensions.size(); i++) { + for (int i = 0, end = broadcast_dimensions.size(); i < end; i++) { + const tensorflow::int64 out_dim_size_size = out_dim_size.size(); if (broadcast_dimensions[i] < 0 || - broadcast_dimensions[i] > out_dim_size.size()) { + broadcast_dimensions[i] > out_dim_size_size) { return InvalidArgument("Broadcast dimension %lld is out of bound", broadcast_dimensions[i]); } @@ -786,7 +788,7 @@ XlaOp XlaBuilder::BroadcastInDim( *operand_shape, output_shape, broadcast_dimensions) .status()); std::vector<int64> in_dim_size(out_dim_size.begin(), out_dim_size.end()); - for (int i = 0; i < broadcast_dimensions.size(); i++) { + for (int i = 0, end = broadcast_dimensions.size(); i < end; i++) { in_dim_size[broadcast_dimensions[i]] = operand_shape->dimensions(i); } const auto& in_dim_shape = @@ -835,7 +837,7 @@ StatusOr<XlaOp> XlaBuilder::SliceInternal(const Shape& shape, XlaOp operand, absl::Span<const int64> strides) { HloInstructionProto instr; *instr.mutable_shape() = shape.ToProto(); - for (int i = 0; i < start_indices.size(); i++) { + for (int i = 0, end = start_indices.size(); i < end; i++) { auto* slice_config = instr.add_slice_dimensions(); slice_config->set_start(start_indices[i]); slice_config->set_limit(limit_indices[i]); @@ -1543,7 +1545,7 @@ XlaOp XlaBuilder::AfterAll(absl::Span<const XlaOp> tokens) { if (tokens.empty()) { return InvalidArgument("AfterAll requires at 
least one operand"); } - for (int i = 0; i < tokens.size(); ++i) { + for (int i = 0, end = tokens.size(); i < end; ++i) { XlaOp operand = tokens[i]; TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand)); if (!operand_shape->IsToken()) { @@ -2007,7 +2009,7 @@ XlaOp XlaBuilder::ConditionalImpl( std::vector<const Shape*> branch_operand_shapes(branch_operands.size()); std::vector<ProgramShape> branch_computation_shapes( branch_computations.size()); - for (int j = 0; j < branch_operands.size(); ++j) { + for (int j = 0, end = branch_operands.size(); j < end; ++j) { TF_ASSIGN_OR_RETURN(branch_operand_shapes[j], GetShape(branch_operands[j])); TF_ASSIGN_OR_RETURN(branch_computation_shapes[j], @@ -2416,7 +2418,8 @@ XlaOp XlaBuilder::AllToAll(XlaOp operand, int64 split_dimension, if (layout) { TF_RET_CHECK(shape.IsTuple() && !ShapeUtil::IsNestedTuple(shape)); for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { - if (layout->minor_to_major().size() != shape.tuple_shapes(i).rank()) { + const int64 layout_minor_to_major_size = layout->minor_to_major().size(); + if (layout_minor_to_major_size != shape.tuple_shapes(i).rank()) { return InvalidArgument( "Provided layout must be compatible with the operand shape: %s " "vs %s", @@ -3021,7 +3024,12 @@ StatusOr<XlaOp> XlaBuilder::AddInstruction(HloInstructionProto&& instr, instr.add_operand_ids(operand.handle()); } - *instr.mutable_metadata() = metadata_; + if (one_shot_metadata_.has_value()) { + *instr.mutable_metadata() = one_shot_metadata_.value(); + one_shot_metadata_.reset(); + } else { + *instr.mutable_metadata() = metadata_; + } if (sharding_) { *instr.mutable_sharding() = *sharding_; } diff --git a/tensorflow/compiler/xla/index_util.cc b/tensorflow/compiler/xla/index_util.cc index 463a8d95fc5..4bec454e520 100644 --- a/tensorflow/compiler/xla/index_util.cc +++ b/tensorflow/compiler/xla/index_util.cc @@ -143,7 +143,8 @@ namespace xla { /* static */ bool IndexUtil::IndexInBounds(const Shape& shape, absl::Span<const int64> index) { int64 rank = shape.rank(); - if (rank != index.size()) { + const int64 index_size = index.size(); + if (rank != index_size) { return false; } for (int64 d = 0; d < rank; ++d) { @@ -157,7 +158,8 @@ namespace xla { /* static */ int IndexUtil::CompareIndices(absl::Span<const int64> lhs, absl::Span<const int64> rhs) { int64 rank = lhs.size(); - CHECK_EQ(rhs.size(), rank); + const int64 rhs_rank = rhs.size(); + CHECK_EQ(rhs_rank, rank); for (int64 dim = 0; dim < rank; ++dim) { if (lhs[dim] < rhs[dim]) { return -1; diff --git a/tensorflow/compiler/xla/layout_util.cc b/tensorflow/compiler/xla/layout_util.cc index faa33e292c2..299a402bcf6 100644 --- a/tensorflow/compiler/xla/layout_util.cc +++ b/tensorflow/compiler/xla/layout_util.cc @@ -342,7 +342,8 @@ Layout CreateDefaultLayoutForRank(int64 rank) { /* static */ std::vector<int64> LayoutUtil::MakeLogicalToPhysical( const Layout& layout) { std::vector<int64> logical_to_physical(layout.minor_to_major_size()); - for (int64 physical = 0; physical < logical_to_physical.size(); ++physical) { + for (int64 physical = 0, end = logical_to_physical.size(); + physical < end; ++physical) { const int64 logical = Major(layout, physical); logical_to_physical[logical] = physical; } diff --git a/tensorflow/compiler/xla/literal.cc b/tensorflow/compiler/xla/literal.cc index d2b300f0b2d..d03f3f8140f 100644 --- a/tensorflow/compiler/xla/literal.cc +++ b/tensorflow/compiler/xla/literal.cc @@ -58,7 +58,7 @@ constexpr int kMinimumAlignment = 64; // Precondition: size % 2 == 0 (elements in the array are 16 bits long) void ConvertEndianShort(string* bytes) {
CHECK_EQ(bytes->size() / 2, 0); - for (int64 i = 0; i < bytes->size(); i += 2) { + for (int64 i = 0, end = bytes->size(); i < end; i += 2) { std::swap((*bytes)[i], (*bytes)[i + 1]); } } @@ -249,8 +249,10 @@ template <typename NativeT> Status MutableLiteralBase::CopySliceFromInternal( const LiteralBase& src_literal, absl::Span<const int64> src_base, absl::Span<const int64> dest_base, absl::Span<const int64> copy_size) { - TF_RET_CHECK(src_literal.shape().rank() == src_base.size()); - TF_RET_CHECK(shape().rank() == dest_base.size()); + const int64 src_base_size = src_base.size(); + const int64 dest_base_size = dest_base.size(); + TF_RET_CHECK(src_literal.shape().rank() == src_base_size); + TF_RET_CHECK(shape().rank() == dest_base_size); auto linear_index = [](const Shape& shape, absl::Span<const int64> multi_index) { @@ -564,7 +566,7 @@ Status MutableLiteralBase::CopyFrom(const LiteralSlice& src_literal, } // Construct the index of the corresponding piece in the source literal. ShapeIndex src_piece_index = src_shape_index; - for (int64 i = dest_shape_index.size(); i < index.size(); ++i) { + for (int64 i = dest_shape_index.size(), end = index.size(); i < end; ++i) { src_piece_index.push_back(index[i]); } TF_RETURN_IF_ERROR( @@ -755,7 +757,7 @@ StatusOr<Literal> LiteralBase::Broadcast( return InvalidArgument("Broadcast only supports arrays."); } - for (int64 i = 0; i < dimensions.size(); i++) { + for (int64 i = 0, end = dimensions.size(); i < end; i++) { TF_RET_CHECK(shape().dimensions(i) == result_shape.dimensions(dimensions[i])); } @@ -779,7 +781,7 @@ StatusOr<Literal> LiteralBase::Broadcast( ShapeUtil::ForEachIndex( result_shape, [&](absl::Span<const int64> output_index) { - for (int64 i = 0; i < dimensions.size(); ++i) { + for (int64 i = 0, end = dimensions.size(); i < end; ++i) { scratch_source_index[i] = output_index[dimensions[i]]; } int64 dest_index = IndexUtil::MultidimensionalIndexToLinearIndex( @@ -1185,8 +1187,9 @@ void DenseArrayToStringHelper(const LiteralBase& literal, } // Handle the non-innermost tensors of a 2D+ tensor.
if (brace == "{") { + const int64 accum_indices_size = accum_indices->size(); if (rank > 3 && !accum_indices->empty() && - accum_indices->size() < rank) { + accum_indices_size < rank) { int index = accum_indices->size() - 1; int value = accum_indices->back(); return StrCat(brace, " /*i", index, "=", value, "*/\n"); @@ -1520,7 +1523,7 @@ StatusOr LiteralBase::ConvertToShape(const Shape& dest_shape) const { } Literal literal(ShapeUtil::MakeTupleShape(element_shapes), /*allocate_arrays=*/false); - for (int i = 0; i < elements.size(); ++i) { + for (int i = 0, end = elements.size(); i < end; ++i) { TF_CHECK_OK( literal.MoveFrom(std::move(elements[i]), /*dest_shape_index=*/{i})); } @@ -1891,13 +1894,13 @@ bool LiteralBase::IsR1Iota() const { auto is_iota_at_idx = [&](const int64 idx) { switch (shape().element_type()) { case U8: - return Get({idx}) == idx; + return Get({idx}) == static_cast(idx); case U16: - return Get({idx}) == idx; + return Get({idx}) == static_cast(idx); case U32: - return Get({idx}) == idx; + return Get({idx}) == static_cast(idx); case U64: - return Get({idx}) == idx; + return Get({idx}) == static_cast(idx); case S8: return Get({idx}) == idx; case S16: @@ -2174,8 +2177,9 @@ Status LiteralBase::Piece::CopyFromProto(const LiteralProto& proto) { } case C128: { auto complex_data = data(); - TF_RET_CHECK(proto.c128s_size() == complex_data.size() * 2); - for (int64 i = 0; i < complex_data.size(); ++i) { + const int64 complex_data_size_doubled = complex_data.size() * 2; + TF_RET_CHECK(proto.c128s_size() == complex_data_size_doubled); + for (int64 i = 0, end = complex_data.size(); i < end; ++i) { complex_data[i] = complex128{proto.c128s(i * 2), proto.c128s(i * 2 + 1)}; } @@ -2394,7 +2398,7 @@ BorrowingLiteral::BorrowingLiteral(absl::Span src_buf_ptrs, root_piece_.set_subshape(shape_.get()); BuildPieceSubtree(*shape_, &root_piece_); - for (int i = 0; i < src_buf_ptrs.size(); ++i) { + for (int i = 0, end = src_buf_ptrs.size(); i < end; ++i) { const auto& src_shape = shape_->tuple_shapes(i); CHECK(src_shape.IsArray()); root_piece_.child(i).set_buffer(const_cast(src_buf_ptrs[i])); diff --git a/tensorflow/compiler/xla/literal_util.cc b/tensorflow/compiler/xla/literal_util.cc index 4304c207cad..0286aa20b3b 100644 --- a/tensorflow/compiler/xla/literal_util.cc +++ b/tensorflow/compiler/xla/literal_util.cc @@ -67,7 +67,7 @@ Literal ConvertType(LiteralSlice literal) { primitive_util::NativeToPrimitiveType()) { auto src = literal.data(shape_index); auto dest = result.data(shape_index); - for (int64 i = 0; i < src.size(); ++i) { + for (int64 i = 0, end = src.size(); i < end; ++i) { dest[i] = static_cast(src[i]); } } else { @@ -329,7 +329,7 @@ Literal ConvertType(LiteralSlice literal) { /* static */ Literal LiteralUtil::CreateR1U8(absl::string_view value) { Literal literal(ShapeUtil::MakeShape(U8, {static_cast(value.size())})); - for (int i = 0; i < value.size(); ++i) { + for (int i = 0, end = value.size(); i < end; ++i) { literal.Set({i}, value[i]); } return literal; @@ -345,7 +345,7 @@ Literal ConvertType(LiteralSlice literal) { absl::Span new_dimensions, absl::Span minor_to_major, const LiteralSlice& literal) { int64 new_num_elements = 1; - for (int64 i = 0; i < new_dimensions.size(); ++i) { + for (int64 i = 0, end = new_dimensions.size(); i < end; ++i) { new_num_elements *= new_dimensions[i]; } CHECK_EQ(ShapeUtil::ElementsIn(literal.shape()), new_num_elements); @@ -472,7 +472,7 @@ Literal ConvertType(LiteralSlice literal) { element_shapes.push_back(element->shape()); } Literal 
literal(ShapeUtil::MakeTupleShape(element_shapes)); - for (int i = 0; i < elements.size(); ++i) { + for (int i = 0, end = elements.size(); i < end; ++i) { TF_CHECK_OK(literal.CopyFrom(*elements[i], /*dest_shape_index=*/{i})); } return literal; @@ -485,7 +485,7 @@ Literal ConvertType(LiteralSlice literal) { element_shapes.push_back(element.shape()); } Literal literal(ShapeUtil::MakeTupleShape(element_shapes)); - for (int i = 0; i < elements.size(); ++i) { + for (int i = 0, end = elements.size(); i < end; ++i) { TF_CHECK_OK(literal.CopyFrom(elements[i], /*dest_shape_index=*/{i})); } return literal; @@ -499,7 +499,7 @@ Literal ConvertType(LiteralSlice literal) { element_shapes.push_back(element.shape()); } Literal literal(ShapeUtil::MakeTupleShape(element_shapes)); - for (int64 i = 0; i < elements.size(); ++i) { + for (int64 i = 0, end = elements.size(); i < end; ++i) { TF_CHECK_OK( literal.MoveFrom(std::move(elements[i]), /*dest_shape_index=*/{i})); } diff --git a/tensorflow/compiler/xla/metric_table_report.cc b/tensorflow/compiler/xla/metric_table_report.cc index bad65ac3201..be235482718 100644 --- a/tensorflow/compiler/xla/metric_table_report.cc +++ b/tensorflow/compiler/xla/metric_table_report.cc @@ -80,9 +80,11 @@ void MetricTableReport::WriteReportToInfoLog(double expected_metric_sum) { int64 pos = 0; const string report = MakeReport(expected_metric_sum); - while (pos < report.size()) { + const int report_size = report.size(); + while (pos < report_size) { int64 end_of_line = report.find('\n', pos); - if (end_of_line == string::npos) { + const int64 _npos = string::npos; + if (end_of_line == _npos) { end_of_line = report.size(); } absl::string_view line(report.data() + pos, end_of_line - pos); @@ -161,7 +163,8 @@ void MetricTableReport::AppendCategoryTable() { const char* const kIndentPrefix = " * "; int64 entries_to_show = std::min(max_entries_per_category_to_show_, category.entries.size()); - if (category.entries.size() == entries_to_show + 1) { + const int64 category_entries_size = category.entries.size(); + if (category_entries_size == entries_to_show + 1) { // May as well show the last entry on the line that would otherwise say // that there is a single entry not shown. ++entries_to_show; @@ -224,7 +227,8 @@ void MetricTableReport::AppendTableRow(const string& text, const double metric, // Don't try to make a gigantic string and crash if expected_metric_sum_ is // wrong somehow. int64 padding_len = 1; - if (max_metric_string_size >= metric_string.size()) { + const int64 metric_string_size = metric_string.size(); + if (max_metric_string_size >= metric_string_size) { padding_len += max_metric_string_size - metric_string.size(); } string padding(padding_len, ' '); @@ -254,7 +258,7 @@ string MetricTableReport::MetricString(double metric) { sp1.remove_prefix(1); } // Copy rest of input characters. 
- for (int64 i = 0; i < sp1.size(); ++i) { + for (int64 i = 0, end = sp1.size(); i < end; ++i) { if (i > 0 && (sp1.size() - i) % 3 == 0) { output.push_back(','); } From 982236961f580b6b6edf09d693a89e7ad799ce4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tar=C3=A9=20Gaskin?= Date: Tue, 28 Jul 2020 20:08:56 +0000 Subject: [PATCH 2/8] updates --- tensorflow/compiler/xla/client/lib/pooling.cc | 8 ++++---- tensorflow/compiler/xla/client/xla_builder.cc | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tensorflow/compiler/xla/client/lib/pooling.cc b/tensorflow/compiler/xla/client/lib/pooling.cc index 6a0db64b834..460c1cff03a 100644 --- a/tensorflow/compiler/xla/client/lib/pooling.cc +++ b/tensorflow/compiler/xla/client/lib/pooling.cc @@ -198,16 +198,16 @@ XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span gradients_size, XlaBuilder* b = out_backprop.builder(); return b->ReportErrorOrReturn([&]() -> StatusOr { const int num_dims = kernel_size.size(); - const int gradients_size_size = gradients_size.size(); - if (gradients_size_size != num_dims) { + const int num_gradients = gradients_size.size(); + if (num_gradients != num_dims) { return tensorflow::errors::InvalidArgument("gradients must be ", num_dims, "-dimensional"); } TF_ASSIGN_OR_RETURN(Shape out_backprop_xla_shape, b->GetShape(out_backprop)); - const int obxsd_size = out_backprop_xla_shape.dimensions().size(); - if (obxsd_size != num_dims) { + const int backprop_xla_num_dims = out_backprop_xla_shape.dimensions().size(); + if (backprop_xla_num_dims != num_dims) { return tensorflow::errors::InvalidArgument("out_backprop must be ", num_dims, "-dimensional"); } diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc index c84d2b519dc..db437142665 100644 --- a/tensorflow/compiler/xla/client/xla_builder.cc +++ b/tensorflow/compiler/xla/client/xla_builder.cc @@ -774,9 +774,9 @@ XlaOp XlaBuilder::BroadcastInDim( operand_shape->rank(), broadcast_dimensions.size()); } for (int i = 0, end = broadcast_dimensions.size(); i < end; i++) { - const tensorflow::int64 out_dim_size_size = out_dim_size.size(); + const tensorflow::int64 num_dims = out_dim_size.size(); if (broadcast_dimensions[i] < 0 || - broadcast_dimensions[i] > out_dim_size_size) { + broadcast_dimensions[i] > num_dims) { return InvalidArgument("Broadcast dimension %lld is out of bound", broadcast_dimensions[i]); } From c95202254288b7df371f996150803679d6280d14 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Tue, 28 Jul 2020 21:27:13 -0400 Subject: [PATCH 3/8] Update xla_builder.cc --- tensorflow/compiler/xla/client/xla_builder.cc | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc index db437142665..84843ad821f 100644 --- a/tensorflow/compiler/xla/client/xla_builder.cc +++ b/tensorflow/compiler/xla/client/xla_builder.cc @@ -3024,12 +3024,7 @@ StatusOr XlaBuilder::AddInstruction(HloInstructionProto&& instr, instr.add_operand_ids(operand.handle()); } - if (one_shot_metadata_.has_value()) { - *instr.mutable_metadata() = one_shot_metadata_.value(); - one_shot_metadata_.reset(); - } else { - *instr.mutable_metadata() = metadata_; - } + *instr.mutable_metadata() = metadata_; if (sharding_) { *instr.mutable_sharding() = *sharding_; } From e8d82106dc7af10a9ba37e79edb69b27403f97bc Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Wed, 29 Jul 2020 12:33:58 -0400 Subject: [PATCH 4/8] Update 
tensorflow/compiler/xla/client/xla_builder.cc Co-authored-by: Mihai Maruseac --- tensorflow/compiler/xla/client/xla_builder.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc index 84843ad821f..167a835a769 100644 --- a/tensorflow/compiler/xla/client/xla_builder.cc +++ b/tensorflow/compiler/xla/client/xla_builder.cc @@ -766,8 +766,8 @@ XlaOp XlaBuilder::BroadcastInDim( TF_ASSIGN_OR_RETURN(auto output_shape, ShapeUtil::MakeValidatedShape( operand_shape->element_type(), out_dim_size)); - tensorflow::int64 broadcast_dimensions_size = broadcast_dimensions.size(); - if (operand_shape->rank() != broadcast_dimensions_size) { + tensorflow::int64 broadcast_rank = broadcast_dimensions.size(); + if (operand_shape->rank() != broadcast_rank) { return InvalidArgument( "Size of broadcast_dimensions has to match operand's rank; operand " "rank: %lld, size of broadcast_dimensions %u.", From 1712c1053f5d9fbb4abfc49d9b90ab835171c912 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Wed, 29 Jul 2020 12:34:08 -0400 Subject: [PATCH 5/8] Update tensorflow/compiler/xla/client/xla_builder.cc Co-authored-by: Mihai Maruseac --- tensorflow/compiler/xla/client/xla_builder.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc index 167a835a769..4fbdc7b8dc9 100644 --- a/tensorflow/compiler/xla/client/xla_builder.cc +++ b/tensorflow/compiler/xla/client/xla_builder.cc @@ -788,7 +788,7 @@ XlaOp XlaBuilder::BroadcastInDim( *operand_shape, output_shape, broadcast_dimensions) .status()); std::vector in_dim_size(out_dim_size.begin(), out_dim_size.end()); - for (int i = 0, end = broadcast_dimensions.size(); i < end; i++) { + for (int i = 0; i < broadcast_rank; i++) { in_dim_size[broadcast_dimensions[i]] = operand_shape->dimensions(i); } const auto& in_dim_shape = From 41b7aff167e8cbd1ac3a2093e8df4079d2edea30 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Wed, 29 Jul 2020 12:34:17 -0400 Subject: [PATCH 6/8] Update tensorflow/compiler/xla/client/xla_builder.cc Co-authored-by: Mihai Maruseac --- tensorflow/compiler/xla/client/xla_builder.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc index 4fbdc7b8dc9..1c79555aff3 100644 --- a/tensorflow/compiler/xla/client/xla_builder.cc +++ b/tensorflow/compiler/xla/client/xla_builder.cc @@ -773,7 +773,7 @@ XlaOp XlaBuilder::BroadcastInDim( "rank: %lld, size of broadcast_dimensions %u.", operand_shape->rank(), broadcast_dimensions.size()); } - for (int i = 0, end = broadcast_dimensions.size(); i < end; i++) { + for (int i = 0; i < broadcast_rank; i++) { const tensorflow::int64 num_dims = out_dim_size.size(); if (broadcast_dimensions[i] < 0 || broadcast_dimensions[i] > num_dims) { From 3451ef8a16069a569afba20f3ed01ebf8e4d1f22 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Wed, 29 Jul 2020 17:15:32 -0400 Subject: [PATCH 7/8] Update literal.cc --- tensorflow/compiler/xla/literal.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/compiler/xla/literal.cc b/tensorflow/compiler/xla/literal.cc index d03f3f8140f..50ab69b4efe 100644 --- a/tensorflow/compiler/xla/literal.cc +++ b/tensorflow/compiler/xla/literal.cc @@ -1894,13 +1894,13 @@ bool LiteralBase::IsR1Iota() const { auto is_iota_at_idx = [&](const int64 idx) { switch 
(shape().element_type()) { case U8: - return Get({idx}) == static_cast(idx); + return static_cast(Get({idx})) == idx; case U16: - return Get({idx}) == static_cast(idx); + return static_cast(Get({idx})) == idx; case U32: - return Get({idx}) == static_cast(idx); + return static_cast(Get({idx})) == idx; case U64: - return Get({idx}) == static_cast(idx); + return static_cast(Get({idx})) == idx; case S8: return Get({idx}) == idx; case S16: From 2158b9de8d6064d1a15e3e3a48b9c5c93c128144 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Wed, 29 Jul 2020 17:20:29 -0400 Subject: [PATCH 8/8] Update literal.cc --- tensorflow/compiler/xla/literal.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow/compiler/xla/literal.cc b/tensorflow/compiler/xla/literal.cc index 50ab69b4efe..543ea18155e 100644 --- a/tensorflow/compiler/xla/literal.cc +++ b/tensorflow/compiler/xla/literal.cc @@ -1894,13 +1894,13 @@ bool LiteralBase::IsR1Iota() const { auto is_iota_at_idx = [&](const int64 idx) { switch (shape().element_type()) { case U8: - return static_cast(Get({idx})) == idx; + return static_cast(Get({idx})) == idx; case U16: - return static_cast(Get({idx})) == idx; + return static_cast(Get({idx})) == idx; case U32: - return static_cast(Get({idx})) == idx; + return static_cast(Get({idx})) == idx; case U64: - return static_cast(Get({idx})) == idx; + return static_cast(Get({idx})) == idx; case S8: return Get({idx}) == idx; case S16: