diff --git a/tensorflow/compiler/tf2xla/ops/xla_ops.cc b/tensorflow/compiler/tf2xla/ops/xla_ops.cc
index 862da1f3f95..f4b9e9654d2 100644
--- a/tensorflow/compiler/tf2xla/ops/xla_ops.cc
+++ b/tensorflow/compiler/tf2xla/ops/xla_ops.cc
@@ -441,7 +441,8 @@ REGISTER_OP("XlaReduce")
       auto dim_in_range = [rank](int64 dim) {
         return dim >= 0 && dim < rank;
       };
-      if (rank < dimensions_to_reduce.size() ||
+      const int dimensions_to_reduce_size = dimensions_to_reduce.size();
+      if (rank < dimensions_to_reduce_size ||
           dims_set.size() != dimensions_to_reduce.size() ||
           !absl::c_all_of(dimensions_to_reduce, dim_in_range)) {
         return errors::InvalidArgument(
diff --git a/tensorflow/core/framework/shape_inference.cc b/tensorflow/core/framework/shape_inference.cc
index 92e98b3fed4..456c1826572 100644
--- a/tensorflow/core/framework/shape_inference.cc
+++ b/tensorflow/core/framework/shape_inference.cc
@@ -62,14 +62,14 @@ InferenceContext::InferenceContext(
   }
   std::vector<std::unique_ptr<std::vector<ShapeAndType>>> handle_data(
       input_shapes.size());
-  for (int i = 0; i < input_handle_shapes_and_types.size(); ++i) {
+  for (int i = 0, end = input_handle_shapes_and_types.size(); i < end; ++i) {
     const auto& v = input_handle_shapes_and_types[i];
     if (v == nullptr) {
       continue;
     }
     handle_data[i].reset(new std::vector<ShapeAndType>(v->size()));
     auto& new_v = *handle_data[i];
-    for (int j = 0; j < v->size(); ++j) {
+    for (int j = 0, end = v->size(); j < end; ++j) {
       const auto& p = (*v)[j];
       construction_status_.Update(
           MakeShapeFromPartialTensorShape(p.first, &new_v[j].shape));
@@ -123,11 +123,12 @@ Status InferenceContext::set_output(StringPiece output_name,
   } else {
     const int start = result->second.first;
     const int size = result->second.second - start;
-    if (size != shapes.size()) {
+    const int shapes_size = shapes.size();
+    if (size != shapes_size) {
       return errors::InvalidArgument("Must have exactly ", shapes.size(),
                                      " shapes.");
     }
-    for (int i = 0; i < size; ++i) {
+    for (int i = 0; i < shapes_size; ++i) {
       outputs_[i + start] = shapes[i];
     }
   }
@@ -181,7 +182,8 @@ void InferenceContext::PreInputInit(
 }
 
 Status InferenceContext::ExpandOutputs(int new_output_size) {
-  if (new_output_size < outputs_.size()) {
+  const int outputs_size = outputs_.size();
+  if (new_output_size < outputs_size) {
     return errors::InvalidArgument("Trying to reduce number of outputs of op.");
   }
   outputs_.resize(new_output_size, nullptr);
@@ -209,8 +211,8 @@ void InferenceContext::PostInputInit(
     }
     input_handle_shapes_and_types_ = std::move(input_handle_data);
   }
-
-  if (inputs_.size() != num_inputs_from_node_def) {
+  const int inputs_size = inputs_.size();
+  if (inputs_size != num_inputs_from_node_def) {
    construction_status_ = errors::InvalidArgument(
        "Wrong number of inputs passed: ", inputs_.size(), " while ",
        num_inputs_from_node_def, " expected based on NodeDef");
@@ -718,7 +720,8 @@ Status InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape(
   TF_RETURN_IF_ERROR(WithRankAtMost(input(input_idx), 1, &input_shape));
 
   requested_input_tensor_as_partial_shape_[input_idx] = true;
-  if (input_idx < input_tensors_as_shapes_.size() &&
+  const int input_tensors_as_shapes_size = input_tensors_as_shapes_.size();
+  if (input_idx < input_tensors_as_shapes_size &&
       input_tensors_as_shapes_[input_idx].IsSet() &&
       RankKnown(input_tensors_as_shapes_[input_idx])) {
     *out = input_tensors_as_shapes_[input_idx];
@@ -736,7 +739,8 @@ Status InferenceContext::MakeShapeFromShapeTensor(int input_idx,
   TF_RETURN_IF_ERROR(WithRank(input(input_idx), 1, &input_shape));
 
   requested_input_tensor_as_partial_shape_[input_idx] = true;
-  if (input_idx < input_tensors_as_shapes_.size() &&
+  const int input_tensors_as_shapes_size = input_tensors_as_shapes_.size();
+  if (input_idx < input_tensors_as_shapes_size &&
      input_tensors_as_shapes_[input_idx].IsSet() &&
      RankKnown(input_tensors_as_shapes_[input_idx])) {
    *out = input_tensors_as_shapes_[input_idx];
@@ -1099,14 +1103,16 @@ Status InferenceContext::AttachContext(const Status& status) {
   std::vector<string> input_from_tensors_str;
   std::vector<string> input_from_tensors_as_shape_str;
   input_from_tensors_as_shape_str.reserve(inputs_.size());
-  for (int i = 0; i < inputs_.size(); ++i) {
+  for (int i = 0, end = inputs_.size(); i < end; ++i) {
+    const int input_tensors_as_shapes_size = input_tensors_as_shapes_.size();
+    const int input_tensors_size = input_tensors_.size();
     if (requested_input_tensor_as_partial_shape_[i] &&
-        i < input_tensors_as_shapes_.size() &&
+        i < input_tensors_as_shapes_size &&
         input_tensors_as_shapes_[i].IsSet() &&
         RankKnown(input_tensors_as_shapes_[i])) {
       input_from_tensors_as_shape_str.push_back(strings::StrCat(
           "input[", i, "] = ", DebugString(input_tensors_as_shapes_[i])));
-    } else if (requested_input_tensor_[i] && i < input_tensors_.size() &&
+    } else if (requested_input_tensor_[i] && i < input_tensors_size &&
                input_tensors_[i] != nullptr) {
       input_from_tensors_str.push_back(strings::StrCat(
           "input[", i, "] = <",
@@ -1140,7 +1146,7 @@ bool InferenceContext::MergeHandleShapesAndTypes(
   }
   std::vector<ShapeAndType> new_values(shapes_and_types.size());
   bool refined = false;
-  for (int i = 0; i < shapes_and_types.size(); ++i) {
+  for (int i = 0, end = shapes_and_types.size(); i < end; ++i) {
     const ShapeAndType& existing = (*to_update)[i];
     if (shapes_and_types[i].dtype == existing.dtype) {
       new_values[i].dtype = existing.dtype;
@@ -1164,7 +1170,7 @@ bool InferenceContext::MergeHandleShapesAndTypes(
   if (!refined) {
     return false;
   }
-  for (int i = 0; i < new_values.size(); ++i) {
+  for (int i = 0, end = new_values.size(); i < end; ++i) {
     (*to_update)[i] = new_values[i];
   }
   return true;
@@ -1199,7 +1205,7 @@ bool InferenceContext::RelaxHandleShapesAndMergeTypes(
     return false;
   }
   std::vector<ShapeAndType> new_values(shapes_and_types.size());
-  for (int i = 0; i < shapes_and_types.size(); ++i) {
+  for (int i = 0, end = shapes_and_types.size(); i < end; ++i) {
     const ShapeAndType& existing = (*to_update)[i];
     if (shapes_and_types[i].dtype == existing.dtype) {
       new_values[i].dtype = existing.dtype;
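Every hunk above applies the same -Wsign-compare fix: the unsigned value returned by size() is hoisted into a signed const int once, so the comparison is signed-vs-signed and the container method is not re-evaluated on each iteration. A minimal standalone sketch of the idiom; the CountNonZero function and values vector are hypothetical, not part of this patch:

    #include <vector>

    int CountNonZero(const std::vector<int>& values) {
      int count = 0;
      // values.size() returns size_t; comparing it directly against a signed
      // int index trips -Wsign-compare. Caching it once in a signed bound
      // keeps the loop header warning-free.
      for (int i = 0, end = values.size(); i < end; ++i) {
        if (values[i] != 0) ++count;
      }
      return count;
    }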
diff --git a/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc b/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc
index 2489cf93e78..57f7e7c664b 100644
--- a/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc
+++ b/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc
@@ -73,8 +73,7 @@ class UniqueNodes {
     if (it == memoized_signatures_.end()) return;
 
     std::vector<NodeDef*>& candidates = rep_[it->second];
-    for (int i = 0, candidates_size = candidates.size(); i < candidates_size;
-         ++i) {
+    for (int i = 0, end = candidates.size(); i < end; ++i) {
       if (candidates[i] == node) {
         std::swap(candidates[i], candidates[candidates.size() - 1]);
         candidates.resize(candidates.size() - 1);
diff --git a/tensorflow/core/grappler/optimizers/debug_stripper.cc b/tensorflow/core/grappler/optimizers/debug_stripper.cc
index d0aa79a24d7..dfc58ab7ae9 100644
--- a/tensorflow/core/grappler/optimizers/debug_stripper.cc
+++ b/tensorflow/core/grappler/optimizers/debug_stripper.cc
@@ -63,8 +63,7 @@ Status DebugStripper::Optimize(Cluster* cluster, const GrapplerItem& item,
       node.mutable_attr()->swap(new_attr);
       // As Identity op only takes one input, mark redundant inputs as control
      // input.
-      for (int i = 1, node_input_size = node.input_size(); i < node_input_size;
-           ++i) {
+      for (int i = 1, end = node.input_size(); i < end; ++i) {
        if (!IsControlInput(node.input(i))) {
          *node.mutable_input(i) = AsControlDependency(NodeName(node.input(i)));
        }
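The UniqueNodes hunk only renames the cached bound; the body keeps the swap-with-last-then-shrink idiom for O(1) removal from a vector whose order does not matter. A sketch of that idiom under hypothetical names (SwapErase, items):

    #include <utility>
    #include <vector>

    // Remove the first occurrence of `target` without preserving order:
    // swap it into the last slot, then drop the tail. No element shifting.
    void SwapErase(std::vector<int>& items, int target) {
      for (int i = 0, end = items.size(); i < end; ++i) {
        if (items[i] == target) {
          std::swap(items[i], items.back());
          items.pop_back();
          return;
        }
      }
    }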
diff --git a/tensorflow/core/grappler/optimizers/model_pruner.cc b/tensorflow/core/grappler/optimizers/model_pruner.cc
index 3f5e3a8ea3a..5956fea4695 100644
--- a/tensorflow/core/grappler/optimizers/model_pruner.cc
+++ b/tensorflow/core/grappler/optimizers/model_pruner.cc
@@ -401,7 +401,7 @@ Status SplitIdentityNInputs(GraphDef* graph,
     }
 
     const int num_non_control_inputs = NumNonControlInputs(*node);
-    int terminal_second_size = terminal.second.size();
+    const int terminal_second_size = terminal.second.size();
     if (node->attr().count("T") == 0 ||
         node->attr().at("T").list().type_size() != num_non_control_inputs ||
         terminal_second_size >= num_non_control_inputs) {
diff --git a/tensorflow/core/grappler/utils.cc b/tensorflow/core/grappler/utils.cc
index af2bd16328b..151bb9d5d86 100644
--- a/tensorflow/core/grappler/utils.cc
+++ b/tensorflow/core/grappler/utils.cc
@@ -357,8 +357,7 @@ void PermuteNodesInPlace(GraphDef* graph, std::vector<int>* permutation,
     }
     permutation->swap(inv_perm);
   }
-  for (int n = 0, permutation_size = permutation->size();
-       n + 1 < permutation_size; ++n) {
+  for (int n = 0, end = permutation->size(); n + 1 < end; ++n) {
     while (n != (*permutation)[n]) {
       std::size_t r = (*permutation)[n];
       graph->mutable_node()->SwapElements(n, r);
diff --git a/tensorflow/core/grappler/utils/topological_sort.cc b/tensorflow/core/grappler/utils/topological_sort.cc
index a7bef1c7014..9f108b0f396 100644
--- a/tensorflow/core/grappler/utils/topological_sort.cc
+++ b/tensorflow/core/grappler/utils/topological_sort.cc
@@ -81,8 +81,8 @@ Status ComputeTopologicalOrder(
     int ready_node = (*ready_nodes)[front];
     for (int fanout : graph_view.GetFanout(ready_node)) {
       ++num_ready_inputs[fanout];
-      if (num_ready_inputs[fanout] ==
-          static_cast<int>(graph_view.GetFanin(fanout).size())) {
+      const int max_size = graph_view.GetFanin(fanout).size();
+      if (num_ready_inputs[fanout] == max_size) {
         ready_nodes->push_back(fanout);
         ++back;
       }
@@ -96,8 +96,8 @@ Status ComputeTopologicalOrder(
                "at node = "
             << graph.node(back).DebugString();
     for (int i = 0; i < graph_view.num_nodes(); ++i) {
-      if (num_ready_inputs[i] !=
-          static_cast<int>(graph_view.GetFanin(i).size())) {
+      const int max_size = graph_view.GetFanin(i).size();
+      if (num_ready_inputs[i] != max_size) {
        VLOG(1) << "Node not ready: " << graph.node(i).DebugString();
      }
    }
diff --git a/tensorflow/core/profiler/utils/derived_timeline.cc b/tensorflow/core/profiler/utils/derived_timeline.cc
index 43d8305f93c..364ce270439 100644
--- a/tensorflow/core/profiler/utils/derived_timeline.cc
+++ b/tensorflow/core/profiler/utils/derived_timeline.cc
@@ -130,8 +130,7 @@ void DerivedXLineBuilder::ExpandOrAddLevelEvent(const XEvent& event,
 }
 
 void DerivedXLineBuilder::ResetLastEvents(int level) {
-  for (int i = level, iter_limit = last_event_by_level_.size(); i < iter_limit;
-       ++i) {
+  for (int i = level, end = last_event_by_level_.size(); i < end; ++i) {
     last_event_by_level_[i] = absl::nullopt;
   }
   if (level == 0) ResetDependentLines();
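ComputeTopologicalOrder above is Kahn's algorithm: a node is appended to the ready list once the count of its visited inputs reaches its fan-in, and the hunks merely hoist that fan-in size into a signed const int before the equality test. A compact sketch of the same scheme over a hypothetical adjacency list (TopologicalOrder, fanout, and indegree are illustrative names):

    #include <vector>

    // Kahn's algorithm: emit a node once all of its inputs have been emitted.
    // fanout[u] lists the edges u -> v; indegree[v] counts inputs of v.
    std::vector<int> TopologicalOrder(
        const std::vector<std::vector<int>>& fanout, std::vector<int> indegree) {
      std::vector<int> order;
      for (int v = 0, end = indegree.size(); v < end; ++v) {
        if (indegree[v] == 0) order.push_back(v);
      }
      for (int front = 0; front < static_cast<int>(order.size()); ++front) {
        for (int v : fanout[order[front]]) {
          if (--indegree[v] == 0) order.push_back(v);
        }
      }
      return order;  // shorter than indegree.size() iff the graph has a cycle
    }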
diff --git a/tensorflow/core/profiler/utils/xplane_utils.cc b/tensorflow/core/profiler/utils/xplane_utils.cc
index 1fef12580dd..867d1315053 100644
--- a/tensorflow/core/profiler/utils/xplane_utils.cc
+++ b/tensorflow/core/profiler/utils/xplane_utils.cc
@@ -164,7 +164,7 @@ void SortXSpace(XSpace* space) {
 // smaller than these value.
 void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) {
   for (XLine& line : *plane->mutable_lines()) {
-    if (line.timestamp_ns() >= static_cast<long>(start_time_ns)) {
+    if (line.timestamp_ns() >= static_cast<int64>(start_time_ns)) {
       line.set_timestamp_ns(line.timestamp_ns() - start_time_ns);
     }
   }
diff --git a/tensorflow/core/util/bcast.h b/tensorflow/core/util/bcast.h
index 075de84964e..0a2c68d3f82 100644
--- a/tensorflow/core/util/bcast.h
+++ b/tensorflow/core/util/bcast.h
@@ -133,13 +133,13 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],
                         const bool return_flattened_batch_indices) {
   typedef BCastList::Vec Vec;
   bool all_equal = true;
-  int largest_rank = 0;
+  size_t largest_rank = 0;
   output_batch_size_ = 1;
   for (int i = 0; i < N; ++i) {
     if (x[i] != x[0]) {
       all_equal = false;
     }
-    if (static_cast<int>(x[i].size()) > largest_rank) {
+    if (x[i].size() > largest_rank) {
       largest_rank = x[i].size();
     }
   }
@@ -176,7 +176,7 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],
 
   // 1-extend and align all vectors.
   for (int i = 0; i < N; ++i) {
-    if (static_cast<int>(copy[i].size()) < largest_rank) {
+    if (copy[i].size() < largest_rank) {
       copy[i].resize(largest_rank, 1);
     }
   }
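The bcast.h hunks take the opposite route from the rest of the patch: rather than casting each size() to a signed type, the accumulator largest_rank becomes size_t, so every comparison stays unsigned and the casts disappear. A sketch of that design choice with a hypothetical LargestRank helper:

    #include <cstddef>
    #include <vector>

    // Track the maximum rank as size_t so comparisons against
    // vector::size() need no casts at all.
    size_t LargestRank(const std::vector<std::vector<long long>>& shapes) {
      size_t largest_rank = 0;
      for (const auto& shape : shapes) {
        if (shape.size() > largest_rank) largest_rank = shape.size();
      }
      return largest_rank;
    }

This works because a rank is never negative; picking the unsigned type is safe here where a signed index would not be.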
diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 8a3c940a566..c6c5dfb7b37 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -90,7 +90,7 @@ void AddInferredAttr(const string& indentation, const string& attr_name,
 string VectorToTuple(const std::vector<string>& l) {
   if (l.size() == 1) return strings::StrCat("(", l.front(), ",)");
   string ret = "(";
-  for (int i = 0, iter_limit = l.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = l.size(); i < end; ++i) {
     if (i > 0) {
       strings::StrAppend(&ret, ", ");
     }
@@ -102,11 +102,11 @@ string VectorToTuple(const std::vector<string>& l) {
 
 void Unflatten(const string& prefix, const std::vector<string>& output_sizes,
                const string& var, string* result) {
-  for (int i = 0, iter_limit = output_sizes.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = output_sizes.size(); i < end; ++i) {
     if (!output_sizes[i].empty()) {
       strings::StrAppend(result, prefix, var, " = ");
       if (i > 0) strings::StrAppend(result, var, "[:", i, "] + ");
-      if (i + 1 < iter_limit) {
+      if (i + 1 < end) {
         // Special case i == 0 to avoid "0 +" in the generated code.
         if (i == 0) {
           strings::StrAppend(result, "[", var, "[:", output_sizes[i], "]] + ",
@@ -334,8 +334,8 @@ string GenEagerPythonOp::Code() {
   // from the end of params_no_default_, and adding params_no_default_.
   attrs_.reserve(params_no_default_.size() - op_def_.input_arg_size() +
                  params_with_default_.size());
-  for (int i = op_def_.input_arg_size(), iter_limit = params_no_default_.size();
-       i < iter_limit; ++i) {
+  for (int i = op_def_.input_arg_size(), end = params_no_default_.size();
+       i < end; ++i) {
     attrs_.push_back(params_no_default_[i].GetName());
   }
   for (const auto& p : params_with_default_) {
@@ -397,7 +397,7 @@ string GenEagerPythonOp::Code() {
                       parameters_with_defaults.empty() ? "" : ", ", "name=None");
 
   // Add attr_expressions_ for attrs that are params.
-  for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = attrs_.size(); i < end; ++i) {
     const string& attr_name = attrs_[i];
     const string& attr_api_name =
         param_names_[i + op_def_.input_arg_size()].GetRenameTo();
@@ -678,7 +678,7 @@ bool GenEagerPythonOp::GetEagerFunctionSetup(const string& indentation,
     }
   }
 
-  for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = attrs_.size(); i < end; ++i) {
     const string& attr_name = attrs_[i];
     const auto& param = param_names_[i + op_def_.input_arg_size()];
     const auto& attr = *FindAttr(attr_name, op_def_);
diff --git a/tensorflow/python/framework/python_op_gen_internal.cc b/tensorflow/python/framework/python_op_gen_internal.cc
index b345a8da68f..adcde1052fd 100644
--- a/tensorflow/python/framework/python_op_gen_internal.cc
+++ b/tensorflow/python/framework/python_op_gen_internal.cc
@@ -562,12 +562,12 @@ string GenPythonOp::Code() {
   // from the end of args_no_default, and adding args_no_default.
   attrs_.reserve(params_no_default.size() - op_def_.input_arg_size() +
                  params_with_default.size());
-  for (int i = op_def_.input_arg_size(), iter_limit = params_no_default.size();
-       i < iter_limit; ++i) {
+  for (int i = op_def_.input_arg_size(), end = params_no_default.size();
+       i < end; ++i) {
     attrs_.push_back(params_no_default[i].GetName());
   }
-  for (const auto& param : params_with_default) {
-    attrs_.push_back(param.GetName());
+  for (int i = 0, end = params_with_default.size(); i < end; ++i) {
+    attrs_.push_back(params_with_default[i].GetName());
   }
 
   param_names_.reserve(params_no_default.size() + params_with_default.size());
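The python_op_gen hunks rename the cached bound to end, and the last one swaps a range-for for an indexed loop; an index is the natural form in this file because the same i also addresses a parallel array (param_names_[i + op_def_.input_arg_size()] above). A sketch of that pattern with hypothetical parallel vectors (RenameAll, names, suffixes):

    #include <string>
    #include <vector>

    // When two vectors are kept in lockstep, an indexed loop with a cached
    // signed bound reads both without sign-compare warnings. Assumes
    // suffixes has at least as many entries as names.
    std::vector<std::string> RenameAll(const std::vector<std::string>& names,
                                       const std::vector<std::string>& suffixes) {
      std::vector<std::string> renamed;
      renamed.reserve(names.size());
      for (int i = 0, end = names.size(); i < end; ++i) {
        renamed.push_back(names[i] + "_" + suffixes[i]);
      }
      return renamed;
    }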