Merge pull request #40903 from tg-at-google:sign-compare-warning-fixes-batch-6

PiperOrigin-RevId: 321460979
Change-Id: I2598e9c48b3d31df39bc1edb0bc4875d9e547c46

commit c2f5a7f68d
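Every hunk in this commit attacks the same -Wsign-compare warning (enabled by -Wextra on GCC and Clang): comparing a signed index such as int or int64 against the unsigned size_t returned by size(). Most fixes hoist the size into a signed local once and compare signed-to-signed; the diff also renames earlier ad-hoc bound names (iter_limit, candidates_size, node_input_size) to a uniform end. As a standalone illustration of the hoisting pattern, a minimal sketch with made-up names, not code from this commit:

    #include <cstdint>
    #include <vector>

    // Before: for (int i = 0; i < dims.size(); ++i) compares a signed int
    // against an unsigned size_t and warns under -Wsign-compare.
    // After: hoist size() into a signed bound once, then compare int to int.
    int64_t CountInRange(const std::vector<int64_t>& dims, int64_t rank) {
      int64_t count = 0;
      for (int i = 0, end = dims.size(); i < end; ++i) {
        if (dims[i] >= 0 && dims[i] < rank) ++count;
      }
      return count;
    }

A side benefit is that size() is evaluated once per loop instead of on every iteration.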
@@ -441,7 +441,8 @@ REGISTER_OP("XlaReduce")
       auto dim_in_range = [rank](int64 dim) {
         return dim >= 0 && dim < rank;
       };
-      if (rank < dimensions_to_reduce.size() ||
+      const int dimensions_to_reduce_size = dimensions_to_reduce.size();
+      if (rank < dimensions_to_reduce_size ||
           dims_set.size() != dimensions_to_reduce.size() ||
           !absl::c_all_of(dimensions_to_reduce, dim_in_range)) {
         return errors::InvalidArgument(
@@ -62,14 +62,14 @@ InferenceContext::InferenceContext(
   }
   std::vector<std::unique_ptr<std::vector<ShapeAndType>>> handle_data(
       input_shapes.size());
-  for (int i = 0; i < input_handle_shapes_and_types.size(); ++i) {
+  for (int i = 0, end = input_handle_shapes_and_types.size(); i < end; ++i) {
     const auto& v = input_handle_shapes_and_types[i];
     if (v == nullptr) {
       continue;
     }
     handle_data[i].reset(new std::vector<ShapeAndType>(v->size()));
     auto& new_v = *handle_data[i];
-    for (int j = 0; j < v->size(); ++j) {
+    for (int j = 0, end = v->size(); j < end; ++j) {
       const auto& p = (*v)[j];
       construction_status_.Update(
           MakeShapeFromPartialTensorShape(p.first, &new_v[j].shape));
@@ -123,11 +123,12 @@ Status InferenceContext::set_output(StringPiece output_name,
   } else {
     const int start = result->second.first;
     const int size = result->second.second - start;
-    if (size != shapes.size()) {
+    const int shapes_size = shapes.size();
+    if (size != shapes_size) {
       return errors::InvalidArgument("Must have exactly ", shapes.size(),
                                      " shapes.");
     }
-    for (int i = 0; i < size; ++i) {
+    for (int i = 0; i < shapes_size; ++i) {
       outputs_[i + start] = shapes[i];
     }
   }
@@ -181,7 +182,8 @@ void InferenceContext::PreInputInit(
 }

 Status InferenceContext::ExpandOutputs(int new_output_size) {
-  if (new_output_size < outputs_.size()) {
+  const int outputs_size = outputs_.size();
+  if (new_output_size < outputs_size) {
     return errors::InvalidArgument("Trying to reduce number of outputs of op.");
   }
   outputs_.resize(new_output_size, nullptr);
@@ -209,8 +211,8 @@ void InferenceContext::PostInputInit(
     }
     input_handle_shapes_and_types_ = std::move(input_handle_data);
   }
-
-  if (inputs_.size() != num_inputs_from_node_def) {
+  const int inputs_size = inputs_.size();
+  if (inputs_size != num_inputs_from_node_def) {
     construction_status_ = errors::InvalidArgument(
         "Wrong number of inputs passed: ", inputs_.size(), " while ",
         num_inputs_from_node_def, " expected based on NodeDef");
@@ -718,7 +720,8 @@ Status InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape(
   TF_RETURN_IF_ERROR(WithRankAtMost(input(input_idx), 1, &input_shape));

   requested_input_tensor_as_partial_shape_[input_idx] = true;
-  if (input_idx < input_tensors_as_shapes_.size() &&
+  const int input_tensors_as_shapes_size = input_tensors_as_shapes_.size();
+  if (input_idx < input_tensors_as_shapes_size &&
       input_tensors_as_shapes_[input_idx].IsSet() &&
       RankKnown(input_tensors_as_shapes_[input_idx])) {
     *out = input_tensors_as_shapes_[input_idx];
@@ -736,7 +739,8 @@ Status InferenceContext::MakeShapeFromShapeTensor(int input_idx,
   TF_RETURN_IF_ERROR(WithRank(input(input_idx), 1, &input_shape));

   requested_input_tensor_as_partial_shape_[input_idx] = true;
-  if (input_idx < input_tensors_as_shapes_.size() &&
+  const int input_tensors_as_shapes_size = input_tensors_as_shapes_.size();
+  if (input_idx < input_tensors_as_shapes_size &&
       input_tensors_as_shapes_[input_idx].IsSet() &&
       RankKnown(input_tensors_as_shapes_[input_idx])) {
     *out = input_tensors_as_shapes_[input_idx];
@@ -1099,14 +1103,16 @@ Status InferenceContext::AttachContext(const Status& status) {
   std::vector<string> input_from_tensors_str;
   std::vector<string> input_from_tensors_as_shape_str;
   input_from_tensors_as_shape_str.reserve(inputs_.size());
-  for (int i = 0; i < inputs_.size(); ++i) {
+  for (int i = 0, end = inputs_.size(); i < end; ++i) {
+    const int input_tensors_as_shapes_size = input_tensors_as_shapes_.size();
+    const int input_tensors_size = input_tensors_.size();
     if (requested_input_tensor_as_partial_shape_[i] &&
-        i < input_tensors_as_shapes_.size() &&
+        i < input_tensors_as_shapes_size &&
         input_tensors_as_shapes_[i].IsSet() &&
         RankKnown(input_tensors_as_shapes_[i])) {
       input_from_tensors_as_shape_str.push_back(strings::StrCat(
           "input[", i, "] = ", DebugString(input_tensors_as_shapes_[i])));
-    } else if (requested_input_tensor_[i] && i < input_tensors_.size() &&
+    } else if (requested_input_tensor_[i] && i < input_tensors_size &&
               input_tensors_[i] != nullptr) {
       input_from_tensors_str.push_back(strings::StrCat(
           "input[", i, "] = <",
@@ -1140,7 +1146,7 @@ bool InferenceContext::MergeHandleShapesAndTypes(
   }
   std::vector<ShapeAndType> new_values(shapes_and_types.size());
   bool refined = false;
-  for (int i = 0; i < shapes_and_types.size(); ++i) {
+  for (int i = 0, end = shapes_and_types.size(); i < end; ++i) {
     const ShapeAndType& existing = (*to_update)[i];
     if (shapes_and_types[i].dtype == existing.dtype) {
       new_values[i].dtype = existing.dtype;
@@ -1164,7 +1170,7 @@ bool InferenceContext::MergeHandleShapesAndTypes(
   if (!refined) {
     return false;
   }
-  for (int i = 0; i < new_values.size(); ++i) {
+  for (int i = 0, end = new_values.size(); i < end; ++i) {
     (*to_update)[i] = new_values[i];
   }
   return true;
@@ -1199,7 +1205,7 @@ bool InferenceContext::RelaxHandleShapesAndMergeTypes(
     return false;
   }
   std::vector<ShapeAndType> new_values(shapes_and_types.size());
-  for (int i = 0; i < shapes_and_types.size(); ++i) {
+  for (int i = 0, end = shapes_and_types.size(); i < end; ++i) {
     const ShapeAndType& existing = (*to_update)[i];
     if (shapes_and_types[i].dtype == existing.dtype) {
       new_values[i].dtype = existing.dtype;
@@ -73,8 +73,7 @@ class UniqueNodes {
    if (it == memoized_signatures_.end()) return;

    std::vector<NodeDef*>& candidates = rep_[it->second];
-   for (int i = 0, candidates_size = candidates.size(); i < candidates_size;
-        ++i) {
+   for (int i = 0, end = candidates.size(); i < end; ++i) {
      if (candidates[i] == node) {
        std::swap(candidates[i], candidates[candidates.size() - 1]);
        candidates.resize(candidates.size() - 1);
@@ -63,8 +63,7 @@ Status DebugStripper::Optimize(Cluster* cluster, const GrapplerItem& item,
       node.mutable_attr()->swap(new_attr);
       // As Identity op only takes one input, mark redundant inputs as control
       // input.
-      for (int i = 1, node_input_size = node.input_size(); i < node_input_size;
-           ++i) {
+      for (int i = 1, end = node.input_size(); i < end; ++i) {
         if (!IsControlInput(node.input(i))) {
           *node.mutable_input(i) = AsControlDependency(NodeName(node.input(i)));
         }
@@ -401,7 +401,7 @@ Status SplitIdentityNInputs(GraphDef* graph,
     }

     const int num_non_control_inputs = NumNonControlInputs(*node);
-    int terminal_second_size = terminal.second.size();
+    const int terminal_second_size = terminal.second.size();
     if (node->attr().count("T") == 0 ||
         node->attr().at("T").list().type_size() != num_non_control_inputs ||
         terminal_second_size >= num_non_control_inputs) {
@@ -357,8 +357,7 @@ void PermuteNodesInPlace(GraphDef* graph, std::vector<int>* permutation,
     }
     permutation->swap(inv_perm);
   }
-  for (int n = 0, permutation_size = permutation->size();
-       n + 1 < permutation_size; ++n) {
+  for (int n = 0, end = permutation->size(); n + 1 < end; ++n) {
     while (n != (*permutation)[n]) {
       std::size_t r = (*permutation)[n];
       graph->mutable_node()->SwapElements(n, r);
@@ -81,8 +81,8 @@ Status ComputeTopologicalOrder(
     int ready_node = (*ready_nodes)[front];
     for (int fanout : graph_view.GetFanout(ready_node)) {
       ++num_ready_inputs[fanout];
-      if (num_ready_inputs[fanout] ==
-          static_cast<int>(graph_view.GetFanin(fanout).size())) {
+      const int max_size = graph_view.GetFanin(fanout).size();
+      if (num_ready_inputs[fanout] == max_size) {
         ready_nodes->push_back(fanout);
         ++back;
       }
@@ -96,8 +96,8 @@ Status ComputeTopologicalOrder(
                "at node = "
             << graph.node(back).DebugString();
   for (int i = 0; i < graph_view.num_nodes(); ++i) {
-    if (num_ready_inputs[i] !=
-        static_cast<int>(graph_view.GetFanin(i).size())) {
+    const int max_size = graph_view.GetFanin(i).size();
+    if (num_ready_inputs[i] != max_size) {
       VLOG(1) << "Node not ready: " << graph.node(i).DebugString();
     }
   }
@@ -130,8 +130,7 @@ void DerivedXLineBuilder::ExpandOrAddLevelEvent(const XEvent& event,
 }

 void DerivedXLineBuilder::ResetLastEvents(int level) {
-  for (int i = level, iter_limit = last_event_by_level_.size(); i < iter_limit;
-       ++i) {
+  for (int i = level, end = last_event_by_level_.size(); i < end; ++i) {
     last_event_by_level_[i] = absl::nullopt;
   }
   if (level == 0) ResetDependentLines();
@@ -164,7 +164,7 @@ void SortXSpace(XSpace* space) {
 // smaller than these value.
 void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) {
   for (XLine& line : *plane->mutable_lines()) {
-    if (line.timestamp_ns() >= static_cast<long int>(start_time_ns)) {
+    if (line.timestamp_ns() >= static_cast<int64>(start_time_ns)) {
      line.set_timestamp_ns(line.timestamp_ns() - start_time_ns);
    }
  }
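The NormalizeTimestamps hunk above is the one fix in this batch that changes a cast target rather than a loop bound. long int is only 32 bits on LLP64 platforms such as 64-bit Windows, so casting a uint64 nanosecond timestamp to it can truncate, while TensorFlow's int64 is a fixed 64-bit type. A self-contained sketch of the failure mode, with illustrative values not taken from the commit:

    #include <cstdint>
    #include <iostream>

    int main() {
      // Any span past ~2.1 seconds in nanoseconds exceeds INT32_MAX.
      uint64_t start_time_ns = 3000000000ULL;
      // A fixed-width 64-bit cast preserves the value: prints 3000000000.
      std::cout << static_cast<int64_t>(start_time_ns) << "\n";
      // On LLP64 (e.g. MSVC), long is 32 bits wide, so
      // static_cast<long>(start_time_ns) would wrap to a negative value
      // and corrupt the >= comparison in the original code.
      return 0;
    }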
@@ -133,13 +133,13 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],
                         const bool return_flattened_batch_indices) {
   typedef BCastList::Vec Vec;
   bool all_equal = true;
-  int largest_rank = 0;
+  size_t largest_rank = 0;
   output_batch_size_ = 1;
   for (int i = 0; i < N; ++i) {
     if (x[i] != x[0]) {
       all_equal = false;
     }
-    if (static_cast<int>(x[i].size()) > largest_rank) {
+    if (x[i].size() > largest_rank) {
       largest_rank = x[i].size();
     }
   }
@@ -176,7 +176,7 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],

   // 1-extend and align all vectors.
   for (int i = 0; i < N; ++i) {
-    if (static_cast<int>(copy[i].size()) < largest_rank) {
+    if (copy[i].size() < largest_rank) {
       copy[i].resize(largest_rank, 1);
     }
   }
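The two BCastList hunks take the opposite route from the rest of the batch: rather than narrowing size() into a signed local, the running maximum itself becomes size_t, so every comparison is unsigned-vs-unsigned and the static_cast<int> wrappers are simply dropped. A minimal sketch of that direction of fix, using a hypothetical helper rather than the TF source:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Keep the largest rank in an unsigned counter so comparisons against
    // vector::size() need no cast in either direction.
    size_t LargestRank(const std::vector<std::vector<int64_t>>& shapes) {
      size_t largest_rank = 0;
      for (const auto& shape : shapes) {
        if (shape.size() > largest_rank) largest_rank = shape.size();
      }
      return largest_rank;
    }

Which direction to pick is mostly about blast radius: here largest_rank is a local that only feeds resize() and size() comparisons, so flipping it to unsigned is the smaller change.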
@@ -90,7 +90,7 @@ void AddInferredAttr(const string& indentation, const string& attr_name,
 string VectorToTuple(const std::vector<string>& l) {
   if (l.size() == 1) return strings::StrCat("(", l.front(), ",)");
   string ret = "(";
-  for (int i = 0, iter_limit = l.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = l.size(); i < end; ++i) {
     if (i > 0) {
       strings::StrAppend(&ret, ", ");
     }
@@ -102,11 +102,11 @@ string VectorToTuple(const std::vector<string>& l) {

 void Unflatten(const string& prefix, const std::vector<string>& output_sizes,
                const string& var, string* result) {
-  for (int i = 0, iter_limit = output_sizes.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = output_sizes.size(); i < end; ++i) {
     if (!output_sizes[i].empty()) {
       strings::StrAppend(result, prefix, var, " = ");
       if (i > 0) strings::StrAppend(result, var, "[:", i, "] + ");
-      if (i + 1 < iter_limit) {
+      if (i + 1 < end) {
         // Special case i == 0 to avoid "0 +" in the generated code.
         if (i == 0) {
           strings::StrAppend(result, "[", var, "[:", output_sizes[i], "]] + ",
@@ -334,8 +334,8 @@ string GenEagerPythonOp::Code() {
   // from the end of params_no_default_, and adding params_no_default_.
   attrs_.reserve(params_no_default_.size() - op_def_.input_arg_size() +
                  params_with_default_.size());
-  for (int i = op_def_.input_arg_size(), iter_limit = params_no_default_.size();
-       i < iter_limit; ++i) {
+  for (int i = op_def_.input_arg_size(), end = params_no_default_.size();
+       i < end; ++i) {
     attrs_.push_back(params_no_default_[i].GetName());
   }
   for (const auto& p : params_with_default_) {
@@ -397,7 +397,7 @@ string GenEagerPythonOp::Code() {
       parameters_with_defaults.empty() ? "" : ", ", "name=None");

   // Add attr_expressions_ for attrs that are params.
-  for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = attrs_.size(); i < end; ++i) {
     const string& attr_name = attrs_[i];
     const string& attr_api_name =
         param_names_[i + op_def_.input_arg_size()].GetRenameTo();
@@ -678,7 +678,7 @@ bool GenEagerPythonOp::GetEagerFunctionSetup(const string& indentation,
     }
   }

-  for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = attrs_.size(); i < end; ++i) {
     const string& attr_name = attrs_[i];
     const auto& param = param_names_[i + op_def_.input_arg_size()];
     const auto& attr = *FindAttr(attr_name, op_def_);
@@ -562,12 +562,12 @@ string GenPythonOp::Code() {
   // from the end of args_no_default, and adding args_no_default.
   attrs_.reserve(params_no_default.size() - op_def_.input_arg_size() +
                  params_with_default.size());
-  for (int i = op_def_.input_arg_size(), iter_limit = params_no_default.size();
-       i < iter_limit; ++i) {
+  for (int i = op_def_.input_arg_size(), end = params_no_default.size();
+       i < end; ++i) {
     attrs_.push_back(params_no_default[i].GetName());
   }
-  for (const auto& param : params_with_default) {
-    attrs_.push_back(param.GetName());
+  for (int i = 0, end = params_with_default.size(); i < end; ++i) {
+    attrs_.push_back(params_with_default[i].GetName());
   }

   param_names_.reserve(params_no_default.size() + params_with_default.size());