[-Wsign-compare] warning fixes batch 4

This commit is contained in:
Tare Gaskin 2020-06-24 21:58:00 +00:00
parent 6dbeb8d948
commit 3de3dd0018
15 changed files with 46 additions and 38 deletions

View File

@ -51,7 +51,7 @@ NameUniquer::NameUniquer(const string& separator) {
if (!absl::ascii_isalpha(c) && c != '_') {
result[0] = '_';
}
for (int i = 1; i < result.length(); i++) {
for (int i = 1, iter_limit = result.length(); i < iter_limit; i++) {
if (!IsAllowed(result[i])) {
result[i] = '_';
}

View File

@ -509,7 +509,8 @@ Status InputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector input_types;
for (const auto& arg : op_def.input_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &input_types));
if (input_types.size() > input_port) {
int input_types_size = input_types.size();
if (input_types_size > input_port) {
const DataType dtype = input_types[input_port];
*input_type = dtype;
return Status::OK();
@ -532,7 +533,8 @@ Status OutputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector output_types;
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &output_types));
if (output_types.size() > output_port) {
int output_types_size = output_types.size();
if (output_types_size > output_port) {
const DataType dtype = output_types[output_port];
*output_type = dtype;
return Status::OK();

View File

@ -661,7 +661,7 @@ Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op) {
"' vs. '", new_in_sig, "'");
VALIDATE(old_in_ref.size() == new_in_ref.size(), // Should not happen
"Unexpected change in input ref lists.");
for (int i = 0; i < old_in_ref.size(); ++i) {
for (int i = 0, iter_limit = old_in_ref.size(); i < iter_limit; ++i) {
// Allowed to remove "ref" from an input (or leave it unchanged).
VALIDATE(old_in_ref[i] || !new_in_ref[i], "Input ", i,
" changed from non-ref to ref");
@ -677,7 +677,7 @@ Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op) {
old_out_sig, "' vs. '", new_out_sig, "'");
VALIDATE(old_out_ref.size() == new_out_ref.size(), // Should not happen
"Unexpected change in output ref lists");
for (int i = 0; i < old_out_ref.size(); ++i) {
for (int i = 0, iter_limit = old_out_ref.size(); i < iter_limit; ++i) {
// Allowed to add "ref" to an output (or leave it unchanged).
VALIDATE(!old_out_ref[i] || new_out_ref[i], "Output ", i,
" changed from ref to non-ref");

View File

@ -115,7 +115,7 @@ Status RunShortCircuit(const ShortCircuitInfo& info,
const CapturedFunction* const func,
std::vector<Tensor>* rets) {
VLOG(3) << "Running function " << func->func().name() << " short circuit";
size_t num_args = args.size();
const int num_args = args.size();
rets->reserve(info.indices.size());
for (size_t i = 0; i < info.indices.size(); ++i) {
if (info.indices[i] < num_args) {
@ -131,7 +131,7 @@ Status RunShortCircuit(const ShortCircuitInfo& info, std::vector<Tensor>&& args,
const CapturedFunction* const func,
std::vector<Tensor>* rets) {
VLOG(3) << "Running function " << func->func().name() << " short circuit";
size_t num_args = args.size();
const int num_args = args.size();
rets->reserve(info.indices.size());
for (size_t i = 0; i < info.indices.size(); ++i) {
if (info.indices[i] < num_args) {
@ -198,7 +198,7 @@ Status CreateShortCircuitInfo(OpKernelConstruction* ctx,
last_use[indices[i]] = i;
}
can_move.resize(indices.size());
for (size_t i = 0; i < indices.size(); ++i) {
for (int i = 0, iter_limit = indices.size(); i < iter_limit; ++i) {
can_move[i] = last_use[indices[i]] == i;
}
}
@ -278,11 +278,12 @@ class CallFrameBase : public CallFrameInterface {
// Callee methods.
Status SetRetval(int index, const Tensor& val) override {
if (index < retvals_.size() && val.dtype() == ret_types_[index] &&
const int retvals_size_ = retvals_.size();
if (index < retvals_size_ && val.dtype() == ret_types_[index] &&
!retvals_[index]) {
retvals_[index] = val;
return Status::OK();
} else if (index >= retvals_.size()) {
} else if (index >= retvals_size_) {
return errors::InvalidArgument("Return value ", index,
" is out of range.");
} else if (val.dtype() != ret_types_[index]) {
@ -317,10 +318,12 @@ class OwnedArgsCallFrame : public CallFrameBase {
// Callee methods.
Status GetArg(int index, const Tensor** val) override {
if (index < args_.size()) {
const int args_size_ = args_.size();
const int captured_inputs_size_ = captured_inputs_->size();
if (index < args_size_) {
*val = &args_[index];
return Status::OK();
} else if (index < args_.size() + captured_inputs_->size()) {
} else if (index < args_size_ + captured_inputs_size_) {
*val = &(*captured_inputs_)[index - args_size_];
return Status::OK();
} else {
@ -336,7 +339,7 @@ class OwnedArgsCallFrame : public CallFrameBase {
*val = std::move(args_[index]);
}
bool CanConsumeArg(int index) const override {
return index >= 0 && index < args_.size();
return index >= 0 && index < static_cast<int>(args_.size());
}
private:
@ -359,11 +362,13 @@ class BorrowedArgsCallFrame : public CallFrameBase {
// Callee methods.
Status GetArg(int index, const Tensor** val) override {
if (index < args_.size()) {
const int args_size_ = args_.size();
const int captured_inputs_size_ = captured_inputs_->size();
if (index < args_size_) {
*val = &args_[index];
return Status::OK();
} else if (index < args_.size() + captured_inputs_->size()) {
*val = &(*captured_inputs_)[index - args_.size()];
} else if (index < args_size_ + captured_inputs_size_) {
*val = &(*captured_inputs_)[index - args_size_];
return Status::OK();
} else {
return errors::InvalidArgument("Argument ", index, " is out of range.");
@ -613,7 +618,7 @@ Status CapturedFunction::Instantiate(
}
}
for (size_t i = 0; i < fdef->signature().output_arg_size(); ++i) {
for (int i = 0, iter_limit = fdef->signature().output_arg_size(); i < iter_limit; ++i) {
inst_opts.output_devices.push_back(inst_opts.target);
}

View File

@ -51,8 +51,8 @@ class SingleThreadedExecutorImpl : public Executor {
std::vector<Node*> ordered_nodes;
ordered_nodes.reserve(graph.num_nodes());
GetReversePostOrder(graph, &ordered_nodes);
if (ordered_nodes.size() != graph.num_nodes()) {
int ordered_nodes_size = ordered_nodes.size();
if (ordered_nodes_size != graph.num_nodes()) {
return errors::InvalidArgument("Graph had ", graph.num_nodes(),
" but reverse post-order had ",
ordered_nodes.size());

View File

@ -74,7 +74,7 @@ Status InitializableLookupTable::Initialize(InitTableIterator& iter) {
Status InitializableLookupTable::AreEntriesSame(const InitTableIterator& iter,
bool* result) {
*result = iter.total_size() == size();
*result = static_cast<size_t>(iter.total_size()) == size();
return Status::OK();
}

View File

@ -132,7 +132,7 @@ class TextFileLineIterator
std::vector<string> tokens;
if (!ignore_split_) {
tokens = str_util::Split(line, delimiter_);
if (std::max(key_index_, value_index_) >= tokens.size()) {
if (static_cast<size_t>(std::max(key_index_, value_index_)) >= tokens.size()) {
status_ = errors::InvalidArgument(
"Invalid number of columns in ", filename_, " line ", next_id_,
" (", line, ") : expected ", std::max(key_index_, value_index_),

View File

@ -37,8 +37,8 @@ std::vector<const OpMetrics*> SortedOpMetricsDb(const OpMetricsDb& metrics_db,
return std::make_tuple(a->self_time_ps(), b->name()) >
std::make_tuple(b->self_time_ps(), a->name());
};
if (max_records != -1 && result.size() > max_records) {
int result_size = result.size();
if (max_records != -1 && result_size > max_records) {
absl::c_partial_sort(result, result.begin() + max_records, comp);
result.resize(max_records);
} else {

View File

@ -206,7 +206,7 @@ class TfFunctionExecutions {
std::string DebugString() const {
std::string result = "\nActivations:\n";
for (auto i = 0; i < activations_.size(); i++) {
for (int i = 0, iter_limit = activations_.size(); i < iter_limit; i++) {
absl::StrAppend(&result, "[", i, "] ", activations_[i].DebugString(),
"\n");
}

View File

@ -128,7 +128,7 @@ std::vector<EventTypeSpan> ToNonOverlappedEvents(
if (event_boundaries.empty()) return result;
result.reserve(event_boundaries.size());
PriorityTracker priority_tracker;
for (int64 i = 0; i < (event_boundaries.size() - 1); i++) {
for (int64 i = 0, iter_limit = (event_boundaries.size() - 1); i < iter_limit; i++) {
EventType highest_priority = priority_tracker.Update(event_boundaries[i]);
result.push_back({highest_priority, Timespan::FromEndPoints(
event_boundaries[i].time_ps,
@ -325,12 +325,12 @@ Timespan StepDetails::StepTime() const {
std::string StepDetails::DebugString() const {
std::string result = "([";
for (int i = 0; i < markers_.size(); i++) {
for (int i = 0, iter_limit = markers_.size(); i < iter_limit; i++) {
if (i > 0) absl::StrAppend(&result, ", ");
absl::StrAppend(&result, PrintStepMarker(markers_[i]));
}
absl::StrAppend(&result, "], [");
for (int i = 0; i < events_.size(); i++) {
for (int i = 0, iter_limit = events_.size(); i < iter_limit; i++) {
if (i > 0) absl::StrAppend(&result, ", ");
absl::StrAppend(&result, PrintEventTypeSpan(events_[i]));
}

View File

@ -37,7 +37,8 @@ Status CheckValidPadding(Padding padding_type,
const std::vector<int64>& explicit_paddings,
int num_dims, TensorFormat data_format) {
if (padding_type == Padding::EXPLICIT) {
if (explicit_paddings.size() != 2 * num_dims) {
int explicit_paddings_size = explicit_paddings.size();
if (explicit_paddings_size != 2 * num_dims) {
return errors::InvalidArgument(
"explicit_paddings attribute must contain ", 2 * num_dims,
" values, but got: ", explicit_paddings.size());

View File

@ -146,7 +146,7 @@ class SessionLogger {
// Build an index from fetch tensor name to first index in
// output_tensor_names.
std::unordered_map<string, int> output_name_to_offset;
for (int i = 0; i < output_tensor_names.size(); ++i) {
for (int i = 0, iter_limit = output_tensor_names.size(); i < iter_limit; ++i) {
const string& name = output_tensor_names[i];
if (output_name_to_offset.insert(std::make_pair(name, i)).second) {
req->add_fetch(name);

View File

@ -63,7 +63,7 @@ void AddInferredAttr(const string& indentation, const string& attr_name,
string VectorToTuple(const std::vector<string>& l) {
if (l.size() == 1) return strings::StrCat("(", l.front(), ",)");
string ret = "(";
for (int i = 0; i < l.size(); ++i) {
for (int i = 0, iter_limit = l.size(); i < iter_limit; ++i) {
if (i > 0) {
strings::StrAppend(&ret, ", ");
}
@ -75,11 +75,11 @@ string VectorToTuple(const std::vector<string>& l) {
void Unflatten(const string& prefix, const std::vector<string>& output_sizes,
const string& var, string* result) {
for (int i = 0; i < output_sizes.size(); ++i) {
for (int i = 0, iter_limit = output_sizes.size(); i < iter_limit; ++i) {
if (!output_sizes[i].empty()) {
strings::StrAppend(result, prefix, var, " = ");
if (i > 0) strings::StrAppend(result, var, "[:", i, "] + ");
if (i + 1 < output_sizes.size()) {
if (i + 1 < iter_limit) {
// Special case i == 0 to avoid "0 +" in the generated code.
if (i == 0) {
strings::StrAppend(result, "[", var, "[:", output_sizes[i], "]] + ",
@ -295,7 +295,7 @@ string GenEagerPythonOp::Code() {
// from the end of params_no_default_, and adding params_no_default_.
attrs_.reserve(params_no_default_.size() - op_def_.input_arg_size() +
params_with_default_.size());
for (int i = op_def_.input_arg_size(); i < params_no_default_.size(); ++i) {
for (int i = op_def_.input_arg_size(), iter_limit = params_no_default_.size(); i < iter_limit; ++i) {
attrs_.push_back(params_no_default_[i].GetName());
}
for (const auto& p : params_with_default_) {
@ -331,7 +331,7 @@ string GenEagerPythonOp::Code() {
parameters_with_defaults.empty() ? "" : ", ", "name=None");
// Add attr_expressions_ for attrs that are params.
for (int i = 0; i < attrs_.size(); ++i) {
for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) {
const string& attr_name = attrs_[i];
const string& attr_api_name =
param_names_[i + op_def_.input_arg_size()].GetRenameTo();
@ -522,7 +522,7 @@ bool GenEagerPythonOp::GetEagerFunctionSetup(const string& indentation,
}
}
for (int i = 0; i < attrs_.size(); ++i) {
for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) {
const string& attr_name = attrs_[i];
const auto& param = param_names_[i + op_def_.input_arg_size()];
const auto& attr = *FindAttr(attr_name, op_def_);

View File

@ -561,10 +561,10 @@ string GenPythonOp::Code() {
// from the end of args_no_default, and adding args_no_default.
attrs_.reserve(params_no_default.size() - op_def_.input_arg_size() +
params_with_default.size());
for (int i = op_def_.input_arg_size(); i < params_no_default.size(); ++i) {
for (int i = op_def_.input_arg_size(), iter_limit = params_no_default.size(); i < iter_limit; ++i) {
attrs_.push_back(params_no_default[i].GetName());
}
for (int i = 0; i < params_with_default.size(); ++i) {
for (int i = 0, iter_limit = params_with_default.size(); i < iter_limit; ++i) {
attrs_.push_back(params_with_default[i].GetName());
}

View File

@ -127,7 +127,7 @@ bool ThreadDimOk(const DeviceDescription &device_description,
const ThreadDim &thread_dim) {
auto total_threads = thread_dim.x * thread_dim.y * thread_dim.z;
auto threads_per_block_limit = device_description.threads_per_block_limit();
if (total_threads > threads_per_block_limit) {
if (total_threads > static_cast<long long unsigned int>(threads_per_block_limit)) {
VLOG(2) << "exceeded total-thread-per-block limit: " << total_threads
<< " vs limit " << threads_per_block_limit;
return false;