From 3de3dd00180dd4205ed5a4496c16e1c68f6957f1 Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Wed, 24 Jun 2020 21:58:00 +0000 Subject: [PATCH] [-Wsign-compare] warning fixes batch 4 --- .../compiler/xla/service/name_uniquer.cc | 2 +- tensorflow/core/framework/node_def_util.cc | 6 ++-- tensorflow/core/framework/op_def_util.cc | 4 +-- .../core/kernels/data/captured_function.cc | 29 +++++++++++-------- .../kernels/data/single_threaded_executor.cc | 4 +-- .../kernels/initializable_lookup_table.cc | 2 +- tensorflow/core/kernels/lookup_util.cc | 2 +- .../profiler/convert/op_metrics_to_record.cc | 4 +-- .../convert/xplane_to_tf_functions.cc | 2 +- tensorflow/core/profiler/utils/event_span.cc | 6 ++-- tensorflow/core/util/padding.cc | 3 +- tensorflow/python/client/session_ref.cc | 2 +- tensorflow/python/framework/python_op_gen.cc | 12 ++++---- .../framework/python_op_gen_internal.cc | 4 +-- .../stream_executor/device_description.cc | 2 +- 15 files changed, 46 insertions(+), 38 deletions(-) diff --git a/tensorflow/compiler/xla/service/name_uniquer.cc b/tensorflow/compiler/xla/service/name_uniquer.cc index 70742b67a28..cba82975e59 100644 --- a/tensorflow/compiler/xla/service/name_uniquer.cc +++ b/tensorflow/compiler/xla/service/name_uniquer.cc @@ -51,7 +51,7 @@ NameUniquer::NameUniquer(const string& separator) { if (!absl::ascii_isalpha(c) && c != '_') { result[0] = '_'; } - for (int i = 1; i < result.length(); i++) { + for (int i = 1, iter_limit = result.length(); i < iter_limit; i++) { if (!IsAllowed(result[i])) { result[i] = '_'; } diff --git a/tensorflow/core/framework/node_def_util.cc b/tensorflow/core/framework/node_def_util.cc index 0a26ceca66f..ca67cb535f9 100644 --- a/tensorflow/core/framework/node_def_util.cc +++ b/tensorflow/core/framework/node_def_util.cc @@ -509,7 +509,8 @@ Status InputTypeForNode(const NodeDef& node_def, const OpDef& op_def, DataTypeVector input_types; for (const auto& arg : op_def.input_arg()) { TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, 
&input_types)); - if (input_types.size() > input_port) { + int input_types_size = input_types.size(); + if (input_types_size > input_port) { const DataType dtype = input_types[input_port]; *input_type = dtype; return Status::OK(); @@ -532,7 +533,8 @@ Status OutputTypeForNode(const NodeDef& node_def, const OpDef& op_def, DataTypeVector output_types; for (const auto& arg : op_def.output_arg()) { TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &output_types)); - if (output_types.size() > output_port) { + int output_types_size = output_types.size(); + if (output_types_size > output_port) { const DataType dtype = output_types[output_port]; *output_type = dtype; return Status::OK(); diff --git a/tensorflow/core/framework/op_def_util.cc b/tensorflow/core/framework/op_def_util.cc index 115c24e1968..bb0c6b8757f 100644 --- a/tensorflow/core/framework/op_def_util.cc +++ b/tensorflow/core/framework/op_def_util.cc @@ -661,7 +661,7 @@ Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op) { "' vs. '", new_in_sig, "'"); VALIDATE(old_in_ref.size() == new_in_ref.size(), // Should not happen "Unexpected change in input ref lists."); - for (int i = 0; i < old_in_ref.size(); ++i) { + for (int i = 0, iter_limit = old_in_ref.size(); i < iter_limit; ++i) { // Allowed to remove "ref" from an input (or leave it unchanged). VALIDATE(old_in_ref[i] || !new_in_ref[i], "Input ", i, " changed from non-ref to ref"); @@ -677,7 +677,7 @@ Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op) { old_out_sig, "' vs. '", new_out_sig, "'"); VALIDATE(old_out_ref.size() == new_out_ref.size(), // Should not happen "Unexpected change in output ref lists"); - for (int i = 0; i < old_out_ref.size(); ++i) { + for (int i = 0, iter_limit = old_out_ref.size(); i < iter_limit; ++i) { // Allowed to add "ref" to an output (or leave it unchanged). 
VALIDATE(!old_out_ref[i] || new_out_ref[i], "Output ", i, " changed from ref to non-ref"); diff --git a/tensorflow/core/kernels/data/captured_function.cc b/tensorflow/core/kernels/data/captured_function.cc index f740d7ff1ad..fdcad3cb448 100644 --- a/tensorflow/core/kernels/data/captured_function.cc +++ b/tensorflow/core/kernels/data/captured_function.cc @@ -115,7 +115,7 @@ Status RunShortCircuit(const ShortCircuitInfo& info, const CapturedFunction* const func, std::vector* rets) { VLOG(3) << "Running function " << func->func().name() << " short circuit"; - size_t num_args = args.size(); + const int num_args = args.size(); rets->reserve(info.indices.size()); for (size_t i = 0; i < info.indices.size(); ++i) { if (info.indices[i] < num_args) { @@ -131,7 +131,7 @@ Status RunShortCircuit(const ShortCircuitInfo& info, std::vector&& args, const CapturedFunction* const func, std::vector* rets) { VLOG(3) << "Running function " << func->func().name() << " short circuit"; - size_t num_args = args.size(); + const int num_args = args.size(); rets->reserve(info.indices.size()); for (size_t i = 0; i < info.indices.size(); ++i) { if (info.indices[i] < num_args) { @@ -198,7 +198,7 @@ Status CreateShortCircuitInfo(OpKernelConstruction* ctx, last_use[indices[i]] = i; } can_move.resize(indices.size()); - for (size_t i = 0; i < indices.size(); ++i) { + for (int i = 0, iter_limit = indices.size(); i < iter_limit; ++i) { can_move[i] = last_use[indices[i]] == i; } } @@ -278,11 +278,12 @@ class CallFrameBase : public CallFrameInterface { // Callee methods. 
Status SetRetval(int index, const Tensor& val) override { - if (index < retvals_.size() && val.dtype() == ret_types_[index] && + const int retvals_size_ = retvals_.size(); + if (index < retvals_size_ && val.dtype() == ret_types_[index] && !retvals_[index]) { retvals_[index] = val; return Status::OK(); - } else if (index >= retvals_.size()) { + } else if (index >= retvals_size_) { return errors::InvalidArgument("Return value ", index, " is out of range."); } else if (val.dtype() != ret_types_[index]) { @@ -317,10 +318,12 @@ class OwnedArgsCallFrame : public CallFrameBase { // Callee methods. Status GetArg(int index, const Tensor** val) override { - if (index < args_.size()) { + const int args_size_ = args_.size(); + const int captured_inputs_size_ = captured_inputs_->size(); + if (index < args_size_) { *val = &args_[index]; return Status::OK(); - } else if (index < args_.size() + captured_inputs_->size()) { + } else if (index < args_size_ + captured_inputs_size_ ) { *val = &(*captured_inputs_)[index - args_.size()]; return Status::OK(); } else { @@ -336,7 +339,7 @@ class OwnedArgsCallFrame : public CallFrameBase { *val = std::move(args_[index]); } bool CanConsumeArg(int index) const override { - return index >= 0 && index < args_.size(); + return index >= 0 && index < static_cast<int>(args_.size()); } private: @@ -359,11 +362,13 @@ class BorrowedArgsCallFrame : public CallFrameBase { // Callee methods. 
Status GetArg(int index, const Tensor** val) override { - if (index < args_.size()) { + const int args_size_ = args_.size(); + const int captured_inputs_size_ = captured_inputs_->size(); + if (index < args_size_ ) { *val = &args_[index]; return Status::OK(); - } else if (index < args_.size() + captured_inputs_->size()) { - *val = &(*captured_inputs_)[index - args_.size()]; + } else if (index < args_size_ + captured_inputs_size_) { + *val = &(*captured_inputs_)[index - args_size_]; return Status::OK(); } else { return errors::InvalidArgument("Argument ", index, " is out of range."); @@ -613,7 +618,7 @@ Status CapturedFunction::Instantiate( } } - for (size_t i = 0; i < fdef->signature().output_arg_size(); ++i) { + for (int i = 0, iter_limit = fdef->signature().output_arg_size(); i < iter_limit; ++i) { inst_opts.output_devices.push_back(inst_opts.target); } diff --git a/tensorflow/core/kernels/data/single_threaded_executor.cc b/tensorflow/core/kernels/data/single_threaded_executor.cc index 3a16f1018dd..eeb1ffd5ad0 100644 --- a/tensorflow/core/kernels/data/single_threaded_executor.cc +++ b/tensorflow/core/kernels/data/single_threaded_executor.cc @@ -51,8 +51,8 @@ class SingleThreadedExecutorImpl : public Executor { std::vector<Node*> ordered_nodes; ordered_nodes.reserve(graph.num_nodes()); GetReversePostOrder(graph, &ordered_nodes); - - if (ordered_nodes.size() != graph.num_nodes()) { + int ordered_nodes_size = ordered_nodes.size(); + if (ordered_nodes_size != graph.num_nodes()) { return errors::InvalidArgument("Graph had ", graph.num_nodes(), " but reverse post-order had ", ordered_nodes.size()); diff --git a/tensorflow/core/kernels/initializable_lookup_table.cc b/tensorflow/core/kernels/initializable_lookup_table.cc index 196c2fe95a3..48041526022 100644 --- a/tensorflow/core/kernels/initializable_lookup_table.cc +++ b/tensorflow/core/kernels/initializable_lookup_table.cc @@ -74,7 +74,7 @@ Status InitializableLookupTable::Initialize(InitTableIterator& iter) { Status 
InitializableLookupTable::AreEntriesSame(const InitTableIterator& iter, bool* result) { - *result = iter.total_size() == size(); + *result = static_cast<size_t>(iter.total_size()) == size(); return Status::OK(); } diff --git a/tensorflow/core/kernels/lookup_util.cc b/tensorflow/core/kernels/lookup_util.cc index 142878d8fb0..9adcedd6b1a 100644 --- a/tensorflow/core/kernels/lookup_util.cc +++ b/tensorflow/core/kernels/lookup_util.cc @@ -132,7 +132,7 @@ class TextFileLineIterator std::vector<string> tokens; if (!ignore_split_) { tokens = str_util::Split(line, delimiter_); - if (std::max(key_index_, value_index_) >= tokens.size()) { + if ( static_cast<size_t>(std::max(key_index_, value_index_)) >= tokens.size()) { status_ = errors::InvalidArgument( "Invalid number of columns in ", filename_, " line ", next_id_, " (", line, ") : expected ", std::max(key_index_, value_index_), diff --git a/tensorflow/core/profiler/convert/op_metrics_to_record.cc b/tensorflow/core/profiler/convert/op_metrics_to_record.cc index 8e28199b827..e4845b3cbd3 100644 --- a/tensorflow/core/profiler/convert/op_metrics_to_record.cc +++ b/tensorflow/core/profiler/convert/op_metrics_to_record.cc @@ -37,8 +37,8 @@ std::vector<const OpMetrics*> SortedOpMetricsDb(const OpMetricsDb& metrics_db, return std::make_tuple(a->self_time_ps(), b->name()) > std::make_tuple(b->self_time_ps(), a->name()); }; - - if (max_records != -1 && result.size() > max_records) { + int result_size = result.size(); + if (max_records != -1 && result_size > max_records) { absl::c_partial_sort(result, result.begin() + max_records, comp); result.resize(max_records); } else { diff --git a/tensorflow/core/profiler/convert/xplane_to_tf_functions.cc b/tensorflow/core/profiler/convert/xplane_to_tf_functions.cc index 3f3506bc8bf..e0b517d797a 100644 --- a/tensorflow/core/profiler/convert/xplane_to_tf_functions.cc +++ b/tensorflow/core/profiler/convert/xplane_to_tf_functions.cc @@ -206,7 +206,7 @@ class TfFunctionExecutions { std::string DebugString() const { std::string result = 
"\nActivations:\n"; - for (auto i = 0; i < activations_.size(); i++) { + for (int i = 0, iter_limit = activations_.size(); i < iter_limit; i++) { absl::StrAppend(&result, "[", i, "] ", activations_[i].DebugString(), "\n"); } diff --git a/tensorflow/core/profiler/utils/event_span.cc b/tensorflow/core/profiler/utils/event_span.cc index 5e0413c4ba2..f946f336ed8 100644 --- a/tensorflow/core/profiler/utils/event_span.cc +++ b/tensorflow/core/profiler/utils/event_span.cc @@ -128,7 +128,7 @@ std::vector ToNonOverlappedEvents( if (event_boundaries.empty()) return result; result.reserve(event_boundaries.size()); PriorityTracker priority_tracker; - for (int64 i = 0; i < (event_boundaries.size() - 1); i++) { + for (int64 i = 0, iter_limit = (event_boundaries.size() - 1); i < iter_limit; i++) { EventType highest_priority = priority_tracker.Update(event_boundaries[i]); result.push_back({highest_priority, Timespan::FromEndPoints( event_boundaries[i].time_ps, @@ -325,12 +325,12 @@ Timespan StepDetails::StepTime() const { std::string StepDetails::DebugString() const { std::string result = "(["; - for (int i = 0; i < markers_.size(); i++) { + for (int i = 0, iter_limit = markers_.size(); i < iter_limit; i++) { if (i > 0) absl::StrAppend(&result, ", "); absl::StrAppend(&result, PrintStepMarker(markers_[i])); } absl::StrAppend(&result, "], ["); - for (int i = 0; i < events_.size(); i++) { + for (int i = 0, iter_limit = events_.size(); i < iter_limit; i++) { if (i > 0) absl::StrAppend(&result, ", "); absl::StrAppend(&result, PrintEventTypeSpan(events_[i])); } diff --git a/tensorflow/core/util/padding.cc b/tensorflow/core/util/padding.cc index 5fa33d8a590..002b67049f3 100644 --- a/tensorflow/core/util/padding.cc +++ b/tensorflow/core/util/padding.cc @@ -37,7 +37,8 @@ Status CheckValidPadding(Padding padding_type, const std::vector& explicit_paddings, int num_dims, TensorFormat data_format) { if (padding_type == Padding::EXPLICIT) { - if (explicit_paddings.size() != 2 * num_dims) { + 
int explicit_paddings_size = explicit_paddings.size(); + if (explicit_paddings_size != 2 * num_dims) { return errors::InvalidArgument( "explicit_paddings attribute must contain ", 2 * num_dims, " values, but got: ", explicit_paddings.size()); diff --git a/tensorflow/python/client/session_ref.cc b/tensorflow/python/client/session_ref.cc index d911e185153..cc3b48cb3e6 100644 --- a/tensorflow/python/client/session_ref.cc +++ b/tensorflow/python/client/session_ref.cc @@ -146,7 +146,7 @@ class SessionLogger { // Build an index from fetch tensor name to first index in // output_tensor_names. std::unordered_map output_name_to_offset; - for (int i = 0; i < output_tensor_names.size(); ++i) { + for (int i = 0, iter_limit = output_tensor_names.size(); i < iter_limit; ++i) { const string& name = output_tensor_names[i]; if (output_name_to_offset.insert(std::make_pair(name, i)).second) { req->add_fetch(name); diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index ca0c5d9ef1a..12aebb6a671 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -63,7 +63,7 @@ void AddInferredAttr(const string& indentation, const string& attr_name, string VectorToTuple(const std::vector& l) { if (l.size() == 1) return strings::StrCat("(", l.front(), ",)"); string ret = "("; - for (int i = 0; i < l.size(); ++i) { + for (int i = 0, iter_limit = l.size(); i < iter_limit; ++i) { if (i > 0) { strings::StrAppend(&ret, ", "); } @@ -75,11 +75,11 @@ string VectorToTuple(const std::vector& l) { void Unflatten(const string& prefix, const std::vector& output_sizes, const string& var, string* result) { - for (int i = 0; i < output_sizes.size(); ++i) { + for (int i = 0, iter_limit = output_sizes.size(); i < iter_limit; ++i) { if (!output_sizes[i].empty()) { strings::StrAppend(result, prefix, var, " = "); if (i > 0) strings::StrAppend(result, var, "[:", i, "] + "); - if (i + 1 < output_sizes.size()) { + 
if (i + 1 < iter_limit) { // Special case i == 0 to avoid "0 +" in the generated code. if (i == 0) { strings::StrAppend(result, "[", var, "[:", output_sizes[i], "]] + ", @@ -295,7 +295,7 @@ string GenEagerPythonOp::Code() { // from the end of params_no_default_, and adding params_no_default_. attrs_.reserve(params_no_default_.size() - op_def_.input_arg_size() + params_with_default_.size()); - for (int i = op_def_.input_arg_size(); i < params_no_default_.size(); ++i) { + for (int i = op_def_.input_arg_size(), iter_limit = params_no_default_.size(); i < iter_limit; ++i) { attrs_.push_back(params_no_default_[i].GetName()); } for (const auto& p : params_with_default_) { @@ -331,7 +331,7 @@ string GenEagerPythonOp::Code() { parameters_with_defaults.empty() ? "" : ", ", "name=None"); // Add attr_expressions_ for attrs that are params. - for (int i = 0; i < attrs_.size(); ++i) { + for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) { const string& attr_name = attrs_[i]; const string& attr_api_name = param_names_[i + op_def_.input_arg_size()].GetRenameTo(); @@ -522,7 +522,7 @@ bool GenEagerPythonOp::GetEagerFunctionSetup(const string& indentation, } } - for (int i = 0; i < attrs_.size(); ++i) { + for (int i = 0, iter_limit = attrs_.size(); i < iter_limit; ++i) { const string& attr_name = attrs_[i]; const auto& param = param_names_[i + op_def_.input_arg_size()]; const auto& attr = *FindAttr(attr_name, op_def_); diff --git a/tensorflow/python/framework/python_op_gen_internal.cc b/tensorflow/python/framework/python_op_gen_internal.cc index 05102db0189..d2e25e368b4 100644 --- a/tensorflow/python/framework/python_op_gen_internal.cc +++ b/tensorflow/python/framework/python_op_gen_internal.cc @@ -561,10 +561,10 @@ string GenPythonOp::Code() { // from the end of args_no_default, and adding args_no_default. 
attrs_.reserve(params_no_default.size() - op_def_.input_arg_size() + params_with_default.size()); - for (int i = op_def_.input_arg_size(); i < params_no_default.size(); ++i) { + for (int i = op_def_.input_arg_size(), iter_limit = params_no_default.size(); i < iter_limit; ++i) { attrs_.push_back(params_no_default[i].GetName()); } - for (int i = 0; i < params_with_default.size(); ++i) { + for (int i = 0, iter_limit = params_with_default.size(); i < iter_limit; ++i) { attrs_.push_back(params_with_default[i].GetName()); } diff --git a/tensorflow/stream_executor/device_description.cc b/tensorflow/stream_executor/device_description.cc index 9ee6e6837d7..b6d98fcbafa 100644 --- a/tensorflow/stream_executor/device_description.cc +++ b/tensorflow/stream_executor/device_description.cc @@ -127,7 +127,7 @@ bool ThreadDimOk(const DeviceDescription &device_description, const ThreadDim &thread_dim) { auto total_threads = thread_dim.x * thread_dim.y * thread_dim.z; auto threads_per_block_limit = device_description.threads_per_block_limit(); - if (total_threads > threads_per_block_limit) { + if (total_threads > static_cast<uint64>(threads_per_block_limit)) { VLOG(2) << "exceeded total-thread-per-block limit: " << total_threads << " vs limit " << threads_per_block_limit; return false;