From 55ee67e1140bd7720a5c7fef125b09e2a2a85006 Mon Sep 17 00:00:00 2001
From: Tare Gaskin
Date: Tue, 7 Jul 2020 01:37:54 +0000
Subject: [PATCH] [-Wsign-compare] warning fixes batch 9

---
 tensorflow/compiler/tf2xla/const_analysis.cc  |  2 +-
 tensorflow/compiler/tf2xla/graph_compiler.cc  |  9 ++++----
 .../compiler/tf2xla/graph_compiler_util.cc    |  3 ++-
 .../tf2xla/kernels/conv_op_helpers.cc         |  6 ++++--
 tensorflow/compiler/tf2xla/lib/data_format.cc |  2 +-
 tensorflow/compiler/tf2xla/literal_util.cc    |  2 +-
 tensorflow/compiler/tf2xla/mlir_tf2xla.cc     |  2 +-
 tensorflow/compiler/tf2xla/shape_util.cc      |  6 ++++--
 tensorflow/compiler/tf2xla/tf2xla.cc          |  2 +-
 tensorflow/compiler/tf2xla/xla_compiler.cc    | 21 ++++++++++---------
 tensorflow/compiler/tf2xla/xla_context.cc     |  3 ++-
 11 files changed, 33 insertions(+), 25 deletions(-)

diff --git a/tensorflow/compiler/tf2xla/const_analysis.cc b/tensorflow/compiler/tf2xla/const_analysis.cc
index c90261303f5..251d1c48249 100644
--- a/tensorflow/compiler/tf2xla/const_analysis.cc
+++ b/tensorflow/compiler/tf2xla/const_analysis.cc
@@ -74,7 +74,7 @@ Status CondConstInputIndices(
         *(fbody->graph), &compile_time_const_arg_indices,
         /*compile_time_const_nodes=*/nullptr, flib_runtime));
   }
-  for (int i = 0; i < compile_time_const_arg_indices.size(); i++) {
+  for (int i = 0, iter_limit = compile_time_const_arg_indices.size(); i < iter_limit; i++) {
     if (compile_time_const_arg_indices[i]) {
       // The 0th input is the pred or branch index, which is not passed to the
       // branches. So the i'th input of a branch function corresponds to the
diff --git a/tensorflow/compiler/tf2xla/graph_compiler.cc b/tensorflow/compiler/tf2xla/graph_compiler.cc
index b6e84eabe8d..716b4f50daf 100644
--- a/tensorflow/compiler/tf2xla/graph_compiler.cc
+++ b/tensorflow/compiler/tf2xla/graph_compiler.cc
@@ -65,7 +65,7 @@ Status PrepareArguments(XlaOpKernelContext* ctx, Graph* graph,
       /*compile_time_const_nodes=*/nullptr, ctx->function_library()));

   args->resize(expressions.size());
-  for (int i = 0; i < args->size(); ++i) {
+  for (int i = 0, iter_limit = args->size(); i < iter_limit; ++i) {
     XlaCompiler::Argument& arg = (*args)[i];
     arg.type = ctx->input_type(i);
     arg.shape = ctx->InputShape(i);
@@ -161,7 +161,8 @@ Status GraphCompiler::Compile() {
     for (auto* e : n->in_edges()) {
       if (e->IsControlEdge()) continue;
       const Node* src = e->src();
-      TF_RET_CHECK(src->id() < output_registry.size());
+      const int output_registry_size = output_registry.size();
+      TF_RET_CHECK(src->id() < output_registry_size);
       const NodeOutputs& src_outputs = output_registry[src->id()];

       tensor_inputs_.at(e->dst_input()) = src_outputs.at(e->src_output());
@@ -268,7 +269,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
   TF_RET_CHECK(arguments.size() == expressions.size());

   std::vector<xla::XlaOp> handles;
-  for (int64 i = 0; i < expressions.size(); ++i) {
+  for (int64 i = 0, iter_limit = expressions.size(); i < iter_limit; ++i) {
     if (arguments[i].kind == XlaCompiler::Argument::kConstant) {
       continue;
     }
@@ -312,7 +313,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
     }
   }

-  for (int64 i = 0; i < result.resource_updates.size(); i++) {
+  for (int64 i = 0, iter_limit = result.resource_updates.size(); i < iter_limit; i++) {
     if (result.resource_updates[i].modified) {
       XlaResource* resource =
           expressions[result.resource_updates[i].input_index]->resource();
diff --git a/tensorflow/compiler/tf2xla/graph_compiler_util.cc b/tensorflow/compiler/tf2xla/graph_compiler_util.cc
index a9385e05564..f7adae077f7 100644
--- a/tensorflow/compiler/tf2xla/graph_compiler_util.cc
+++ b/tensorflow/compiler/tf2xla/graph_compiler_util.cc
@@ -216,7 +216,8 @@ Status CollectArgNodes(const Graph& graph, std::vector<Node*>* arg_nodes) {
   }
   arg_nodes->clear();
   for (const auto& index_node : indexed_arg_nodes) {
-    if (index_node.first != arg_nodes->size()) {
+    const int arg_nodes_size = arg_nodes->size();
+    if (index_node.first != arg_nodes_size) {
       return errors::InvalidArgument(
           "Expected ", FunctionLibraryDefinition::kArgOp, " node with index ",
           arg_nodes->size(), ", but got index ", index_node.first);
diff --git a/tensorflow/compiler/tf2xla/kernels/conv_op_helpers.cc b/tensorflow/compiler/tf2xla/kernels/conv_op_helpers.cc
index b60a13972a7..e0bc2ba5052 100644
--- a/tensorflow/compiler/tf2xla/kernels/conv_op_helpers.cc
+++ b/tensorflow/compiler/tf2xla/kernels/conv_op_helpers.cc
@@ -124,7 +124,8 @@ xla::XlaOp ReshapeFilterForDepthwiseConvolution(const xla::Shape& filter_shape,
 // convolutions (as currently implemented).
 Status CheckConvAttrs(const ConvOpAttrs& attrs) {
   const int num_dims = attrs.num_spatial_dims + 2;
-  if (attrs.strides.size() != num_dims) {
+  const int attrs_strides_size = attrs.strides.size();
+  if (attrs_strides_size != num_dims) {
     return errors::InvalidArgument("Sliding window strides field must specify ",
                                    num_dims, " dimensions");
   }
@@ -135,7 +136,8 @@ Status CheckConvAttrs(const ConvOpAttrs& attrs) {
         "Current implementation does not yet support strides in the batch and "
         "depth dimensions.");
   }
-  if (attrs.dilations.size() != num_dims) {
+  const int attrs_dilations_size = attrs.dilations.size();
+  if (attrs_dilations_size != num_dims) {
     return errors::InvalidArgument("Dilations field must specify ", num_dims,
                                    " dimensions");
   }
diff --git a/tensorflow/compiler/tf2xla/lib/data_format.cc b/tensorflow/compiler/tf2xla/lib/data_format.cc
index 7daff47e966..2ab86c78e44 100644
--- a/tensorflow/compiler/tf2xla/lib/data_format.cc
+++ b/tensorflow/compiler/tf2xla/lib/data_format.cc
@@ -66,7 +66,7 @@ xla::StatusOr<xla::XlaOp> Expand(xla::XlaOp input, int64 dim) {

   // Move the newly created dimension to the end with a transpose.
   std::vector<int64> permutation;
-  for (int64 i = 0; i != expanded_shape.size(); ++i) {
+  for (int64 i = 0, iter_limit = expanded_shape.size(); i != iter_limit; ++i) {
     permutation.push_back(i);
     if (i == dim) {
       ++i;
diff --git a/tensorflow/compiler/tf2xla/literal_util.cc b/tensorflow/compiler/tf2xla/literal_util.cc
index 720b81a5097..42a95bbb9f8 100644
--- a/tensorflow/compiler/tf2xla/literal_util.cc
+++ b/tensorflow/compiler/tf2xla/literal_util.cc
@@ -72,7 +72,7 @@ Status HostTensorsToBorrowingLiteralTuple(absl::Span<const Tensor> host_tensors,
   buf_ptrs.reserve(host_tensors.size());
   std::vector<xla::Shape> tensor_shapes(host_tensors.size());

-  for (int i = 0; i < host_tensors.size(); i++) {
+  for (int i = 0, iter_limit = host_tensors.size(); i < iter_limit; i++) {
     // Validate runtime shapes and fail if it doesn't match the contract.
     const Tensor* tensor = &host_tensors[i];
     buf_ptrs.emplace_back(static_cast<const char*>(DMAHelper::base(tensor)));
diff --git a/tensorflow/compiler/tf2xla/mlir_tf2xla.cc b/tensorflow/compiler/tf2xla/mlir_tf2xla.cc
index 60d1f3da0c5..61059b3568b 100644
--- a/tensorflow/compiler/tf2xla/mlir_tf2xla.cc
+++ b/tensorflow/compiler/tf2xla/mlir_tf2xla.cc
@@ -140,7 +140,7 @@ Status ConvertGraphDefToXlaViaMlir(
   if (!debug_info_path_begin_marker.empty()) {
     for (size_t i = 0, e = debug_info.files_size(); i < e; ++i) {
       std::string* file_name = debug_info.mutable_files(i);
       size_t location =
           file_name->rfind(std::string(debug_info_path_begin_marker));
-      if (location != -1) {
+      if (location != std::string::npos) {
         *file_name = file_name->substr(location +
diff --git a/tensorflow/compiler/tf2xla/shape_util.cc b/tensorflow/compiler/tf2xla/shape_util.cc
index 2fce6e7f0c7..146694b7754 100644
--- a/tensorflow/compiler/tf2xla/shape_util.cc
+++ b/tensorflow/compiler/tf2xla/shape_util.cc
@@ -55,7 +55,8 @@ xla::StatusOr<bool> MakeLayout(absl::Span<const int64> minor_to_major,
   }
   std::vector<bool> dim_present(minor_to_major.size(), false);
   for (auto dim : minor_to_major) {
-    if (dim < 0 || dim >= minor_to_major.size()) {
+    const int minor_to_major_size = minor_to_major.size();
+    if (dim < 0 || dim >= minor_to_major_size) {
       return errors::InvalidArgument("Layout dimension out of range: dim=",
                                      dim, " rank=", minor_to_major.size());
     }
@@ -204,7 +205,8 @@ Status GetShapeWithLayout(
     *output_shape = xla::ShapeUtil::MakeTupleShape(shapes);
   } else {
     int64 rank = input_shape.rank();
-    if (rank != minor_to_major.size()) {
+    const int64 minor_to_major_size = minor_to_major.size();
+    if (rank != minor_to_major_size) {
       return errors::InvalidArgument(
           "Wrong number of layout attribute elements: rank=", rank,
           " elements=", minor_to_major.size());
diff --git a/tensorflow/compiler/tf2xla/tf2xla.cc b/tensorflow/compiler/tf2xla/tf2xla.cc
index bcdfd1c6a8e..0454bbb771a 100644
--- a/tensorflow/compiler/tf2xla/tf2xla.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla.cc
@@ -87,7 +87,7 @@ Status ConvertGraphToXla(std::unique_ptr<Graph> graph,
   *computation = std::move(*result.computation);

   int num_const_results = 0;
-  for (int i = 0; i < result.outputs.size(); ++i) {
+  for (int i = 0, iter_limit = result.outputs.size(); i < iter_limit; ++i) {
     // Ending up with const results (i.e. output args) is an error, since it
     // means that one or more fetches that the user specified will be dropped
     // from the generated function. It's most likely a configuration error,
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.cc b/tensorflow/compiler/tf2xla/xla_compiler.cc
index c1aef3ff690..6d92fd97793 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler.cc
@@ -64,7 +64,7 @@ Status CheckSignature(const DataTypeVector& types,
     return errors::Internal("Compilation arguments have ", args.size(),
                             " elements while function has ", types.size());
   }
-  for (int i = 0; i < types.size(); ++i) {
+  for (int i = 0, iter_limit = types.size(); i < iter_limit; ++i) {
     // Don't perform type checks on resource variables and tensor
     // lists (DT_VARIANT) as we have to trick the type system in order to
     // plumb them through. DT_VARIANTS are wrapped in a DT_UINT8 tensor.
@@ -192,7 +192,7 @@ Status BuildComputation(
   // replicate sharding is used. The first element is the output index, second
   // element is the sharding.
   std::unordered_map<int, xla::OpSharding> retval_index_and_sharding;
-  for (int i = 0; i < retvals.size(); ++i) {
+  for (int i = 0, iter_limit = retvals.size(); i < iter_limit; ++i) {
     XlaCompiler::OutputDescription& output = (*outputs)[i];
     const XlaExpression& retval = retvals[i];
     output.type = retval.dtype();
@@ -356,7 +356,7 @@ Status BuildComputation(
     xla::Shape shape = xla::ShapeUtil::MakeTupleShape(elem_shapes);
     // Copy specified sharding from retval_index_and_sharding.
     std::vector<xla::HloSharding> sharding_elems;
-    for (int i = 0; i < elems.size(); i++) {
+    for (int i = 0, iter_limit = elems.size(); i < iter_limit; i++) {
       const auto& iter = retval_index_and_sharding.find(i);
       TF_RET_CHECK(iter != retval_index_and_sharding.end());
       const xla::OpSharding& sub_op_sharding = iter->second;
@@ -365,7 +365,8 @@ Status BuildComputation(
       if (elem_shapes[i].IsTuple()) {
         const std::vector<xla::HloSharding> sub_sharding_elems =
             sub_sharding.tuple_elements();
-        TF_RET_CHECK(sub_sharding_elems.size() ==
+        const int64 sub_sharding_elems_size = sub_sharding_elems.size();
+        TF_RET_CHECK(sub_sharding_elems_size ==
                      xla::ShapeUtil::GetLeafCount(elem_shapes[i]));
         for (const auto& sub_sharding_elem : sub_sharding_elems) {
           sharding_elems.push_back(sub_sharding_elem);
@@ -700,7 +701,7 @@ Status XlaCompiler::CompileFunction(
   // Set shapes for _Arg nodes. They are useful for constant folding (e.g. an
   // Xla op requires a compile-time constant input, and that input is shape of
   // an _Arg node.
-  for (int i = 0; i < args.size(); i++) {
+  for (int i = 0, iter_limit = args.size(); i < iter_limit; i++) {
     // Skip resource variables and tensor lists.
     DataType dtype;
     TF_RETURN_IF_ERROR(GetNodeAttr(fbody->arg_nodes[i]->def(), "T", &dtype));
@@ -942,7 +943,7 @@ Status XlaCompiler::BuildArguments(
   // to the d'th XLA input. Note that the value -1 corresponds to constants, or
   // other args that don't correspond to an input.
   std::vector<int> arg_to_inputs(args.size(), -1);
-  for (int i = 0; i < input_to_args->size(); i++) {
+  for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; i++) {
     arg_to_inputs[input_to_args->at(i)] = i;
   }

@@ -988,7 +989,7 @@ Status XlaCompiler::BuildArguments(
                            : it->second;
     }
     std::vector<bool> is_same_across_replicas;
-    for (int i = 0; i < input_to_args->size(); ++i) {
+    for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
      // Add an entry to is_same_across_replicas for every leaf buffer.
      is_same_across_replicas.insert(
          is_same_across_replicas.end(),
@@ -1004,7 +1005,7 @@ Status XlaCompiler::BuildArguments(
      tuple = xla::Parameter(builder, 0, (*input_shapes)[0], "arg_tuple");
    }

-    for (int i = 0; i < input_to_args->size(); ++i) {
+    for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
      const XlaCompiler::Argument& arg = args[input_to_args->at(i)];
      for (const auto& dim_and_arg_num : arg.dynamic_dim_to_arg_num_map) {
        int dynamic_size_param_index = arg_to_inputs.at(dim_and_arg_num.second);
@@ -1045,7 +1046,7 @@ Status XlaCompiler::BuildArguments(
      }
    }

-    for (int i = 0; i < input_to_args->size(); ++i) {
+    for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
      const XlaCompiler::Argument& arg = args[input_to_args->at(i)];
      for (const auto& dim_and_arg_num : arg.dynamic_dim_to_arg_num_map) {
        int dynamic_size_param_index = arg_to_inputs.at(dim_and_arg_num.second);
@@ -1365,7 +1366,7 @@ void SetTransfer(const string& key, absl::Span<const DataType> types,
                 tf2xla::HostTransferMetadata* transfer) {
  transfer->set_key(key);
  CHECK(types.size() == shapes.size());
-  for (int i = 0; i < types.size(); ++i) {
+  for (int i = 0, iter_limit = types.size(); i < iter_limit; ++i) {
    tf2xla::TensorMetadata* metadata = transfer->add_metadata();
    metadata->set_type(types[i]);
    shapes[i].AsProto(metadata->mutable_shape());
diff --git a/tensorflow/compiler/tf2xla/xla_context.cc b/tensorflow/compiler/tf2xla/xla_context.cc
index e49c944eeb3..78619ec580d 100644
--- a/tensorflow/compiler/tf2xla/xla_context.cc
+++ b/tensorflow/compiler/tf2xla/xla_context.cc
@@ -64,7 +64,8 @@ XlaContext::XlaContext(XlaCompiler* compiler, xla::XlaBuilder* builder)
 string XlaContext::DebugString() const { return "XLA JIT context"; }

 void XlaContext::SetRetval(int index, const XlaExpression& expression) {
-  if (retvals_.size() <= index) {
+  const int64 retvals_size = retvals_.size();
+  if (retvals_size <= index) {
    retvals_.resize(index + 1);
  }
  retvals_[index] = expression;
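
Every hunk in this batch silences -Wsign-compare the same way: a signed loop
index (int or int64) was being compared against the unsigned size_t returned
by size(), so the size is hoisted once into a signed local and the comparison
becomes signed-to-signed. The mlir_tf2xla.cc hunk is the one exception, where
the rfind() result stays unsigned and is tested against std::string::npos.
A minimal standalone sketch of both idioms (the container and string values
below are illustrative only, not taken from the TensorFlow sources):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // Idiom 1: hoist size() into a signed local so both operands of the
      // comparison are signed. The implicit size_t -> int narrowing assumes
      // the container is far smaller than INT_MAX, which holds for the
      // argument/retval/output lists touched by this patch.
      std::vector<int> values = {1, 2, 3};
      for (int i = 0, iter_limit = values.size(); i < iter_limit; ++i) {
        std::printf("%d\n", values[i]);
      }

      // Idiom 2: for string searches, stay unsigned and compare against
      // std::string::npos rather than -1, so no sign conversion is involved.
      std::string path = "tensorflow/compiler/tf2xla";
      size_t location = path.rfind('/');
      if (location != std::string::npos) {
        std::printf("%s\n", path.substr(location + 1).c_str());
      }
      return 0;
    }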