Fix [-Wsign-compare] warnings, batch 9: hoist container .size() calls into signed loop bounds and locals to avoid signed/unsigned comparisons

This commit is contained in:
Tare Gaskin 2020-07-07 01:37:54 +00:00
parent 9fdba01ead
commit 55ee67e114
11 changed files with 33 additions and 25 deletions

View File

@ -74,7 +74,7 @@ Status CondConstInputIndices(
*(fbody->graph), &compile_time_const_arg_indices,
/*compile_time_const_nodes=*/nullptr, flib_runtime));
}
for (int i = 0; i < compile_time_const_arg_indices.size(); i++) {
for (int i = 0, iter_limit = compile_time_const_arg_indices.size(); i < iter_limit; i++) {
if (compile_time_const_arg_indices[i]) {
// The 0th input is the pred or branch index, which is not passed to the
// branches. So the i'th input of a branch function corresponds to the

View File

@ -65,7 +65,7 @@ Status PrepareArguments(XlaOpKernelContext* ctx, Graph* graph,
/*compile_time_const_nodes=*/nullptr, ctx->function_library()));
args->resize(expressions.size());
for (int i = 0; i < args->size(); ++i) {
for (int i = 0, iter_limit = args->size(); i < iter_limit; ++i) {
XlaCompiler::Argument& arg = (*args)[i];
arg.type = ctx->input_type(i);
arg.shape = ctx->InputShape(i);
@ -161,7 +161,8 @@ Status GraphCompiler::Compile() {
for (auto* e : n->in_edges()) {
if (e->IsControlEdge()) continue;
const Node* src = e->src();
TF_RET_CHECK(src->id() < output_registry.size());
const int output_registry_size = output_registry.size();
TF_RET_CHECK(src->id() < output_registry_size);
const NodeOutputs& src_outputs = output_registry[src->id()];
tensor_inputs_.at(e->dst_input()) = src_outputs.at(e->src_output());
@ -268,7 +269,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
TF_RET_CHECK(arguments.size() == expressions.size());
std::vector<xla::XlaOp> handles;
for (int64 i = 0; i < expressions.size(); ++i) {
for (int64 i = 0, iter_limit = expressions.size(); i < iter_limit; ++i) {
if (arguments[i].kind == XlaCompiler::Argument::kConstant) {
continue;
}
@ -312,7 +313,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
}
}
for (int64 i = 0; i < result.resource_updates.size(); i++) {
for (int64 i = 0, iter_limit = result.resource_updates.size(); i < iter_limit; i++) {
if (result.resource_updates[i].modified) {
XlaResource* resource =
expressions[result.resource_updates[i].input_index]->resource();

View File

@ -216,7 +216,8 @@ Status CollectArgNodes(const Graph& graph, std::vector<Node*>* arg_nodes) {
}
arg_nodes->clear();
for (const auto& index_node : indexed_arg_nodes) {
if (index_node.first != arg_nodes->size()) {
const int arg_nodes_size = arg_nodes->size();
if (index_node.first != arg_nodes_size) {
return errors::InvalidArgument(
"Expected ", FunctionLibraryDefinition::kArgOp, " node with index ",
arg_nodes->size(), ", but got index ", index_node.first);

View File

@ -124,7 +124,8 @@ xla::XlaOp ReshapeFilterForDepthwiseConvolution(const xla::Shape& filter_shape,
// convolutions (as currently implemented).
Status CheckConvAttrs(const ConvOpAttrs& attrs) {
const int num_dims = attrs.num_spatial_dims + 2;
if (attrs.strides.size() != num_dims) {
const int attrs_strides_size = attrs.strides.size();
if (attrs_strides_size != num_dims) {
return errors::InvalidArgument("Sliding window strides field must specify ",
num_dims, " dimensions");
}
@ -135,7 +136,8 @@ Status CheckConvAttrs(const ConvOpAttrs& attrs) {
"Current implementation does not yet support strides in the batch and "
"depth dimensions.");
}
if (attrs.dilations.size() != num_dims) {
const int attrs_dilations_size = attrs.dilations.size();
if (attrs_dilations_size != num_dims) {
return errors::InvalidArgument("Dilations field must specify ", num_dims,
" dimensions");
}

View File

@ -66,7 +66,7 @@ xla::StatusOr<xla::XlaOp> Expand(xla::XlaOp input, int64 dim) {
// Move the newly created dimension to the end with a transpose.
std::vector<int64> permutation;
for (int64 i = 0; i != expanded_shape.size(); ++i) {
for (int64 i = 0, iter_limit = expanded_shape.size(); i != iter_limit; ++i) {
permutation.push_back(i);
if (i == dim) {
++i;

View File

@ -72,7 +72,7 @@ Status HostTensorsToBorrowingLiteralTuple(absl::Span<const Tensor> host_tensors,
buf_ptrs.reserve(host_tensors.size());
std::vector<xla::Shape> tensor_shapes(host_tensors.size());
for (int i = 0; i < host_tensors.size(); i++) {
for (int i = 0, iter_limit = host_tensors.size(); i < iter_limit; i++) {
// Validate runtime shapes and fail if it doesn't match the contract.
const Tensor* tensor = &host_tensors[i];
buf_ptrs.emplace_back(static_cast<const char*>(DMAHelper::base(tensor)));

View File

@ -140,7 +140,7 @@ Status ConvertGraphDefToXlaViaMlir(
if (!debug_info_path_begin_marker.empty()) {
for (size_t i = 0, e = debug_info.files_size(); i < e; ++i) {
std::string* file_name = debug_info.mutable_files(i);
size_t location =
int location =
file_name->rfind(std::string(debug_info_path_begin_marker));
if (location != -1) {
*file_name = file_name->substr(location +

View File

@ -55,7 +55,8 @@ xla::StatusOr<bool> MakeLayout(absl::Span<const int64> minor_to_major,
}
std::vector<bool> dim_present(minor_to_major.size(), false);
for (auto dim : minor_to_major) {
if (dim < 0 || dim >= minor_to_major.size()) {
const int minor_to_major_size = minor_to_major.size();
if (dim < 0 || dim >= minor_to_major_size) {
return errors::InvalidArgument("Layout dimension out of range: dim=", dim,
" rank=", minor_to_major.size());
}
@ -204,7 +205,8 @@ Status GetShapeWithLayout(
*output_shape = xla::ShapeUtil::MakeTupleShape(shapes);
} else {
int64 rank = input_shape.rank();
if (rank != minor_to_major.size()) {
const int64 minor_to_major_size = minor_to_major.size();
if (rank != minor_to_major_size) {
return errors::InvalidArgument(
"Wrong number of layout attribute elements: rank=", rank,
" elements=", minor_to_major.size());

View File

@ -87,7 +87,7 @@ Status ConvertGraphToXla(std::unique_ptr<Graph> graph,
*computation = std::move(*result.computation);
int num_const_results = 0;
for (int i = 0; i < result.outputs.size(); ++i) {
for (int i = 0, iter_limit = result.outputs.size(); i < iter_limit; ++i) {
// Ending up with const results (i.e. output args) is an error, since it
// means that one or more fetches that the user specified will be dropped
// from the generated function. It's most likely a configuration error,

View File

@ -64,7 +64,7 @@ Status CheckSignature(const DataTypeVector& types,
return errors::Internal("Compilation arguments have ", args.size(),
" elements while function has ", types.size());
}
for (int i = 0; i < types.size(); ++i) {
for (int i = 0, iter_limit = types.size(); i < iter_limit; ++i) {
// Don't perform type checks on resource variables and tensor
// lists (DT_VARIANT) as we have to trick the type system in order to
// plumb them through. DT_VARIANTS are wrapped in a DT_UINT8 tensor.
@ -192,7 +192,7 @@ Status BuildComputation(
// replicate sharding is used. The first element is the output index, second
// element is the sharding.
std::unordered_map<int, xla::OpSharding> retval_index_and_sharding;
for (int i = 0; i < retvals.size(); ++i) {
for (int i = 0, iter_limit = retvals.size(); i < iter_limit; ++i) {
XlaCompiler::OutputDescription& output = (*outputs)[i];
const XlaExpression& retval = retvals[i];
output.type = retval.dtype();
@ -356,7 +356,7 @@ Status BuildComputation(
xla::Shape shape = xla::ShapeUtil::MakeTupleShape(elem_shapes);
// Copy specified sharding from retval_index_and_sharding.
std::vector<xla::HloSharding> sharding_elems;
for (int i = 0; i < elems.size(); i++) {
for (int i = 0, iter_limit = elems.size(); i < iter_limit; i++) {
const auto& iter = retval_index_and_sharding.find(i);
TF_RET_CHECK(iter != retval_index_and_sharding.end());
const xla::OpSharding& sub_op_sharding = iter->second;
@ -365,7 +365,8 @@ Status BuildComputation(
if (elem_shapes[i].IsTuple()) {
const std::vector<xla::HloSharding> sub_sharding_elems =
sub_sharding.tuple_elements();
TF_RET_CHECK(sub_sharding_elems.size() ==
const int64 sub_sharding_elems_size = sub_sharding_elems.size();
TF_RET_CHECK(sub_sharding_elems_size ==
xla::ShapeUtil::GetLeafCount(elem_shapes[i]));
for (const auto& sub_sharding_elem : sub_sharding_elems) {
sharding_elems.push_back(sub_sharding_elem);
@ -700,7 +701,7 @@ Status XlaCompiler::CompileFunction(
// Set shapes for _Arg nodes. They are useful for constant folding (e.g. an
// Xla op requires a compile-time constant input, and that input is shape of
// an _Arg node.
for (int i = 0; i < args.size(); i++) {
for (int i = 0, iter_limit = args.size(); i < iter_limit; i++) {
// Skip resource variables and tensor lists.
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(fbody->arg_nodes[i]->def(), "T", &dtype));
@ -942,7 +943,7 @@ Status XlaCompiler::BuildArguments(
// to the d'th XLA input. Note that the value -1 corresponds to constants, or
// other args that don't correspond to an input.
std::vector<int> arg_to_inputs(args.size(), -1);
for (int i = 0; i < input_to_args->size(); i++) {
for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; i++) {
arg_to_inputs[input_to_args->at(i)] = i;
}
@ -988,7 +989,7 @@ Status XlaCompiler::BuildArguments(
: it->second;
}
std::vector<bool> is_same_across_replicas;
for (int i = 0; i < input_to_args->size(); ++i) {
for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
// Add an entry to is_same_across_replicas for every leaf buffer.
is_same_across_replicas.insert(
is_same_across_replicas.end(),
@ -1004,7 +1005,7 @@ Status XlaCompiler::BuildArguments(
tuple = xla::Parameter(builder, 0, (*input_shapes)[0], "arg_tuple");
}
for (int i = 0; i < input_to_args->size(); ++i) {
for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
const XlaCompiler::Argument& arg = args[input_to_args->at(i)];
for (const auto& dim_and_arg_num : arg.dynamic_dim_to_arg_num_map) {
int dynamic_size_param_index = arg_to_inputs.at(dim_and_arg_num.second);
@ -1045,7 +1046,7 @@ Status XlaCompiler::BuildArguments(
}
}
for (int i = 0; i < input_to_args->size(); ++i) {
for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
const XlaCompiler::Argument& arg = args[input_to_args->at(i)];
for (const auto& dim_and_arg_num : arg.dynamic_dim_to_arg_num_map) {
int dynamic_size_param_index = arg_to_inputs.at(dim_and_arg_num.second);
@ -1365,7 +1366,7 @@ void SetTransfer(const string& key, absl::Span<const DataType> types,
tf2xla::HostTransferMetadata* transfer) {
transfer->set_key(key);
CHECK(types.size() == shapes.size());
for (int i = 0; i < types.size(); ++i) {
for (int i = 0, iter_limit = types.size(); i < iter_limit; ++i) {
tf2xla::TensorMetadata* metadata = transfer->add_metadata();
metadata->set_type(types[i]);
shapes[i].AsProto(metadata->mutable_shape());

View File

@ -64,7 +64,8 @@ XlaContext::XlaContext(XlaCompiler* compiler, xla::XlaBuilder* builder)
string XlaContext::DebugString() const { return "XLA JIT context"; }
void XlaContext::SetRetval(int index, const XlaExpression& expression) {
if (retvals_.size() <= index) {
const int64 retvals_size = retvals_.size();
if (retvals_size <= index) {
retvals_.resize(index + 1);
}
retvals_[index] = expression;