tf2xla directory resolutions
commit ad58928e65
parent 3bb28df8d4
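Nearly every hunk below applies the same cleanup: a loop that compared a signed index against a container's size() (or cached it under the name iter_limit) is rewritten to cache the size in a loop-local end variable of the index's own type. A minimal standalone sketch of that pattern follows; it assumes an ordinary std::vector, and the function CountPositive is hypothetical, not code from this commit.

#include <vector>

// Hypothetical illustration of the loop rewrite used throughout this commit:
// the size is converted once into an `end` bound of the index's type, so the
// comparison is signed-vs-signed and size() is not re-evaluated per iteration.
int CountPositive(const std::vector<int>& values) {
  int count = 0;
  for (int i = 0, end = values.size(); i < end; ++i) {
    if (values[i] > 0) ++count;
  }
  return count;
}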
@@ -74,8 +74,7 @@ Status CondConstInputIndices(
       *(fbody->graph), &compile_time_const_arg_indices,
       /*compile_time_const_nodes=*/nullptr, flib_runtime));
   }
-  for (int i = 0, iter_limit = compile_time_const_arg_indices.size();
-       i < iter_limit; i++) {
+  for (int i = 0, end = compile_time_const_arg_indices.size(); i < end; i++) {
     if (compile_time_const_arg_indices[i]) {
       // The 0th input is the pred or branch index, which is not passed to the
       // branches. So the i'th input of a branch function corresponds to the
@@ -224,7 +224,8 @@ string DebugString(const CondArgNodes& nodes) {
 }
 
 StateMap::CondId StateMap::LookupCondId(const Node* node) const {
-  if (node->id() < node_to_condid_map_.size())
+  const int64 node_to_condid_map_size = node_to_condid_map_.size();
+  if (node->id() < node_to_condid_map_size)
     return node_to_condid_map_[node->id()];
   return added_node_condid_mapping_.at(node->id());
 }
@@ -235,14 +236,16 @@ StateMap::CondId StateMap::GetCondId(const StateMap::CondState& state) {
 }
 
 void StateMap::ResetCondId(const Node* node, StateMap::CondId id) {
-  if (node->id() < node_to_condid_map_.size())
+  const int64 node_to_condid_map_size = node_to_condid_map_.size();
+  if (node->id() < node_to_condid_map_size)
     node_to_condid_map_[node->id()] = id;
   else
     added_node_condid_mapping_[node->id()] = id;
 }
 
 StateMap::AncestorId StateMap::LookupAncestorId(const Node* node) const {
-  if (node->id() < node_to_ancestorid_map_.size())
+  const int64 node_to_ancestorid_map_size = node_to_ancestorid_map_.size();
+  if (node->id() < node_to_ancestorid_map_size)
     return node_to_ancestorid_map_[node->id()];
   return added_node_ancestorid_mapping_.at(node->id());
 }
@@ -254,7 +257,8 @@ StateMap::AncestorId StateMap::GetAncestorId(
 }
 
 void StateMap::ResetAncestorId(const Node* node, StateMap::AncestorId id) {
-  if (node->id() < node_to_ancestorid_map_.size())
+  const int64 node_to_ancestorid_map_size = node_to_ancestorid_map_.size();
+  if (node->id() < node_to_ancestorid_map_size)
     node_to_ancestorid_map_[node->id()] = id;
   else
     added_node_ancestorid_mapping_[node->id()] = id;
@@ -130,7 +130,7 @@ Status BuildLoopCondition(const Graph& graph, WhileLoopFrame* frame,
   std::vector<bool> squash_src_outputs(graph.num_node_ids(), false);
 
   // Build one _Arg node for each Enter node.
-  for (int i = 0; i < frame->args.size(); ++i) {
+  for (int i = 0, end = frame->args.size(); i < end; ++i) {
     const WhileLoopArg& arg = frame->args[i];
 
     TF_ASSIGN_OR_RETURN(Node * arg_node,
@@ -170,7 +170,7 @@ Status BuildLoopBody(const Graph& graph, WhileLoopFrame* frame,
   std::vector<Node*> next_iterations;
   next_iterations.reserve(frame->args.size());
   arg_types->reserve(frame->args.size());
-  for (int i = 0; i < frame->args.size(); ++i) {
+  for (int i = 0, end = frame->args.size(); i < end; ++i) {
     const WhileLoopArg& arg = frame->args[i];
 
     DataType dtype = arg.enter->input_type(0);
@@ -235,7 +235,7 @@ Status FunctionalizeLoop(Graph* graph, WhileLoopFrame* frame,
     } else {
       std::vector<const Edge*> edges(arg.enter->out_edges().begin(),
                                      arg.enter->out_edges().end());
-      for (int i = 0; i < edges.size(); ++i) {
+      for (int i = 0, end = edges.size(); i < end; ++i) {
        if (edges[i]->IsControlEdge() && edges[i]->dst()->IsSink()) {
          continue;
        }
@@ -447,7 +447,7 @@ Status FunctionalizeLoop(Graph* graph, WhileLoopFrame* frame,
     }
   }
   std::vector<NodeDefBuilder::NodeOut> inputs;
-  for (int i = 0; i < frame->args.size(); ++i) {
+  for (int i = 0, end = frame->args.size(); i < end; ++i) {
     const WhileLoopArg& arg = frame->args[i];
     const Edge* in_edge;
     TF_RETURN_IF_ERROR(arg.enter->input_edge(0, &in_edge));
@@ -463,7 +463,7 @@ Status FunctionalizeLoop(Graph* graph, WhileLoopFrame* frame,
   TF_ASSIGN_OR_RETURN(Node * while_node, AddNodeDefToGraph(while_def, graph));
 
   // Copies edges to the Enter nodes and from the Exit nodes onto the While.
-  for (int i = 0; i < frame->args.size(); ++i) {
+  for (int i = 0, end = frame->args.size(); i < end; ++i) {
     const WhileLoopArg& arg = frame->args[i];
     const Edge* in_edge;
     TF_RETURN_IF_ERROR(arg.enter->input_edge(0, &in_edge));
@@ -65,7 +65,7 @@ Status PrepareArguments(XlaOpKernelContext* ctx, Graph* graph,
       /*compile_time_const_nodes=*/nullptr, ctx->function_library()));
 
   args->resize(expressions.size());
-  for (int i = 0, iter_limit = args->size(); i < iter_limit; ++i) {
+  for (int i = 0, end = args->size(); i < end; ++i) {
     XlaCompiler::Argument& arg = (*args)[i];
     arg.type = ctx->input_type(i);
     arg.shape = ctx->InputShape(i);
@@ -269,7 +269,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
   TF_RET_CHECK(arguments.size() == expressions.size());
 
   std::vector<xla::XlaOp> handles;
-  for (int64 i = 0, iter_limit = expressions.size(); i < iter_limit; ++i) {
+  for (int64 i = 0, end = expressions.size(); i < end; ++i) {
     if (arguments[i].kind == XlaCompiler::Argument::kConstant) {
       continue;
     }
@@ -313,8 +313,7 @@ Status GraphCompiler::CompileFunctionalNode(Node* n,
     }
   }
 
-  for (int64 i = 0, iter_limit = result.resource_updates.size(); i < iter_limit;
-       i++) {
+  for (int64 i = 0, end = result.resource_updates.size(); i < end; i++) {
     if (result.resource_updates[i].modified) {
       XlaResource* resource =
           expressions[result.resource_updates[i].input_index]->resource();
@@ -66,7 +66,7 @@ xla::StatusOr<xla::XlaOp> Expand(xla::XlaOp input, int64 dim) {
 
   // Move the newly created dimension to the end with a transpose.
   std::vector<int64> permutation;
-  for (int64 i = 0, iter_limit = expanded_shape.size(); i != iter_limit; ++i) {
+  for (int64 i = 0, end = expanded_shape.size(); i != end; ++i) {
     permutation.push_back(i);
     if (i == dim) {
       ++i;
@@ -72,7 +72,7 @@ Status HostTensorsToBorrowingLiteralTuple(absl::Span<const Tensor> host_tensors,
   buf_ptrs.reserve(host_tensors.size());
   std::vector<xla::Shape> tensor_shapes(host_tensors.size());
 
-  for (int i = 0, iter_limit = host_tensors.size(); i < iter_limit; i++) {
+  for (int i = 0, end = host_tensors.size(); i < end; i++) {
     // Validate runtime shapes and fail if it doesn't match the contract.
     const Tensor* tensor = &host_tensors[i];
     buf_ptrs.emplace_back(static_cast<const char*>(DMAHelper::base(tensor)));
@@ -41,7 +41,7 @@ std::vector<DataType> ShuffleInputDataTypeAttribute(
     const std::vector<DataType>& in_types,
     const std::vector<int>& index_mapping) {
   std::vector<DataType> result(index_mapping.size());
-  for (int i = 0; i < in_types.size(); i++) {
+  for (int i = 0, end = in_types.size(); i < end; i++) {
     result[index_mapping.at(i)] = in_types[i];
   }
   return result;
@@ -56,7 +56,7 @@ Status InputTypesNeedsRearrange(const std::vector<DataType>& in_types,
                                 bool* need_rewrite, int* resource_input_count,
                                 std::vector<int>* index_mapping) {
   int first_resource_index = -1;
-  for (int i = 0; i < in_types.size(); i++) {
+  for (int i = 0, end = in_types.size(); i < end; i++) {
     DataType type = in_types[i];
     if (type == DT_RESOURCE) {
      first_resource_index = i;
@@ -70,7 +70,7 @@ Status InputTypesNeedsRearrange(const std::vector<DataType>& in_types,
   }
 
   *need_rewrite = false;
-  for (int i = first_resource_index + 1; i < in_types.size(); i++) {
+  for (int i = first_resource_index + 1, end = in_types.size(); i < end; i++) {
     if (in_types[i] != DT_RESOURCE) {
       *need_rewrite = true;
       break;
@@ -81,7 +81,7 @@ Status InputTypesNeedsRearrange(const std::vector<DataType>& in_types,
   }
 
   *resource_input_count = 0;
-  for (int i = 0; i < in_types.size(); i++) {
+  for (int i = 0, end = in_types.size(); i < end; i++) {
     DataType type = in_types[i];
     if (type == DT_RESOURCE) {
       ++(*resource_input_count);
@@ -90,7 +90,7 @@ Status InputTypesNeedsRearrange(const std::vector<DataType>& in_types,
   int non_resource_index = 0,
       resource_index = in_types.size() - *resource_input_count;
   index_mapping->resize(in_types.size());
-  for (int i = 0; i < in_types.size(); i++) {
+  for (int i = 0, end = in_types.size(); i < end; i++) {
     if (in_types[i] != DT_RESOURCE) {
       (*index_mapping)[i] = non_resource_index;
       non_resource_index++;
@@ -146,7 +146,7 @@ Status ReorderOutputEdges(Graph* g, Node* n, int input_count,
     int dst_input = e->dst_input();
     g->RemoveEdge(e);
 
-    if (new_src_output < input_count - resource_input_count) {
+    if (new_src_output < static_cast<int64>(input_count - resource_input_count)) {
      g->AddEdge(n, new_src_output, dst, dst_input);
    } else {
      const Edge* input_edge;
@@ -180,7 +180,7 @@ Status CalculateRetvalRearrange(
     const gtl::InlinedVector<Node*, 4>& ret_nodes,  // non-absl ok
     std::map<int, int>* retval_index_mapping,
     std::map<int, int>* resource_retval_to_arg) {
-  for (int i = 0; i < ret_nodes.size(); i++) {
+  for (int i = 0, end = ret_nodes.size(); i < end; i++) {
     Node* n = ret_nodes[i];
     DataType t;
     TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &t));
@@ -261,7 +261,7 @@ Status RearrangeOutputEdges(Node* n, Graph* g,
 void RearrangeRetvalNodes(
     const gtl::InlinedVector<Node*, 4>& ret_nodes,  // non-absl ok
     Graph* g, const std::map<int, int>& retval_index_mapping) {
-  for (int i = 0; i < ret_nodes.size(); i++) {
+  for (int i = 0, end = ret_nodes.size(); i < end; i++) {
     Node* n = ret_nodes[i];
     auto iter = retval_index_mapping.find(i);
     if (iter == retval_index_mapping.end()) {
@@ -317,7 +317,7 @@ Status MaybeRewriteWhileNode(
     //   lambda resource_var1, resource_var2: [resource_var2, resource_var1],
     //   [resource_var1, resource_var2])
     if (attr_name == "body") {
-      for (int i = 0; i < fbody->ret_nodes.size(); i++) {
+      for (int i = 0, end = fbody->ret_nodes.size(); i < end; i++) {
        Node* n = fbody->ret_nodes[i];
        DataType dtype;
        TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
@@ -349,7 +349,7 @@ Status MaybeRewriteWhileNode(
 
     RearrangeArgNodes(&fbody->arg_nodes, index_mapping);
     if (attr_name == "body") {
-      for (int i = 0; i < fbody->ret_nodes.size(); i++) {
+      for (int i = 0, end = fbody->ret_nodes.size(); i < end; i++) {
        Node* n = fbody->ret_nodes[i];
        int new_index = index_mapping.at(i);
        if (new_index < types.size() - resource_input_count) {
@@ -87,7 +87,7 @@ Status ConvertGraphToXla(std::unique_ptr<Graph> graph,
   *computation = std::move(*result.computation);
 
   int num_const_results = 0;
-  for (int i = 0, iter_limit = result.outputs.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = result.outputs.size(); i < end; ++i) {
     // Ending up with const results (i.e. output args) is an error, since it
     // means that one or more fetches that the user specified will be dropped
     // from the generated function. It's most likely a configuration error,
@@ -143,7 +143,7 @@ Status ReplaceArgUsageWithConstNode(
     usages.push_back({e->dst()->id(), e->dst_input()});
   }
 
-  for (int i = 0; i < usages.size(); i++) {
+  for (int i = 0, end = usages.size(); i < end; i++) {
     // Make a copy of `usage_node`, and change its input to const node.
     Node* usage_node = g->FindNodeId(usages[i].dst_node_id);
     NodeDef replace_def = usage_node->def();
@@ -158,7 +158,7 @@ Status ReplaceArgUsageWithConstNode(
 
     // Later entries in `usages` might have `usage_node` as dst node, but
     // `usage_node` is removed. Replace such entries with `replace_node`.
-    for (int j = i + 1; j < usages.size(); j++) {
+    for (int j = i + 1, end = usages.size(); j < end; j++) {
      if (usages[j].dst_node_id == usages[i].dst_node_id) {
        usages[j].dst_node_id = replace_node->id();
      }
@@ -64,7 +64,7 @@ Status CheckSignature(const DataTypeVector& types,
     return errors::Internal("Compilation arguments have ", args.size(),
                             " elements while function has ", types.size());
   }
-  for (int i = 0, iter_limit = types.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = types.size(); i < end; ++i) {
     // Don't perform type checks on resource variables and tensor
     // lists (DT_VARIANT) as we have to trick the type system in order to
     // plumb them through. DT_VARIANTS are wrapped in a DT_UINT8 tensor.
@@ -192,7 +192,7 @@ Status BuildComputation(
   // replicate sharding is used. The first element is the output index, second
   // element is the sharding.
   std::unordered_map<int, xla::OpSharding> retval_index_and_sharding;
-  for (int i = 0, iter_limit = retvals.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = retvals.size(); i < end; ++i) {
     XlaCompiler::OutputDescription& output = (*outputs)[i];
     const XlaExpression& retval = retvals[i];
     output.type = retval.dtype();
@@ -362,7 +362,7 @@ Status BuildComputation(
     xla::Shape shape = xla::ShapeUtil::MakeTupleShape(elem_shapes);
     // Copy specified sharding from retval_index_and_sharding.
     std::vector<xla::HloSharding> sharding_elems;
-    for (int i = 0, iter_limit = elems.size(); i < iter_limit; i++) {
+    for (int i = 0, end = elems.size(); i < end; i++) {
       const auto& iter = retval_index_and_sharding.find(i);
       TF_RET_CHECK(iter != retval_index_and_sharding.end());
       const xla::OpSharding& sub_op_sharding = iter->second;
@@ -707,7 +707,7 @@ Status XlaCompiler::CompileFunction(
   // Set shapes for _Arg nodes. They are useful for constant folding (e.g. an
   // Xla op requires a compile-time constant input, and that input is shape of
   // an _Arg node.
-  for (int i = 0, iter_limit = args.size(); i < iter_limit; i++) {
+  for (int i = 0, end = args.size(); i < end; i++) {
     // Skip resource variables and tensor lists.
     DataType dtype;
     TF_RETURN_IF_ERROR(GetNodeAttr(fbody->arg_nodes[i]->def(), "T", &dtype));
@@ -949,7 +949,7 @@ Status XlaCompiler::BuildArguments(
   // to the d'th XLA input. Note that the value -1 corresponds to constants, or
   // other args that don't correspond to an input.
   std::vector<int> arg_to_inputs(args.size(), -1);
-  for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; i++) {
+  for (int i = 0, end = input_to_args->size(); i < end; i++) {
     arg_to_inputs[input_to_args->at(i)] = i;
   }
 
@@ -995,7 +995,7 @@ Status XlaCompiler::BuildArguments(
                                   : it->second;
     }
     std::vector<bool> is_same_across_replicas;
-    for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
+    for (int i = 0, end = input_to_args->size(); i < end; ++i) {
       // Add an entry to is_same_across_replicas for every leaf buffer.
      is_same_across_replicas.insert(
          is_same_across_replicas.end(),
@@ -1011,7 +1011,7 @@ Status XlaCompiler::BuildArguments(
       tuple = xla::Parameter(builder, 0, (*input_shapes)[0], "arg_tuple");
     }
 
-    for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
+    for (int i = 0, end = input_to_args->size(); i < end; ++i) {
       const XlaCompiler::Argument& arg = args[input_to_args->at(i)];
       for (const auto& dim_and_arg_num : arg.dynamic_dim_to_arg_num_map) {
        int dynamic_size_param_index = arg_to_inputs.at(dim_and_arg_num.second);
@@ -1030,6 +1030,11 @@ Status XlaCompiler::BuildArguments(
       xla::XlaScopedShardingAssignment assign_sharding(
           builder, it == arg_shardings.end() ? absl::optional<xla::OpSharding>()
                                              : it->second);
+      auto& arg = args[input_to_args->at(i)];
+
+      xla::OpMetadata arg_metadata;
+      arg_metadata.set_op_name(arg.node_name);
+      builder->SetOneShotOpMetadata(arg_metadata);
       arg_handles[i] = xla::GetTupleElement(tuple, i);
     }
   } else {
@@ -1052,7 +1057,7 @@ Status XlaCompiler::BuildArguments(
       }
     }
 
-    for (int i = 0, iter_limit = input_to_args->size(); i < iter_limit; ++i) {
+    for (int i = 0, end = input_to_args->size(); i < end; ++i) {
       const XlaCompiler::Argument& arg = args[input_to_args->at(i)];
       for (const auto& dim_and_arg_num : arg.dynamic_dim_to_arg_num_map) {
        int dynamic_size_param_index = arg_to_inputs.at(dim_and_arg_num.second);
@@ -1373,7 +1378,7 @@ void SetTransfer(const string& key, absl::Span<const DataType> types,
                  tf2xla::HostTransferMetadata* transfer) {
   transfer->set_key(key);
   CHECK(types.size() == shapes.size());
-  for (int i = 0, iter_limit = types.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = types.size(); i < end; ++i) {
     tf2xla::TensorMetadata* metadata = transfer->add_metadata();
     metadata->set_type(types[i]);
     shapes[i].AsProto(metadata->mutable_shape());