From cb3cf00aade8e747783b57debe324d6bc00b77b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tar=C3=A9=20Gaskin?= <taregaskin@google.com>
Date: Sun, 26 Jul 2020 20:42:00 +0000
Subject: [PATCH 1/7] Resolve signed/unsigned comparison warnings in c/, cc/, and compiler/jit

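These changes address loops and bounds checks that compare a signed index
against the unsigned result of `size()`. The fix hoists the container size
into a signed bound computed once (or into a named signed local used for the
check), so the comparison is int-to-int. A minimal sketch of the pattern,
using a hypothetical `items` vector:

    #include <vector>

    void Process(const std::vector<int>& items) {
      // Before: `for (int i = 0; i < items.size(); ++i)` compares a signed
      // int against an unsigned size_t and triggers -Wsign-compare.
      // After: cache the size in a signed bound once, then compare int to int.
      for (int i = 0, end = items.size(); i < end; ++i) {
        // ... use items[i] ...
      }
    }
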
---
 tensorflow/c/eager/tape.h                     | 10 +++---
 tensorflow/cc/framework/gradients.cc          |  7 ++--
 tensorflow/cc/framework/while_gradients.cc    |  2 +-
 tensorflow/compiler/aot/codegen.cc            | 16 +++++----
 tensorflow/compiler/jit/build_xla_ops_pass.cc |  2 +-
 .../compiler/jit/compilability_check_util.h   |  2 +-
 tensorflow/compiler/jit/device_util.cc        |  6 ++--
 tensorflow/compiler/jit/device_util.h         |  2 +-
 .../jit/encapsulate_subgraphs_pass.cc         |  6 ++--
 tensorflow/compiler/jit/encapsulate_util.cc   |  8 ++---
 .../jit/encapsulate_xla_computations_pass.cc  |  4 +--
 .../jit/extract_outside_compilation_pass.cc   | 33 ++++++++++---------
 .../compiler/jit/graphcycles/graphcycles.cc   |  2 +-
 .../increase_dynamism_for_auto_jit_pass.cc    |  2 +-
 tensorflow/compiler/jit/shape_inference.cc    |  2 +-
 tensorflow/compiler/jit/xla_cluster_util.cc   |  2 +-
 .../compiler/jit/xla_compilation_cache.cc     | 12 +++----
 tensorflow/compiler/jit/xla_launch_util.cc    |  8 ++---
 18 files changed, 67 insertions(+), 59 deletions(-)

diff --git a/tensorflow/c/eager/tape.h b/tensorflow/c/eager/tape.h
index 40cfa87dd66..f52a5e32c1a 100644
--- a/tensorflow/c/eager/tape.h
+++ b/tensorflow/c/eager/tape.h
@@ -573,7 +573,7 @@ Status InitialGradients(
     gtl::ArraySlice<Gradient*> output_gradients, const TensorTape& tensor_tape,
     const OpTape<BackwardFunction, TapeTensor>& op_tape,
     std::unordered_map<int64, std::vector<Gradient*>>* result) {
-  for (int i = 0; i < target_tensor_ids.size(); ++i) {
+  for (int i = 0, end = target_tensor_ids.size(); i < end; ++i) {
     const int64 id = target_tensor_ids[i];
     if (output_gradients.empty() || output_gradients[i] == nullptr) {
       auto tensor_it = tensor_tape.find(id);
@@ -699,7 +699,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
     std::vector<Gradient*> out_gradients;
     out_gradients.reserve(trace.output_tensor_info.size());
     std::vector<int64> unneeded_gradients;
-    for (int i = 0; i < trace.input_tensor_id.size(); i++) {
+    for (int i = 0, end = trace.input_tensor_id.size(); i < end; i++) {
       const auto& in_tensor_id = trace.input_tensor_id[i];
       if (tensor_tape_.find(in_tensor_id) == tensor_tape_.end() &&
           sources_set.find(in_tensor_id) == sources_set.end()) {
@@ -709,7 +709,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
 
     bool any_gradient_nonzero = false;
     std::vector<int> zero_indices;
-    for (int i = 0; i < trace.output_tensor_info.size(); ++i) {
+    for (int i = 0, end = trace.output_tensor_info.size(); i < end; ++i) {
       const int64 id = trace.output_tensor_info[i].GetID();
       auto grad_it = gradients.find(id);
       if (grad_it == gradients.end()) {
@@ -775,7 +775,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
     }
     VLOG(1) << "Got " << in_gradients.size() << " in_gradients for "
             << trace.input_tensor_id.size() << " sources";
-    for (int i = 0; i < in_gradients.size(); ++i) {
+    for (int i = 0, end = in_gradients.size(); i < end; ++i) {
       const int64 id = trace.input_tensor_id[i];
       if (in_gradients[i] != nullptr) {
         auto& unaggregated_grads = gradients[id];
@@ -968,7 +968,7 @@ ForwardAccumulator<Gradient, BackwardFunction, TapeTensor>::ForwardpropFromTape(
   targets.reserve(grad.size());
   used_in_grads.reserve(grad.size());
   std::unordered_map<int64, TapeTensor> sources_that_are_targets;
-  for (int grad_index = 0; grad_index < grad.size(); ++grad_index) {
+  for (int grad_index = 0, end = grad.size(); grad_index < end; ++grad_index) {
     Gradient* grad_tensor = grad[grad_index];
     if (grad_tensor != nullptr) {
       int64 tensor_id = vspace_.TensorId(grad_tensor);
diff --git a/tensorflow/cc/framework/gradients.cc b/tensorflow/cc/framework/gradients.cc
index 88cd3fe79d6..4229c356eff 100644
--- a/tensorflow/cc/framework/gradients.cc
+++ b/tensorflow/cc/framework/gradients.cc
@@ -425,7 +425,7 @@ Status SymbolicGradientBuilder::ProcessWhileLoop(Node* exit_node,
   // Backprop along the in edges to the while loop (i.e. the inputs to the enter
   // nodes)
   DCHECK_EQ(dx.size(), while_ctx->enter_nodes().size());
-  for (int i = 0; i < dx.size(); ++i) {
+  for (int i = 0, end = dx.size(); i < end; ++i) {
     Node* enter_node = while_ctx->enter_nodes()[i];
     for (const Edge* e : enter_node->in_edges()) {
       if (e->IsControlEdge()) continue;
@@ -489,7 +489,7 @@ Status SymbolicGradientBuilder::AddGradients() {
     // All loop-specific control flow ops should have been handled above
     DCHECK(!n->IsEnter() && !n->IsNextIteration()) << n->DebugString();
 
-    const size_t num_no_grad = no_grad_dy_indices.size();
+    const int num_no_grad = no_grad_dy_indices.size();
     if (IsPrimitiveOpWithNoGrad(n->type_string()) || num_no_grad == num_y) {
       // No grad defined for this op, or all outputs returned 'NoGradient':
       // Backprop 'NoGradient' along the in edges.
@@ -525,7 +525,8 @@ Status SymbolicGradientBuilder::AddGradients() {
     for (const Edge* e : n->in_edges()) {
       if (e->IsControlEdge()) continue;
       int dx_index = e->dst_input();
-      if (dx_index >= dx.size()) {
+      const int dx_size = dx.size();
+      if (dx_index >= dx_size) {
         return errors::Internal(
             "Invalid gradient output index: ", dx_index, " size: ", dx.size());
       }
diff --git a/tensorflow/cc/framework/while_gradients.cc b/tensorflow/cc/framework/while_gradients.cc
index 81870a0efa3..e241cfaebe9 100644
--- a/tensorflow/cc/framework/while_gradients.cc
+++ b/tensorflow/cc/framework/while_gradients.cc
@@ -34,7 +34,7 @@ Output ToOutput(OutputTensor output_tensor) {
 
 std::vector<Output> ToOutputVector(
     const std::vector<OutputTensor>& output_tensors) {
-  size_t n = output_tensors.size();
+  const int n = output_tensors.size();
   std::vector<Output> result;
   result.reserve(n);
   for (int i = 0; i < n; ++i) result.push_back(ToOutput(output_tensors[i]));
diff --git a/tensorflow/compiler/aot/codegen.cc b/tensorflow/compiler/aot/codegen.cc
index e4df3090046..625e7c3532a 100644
--- a/tensorflow/compiler/aot/codegen.cc
+++ b/tensorflow/compiler/aot/codegen.cc
@@ -172,7 +172,7 @@ string RewriteWithName(const string& name, string code,
 Status GenArgMethods(const tf2xla::Config& config,
                      const xla::ProgramShapeProto& ps,
                      const CompileResult& compile_result, string* methods) {
-  size_t num_args = ps.parameters_size();
+  const int num_args = ps.parameters_size();
   // feed_size() + variable_size() is the maximum number of args as an
   // implementation may not create an argument for an unused variable.
   if (config.feed_size() + config.variable_size() < num_args) {
@@ -229,8 +229,9 @@ Status GenResultMethods(const tf2xla::Config& config,
   int readonly_variables = absl::c_count_if(
       config.variable(),
       [](const tf2xla::Variable& var) { return var.readonly(); });
-  if (config.fetch_size() + config.variable_size() - readonly_variables !=
-      num_results) {
+  const int actual_num_results = config.fetch_size()
+                                 + config.variable_size() - readonly_variables;
+  if (actual_num_results != num_results) {
     return errors::InvalidArgument("mismatch between fetch_size(",
                                    config.fetch_size(), ")+variable_size(",
                                    config.variable_size(), ") and tuple_size(",
@@ -273,7 +274,7 @@ Status GenResultMethods(const tf2xla::Config& config,
 // Generate methods for variables.
 Status GenVariableMethods(const tf2xla::Config& config,
                           const xla::ProgramShapeProto& ps, string* methods) {
-  size_t num_args = ps.parameters_size();
+  const int num_args = ps.parameters_size();
   for (int i = config.feed_size(); i < num_args; ++i) {
     std::vector<std::pair<string, string>> rewrites;
     TF_RETURN_IF_ERROR(
@@ -401,7 +402,8 @@ Status GenerateHeader(const CodegenOpts& opts, const tf2xla::Config& config,
       ::xla::cpu::CreateArgIndexTableFromBufferInfos(buffer_infos);
   std::vector<string> buffer_infos_as_strings =
       BufferInfosToCppExpression(buffer_infos);
-  if (result_index < 0 || result_index >= buffer_infos.size()) {
+  const int64 buffer_infos_size = buffer_infos.size();
+  if (result_index < 0 || result_index >= buffer_infos_size) {
     return errors::InvalidArgument("result index: ", result_index,
                                    " is outside the range of temp sizes: [0,",
                                    buffer_infos.size(), ")");
@@ -797,8 +799,8 @@ Status ParseCppClass(const string& cpp_class, string* class_name,
     // Allow a fully qualified name that starts with "::".
     parts.erase(parts.begin());
   }
-  for (int i = 0; i < parts.size(); ++i) {
-    if (i < parts.size() - 1) {
+  for (int i = 0, end = parts.size(); i < end; ++i) {
+    if (i < end - 1) {
       TF_RETURN_IF_ERROR(ValidateCppIdent(
           parts[i], "in namespace component of cpp_class: " + cpp_class));
       namespaces->push_back(parts[i]);
diff --git a/tensorflow/compiler/jit/build_xla_ops_pass.cc b/tensorflow/compiler/jit/build_xla_ops_pass.cc
index 5a57008cf61..d6f50532f62 100644
--- a/tensorflow/compiler/jit/build_xla_ops_pass.cc
+++ b/tensorflow/compiler/jit/build_xla_ops_pass.cc
@@ -452,7 +452,7 @@ Status PredicateInt32Inputs(const Scope& root, Node* n,
   root.graph()->AddControlEdge(predicate_as_control.node(),
                                identity_n.operation.node());
 
-  for (int i = 0; i < int32_inputs.size(); i++) {
+  for (int32 i = 0, end = int32_inputs.size(); i < end; i++) {
     TF_RETURN_IF_ERROR(root.graph()->UpdateEdge(identity_n[i].node(), i, n,
                                                 int32_inputs_input_idxs[i]));
   }
diff --git a/tensorflow/compiler/jit/compilability_check_util.h b/tensorflow/compiler/jit/compilability_check_util.h
index a21cb6b98dd..3b20784cc29 100644
--- a/tensorflow/compiler/jit/compilability_check_util.h
+++ b/tensorflow/compiler/jit/compilability_check_util.h
@@ -257,7 +257,7 @@ class RecursiveCompilabilityChecker {
       UncompilableNodesMap* uncompilable_nodes_map);
 
   // Make sure we don't recurse infinitely on recursive functions.
-  const int kMaxRecursionDepth = 10;
+  const size_t kMaxRecursionDepth = 10;
 
   const OperationFilter& op_filter_;
   const DeviceType& jit_device_type_;
diff --git a/tensorflow/compiler/jit/device_util.cc b/tensorflow/compiler/jit/device_util.cc
index 375d30c4cf3..d8749baf872 100644
--- a/tensorflow/compiler/jit/device_util.cc
+++ b/tensorflow/compiler/jit/device_util.cc
@@ -26,8 +26,8 @@ using xla::StatusOr;
 void DeviceSet::Insert(DeviceId device_id) {
   int word_index = device_id.id() / kWordSize;
   int bit_index = device_id.id() % kWordSize;
-
-  if (word_index >= storage_.size()) {
+  const int storage_size = storage_.size();
+  if (word_index >= storage_size) {
     storage_.resize(word_index + 1, 0);
   }
 
@@ -39,7 +39,7 @@ void DeviceSet::UnionWith(const DeviceSet& other) {
     storage_.resize(other.storage_.size(), 0);
   }
 
-  for (int i = 0; i < other.storage_.size(); i++) {
+  for (int i = 0, end = other.storage_.size(); i < end; i++) {
     storage_[i] |= other.storage_[i];
   }
 }
diff --git a/tensorflow/compiler/jit/device_util.h b/tensorflow/compiler/jit/device_util.h
index 35f3321b47b..33fb587c8ad 100644
--- a/tensorflow/compiler/jit/device_util.h
+++ b/tensorflow/compiler/jit/device_util.h
@@ -72,7 +72,7 @@ class DeviceSet {
   void ForEach(FnTy func) const {
     // This is really a poor man's iterator, we should consider writing a proper
     // iterator if this ends up being used widely.
-    for (int word_index = 0; word_index < storage_.size(); word_index++) {
+    for (int word_index = 0, end = storage_.size(); word_index < end; word_index++) {
       uint64 word = storage_[word_index];
       while (word != 0) {
         uint64 only_lowest_bit_set = word & -word;
diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
index 435c2ec5f7f..8230cde8660 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
@@ -1132,7 +1132,8 @@ static Status GetArgTypes(const Graph& graph, DataTypeVector* types) {
     if (n->type_string() == kArgOp) {
       int index;
       TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
-      if (index < 0 || index >= types->size()) {
+      const int types_size = types->size(); 
+      if (index < 0 || index >= types_size) {
         return errors::InvalidArgument("Invalid argument number");
       }
       (*types)[index] = n->output_type(0);
@@ -1149,7 +1150,8 @@ static Status RenumberArguments(Graph* graph,
     if (n->type_string() == kArgOp) {
       int index;
       TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
-      if (index < 0 || index >= permutation.size()) {
+      const int permutation_size = permutation.size();
+      if (index < 0 || index >= permutation_size) {
         return errors::InvalidArgument("Invalid argument number");
       }
       n->AddAttr("index", permutation[index]);
diff --git a/tensorflow/compiler/jit/encapsulate_util.cc b/tensorflow/compiler/jit/encapsulate_util.cc
index 5325f6faa31..12afee70716 100644
--- a/tensorflow/compiler/jit/encapsulate_util.cc
+++ b/tensorflow/compiler/jit/encapsulate_util.cc
@@ -139,7 +139,7 @@ Status PreprocessDataEdgesBetweenOutsideCompilations(
   // Remove the edge from host to outside compilation. Add a placeholder as
   // outside compilation node input.
   std::map<std::pair<string, int>, Node*> placeholders;
-  for (int i = 0; i < edges.size(); i++) {
+  for (int i = 0, end = edges.size(); i < end; i++) {
     Node* dst = g->FindNodeId(edges[i].dst_node_id);
     const Edge* e;
     TF_RETURN_IF_ERROR(dst->input_edge(edges[i].dst_input, &e));
@@ -185,7 +185,7 @@ Status PreprocessDataEdgesBetweenOutsideCompilations(
     // Other edge in `edges` might have `e->dst()` as src or dst
     // node. Before removing `e->dst()`, replace those edges with
     // corresponding edges for `dst_replace_node`.
-    for (int j = i + 1; j < edges.size(); j++) {
+    for (int j = i + 1, end = edges.size(); j < end; j++) {
       if (edges[j].dst_node_id == edges[i].dst_node_id) {
         edges[j].dst_node_id = dst_replace_node->id();
       }
@@ -238,7 +238,7 @@ Status PostprocessDataEdgesBetweenOutsideCompilations(
       g->AddControlEdge(original_node, e->dst());
       g->RemoveEdge(e);
     }
-    for (int i = 0; i < data_edges.size(); i++) {
+    for (int i = 0, end = data_edges.size(); i < end; i++) {
       Node* dst = data_edges[i].dst;
       NodeDef new_def = dst->def();
       int dst_input = data_edges[i].dst_input;
@@ -253,7 +253,7 @@ Status PostprocessDataEdgesBetweenOutsideCompilations(
 
       // Other edges might have `dst` as dst node. Update those edges with
       // `replace_node`.
-      for (int j = i + 1; j < data_edges.size(); j++) {
+      for (int j = i + 1, end = data_edges.size(); j < end; j++) {
         if (data_edges[j].dst == dst) {
           data_edges[j].dst = replace_node;
         }
diff --git a/tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc b/tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc
index 2b7a6c83b8b..ed25baa62ff 100644
--- a/tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc
+++ b/tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc
@@ -351,14 +351,14 @@ Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors,
     if (!status.ok()) {
       return status;
     }
-    for (int i = 0; i < data_inputs.size(); ++i) {
+    for (int i = 0, end = data_inputs.size(); i < end; ++i) {
       graph->AddEdge(data_inputs[i].first, data_inputs[i].second, xla_launch,
                      i);
     }
     for (Node* n : control_inputs) {
       graph->AddControlEdge(n, xla_launch);
     }
-    for (int i = 0; i < data_outputs.size(); ++i) {
+    for (int i = 0, end = data_outputs.size(); i < end; ++i) {
       for (const auto& successor : data_outputs[i]) {
         graph->AddEdge(xla_launch, i, successor.first, successor.second);
       }
diff --git a/tensorflow/compiler/jit/extract_outside_compilation_pass.cc b/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
index 5f1c3d536a8..4a2b6136d53 100644
--- a/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
+++ b/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
@@ -95,7 +95,7 @@ Status GetArgDataTypes(const std::vector<Node*>& arg_nodes,
     TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype));
     (*recv_at_host_dtypes)[index] = dtype;
   }
-  for (int i = 0; i < recv_at_host_dtypes->size(); i++) {
+  for (int i = 0, end = recv_at_host_dtypes->size(); i < end; i++) {
     if ((*recv_at_host_dtypes)[i] == DT_INVALID) {
       return errors::Internal("Cannot get datatype for input ", i);
     }
@@ -160,7 +160,7 @@ xla::StatusOr<Node*> ReplaceArgNodesWithRecvAtHostNode(
     }
 
     // Rewrite dst nodes because their input changed.
-    for (int i = 0; i < out_edge_info.size(); i++) {
+    for (int i = 0, end = out_edge_info.size(); i < end; i++) {
       const OutEdgeInfo edge = out_edge_info[i];
       if (edge.dst_input == Graph::kControlSlot) {
         continue;
@@ -174,7 +174,7 @@ xla::StatusOr<Node*> ReplaceArgNodesWithRecvAtHostNode(
 
       // Other edges might have `dst` as dst node as well. Update those edges
       // with `dst_replace`.
-      for (int j = i + 1; j < out_edge_info.size(); j++) {
+      for (int j = i + 1, end = out_edge_info.size(); j < end; j++) {
         if (out_edge_info[j].dst == dst) {
           out_edge_info[j].dst = dst_replace;
         }
@@ -196,7 +196,7 @@ Status GetRetDataTypes(const std::vector<Node*>& ret_nodes,
     TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype));
     (*send_from_host_dtypes)[index] = dtype;
   }
-  for (int i = 0; i < send_from_host_dtypes->size(); i++) {
+  for (int i = 0, end = send_from_host_dtypes->size(); i < end; i++) {
     if ((*send_from_host_dtypes)[i] == DT_INVALID) {
       return errors::Internal("Cannot get datatype for output ", i);
     }
@@ -226,7 +226,8 @@ xla::StatusOr<Node*> BuildSendFromHostNode(
   for (auto* n : ret_nodes) {
     int index;
     TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
-    if (index < 0 || index >= send_from_host_dtypes.size()) {
+    const int send_from_host_dtypes_size = send_from_host_dtypes.size();
+    if (index < 0 || index >= send_from_host_dtypes_size) {
       return errors::Internal("Invalid _Retval index: ", index);
     }
     for (auto edge : n->in_edges()) {
@@ -360,8 +361,9 @@ xla::StatusOr<NodeDef> BuildXlaHostComputeNodeDef(
     if (e->IsControlEdge()) {
       continue;
     }
-
-    if (e->dst_input() < 0 || e->dst_input() >= input_dtypes.size()) {
+    
+    const int input_dtypes_size = input_dtypes.size();
+    if (e->dst_input() < 0 || e->dst_input() >= input_dtypes_size) {
       return errors::Internal("Invalid dst_input: ", e->dst_input());
     }
     inputs[e->dst_input()] = NodeDefBuilder::NodeOut{
@@ -500,7 +502,7 @@ void AddEdgesFromOutsideCompilationNodes(
     const std::vector<DataType>& data_types,
     const std::vector<Node*>& outside_compilation_nodes, Graph* g, Node* n) {
   // Add edges from outside compilation nodes to While node.
-  for (int i = original_arg_count; i < data_types.size(); i++) {
+  for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
     Node* outside_compilation_node =
         outside_compilation_nodes[i - original_arg_count];
     g->AddEdge(outside_compilation_node, 0, n, i + arg_to_input_edge_offset);
@@ -619,7 +621,7 @@ Status PostprocessLiftedArgsForWhile(
       lifted_arg_nodes_and_outside_compilation_nodes.end(),
       std::back_inserter(lifted_arg_nodes),
       [](const std::pair<Node*, Node*>& pair) { return pair.first; });
-  for (int i = original_arg_count; i < data_types.size(); i++) {
+  for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
     TF_ASSIGN_OR_RETURN(Node * arg_node,
                         AddOutsideCompilationInputArgToFunctionBody(
                             *body_function_body, i, data_types[i]));
@@ -648,7 +650,7 @@ Status PostprocessLiftedArgsForWhile(
                                              AttrSlice(&cond_func.attr()), fld,
                                              &cond_function_body));
 
-  for (int i = original_arg_count; i < data_types.size(); i++) {
+  for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
     xla::StatusOr<Node*> arg_node_or =
         AddOutsideCompilationInputArgToFunctionBody(*cond_function_body, i,
                                                     data_types[i]);
@@ -759,7 +761,7 @@ Status PostprocessLiftedArgsForIf(
                                       data_types, outside_compilation_nodes, g,
                                       n);
 
-  for (int i = original_arg_count; i < data_types.size(); ++i) {
+  for (int i = original_arg_count, end = data_types.size(); i < end; ++i) {
     TF_ASSIGN_OR_RETURN(Node * then_branch_arg_node,
                         AddOutsideCompilationInputArgToFunctionBody(
                             *then_branch_function_body, i, data_types[i]));
@@ -837,7 +839,7 @@ Status PostprocessLiftedArgsForCall(
       lifted_arg_nodes_and_outside_compilation_nodes.end(),
       std::back_inserter(lifted_arg_nodes),
       [](const std::pair<Node*, Node*>& pair) { return pair.first; });
-  for (int i = original_arg_count; i < data_types.size(); ++i) {
+  for (int i = original_arg_count, end = data_types.size(); i < end; ++i) {
     TF_ASSIGN_OR_RETURN(
         Node * arg_node,
         AddOutsideCompilationInputArgToFunctionBody(*fbody, i, data_types[i]));
@@ -855,7 +857,7 @@ Status PostprocessLiftedArgsForCall(
   // We need to recreate the node. Otherwise TF will not know n->num_inputs()
   // has increased.
   NodeDef node_def = n->def();
-  for (int i = original_arg_count; i < data_types.size(); i++) {
+  for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
     Node* outside_compilation_node =
         lifted_arg_nodes_and_outside_compilation_nodes[i - original_arg_count]
             .second;
@@ -1803,8 +1805,9 @@ TF_ATTRIBUTE_NOINLINE Status ExtractOutsideCompilationForFuncCallNode(
     if (e->IsControlEdge()) {
       continue;
     }
-
-    TF_RET_CHECK(e->dst_input() >= 0 && e->dst_input() < inputs.size());
+    
+    const int input_size_check = e->dst_input() < inputs.size(); 
+    TF_RET_CHECK(e->dst_input() >= 0 && input_size_check);
     inputs[e->dst_input()] =
         NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),
                                 e->src()->output_type(e->src_output())};
diff --git a/tensorflow/compiler/jit/graphcycles/graphcycles.cc b/tensorflow/compiler/jit/graphcycles/graphcycles.cc
index 6c5e3a745e2..416e101a025 100644
--- a/tensorflow/compiler/jit/graphcycles/graphcycles.cc
+++ b/tensorflow/compiler/jit/graphcycles/graphcycles.cc
@@ -461,7 +461,7 @@ string GraphCycles::DebugString() const {
   }
 
   string result = "digraph {\n";
-  for (int i = 0; i < rep_->nodes_.size(); i++) {
+  for (int i = 0, end = rep_->nodes_.size(); i < end; i++) {
     if (free_nodes_set.contains(i)) {
       continue;
     }
diff --git a/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc b/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc
index 23931a0d7cd..bf9d88b73fa 100644
--- a/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc
+++ b/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc
@@ -194,7 +194,7 @@ Status ComputeSliceSize(const Scope& host_scope,
   ConstantCache constant_pool(host_scope, control_deps);
 
   std::vector<Output> slice_size;
-  for (int i = 0; i < slice_inputs.size_as_vector.size(); i++) {
+  for (int i = 0, end = slice_inputs.size_as_vector.size(); i < end; i++) {
     if (slice_inputs.size_as_vector[i] >= 0) {
       slice_size.push_back(
           constant_pool.Get1DHostConstant(slice_inputs.size_as_vector[i]));
diff --git a/tensorflow/compiler/jit/shape_inference.cc b/tensorflow/compiler/jit/shape_inference.cc
index 72804ff57e4..7f585e70ec4 100644
--- a/tensorflow/compiler/jit/shape_inference.cc
+++ b/tensorflow/compiler/jit/shape_inference.cc
@@ -36,7 +36,7 @@ Status ShapeHandleToTensorShape(shape_inference::InferenceContext* context,
   if (!context->RankKnown(handle)) return Status::OK();
 
   std::vector<int64> dims(context->Rank(handle));
-  for (int32 i = 0; i < dims.size(); ++i) {
+  for (int32 i = 0, end = dims.size(); i < end; ++i) {
     dims[i] = context->Value(context->Dim(handle, i));
   }
   return PartialTensorShape::MakePartialShape(dims.data(), dims.size(), shape);
diff --git a/tensorflow/compiler/jit/xla_cluster_util.cc b/tensorflow/compiler/jit/xla_cluster_util.cc
index b8b11d2c7cd..38c23b7fa25 100644
--- a/tensorflow/compiler/jit/xla_cluster_util.cc
+++ b/tensorflow/compiler/jit/xla_cluster_util.cc
@@ -489,7 +489,7 @@ Status GetNodesRelatedToRefVariablesInDirection(
                  /*stable_comparator=*/NodeComparatorName());
   }
 
-  int old_result_size;
+  size_t old_result_size;
   int iterations = 0;
 
   const int kMaxIterations = 10 * 1000;
diff --git a/tensorflow/compiler/jit/xla_compilation_cache.cc b/tensorflow/compiler/jit/xla_compilation_cache.cc
index 62b0c0ab4cf..b1525337dbc 100644
--- a/tensorflow/compiler/jit/xla_compilation_cache.cc
+++ b/tensorflow/compiler/jit/xla_compilation_cache.cc
@@ -97,7 +97,7 @@ bool XlaCompilationCache::Signature::operator==(const Signature& other) const {
   if (arg_shapes != other.arg_shapes) return false;
 
   if (arg_values.size() != other.arg_values.size()) return false;
-  for (int i = 0; i < arg_values.size(); ++i) {
+  for (int i = 0, end = arg_values.size(); i < end; ++i) {
     if (arg_values[i].dtype() != other.arg_values[i].dtype() ||
         arg_values[i].shape() != other.arg_values[i].shape() ||
         arg_values[i].tensor_data() != other.arg_values[i].tensor_data()) {
@@ -158,7 +158,7 @@ Status XlaCompilationCache::BuildExecutable(
 
   std::vector<const xla::Shape*> argument_layouts(
       result.xla_input_shapes.size());
-  for (int i = 0; i < result.xla_input_shapes.size(); ++i) {
+  for (int i = 0, end = result.xla_input_shapes.size(); i < end; ++i) {
     argument_layouts[i] = &result.xla_input_shapes[i];
   }
   xla::ExecutableBuildOptions build_options;
@@ -224,7 +224,7 @@ static xla::StatusOr<std::unique_ptr<Graph>> CreateGraph(
 
   // Create dummy _Arg nodes. Link these to `node` and also via a control
   // dependency edge to the _SOURCE node.
-  for (int64 i = 0; i < args.size(); ++i) {
+  for (int64 i = 0, end = args.size(); i < end; ++i) {
     Node* node;
     string arg_name = absl::StrCat("_arg", i);
     Status status =
@@ -240,7 +240,7 @@ static xla::StatusOr<std::unique_ptr<Graph>> CreateGraph(
   }
 
   // Similarly with return values, create dummy _Retval nodes fed by `node`.
-  for (int64 i = 0; i < result_types.size(); ++i) {
+  for (int64 i = 0, end = result_types.size(); i < end; ++i) {
     Node* node;
     string retval_name = absl::StrCat("_retval", i);
     Status status = NodeBuilder(retval_name, FunctionLibraryDefinition::kRetOp)
@@ -271,7 +271,7 @@ Status XlaCompilationCache::CompileSingleOp(
   auto compile_op = [&](XlaCompiler* compiler,
                         XlaCompiler::CompilationResult* result) {
     std::vector<DataType> result_dtypes(ctx->num_outputs());
-    for (int i = 0; i < result_dtypes.size(); ++i) {
+    for (int i = 0, end = result_dtypes.size(); i < end; ++i) {
       result_dtypes[i] = ctx->expected_output_dtype(i);
     }
 
@@ -330,7 +330,7 @@ Status XlaCompilationCache::CompileImpl(
 
   if (VLOG_IS_ON(2)) {
     VLOG(2) << "num_inputs=" << args.size();
-    for (int i = 0; i < args.size(); i++) {
+    for (int i = 0, end = args.size(); i < end; i++) {
       VLOG(3) << i << ": " << args[i].HumanString();
     }
   }
diff --git a/tensorflow/compiler/jit/xla_launch_util.cc b/tensorflow/compiler/jit/xla_launch_util.cc
index 41abe86df6e..dfa7aca2a9e 100644
--- a/tensorflow/compiler/jit/xla_launch_util.cc
+++ b/tensorflow/compiler/jit/xla_launch_util.cc
@@ -156,7 +156,7 @@ Status SnapshotResourceVariables(OpKernelContext* ctx,
                                  absl::Span<const int> variable_indices,
                                  absl::Span<VariableInfo const> variable_infos,
                                  ResourceVarsSnapshot* result) {
-  for (int i = 0; i < variable_indices.size(); i++) {
+  for (int i = 0, end = variable_indices.size(); i < end; i++) {
     Var* var = variable_infos[i].var();
     (*result)[variable_indices[i]] =
         var ? absl::make_optional(*var->tensor()) : absl::nullopt;
@@ -206,7 +206,7 @@ XlaComputationLaunchContext::PopulateInputs(
 
   xla::TransferManager* transfer_manager =
       client_->backend().transfer_manager();
-  for (int i = 0; i < compilation_result->xla_input_shapes.size(); ++i) {
+  for (int i = 0, end = compilation_result->xla_input_shapes.size(); i < end; ++i) {
     int arg_num = compilation_result->input_mapping[i];
     CHECK_GE(arg_num, missing_ctx_input_prefix);
     const xla::Shape& shape = compilation_result->xla_input_shapes[i];
@@ -466,7 +466,7 @@ Status XlaComputationLaunchContext::PopulateOutputs(
 
   // Copy XLA results to the OpOutputList.
   int output_num = 0;
-  for (int i = 0; i < ctx->num_outputs(); ++i) {
+  for (int i = 0, end = ctx->num_outputs(); i < end; ++i) {
     const TensorShape& shape = output_tensor_shapes[i];
     const DataType& type = compilation_result->outputs[i].type;
     VLOG(2) << "Populating output for retval " << i << " shape "
@@ -514,7 +514,7 @@ Status XlaComputationLaunchContext::PopulateOutputs(
   }
 
   // Apply variable updates, if any.
-  for (int i = 0; i < compilation_result->resource_updates.size(); ++i) {
+  for (int i = 0, end = compilation_result->resource_updates.size(); i < end; ++i) {
     const XlaCompiler::ResourceUpdate& write =
         compilation_result->resource_updates[i];
     int actual_input_index = write.input_index - missing_ctx_input_prefix;

From 95b8235761baa6abc876fc4658488ab57cf64d5e Mon Sep 17 00:00:00 2001
From: tg-at-google <taregaskin@google.com>
Date: Tue, 28 Jul 2020 20:56:10 -0400
Subject: [PATCH 2/7] Update gradients.cc

---
 tensorflow/cc/framework/gradients.cc | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tensorflow/cc/framework/gradients.cc b/tensorflow/cc/framework/gradients.cc
index 4229c356eff..3195a357186 100644
--- a/tensorflow/cc/framework/gradients.cc
+++ b/tensorflow/cc/framework/gradients.cc
@@ -524,9 +524,8 @@ Status SymbolicGradientBuilder::AddGradients() {
     // make this association explicit.
     for (const Edge* e : n->in_edges()) {
       if (e->IsControlEdge()) continue;
-      int dx_index = e->dst_input();
-      const int dx_size = dx.size();
-      if (dx_index >= dx_size) {
+      size_t dx_index = e->dst_input();
+      if (dx_index >= dx.size()) {
         return errors::Internal(
             "Invalid gradient output index: ", dx_index, " size: ", dx.size());
       }

From 1d0c8946535822ad8eb83267b7c77a03141ab619 Mon Sep 17 00:00:00 2001
From: tg-at-google <taregaskin@google.com>
Date: Wed, 29 Jul 2020 16:27:39 -0400
Subject: [PATCH 3/7] Update
 tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc

Co-authored-by: Mihai Maruseac <mihai.maruseac@gmail.com>
---
 tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
index 8230cde8660..e0d0ac57144 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
@@ -1132,8 +1132,8 @@ static Status GetArgTypes(const Graph& graph, DataTypeVector* types) {
     if (n->type_string() == kArgOp) {
       int index;
       TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
-      const int types_size = types->size(); 
-      if (index < 0 || index >= types_size) {
+      const int num_types = types->size(); 
+      if (index < 0 || index >= num_types) {
         return errors::InvalidArgument("Invalid argument number");
       }
       (*types)[index] = n->output_type(0);

From e4a3a4ece71995fb0e7deb15299c8f8b371f0c8c Mon Sep 17 00:00:00 2001
From: tg-at-google <taregaskin@google.com>
Date: Wed, 29 Jul 2020 16:27:47 -0400
Subject: [PATCH 4/7] Update
 tensorflow/compiler/jit/extract_outside_compilation_pass.cc

Co-authored-by: Mihai Maruseac <mihai.maruseac@gmail.com>
---
 tensorflow/compiler/jit/extract_outside_compilation_pass.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tensorflow/compiler/jit/extract_outside_compilation_pass.cc b/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
index 4a2b6136d53..c84fca81420 100644
--- a/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
+++ b/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
@@ -226,8 +226,8 @@ xla::StatusOr<Node*> BuildSendFromHostNode(
   for (auto* n : ret_nodes) {
     int index;
     TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
-    const int send_from_host_dtypes_size = send_from_host_dtypes.size();
-    if (index < 0 || index >= send_from_host_dtypes_size) {
+    const int num_dtypes = send_from_host_dtypes.size();
+    if (index < 0 || index >= num_dtypes) {
       return errors::Internal("Invalid _Retval index: ", index);
     }
     for (auto edge : n->in_edges()) {

From 181df6f3fcb67e1d1dbf8753ec807d106057b2d3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tar=C3=A9=20Gaskin?= <taregaskin@google.com>
Date: Wed, 29 Jul 2020 20:50:51 +0000
Subject: [PATCH 5/7] removal of non-tensorflow/compiler/ changes

---
 tensorflow/c/eager/tape.h                  | 10 +++++-----
 tensorflow/cc/framework/gradients.cc       |  6 +++---
 tensorflow/cc/framework/while_gradients.cc |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/tensorflow/c/eager/tape.h b/tensorflow/c/eager/tape.h
index f52a5e32c1a..40cfa87dd66 100644
--- a/tensorflow/c/eager/tape.h
+++ b/tensorflow/c/eager/tape.h
@@ -573,7 +573,7 @@ Status InitialGradients(
     gtl::ArraySlice<Gradient*> output_gradients, const TensorTape& tensor_tape,
     const OpTape<BackwardFunction, TapeTensor>& op_tape,
     std::unordered_map<int64, std::vector<Gradient*>>* result) {
-  for (int i = 0, end = target_tensor_ids.size(); i < end; ++i) {
+  for (int i = 0; i < target_tensor_ids.size(); ++i) {
     const int64 id = target_tensor_ids[i];
     if (output_gradients.empty() || output_gradients[i] == nullptr) {
       auto tensor_it = tensor_tape.find(id);
@@ -699,7 +699,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
     std::vector<Gradient*> out_gradients;
     out_gradients.reserve(trace.output_tensor_info.size());
     std::vector<int64> unneeded_gradients;
-    for (int i = 0, end = trace.input_tensor_id.size(); i < end; i++) {
+    for (int i = 0; i < trace.input_tensor_id.size(); i++) {
       const auto& in_tensor_id = trace.input_tensor_id[i];
       if (tensor_tape_.find(in_tensor_id) == tensor_tape_.end() &&
           sources_set.find(in_tensor_id) == sources_set.end()) {
@@ -709,7 +709,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
 
     bool any_gradient_nonzero = false;
     std::vector<int> zero_indices;
-    for (int i = 0, end = trace.output_tensor_info.size(); i < end; ++i) {
+    for (int i = 0; i < trace.output_tensor_info.size(); ++i) {
       const int64 id = trace.output_tensor_info[i].GetID();
       auto grad_it = gradients.find(id);
       if (grad_it == gradients.end()) {
@@ -775,7 +775,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
     }
     VLOG(1) << "Got " << in_gradients.size() << " in_gradients for "
             << trace.input_tensor_id.size() << " sources";
-    for (int i = 0, end = in_gradients.size(); i < end; ++i) {
+    for (int i = 0; i < in_gradients.size(); ++i) {
       const int64 id = trace.input_tensor_id[i];
       if (in_gradients[i] != nullptr) {
         auto& unaggregated_grads = gradients[id];
@@ -968,7 +968,7 @@ ForwardAccumulator<Gradient, BackwardFunction, TapeTensor>::ForwardpropFromTape(
   targets.reserve(grad.size());
   used_in_grads.reserve(grad.size());
   std::unordered_map<int64, TapeTensor> sources_that_are_targets;
-  for (int grad_index = 0, end = grad.size(); grad_index < end; ++grad_index) {
+  for (int grad_index = 0; grad_index < grad.size(); ++grad_index) {
     Gradient* grad_tensor = grad[grad_index];
     if (grad_tensor != nullptr) {
       int64 tensor_id = vspace_.TensorId(grad_tensor);
diff --git a/tensorflow/cc/framework/gradients.cc b/tensorflow/cc/framework/gradients.cc
index 3195a357186..88cd3fe79d6 100644
--- a/tensorflow/cc/framework/gradients.cc
+++ b/tensorflow/cc/framework/gradients.cc
@@ -425,7 +425,7 @@ Status SymbolicGradientBuilder::ProcessWhileLoop(Node* exit_node,
   // Backprop along the in edges to the while loop (i.e. the inputs to the enter
   // nodes)
   DCHECK_EQ(dx.size(), while_ctx->enter_nodes().size());
-  for (int i = 0, end = dx.size(); i < end; ++i) {
+  for (int i = 0; i < dx.size(); ++i) {
     Node* enter_node = while_ctx->enter_nodes()[i];
     for (const Edge* e : enter_node->in_edges()) {
       if (e->IsControlEdge()) continue;
@@ -489,7 +489,7 @@ Status SymbolicGradientBuilder::AddGradients() {
     // All loop-specific control flow ops should have been handled above
     DCHECK(!n->IsEnter() && !n->IsNextIteration()) << n->DebugString();
 
-    const int num_no_grad = no_grad_dy_indices.size();
+    const size_t num_no_grad = no_grad_dy_indices.size();
     if (IsPrimitiveOpWithNoGrad(n->type_string()) || num_no_grad == num_y) {
       // No grad defined for this op, or all outputs returned 'NoGradient':
       // Backprop 'NoGradient' along the in edges.
@@ -524,7 +524,7 @@ Status SymbolicGradientBuilder::AddGradients() {
     // make this association explicit.
     for (const Edge* e : n->in_edges()) {
       if (e->IsControlEdge()) continue;
-      size_t dx_index = e->dst_input();
+      int dx_index = e->dst_input();
       if (dx_index >= dx.size()) {
         return errors::Internal(
             "Invalid gradient output index: ", dx_index, " size: ", dx.size());
diff --git a/tensorflow/cc/framework/while_gradients.cc b/tensorflow/cc/framework/while_gradients.cc
index e241cfaebe9..81870a0efa3 100644
--- a/tensorflow/cc/framework/while_gradients.cc
+++ b/tensorflow/cc/framework/while_gradients.cc
@@ -34,7 +34,7 @@ Output ToOutput(OutputTensor output_tensor) {
 
 std::vector<Output> ToOutputVector(
     const std::vector<OutputTensor>& output_tensors) {
-  const int n = output_tensors.size();
+  size_t n = output_tensors.size();
   std::vector<Output> result;
   result.reserve(n);
   for (int i = 0; i < n; ++i) result.push_back(ToOutput(output_tensors[i]));

From 497525e2c426bd648f16c314c4eb4788d24535f0 Mon Sep 17 00:00:00 2001
From: tg-at-google <taregaskin@google.com>
Date: Wed, 29 Jul 2020 17:27:29 -0400
Subject: [PATCH 6/7] Update build_xla_ops_pass.cc

---
 tensorflow/compiler/jit/build_xla_ops_pass.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorflow/compiler/jit/build_xla_ops_pass.cc b/tensorflow/compiler/jit/build_xla_ops_pass.cc
index d6f50532f62..a340b9d3f45 100644
--- a/tensorflow/compiler/jit/build_xla_ops_pass.cc
+++ b/tensorflow/compiler/jit/build_xla_ops_pass.cc
@@ -452,7 +452,7 @@ Status PredicateInt32Inputs(const Scope& root, Node* n,
   root.graph()->AddControlEdge(predicate_as_control.node(),
                                identity_n.operation.node());
 
-  for (int32 i = 0, end = int32_inputs.size(); i < end; i++) {
+  for (int i = 0, end = int32_inputs.size(); i < end; i++) {
     TF_RETURN_IF_ERROR(root.graph()->UpdateEdge(identity_n[i].node(), i, n,
                                                 int32_inputs_input_idxs[i]));
   }

From 20c3049f075e3f1b285be4969fb754ad0440e3af Mon Sep 17 00:00:00 2001
From: tg-at-google <taregaskin@google.com>
Date: Wed, 29 Jul 2020 17:32:16 -0400
Subject: [PATCH 7/7] Update extract_outside_compilation_pass.cc

---
 tensorflow/compiler/jit/extract_outside_compilation_pass.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorflow/compiler/jit/extract_outside_compilation_pass.cc b/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
index c84fca81420..f295efa5d8b 100644
--- a/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
+++ b/tensorflow/compiler/jit/extract_outside_compilation_pass.cc
@@ -1806,7 +1806,7 @@ TF_ATTRIBUTE_NOINLINE Status ExtractOutsideCompilationForFuncCallNode(
       continue;
     }
     
-    const int input_size_check = e->dst_input() < inputs.size(); 
+    const bool input_size_check = e->dst_input() < static_cast<int>(inputs.size()); 
     TF_RET_CHECK(e->dst_input() >= 0 && input_size_check);
     inputs[e->dst_input()] =
         NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),