commit 1ca7e613ad
Merge pull request #26217 from lgeiger:simplify_py_loops

PiperOrigin-RevId: 254045472
Author: TensorFlower Gardener
Date:   2019-06-19 12:20:43 -07:00

15 changed files with 36 additions and 58 deletions
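The theme across all 15 files is the same: index-based loops are replaced with enumerate and zip. A standalone Python sketch of the core rewrite, using hypothetical names:

    # Before: Python 2-era pattern that indexes into the sequence manually.
    items = ["conv", "pool", "dense"]
    for i in range(len(items)):
        item = items[i]
        print(i, item)

    # After: enumerate yields (index, item) pairs, so no manual lookup is needed.
    for i, item in enumerate(items):
        print(i, item)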


@@ -1438,8 +1438,7 @@ class DebugAnalyzer(object):
       hang += DEPTH_TEMPLATE % depth
-    for i in xrange(len(all_inputs)):
-      inp = all_inputs[i]
+    for i, inp in enumerate(all_inputs):
       op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))
       if op_type in self._GRAPH_STRUCT_OP_TYPE_BLACKLIST:
         continue


@@ -25,7 +25,6 @@ import traceback
 import numpy as np
 import six
-from six.moves import xrange # pylint: disable=redefined-builtin
 from tensorflow.python import pywrap_tensorflow_internal
 from tensorflow.python.platform import gfile
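Once the loops below use enumerate, the six.moves.xrange import becomes dead code and is dropped. For background, six.moves.xrange aliases the lazy range type on both Python lines; a minimal sketch:

    # six.moves.xrange resolves to xrange on Python 2 and range on Python 3,
    # both of which produce indices lazily rather than building a list.
    from six.moves import xrange  # pylint: disable=redefined-builtin

    for i in xrange(3):
        print(i)  # 0, 1, 2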
@@ -412,8 +411,7 @@ def regex_find(orig_screen_output, regex, font_attr):
     raise ValueError("Invalid regular expression: \"%s\"" % regex)
   regex_match_lines = []
-  for i in xrange(len(new_screen_output.lines)):
-    line = new_screen_output.lines[i]
+  for i, line in enumerate(new_screen_output.lines):
     find_it = re_prog.finditer(line)
     match_segs = []
@@ -466,11 +464,9 @@ def wrap_rich_text_lines(inp, cols):
   out = RichTextLines([])
   row_counter = 0 # Counter for new row index
-  for i in xrange(len(inp.lines)):
+  for i, line in enumerate(inp.lines):
     new_line_indices.append(out.num_lines())
-    line = inp.lines[i]
     if i in inp.annotations:
       out.annotations[row_counter] = inp.annotations[i]


@@ -568,8 +568,8 @@ class ProfileAnalyzer(object):
     # Add stat totals.
     row_str = ""
-    for col in range(len(device_total_row)):
-      row_str += ("{:<%d}" % column_widths[col]).format(device_total_row[col])
+    for width, row in zip(column_widths, device_total_row):
+      row_str += ("{:<%d}" % width).format(row)
     output.append(RL())
     output.append(RL(row_str))
     return debugger_cli_common.rich_text_lines_from_rich_line_list(output)
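Here the index was only a vehicle for pairing column widths with row values, so zip expresses the intent directly. A standalone sketch with hypothetical data:

    # Pair each column width with the corresponding cell and left-justify it.
    column_widths = [10, 8, 12]
    device_total_row = ["total", "1.2ms", "3.4ms"]

    row_str = ""
    for width, cell in zip(column_widths, device_total_row):
        row_str += ("{:<%d}" % width).format(cell)
    print(repr(row_str))  # 'total     1.2ms   3.4ms       '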


@@ -249,8 +249,8 @@ def _annotate_ndarray_lines(
   curr_indices = [0] * len(dims)
   curr_dim = 0
-  for i in xrange(len(array_lines)):
-    line = array_lines[i].strip()
+  for i, raw_line in enumerate(array_lines):
+    line = raw_line.strip()
     if not line:
       # Skip empty lines, which can appear for >= 3D arrays.
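Note the rename to raw_line: with enumerate the loop variable is the original element, so the stripped version gets its own name rather than silently replacing the indexed lookup. A minimal sketch:

    array_lines = ["[[1 2]", "", " [3 4]]"]
    for i, raw_line in enumerate(array_lines):
        line = raw_line.strip()  # keep raw_line intact; work on the copy
        if not line:
            continue  # skip blank separator lines
        print(i, line)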


@@ -17,8 +17,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-from six.moves import xrange # pylint: disable=redefined-builtin
 from tensorflow.core.framework import graph_pb2
 from tensorflow.python.framework import op_def_registry
 from tensorflow.python.platform import tf_logging as logging
@@ -346,8 +344,7 @@ class DebugGraph(object):
     for node in self._node_inputs:
       inputs = self._node_inputs[node]
-      for i in xrange(len(inputs)):
-        inp = inputs[i]
+      for i, inp in enumerate(inputs):
         if is_copy_node(inp):
           # Find the input to the Copy node, which should be the original
           # input to the node.


@@ -933,8 +933,8 @@ class MultiWorkerAllReduce(AllReduceCrossDeviceOps):
         aggregated_grads = range_agg_grads
       else:
         assert len(aggregated_grads) == len(range_agg_grads)
-        for i in range(len(aggregated_grads)):
-          aggregated_grads[i] += range_agg_grads[i]
+        for i, range_agg_grad in enumerate(range_agg_grads):
+          aggregated_grads[i] += range_agg_grad
     assert not remaining_grads
     return _ungroup_and_make_mirrored(aggregated_grads, per_replica_values[0],
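enumerate still exposes the index, which this loop needs because it accumulates into a second list in place. A standalone sketch:

    aggregated_grads = [1.0, 2.0, 3.0]
    range_agg_grads = [0.5, 0.5, 0.5]
    assert len(aggregated_grads) == len(range_agg_grads)

    # The index addresses the destination list; the value comes from enumerate.
    for i, range_agg_grad in enumerate(range_agg_grads):
        aggregated_grads[i] += range_agg_grad
    print(aggregated_grads)  # [1.5, 2.5, 3.5]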


@@ -514,9 +514,8 @@ def make_vjp(f, params=None, persistent=True):
     try:
       sources = []
       args = [
-          ops.convert_to_tensor(args[i])
-          if i in parameter_positions else args[i]
-          for i in range(len(args))
+          ops.convert_to_tensor(arg) if i in parameter_positions else arg
+          for i, arg in enumerate(args)
       ]
       args = _ensure_unique_tensor_objects(parameter_positions, args)
       for i in parameter_positions:
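The same pattern works inside a list comprehension: enumerate supplies both the position (to test membership in parameter_positions) and the element (to convert or pass through). A simplified sketch with plain values standing in for tensors:

    parameter_positions = {0, 2}
    args = [1, "keep", 3, "keep"]

    # Convert only the arguments at watched positions; pass the rest through.
    args = [
        float(arg) if i in parameter_positions else arg
        for i, arg in enumerate(args)
    ]
    print(args)  # [1.0, 'keep', 3.0, 'keep']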


@@ -565,8 +565,7 @@ def experimental_tpu_predict_loop(model,
     prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
   else:
     prediction_result = [
-        np.concatenate(unconcatenated_outs[i], axis=0)
-        for i in range(len(unconcatenated_outs))
+        np.concatenate(out, axis=0) for out in unconcatenated_outs
     ]
   if padding_handler:
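When the index is not used at all, the comprehension can iterate over the elements themselves. A sketch of the shape of this rewrite, with small arrays standing in for the per-output batches:

    import numpy as np

    # Each entry collects per-step outputs for one model output.
    unconcatenated_outs = [[np.ones((2, 4)), np.ones((3, 4))],
                           [np.zeros((2, 1)), np.zeros((3, 1))]]

    prediction_result = [
        np.concatenate(out, axis=0) for out in unconcatenated_outs
    ]
    print([r.shape for r in prediction_result])  # [(5, 4), (5, 1)]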
@@ -797,4 +796,3 @@ class DistributionMultiWorkerTrainingLoop(DistributionSingleWorkerTrainingLoop):
   evaluate = train_with_multi_worker(
       DistributionSingleWorkerTrainingLoop.evaluate)
   # Currently predict is still using the single worker implementation.


@@ -27,6 +27,7 @@ import time
 import numpy as np
 import six
+from six.moves import zip # pylint: disable=redefined-builtin
 from tensorflow.python import tf2
 from tensorflow.python.data.experimental.ops import cardinality
@@ -1589,9 +1590,7 @@ class ModelInputs(object):
     # TODO(karmel): There is a side-effect here where what you get
     # with as_list and as_dict depends on whether you have called this
     # method first, since it modifies in place.
-    for i in range(len(self._flattened_inputs)):
-      k = self._input_names[i]
-      v = self._flattened_inputs[i]
+    for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)):
       if isinstance(v, (list, float, int)):
         v = np.asarray(v)
         if v.ndim == 1:
@@ -1621,8 +1620,8 @@ class ModelInputs(object):
   def as_dict(self):
     """An iterable over a dictionary version of inputs."""
-    for i in range(len(self._flattened_inputs)):
-      yield self._input_names[i], self._flattened_inputs[i]
+    for k, v in zip(self._input_names, self._flattened_inputs):
+      yield k, v
 
   def as_list(self):
     """Returning the inputs as a list."""


@@ -116,8 +116,7 @@ def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer):
     # If tensor comes from an input layer: cache the input layer.
     input_tensors = nest.flatten(input_tensors)
     input_tensors_ = []
-    for i in range(len(input_tensors)):
-      input_tensor = input_tensors[i]
+    for i, input_tensor in enumerate(input_tensors):
       if not K.is_keras_tensor(input_tensor):
         original_input_layer = model._input_layers[i]
         name = original_input_layer.name


@@ -211,9 +211,7 @@ def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
     with ops.device('/cpu:0'):
       model = clone_model(model)
 
-  all_outputs = []
-  for i in range(len(model.outputs)):
-    all_outputs.append([])
+  all_outputs = [[] for _ in range(len(model.outputs))]
 
   # Place a copy of the model on each GPU,
   # each getting a slice of the inputs.
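The append loop collapses to a comprehension. Note that `[[] for _ in range(n)]` is the safe spelling; `[[]] * n` would alias one list n times. A quick demonstration:

    n = 3
    all_outputs = [[] for _ in range(n)]  # three independent lists
    aliased = [[]] * n                    # one list referenced three times

    all_outputs[0].append("x")
    aliased[0].append("x")
    print(all_outputs)  # [['x'], [], []]
    print(aliased)      # [['x'], ['x'], ['x']]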
@@ -241,8 +239,8 @@ def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
         outputs = [outputs]
       # Save the outputs for merging back together later.
-      for o in range(len(outputs)):
-        all_outputs[o].append(outputs[o])
+      for o, output in enumerate(outputs):
+        all_outputs[o].append(output)
 
   # Deduplicate output names to handle Siamese networks.
   occurrences = {}


@@ -2426,8 +2426,7 @@ class RecordInput(object):
     with ops.name_scope(self._name):
       batch_list = [[] for _ in six.moves.range(self._batches)]
       records = array_ops.split(records, self._batch_size, 0)
-      records = [array_ops.reshape(record, []) for record in records]
-      for index, protobuf in zip(six.moves.range(len(records)), records):
+      for index, protobuf in enumerate(records):
         batch_index = index % self._batches
-        batch_list[batch_index].append(protobuf)
+        batch_list[batch_index].append(array_ops.reshape(protobuf, []))
       return batch_list
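zip(range(len(xs)), xs) is exactly enumerate(xs), so this rewrite is a pure simplification; it also folds the separate reshape pass into the loop body. The equivalence in isolation:

    records = ["r0", "r1", "r2"]

    # These produce identical (index, item) pairs.
    assert (list(zip(range(len(records)), records))
            == list(enumerate(records)))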


@@ -21,7 +21,7 @@ from __future__ import print_function
 import collections
 import contextlib
-from six.moves import xrange # pylint: disable=redefined-builtin
+from six.moves import xrange, zip # pylint: disable=redefined-builtin
 from tensorflow.core.framework import attr_value_pb2
 from tensorflow.python.eager import backprop
@@ -160,9 +160,7 @@ def _DefaultGradYs(grad_ys,
     raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
   grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
   new_grad_ys = []
-  for i in xrange(len(grad_ys)):
-    grad_y = grad_ys[i]
-    y = ys[i]
+  for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
     with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
       if grad_y is None:
         if y.dtype.is_complex:
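Where two parallel lists are walked and the index is still needed, enumerate(zip(...)) gives all three in one unpacking. A standalone sketch:

    ys = ["y0", "y1"]
    grad_ys = [None, "g1"]

    # i indexes the pair; y and grad_y come from the zipped lists.
    for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
        if grad_y is None:
            print("default gradient for", y, "at position", i)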


@@ -28,7 +28,7 @@ import traceback
 import six
 from six import iteritems
-from six.moves import xrange # pylint: disable=redefined-builtin
+from six.moves import xrange, zip # pylint: disable=redefined-builtin
 from tensorflow.python import tf2
 from tensorflow.python.eager import context
@@ -93,9 +93,7 @@ class _PartitionInfo(object):
           "full_shape is of length {}.".format(
               len(var_offset), len(full_shape)))
-    for i in xrange(len(full_shape)):
-      offset = var_offset[i]
-      shape = full_shape[i]
+    for offset, shape in zip(var_offset, full_shape):
       if offset < 0 or offset >= shape:
         raise ValueError(
             "Expected 0 <= offset < shape but found offset={}, shape={} for "


@@ -108,8 +108,7 @@ def generate_checkpoint_state_proto(save_dir,
   if not os.path.isabs(save_dir):
     if not os.path.isabs(model_checkpoint_path):
       model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
-    for i in range(len(all_model_checkpoint_paths)):
-      p = all_model_checkpoint_paths[i]
+    for i, p in enumerate(all_model_checkpoint_paths):
       if not os.path.isabs(p):
         all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
@@ -281,8 +280,7 @@ def get_checkpoint_state(checkpoint_dir, latest_filename=None):
       if not os.path.isabs(ckpt.model_checkpoint_path):
         ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
                                                   ckpt.model_checkpoint_path)
-      for i in range(len(ckpt.all_model_checkpoint_paths)):
-        p = ckpt.all_model_checkpoint_paths[i]
+      for i, p in enumerate(ckpt.all_model_checkpoint_paths):
         if not os.path.isabs(p):
           ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
     except errors.OpError as e:
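enumerate rather than plain iteration is the right tool in both checkpoint loops because the list is rewritten in place: rebinding p alone would not update the stored entry. A minimal sketch with hypothetical paths:

    import os

    checkpoint_dir = "/tmp/ckpts"
    all_model_checkpoint_paths = ["model-1", "/abs/model-2"]

    # Rebind each relative entry through the index; assigning to p alone
    # would leave the list unchanged.
    for i, p in enumerate(all_model_checkpoint_paths):
        if not os.path.isabs(p):
            all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
    print(all_model_checkpoint_paths)  # ['/tmp/ckpts/model-1', '/abs/model-2']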