Simplify Python loops
This PR replaces loops over range(len(...)) with enumerate() or zip(). This makes the code (arguably) easier to read and, in some cases, slightly faster.
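For illustration, here is a minimal, self-contained sketch of the two patterns this change applies throughout; the names items and widths are made up for the example and do not appear in the diff:

```python
items = ["a", "b", "c"]
widths = [4, 8, 6]

# Before: index into the sequence on every iteration.
for i in range(len(items)):
    item = items[i]
    print(i, item)

# After: enumerate() yields the index and the element together.
for i, item in enumerate(items):
    print(i, item)

# Before: two parallel sequences accessed through a shared index.
row = ""
for i in range(len(items)):
    row += ("{:<%d}" % widths[i]).format(items[i])

# After: zip() pairs the sequences directly, no index needed.
row = ""
for width, item in zip(widths, items):
    row += ("{:<%d}" % width).format(item)
```

The slight speed-up comes from dropping the per-iteration items[i] lookups; where the index is not needed at all, the loop can iterate over the sequence directly.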
This commit is contained in:
parent f6e917b8c2
commit 47b78e8d6b
@@ -1438,8 +1438,7 @@ class DebugAnalyzer(object):
 
     hang += DEPTH_TEMPLATE % depth
 
-    for i in xrange(len(all_inputs)):
-      inp = all_inputs[i]
+    for i, inp in enumerate(all_inputs):
       op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))
       if op_type in self._GRAPH_STRUCT_OP_TYPE_BLACKLIST:
         continue

@@ -25,7 +25,6 @@ import traceback
 
 import numpy as np
 import six
-from six.moves import xrange  # pylint: disable=redefined-builtin
 
 from tensorflow.python import pywrap_tensorflow_internal
 from tensorflow.python.platform import gfile

@@ -412,8 +411,7 @@ def regex_find(orig_screen_output, regex, font_attr):
     raise ValueError("Invalid regular expression: \"%s\"" % regex)
 
   regex_match_lines = []
-  for i in xrange(len(new_screen_output.lines)):
-    line = new_screen_output.lines[i]
+  for i, line in enumerate(new_screen_output.lines):
     find_it = re_prog.finditer(line)
 
     match_segs = []

@@ -466,11 +464,9 @@ def wrap_rich_text_lines(inp, cols):
   out = RichTextLines([])
 
   row_counter = 0  # Counter for new row index
-  for i in xrange(len(inp.lines)):
+  for i, line in enumerate(inp.lines):
     new_line_indices.append(out.num_lines())
 
-    line = inp.lines[i]
-
     if i in inp.annotations:
       out.annotations[row_counter] = inp.annotations[i]
 

@@ -568,8 +568,8 @@ class ProfileAnalyzer(object):
 
     # Add stat totals.
     row_str = ""
-    for col in range(len(device_total_row)):
-      row_str += ("{:<%d}" % column_widths[col]).format(device_total_row[col])
+    for width, row in zip(column_widths, device_total_row):
+      row_str += ("{:<%d}" % width).format(row)
     output.append(RL())
     output.append(RL(row_str))
     return debugger_cli_common.rich_text_lines_from_rich_line_list(output)

@@ -249,8 +249,8 @@ def _annotate_ndarray_lines(
 
   curr_indices = [0] * len(dims)
   curr_dim = 0
-  for i in xrange(len(array_lines)):
-    line = array_lines[i].strip()
+  for i, raw_line in enumerate(array_lines):
+    line = raw_line.strip()
 
     if not line:
       # Skip empty lines, which can appear for >= 3D arrays.

@@ -346,8 +346,7 @@ class DebugGraph(object):
     for node in self._node_inputs:
       inputs = self._node_inputs[node]
 
-      for i in xrange(len(inputs)):
-        inp = inputs[i]
+      for i, inp in enumerate(inputs):
         if is_copy_node(inp):
           # Find the input to the Copy node, which should be the original
           # input to the node.

@@ -933,8 +933,8 @@ class MultiWorkerAllReduce(AllReduceCrossDeviceOps):
         aggregated_grads = range_agg_grads
       else:
         assert len(aggregated_grads) == len(range_agg_grads)
-        for i in range(len(aggregated_grads)):
-          aggregated_grads[i] += range_agg_grads[i]
+        for i, range_agg_grad in enumerate(range_agg_grads):
+          aggregated_grads[i] += range_agg_grad
     assert not remaining_grads
 
     return _ungroup_and_make_mirrored(aggregated_grads, per_replica_values[0],

@@ -514,9 +514,9 @@ def make_vjp(f, params=None, persistent=True):
     try:
       sources = []
      args = [
-          ops.convert_to_tensor(args[i])
-          if i in parameter_positions else args[i]
-          for i in range(len(args))
+          ops.convert_to_tensor(arg)
+          if i in parameter_positions else arg
+          for i, arg in enumerate(args)
       ]
       args = _ensure_unique_tensor_objects(parameter_positions, args)
       for i in parameter_positions:

@@ -565,8 +565,7 @@ def experimental_tpu_predict_loop(model,
     prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
   else:
     prediction_result = [
-        np.concatenate(unconcatenated_outs[i], axis=0)
-        for i in range(len(unconcatenated_outs))
+        np.concatenate(out, axis=0) for out in unconcatenated_outs
     ]
 
   if padding_handler:

@@ -27,6 +27,7 @@ import time
 
 import numpy as np
 import six
+from six.moves import zip  # pylint: disable=redefined-builtin
 
 from tensorflow.python import tf2
 from tensorflow.python.data.experimental.ops import cardinality

@@ -1589,9 +1590,7 @@ class ModelInputs(object):
     # TODO(karmel): There is a side-effect here where what you get
     # with as_list and as_dict depends on whether you have called this
     # method first, since it modifies in place.
-    for i in range(len(self._flattened_inputs)):
-      k = self._input_names[i]
-      v = self._flattened_inputs[i]
+    for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)):
       if isinstance(v, (list, float, int)):
         v = np.asarray(v)
         if v.ndim == 1:

@@ -1621,8 +1620,8 @@ class ModelInputs(object):
 
   def as_dict(self):
     """An iterable over a dictionary version of inputs."""
-    for i in range(len(self._flattened_inputs)):
-      yield self._input_names[i], self._flattened_inputs[i]
+    for k, v in zip(self._input_names, self._flattened_inputs):
+      yield k, v
 
   def as_list(self):
     """Returning the inputs as a list."""

@@ -116,8 +116,7 @@ def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer):
     # If tensor comes from an input layer: cache the input layer.
     input_tensors = nest.flatten(input_tensors)
     input_tensors_ = []
-    for i in range(len(input_tensors)):
-      input_tensor = input_tensors[i]
+    for i, input_tensor in enumerate(input_tensors):
       if not K.is_keras_tensor(input_tensor):
         original_input_layer = model._input_layers[i]
         name = original_input_layer.name

@@ -211,9 +211,7 @@ def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
     with ops.device('/cpu:0'):
       model = clone_model(model)
 
-  all_outputs = []
-  for i in range(len(model.outputs)):
-    all_outputs.append([])
+  all_outputs = [[] for _ in range(len(model.outputs))]
 
   # Place a copy of the model on each GPU,
   # each getting a slice of the inputs.

@@ -241,8 +239,8 @@ def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
           outputs = [outputs]
 
         # Save the outputs for merging back together later.
-        for o in range(len(outputs)):
-          all_outputs[o].append(outputs[o])
+        for o, output in enumerate(outputs):
+          all_outputs[o].append(output)
 
   # Deduplicate output names to handle Siamese networks.
   occurrences = {}

@@ -2426,8 +2426,7 @@ class RecordInput(object):
     with ops.name_scope(self._name):
       batch_list = [[] for _ in six.moves.range(self._batches)]
       records = array_ops.split(records, self._batch_size, 0)
-      records = [array_ops.reshape(record, []) for record in records]
-      for index, protobuf in zip(six.moves.range(len(records)), records):
+      for index, protobuf in enumerate(records):
         batch_index = index % self._batches
-        batch_list[batch_index].append(protobuf)
+        batch_list[batch_index].append(array_ops.reshape(protobuf, []))
       return batch_list

@@ -21,7 +21,7 @@ from __future__ import print_function
 import collections
 import contextlib
 
-from six.moves import xrange  # pylint: disable=redefined-builtin
+from six.moves import xrange, zip  # pylint: disable=redefined-builtin
 
 from tensorflow.core.framework import attr_value_pb2
 from tensorflow.python.eager import backprop

@@ -160,9 +160,7 @@ def _DefaultGradYs(grad_ys,
     raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
   grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
   new_grad_ys = []
-  for i in xrange(len(grad_ys)):
-    grad_y = grad_ys[i]
-    y = ys[i]
+  for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
     with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
       if grad_y is None:
         if y.dtype.is_complex:

@@ -28,7 +28,7 @@ import traceback
 
 import six
 from six import iteritems
-from six.moves import xrange  # pylint: disable=redefined-builtin
+from six.moves import xrange, zip  # pylint: disable=redefined-builtin
 
 from tensorflow.python import tf2
 from tensorflow.python.eager import context

@@ -93,9 +93,7 @@ class _PartitionInfo(object):
           "full_shape is of length {}.".format(
               len(var_offset), len(full_shape)))
 
-    for i in xrange(len(full_shape)):
-      offset = var_offset[i]
-      shape = full_shape[i]
+    for offset, shape in zip(var_offset, full_shape):
       if offset < 0 or offset >= shape:
         raise ValueError(
             "Expected 0 <= offset < shape but found offset={}, shape={} for "

@@ -108,8 +108,7 @@ def generate_checkpoint_state_proto(save_dir,
   if not os.path.isabs(save_dir):
     if not os.path.isabs(model_checkpoint_path):
       model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
-    for i in range(len(all_model_checkpoint_paths)):
-      p = all_model_checkpoint_paths[i]
+    for i, p in enumerate(all_model_checkpoint_paths):
       if not os.path.isabs(p):
         all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
 

@@ -281,8 +280,7 @@ def get_checkpoint_state(checkpoint_dir, latest_filename=None):
       if not os.path.isabs(ckpt.model_checkpoint_path):
         ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
                                                   ckpt.model_checkpoint_path)
-      for i in range(len(ckpt.all_model_checkpoint_paths)):
-        p = ckpt.all_model_checkpoint_paths[i]
+      for i, p in enumerate(ckpt.all_model_checkpoint_paths):
         if not os.path.isabs(p):
           ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
   except errors.OpError as e: