Replaced list comprehensions inside all/any/sum with generator expressions

PiperOrigin-RevId: 222429778

commit 95d7bbb2fc (parent 4085979982)
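The change applied throughout is mechanical but worth spelling out: `any`, `all`, and `sum` accept any iterable, so `any([f(x) for x in xs])` builds the whole list first and only then scans it, while `any(f(x) for x in xs)` pulls items lazily, short-circuits at the first decisive element, and never allocates the intermediate list. A minimal, self-contained sketch of the behavioral difference (the names below are illustrative, not taken from the commit):

def is_positive(x):
  print("checking", x)  # the side effect makes the evaluation order visible
  return x > 0

values = [-1, 2, -3, 4]

# List comprehension: the full list is built first, so is_positive runs for
# all four elements before any() ever sees a value.
print(any([is_positive(x) for x in values]))  # checks -1, 2, -3, 4 -> True

# Generator expression: any() consumes items lazily and stops at the first
# True, so only -1 and 2 are checked and no intermediate list is allocated.
print(any(is_positive(x) for x in values))    # checks -1, 2 -> True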
@@ -859,7 +859,7 @@ def set_tf_cuda_version(environ_cp):
     cuda_toolkit_paths_full = [
         os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
     ]
-    if any([os.path.exists(x) for x in cuda_toolkit_paths_full]):
+    if any(os.path.exists(x) for x in cuda_toolkit_paths_full):
       break
 
     # Reset and retry
@@ -42,7 +42,7 @@ def GetRunMetadataLabels(run_metadata):
 
 def InLabels(labels, substr):
   """Returns true iff one of the labels contains substr."""
-  return any([substr in x for x in labels])
+  return any(substr in x for x in labels)
 
 
 class DenseLayerTest(test.TestCase):
@@ -75,7 +75,7 @@ def RunMetadataLabels(run_metadata):
 
 def InLabels(labels, substr):
   """Returns true iff one of the labels contains substr."""
-  return any([substr in x for x in labels])
+  return any(substr in x for x in labels)
 
 
 def MetadataHasXlaRunOp(run_metadata):
@@ -74,8 +74,8 @@ class ConstrainedMinimizationProblem(object):
 
     if (constraints_shape.ndims is None or
         proxy_constraints_shape.ndims is None or
-        any([ii is None for ii in constraints_shape.as_list()]) or
-        any([ii is None for ii in proxy_constraints_shape.as_list()])):
+        any(ii is None for ii in constraints_shape.as_list()) or
+        any(ii is None for ii in proxy_constraints_shape.as_list())):
       raise ValueError(
           "constraints and proxy_constraints must have fully-known shapes")
     if constraints_shape != proxy_constraints_shape:
@@ -778,8 +778,7 @@ class CudnnParamsFormatConverterTest(TensorFlowTestCase,
 
       # Test opaque_params size lower bound
       opaque_params_size_v = sess.run(opaque_params_size)
-      min_params_size = (
-          np.sum([x.size for x in ws]) + np.sum([x.size for x in bs]))
+      min_params_size = sum(x.size for x in ws) + np.sum(x.size for x in bs)
       logging.info("min_parm_size: %d vs actual_opaque_param_size: %d",
                    min_params_size, opaque_params_size_v)
       self.assertLessEqual(min_params_size, opaque_params_size_v)
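A subtlety in the hunk above, noted as an editorial aside rather than as part of the commit: the replacement line still hands a generator to `np.sum`. NumPy special-cases generator arguments and delegates to Python's builtin `sum` (NumPy 1.15+ emits a DeprecationWarning for this), so the plain `sum(...)` used for `ws` — and for both terms in the next hunk — is the more direct spelling. A small sketch, assuming NumPy is installed:

import numpy as np

sizes = [3, 5, 2]

# np.sum does not vectorize a generator argument; it detects the generator and
# falls back to the builtin sum (with a DeprecationWarning on NumPy >= 1.15).
total_np = np.sum(s * 2 for s in sizes)

# The builtin sum expresses the same computation directly, with no warning.
total = sum(s * 2 for s in sizes)

assert total == total_np == 20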
@@ -853,8 +852,7 @@ class CudnnParamsFormatConverterTest(TensorFlowTestCase,
 
      # Test opaque_params size lower bound
      opaque_params_size_v = sess.run(opaque_params_size)
-     min_params_size = (
-         np.sum([x.size for x in ws]) + np.sum([x.size for x in bs]))
+     min_params_size = sum(x.size for x in ws) + sum(x.size for x in bs)
      logging.info("min_parm_size: %d vs actual_opaque_param_size: %d",
                   min_params_size, opaque_params_size_v)
      self.assertLessEqual(min_params_size, opaque_params_size_v)
@@ -1045,8 +1045,8 @@ class CudnnRNNTestParamsSize(test_util.TensorFlowTestCase):
 
     # Min param size estimate = sum(weights.size) + sum(biases.size)
     min_params_size = (
-        np.sum(list(map(np.prod, rnn.canonical_weight_shapes))) +
-        np.sum([sp[0] for sp in rnn.canonical_bias_shapes]))
+        sum(map(np.prod, rnn.canonical_weight_shapes)) +
+        sum(sp[0] for sp in rnn.canonical_bias_shapes))
 
     opaque_params = rnn.trainable_variables[0]
     with self.test_session(use_gpu=True, graph=ops.get_default_graph()):
@@ -344,7 +344,7 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
       run_step()
 
     v = all_vars[0]
-    self.assertTrue(all([v is vi for vi in all_vars[1:]]))
+    self.assertTrue(all(v is vi for vi in all_vars[1:]))
     weight = numpy.squeeze(self.evaluate(v))
     # Our model is:
     #   predict = x * w
@@ -254,7 +254,7 @@ class TPUExtended(distribute_lib.DistributionStrategyExtended):
       self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
     output_shapes = multi_worker_iterator.output_shapes
     shapes = nest.flatten(output_shapes)
-    if any([not s.is_fully_defined() for s in shapes]):
+    if any(not s.is_fully_defined() for s in shapes):
       raise ValueError(
           "TPU currently requires fully defined shapes. Either use "
           "set_shape() on the input tensors or use "
@@ -36,8 +36,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import numpy as np
-
 from tensorflow.contrib.framework.python.ops import variables as contrib_variables_lib
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_util
@@ -817,7 +815,7 @@ def _numerically_stable_global_norm(tensor_list):
   Returns:
     A scalar tensor with the global norm.
   """
-  if np.all([x is None for x in tensor_list]):
+  if all(x is None for x in tensor_list):
     return 0.0
 
   list_max = math_ops.reduce_max([math_ops.reduce_max(math_ops.abs(x)) for x in
@@ -759,7 +759,7 @@ class TensorPoolAdjusteModelTest(test.TestCase):
       # For [pool_size, ?), the pool is full, tensor2 must be equal to some
       # historical values of tensor1 (which is previously stored in the
       # pool).
-      self.assertTrue(any([(v == t2).all() for v in history_values]))
+      self.assertTrue(any((v == t2).all() for v in history_values))
 
   def _make_new_model_and_check(self, model, pool_size):
     pool_fn = lambda x: random_tensor_pool.tensor_pool(x, pool_size=pool_size)
@@ -90,7 +90,7 @@ def _update_features_and_columns(features, feature_columns,
     mapped_column_name = column_name + "_MAPPED"
     # Construct new feature columns based on provided kernel_mappers.
     column_kernel_mappers = kernel_mappers_dict[feature_column]
-    new_dim = sum([mapper.output_dim for mapper in column_kernel_mappers])
+    new_dim = sum(mapper.output_dim for mapper in column_kernel_mappers)
     mapped_columns.add(
         layers.feature_column.real_valued_column(mapped_column_name, new_dim))
 
@@ -141,7 +141,7 @@ class RegularizerTest(test.TestCase):
     dummy_regularizer = lambda x: math_ops.reduce_sum(2 * x)
     array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
     tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
-    expected = sum([2 * x for l in array_weights_list for x in l])
+    expected = sum(2 * x for l in array_weights_list for x in l)
     with self.cached_session():
       result = regularizers.apply_regularization(dummy_regularizer,
                                                  tensor_weights_list)
@@ -150,10 +150,10 @@ def _dnn_model_fn(features, labels, mode, params, config=None):
       "input_from_feature_columns",
       values=tuple(six.itervalues(features)),
       partitioner=input_layer_partitioner) as input_layer_scope:
-    if all([
+    if all(
         isinstance(fc, feature_column._FeatureColumn)  # pylint: disable=protected-access
         for fc in feature_columns
-    ]):
+    ):
       net = layers.input_from_feature_columns(
           columns_to_tensors=features,
           feature_columns=feature_columns,
@@ -236,10 +236,10 @@ def _dnn_linear_combined_model_fn(features, labels, mode, params, config=None):
       "input_from_feature_columns",
       values=tuple(six.itervalues(features)),
       partitioner=input_layer_partitioner) as dnn_input_scope:
-    if all([
+    if all(
         isinstance(fc, feature_column_lib._FeatureColumn)  # pylint: disable=protected-access
         for fc in dnn_feature_columns
-    ]):
+    ):
       net = layers.input_from_feature_columns(
           columns_to_tensors=features,
           feature_columns=dnn_feature_columns,
@@ -292,8 +292,8 @@ def _dnn_linear_combined_model_fn(features, labels, mode, params, config=None):
       linear_parent_scope,
       values=tuple(six.itervalues(features)),
       partitioner=linear_partitioner) as scope:
-    if all([isinstance(fc, feature_column_lib._FeatureColumn)  # pylint: disable=protected-access
-            for fc in linear_feature_columns]):
+    if all(isinstance(fc, feature_column_lib._FeatureColumn)  # pylint: disable=protected-access
+           for fc in linear_feature_columns):
       if joint_linear_weights:
         linear_logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
             columns_to_tensors=features,
@@ -1066,11 +1066,11 @@ class BaseEstimator(sklearn.BaseEstimator, evaluable.Evaluable,
     chief_hooks = []
     if (self._config.save_checkpoints_secs or
         self._config.save_checkpoints_steps):
-      saver_hook_exists = any([
+      saver_hook_exists = any(
          isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
          for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
                    model_fn_ops.training_chief_hooks)
-      ])
+      )
       if not saver_hook_exists:
         chief_hooks = [
             basic_session_run_hooks.CheckpointSaverHook(
@@ -155,8 +155,8 @@ def _linear_model_fn(features, labels, mode, params, config=None):
       parent_scope,
       values=tuple(six.itervalues(features)),
       partitioner=partitioner) as scope:
-    if all([isinstance(fc, feature_column._FeatureColumn)  # pylint: disable=protected-access
-            for fc in feature_columns]):
+    if all(isinstance(fc, feature_column._FeatureColumn)  # pylint: disable=protected-access
+           for fc in feature_columns):
       if joint_weights:
         layer_fn = layers.joint_weighted_sum_from_feature_columns
       else:
@@ -160,7 +160,7 @@ def Quantize(graph,
       # shouldn't quantize it, since the activation will be Fused into the
       # Add at inference time.
       consumers = input_to_ops_map.ConsumerOperations(layer_match.bypass_op)
-      if any([consumer.type in _ACTIVATION_TYPES for consumer in consumers]):
+      if any(consumer.type in _ACTIVATION_TYPES for consumer in consumers):
        logging.info('Skipping %s, because its followed by an activation.',
                     layer_match.bypass_op.name)
      else:
@@ -195,7 +195,7 @@ def Quantize(graph,
       # Add at inference time.
       consumers = input_to_ops_map.ConsumerOperations(
           layer_match.post_activation_bypass_op)
-      if any([consumer.type in _RELU_TYPES for consumer in consumers]):
+      if any(consumer.type in _RELU_TYPES for consumer in consumers):
        logging.info('Skipping %s, because its followed by an activation.',
                     layer_match.post_activation_bypass_op.name)
      else:
@@ -154,8 +154,8 @@ class AttentionWrapperTest(test.TestCase):
 
     if attention_layer_sizes is not None:
       # Compute sum of attention_layer_sizes. Use encoder_output_depth if None.
-      attention_depth = sum([attention_layer_size or encoder_output_depth
-                             for attention_layer_size in attention_layer_sizes])
+      attention_depth = sum(attention_layer_size or encoder_output_depth
+                            for attention_layer_size in attention_layer_sizes)
     elif attention_layers is not None:
       # Compute sum of attention_layers output depth.
       attention_depth = sum(
@@ -1111,7 +1111,7 @@ def validate_inference_rewrite_for_variables(graph):
   Raises:
     RuntimeError: if validation failed.
   """
-  if not any([x.type == "GuaranteeConst" for x in graph.get_operations()]):
+  if not any(x.type == "GuaranteeConst" for x in graph.get_operations()):
     raise RuntimeError(
         "No GuaranteeConst ops found in the graph after running "
         "tpu.rewrite_for_inference(...). Please check that you are using "
@@ -30,4 +30,4 @@ def is_tensor(*args):
   Returns:
     True if any *args are TensorFlow types, False if none are.
   """
-  return any([tensor_util.is_tensor(a) for a in args])
+  return any(tensor_util.is_tensor(a) for a in args)
@@ -459,7 +459,7 @@ class MapVectorizationBenchmark(test.Benchmark):
     return median_time
 
   def _compare(self, input_dataset, map_fn, batch_size, input_size, str_id):
-    num_elems = int(np.sum([np.prod(x) for x in input_size]))
+    num_elems = sum(np.prod(x) for x in input_size)
     name_template = "{}__batch_size_{}_input_element_size_{}_{}"
     unoptimized = input_dataset.map(map_fn).batch(batch_size)
     unoptimized_op = unoptimized.make_one_shot_iterator().get_next()
@@ -17,8 +17,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import numpy as np
-
 from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
 from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.framework import sparse_tensor
@@ -35,7 +33,7 @@ class FilterDatasetSerializationTest(
 
   def testFilterCore(self):
     div = 3
-    num_outputs = np.sum([x % 3 != 2 for x in range(100)])
+    num_outputs = sum(x % 3 != 2 for x in range(100))
     self.run_core_tests(lambda: self._build_filter_range_graph(div),
                         lambda: self._build_filter_range_graph(div * 2),
                         num_outputs)
@@ -47,7 +45,7 @@ class FilterDatasetSerializationTest(
         lambda d: d["foo"] + d["bar"])
 
   def testFilterDictCore(self):
-    num_outputs = np.sum([(x**2) % 2 == 0 for x in range(10)])
+    num_outputs = sum((x**2) % 2 == 0 for x in range(10))
     self.run_core_tests(self._build_filter_dict_graph, None, num_outputs)
 
   def _build_sparse_filter(self):
@@ -138,10 +138,10 @@ def parse_example_dataset(features, num_parallel_calls=1):
   def _apply_fn(dataset):
     """Function from `Dataset` to `Dataset` that applies the transformation."""
     out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls)
-    if any([
+    if any(
         isinstance(feature, parsing_ops.SparseFeature)
         for _, feature in features.items()
-    ]):
+    ):
       # pylint: disable=protected-access
       # pylint: disable=g-long-lambda
       out_dataset = out_dataset.map(
@@ -42,7 +42,7 @@ class DatasetOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
     with self.cached_session() as sess:
       graph = graph_pb2.GraphDef().FromString(
           sess.run(dataset._as_serialized_graph()))
-      self.assertTrue(any([node.op != "RangeDataset" for node in graph.node]))
+      self.assertTrue(any(node.op != "RangeDataset" for node in graph.node))
 
   @staticmethod
   def make_apply_fn(dataset):
@@ -34,7 +34,7 @@ def any_sparse(classes):
   Returns:
     `True` if `classes` contains a sparse tensor type and `False` otherwise.
   """
-  return any([c is sparse_tensor.SparseTensor for c in nest.flatten(classes)])
+  return any(c is sparse_tensor.SparseTensor for c in nest.flatten(classes))
 
 
 def as_dense_shapes(shapes, classes):
@@ -103,10 +103,10 @@ def _validate_value_destination_pairs(value_destination_pairs):
   # pylint: disable=g-missing-docstring
   if not value_destination_pairs: return False
   if not isinstance(value_destination_pairs, (list, tuple)): return False
-  if not all([isinstance(pair, tuple) for pair in value_destination_pairs]):
+  if not all(isinstance(pair, tuple) for pair in value_destination_pairs):
     return False
-  if not all([isinstance(v[0], value_lib.PerReplica)
-              for v in value_destination_pairs]):
+  if not all(isinstance(v[0], value_lib.PerReplica)
+             for v in value_destination_pairs):
     return False
   return True
 
@@ -132,10 +132,10 @@ def _devices_match(left, right):
 
 
 def _all_devices_match(value_destination_pairs):
-  if not all([_devices_match(v, d) for v, d in value_destination_pairs]):
+  if not all(_devices_match(v, d) for v, d in value_destination_pairs):
     return False
-  if not all([_devices_match(v, value_destination_pairs[0][0])
-              for v, _ in value_destination_pairs[1:]]):
+  if not all(_devices_match(v, value_destination_pairs[0][0])
+             for v, _ in value_destination_pairs[1:]):
     return False
   return True
 
@@ -401,7 +401,7 @@ class ConcatAndSplitPacker(object):
       # all gradient shapes are defined, we use another method to get the
       # total size.
       # TODO(yuefengz): move this logic to array_ops.size.
-      if all([g.shape.is_fully_defined() for g, _ in device_grads_and_vars]):
+      if all(g.shape.is_fully_defined() for g, _ in device_grads_and_vars):
        total_grad_size = sum(
            [g.shape.num_elements() for g, _ in device_grads_and_vars])
      else:
@@ -941,7 +941,7 @@ def choose_the_best(devices, session_config=None):
                     "TensorFlow sessions.")
     return ReductionToOneDeviceCrossDeviceOps()
 
-  if any([d.device_type.lower() != "gpu" for d in using_devices]):
+  if any(d.device_type.lower() != "gpu" for d in using_devices):
     logging.warning("Not all devices in DistributionStrategy are visible to "
                     "TensorFlow session.")
     return ReductionToOneDeviceCrossDeviceOps()
@@ -420,7 +420,7 @@ def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
   Returns:
     list of reduced tensors
   """
-  alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
+  alg_contains_shuffle = any(n in alg for n in ['pscpu', 'psgpu'])
   is_hierarchical = '/' in alg
   if 'pscpu' in alg:
     aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
@@ -547,11 +547,11 @@ def _aggregate_grads(gradients):
 
   if len(gradients) == 1:
     return gradients[0]
-  if all([isinstance(g, ops.Tensor) for g in gradients]):
+  if all(isinstance(g, ops.Tensor) for g in gradients):
     return gen_math_ops.add_n(gradients)
   else:
-    assert all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
-                for g in gradients])
+    assert all(isinstance(g, (ops.Tensor, ops.IndexedSlices))
+               for g in gradients)
     indexed_slices_list = []
     for grad in gradients:
       # TODO(xpan): Support nested IndexedSlices and core IndexedSlices
@@ -89,8 +89,8 @@ def _parse_func_attrs(attributes):
   """
   attrs = {}
   for key, value in attributes.items():
-    if not any([re.match(reg, key)
-                for reg in WHITELIST_FUNCTION_ATTRIBUTE_REGEX]):
+    if not any(re.match(reg, key)
+               for reg in WHITELIST_FUNCTION_ATTRIBUTE_REGEX):
       raise ValueError("Attribute name is not whitelisted. "
                        "Whitelisted: prefix %s, got: %s" %
                        (WHITELIST_FUNCTION_ATTRIBUTE_REGEX, key))
@@ -874,7 +874,7 @@ def func_graph_from_py_func(func, arg_names, arg_types, name=None,
     # If func only returned one value, make it a tuple.
     if not isinstance(outputs, (list, tuple)):
       outputs = (outputs,)
-    if any([_ is None for _ in outputs]):
+    if any(_ is None for _ in outputs):
       raise ValueError("Function %s can not return None." % name)
     # Ensures each output is a Tensor in the function graph.
     outputs = [ops.convert_to_tensor(t) for t in outputs]
@@ -1190,7 +1190,7 @@ def get_extra_args():
 
 
 def _type_list_to_str(types):
-  if any([_ not in _DTYPE_TO_STR for _ in types]):
+  if any(_ not in _DTYPE_TO_STR for _ in types):
     raise ValueError("Unsupported dtypes: %s" % types)
   return "".join([_DTYPE_TO_STR[_] for _ in types])
 
@@ -600,11 +600,11 @@ class ScopedMetaGraphTest(test.TestCase):
     with graph.as_default():
       variables.Variable(initial_value=1.0, trainable=True)
       self.assertTrue(
-          all([
+          all(
              graph.get_collection(key)
              for key in
              [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES]
-          ]))
+          ))
       meta_graph.export_scoped_meta_graph(
           filename=meta_graph_filename, graph=graph)
 
@@ -570,7 +570,7 @@ class OpDefLibrary(object):
                 "than minimum length %d." %
                 (input_name, op_type_name, len(values), num_attr.minimum))
         # All tensors must have the same base type.
-        if any([bt != base_types[0] for bt in base_types]):
+        if any(bt != base_types[0] for bt in base_types):
           raise TypeError(
               "All tensors passed to '%s' of '%s' Op "
              "must have the same type." %
@@ -1044,7 +1044,7 @@ def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
         return True
     return False
   except errors_impl.NotFoundError as e:
-    if not all([x in str(e) for x in ["CUDA", "not find"]]):
+    if not all(x in str(e) for x in ["CUDA", "not find"]):
       raise e
     else:
       logging.error(str(e))
@@ -2325,7 +2325,7 @@ def concatenate(tensors, axis=-1):
   else:
     axis = 0
 
-  if py_all([is_sparse(x) for x in tensors]):
+  if py_all(is_sparse(x) for x in tensors):
     return sparse_ops.sparse_concat(axis, tensors)
   else:
     return array_ops.concat([to_dense(x) for x in tensors], axis)
@@ -768,7 +768,7 @@ class Layer(checkpointable.CheckpointableBase):
 
     if context.executing_eagerly():
       # Accept NumPy inputs by converting to Tensors when executing eagerly.
-      if all([isinstance(x, (np.ndarray, float, int)) for x in input_list]):
+      if all(isinstance(x, (np.ndarray, float, int)) for x in input_list):
        inputs = nest.map_structure(ops.convert_to_tensor, inputs)
        input_list = nest.flatten(inputs)
 
@@ -1442,8 +1442,7 @@ class Layer(checkpointable.CheckpointableBase):
                        ', but the layer isn\'t built. '
                        'You can build it manually via: `' + self.name +
                        '.build(batch_input_shape)`.')
-    weight_shapes = [w.shape.as_list() for w in self.weights]
-    return int(sum([np.prod(w) for w in weight_shapes]))
+    return int(sum(np.prod(w.shape.as_list()) for w in self.weights))
 
   @property
   def output_shape(self):
@@ -1758,7 +1757,7 @@ def have_all_keras_metadata(iterable_or_element):
     iterable = [iterable_or_element]
   else:
     iterable = nest.flatten(iterable_or_element)
-  return all([hasattr(x, '_keras_history') for x in iterable])
+  return all(hasattr(x, '_keras_history') for x in iterable)
 
 
 def collect_previous_mask(input_tensors):
@@ -383,7 +383,11 @@ def validate_inputs(x, y, distribution_strategy):
   for i in [x, y]:
     if isinstance(i, dataset_ops.Dataset):
       shapes = nest.flatten(i.output_shapes)
-      if any([not s.is_fully_defined() for s in shapes]):
+      try:
+        s = next(s for s in shapes if not s.is_fully_defined())
+      except StopIteration:
+        continue
+      else:
         raise ValueError(
             'Using TPUs currently requires fully defined shapes. Either use '
             'set_shape() on the input tensors or use '
@@ -428,8 +428,8 @@ class Network(base_layer.Layer):
 
   @property
   def stateful(self):
-    return any([(hasattr(layer, 'stateful') and layer.stateful)
-                for layer in self.layers])
+    return any((hasattr(layer, 'stateful') and layer.stateful)
+               for layer in self.layers)
 
   def reset_states(self):
     for layer in self.layers:
@@ -917,7 +917,7 @@ def save_attributes_to_hdf5_group(group, name, data):
     chunked_data = np.array_split(data_npy, num_chunks)
 
     # This will never loop forever thanks to the test above.
-    while any([x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data]):
+    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
       num_chunks += 1
       chunked_data = np.array_split(data_npy, num_chunks)
 
@@ -58,10 +58,10 @@ def _map_nested(data, func):
 def _nested_all(data, cond_func):
   """Checks if all elements in a nested structure satisfy cond_func."""
   if isinstance(data, (tuple, list)):
-    return all([_nested_all(nested_data, cond_func) for nested_data in data])
+    return all(_nested_all(nested_data, cond_func) for nested_data in data)
   elif isinstance(data, dict):
     return all(
-        [_nested_all(nested_data, cond_func) for nested_data in data.values()])
+        _nested_all(nested_data, cond_func) for nested_data in data.values())
   else:
     return cond_func(data)
 
@@ -69,7 +69,7 @@ def _nested_all(data, cond_func):
 def _nested_any(data, cond_func):
   """Checks if any nested_elements in a nested structure satisfy cond_func."""
   if isinstance(data, (tuple, list)):
-    return any([_nested_any(nested_data, cond_func) for nested_data in data])
+    return any(_nested_any(nested_data, cond_func) for nested_data in data)
   elif isinstance(data, dict):
     return any(
         [_nested_any(nested_data, cond_func) for nested_data in data.values()])
@@ -212,7 +212,7 @@ class _Merge(Layer):
     if len(mask) != len(inputs):
       raise ValueError('The lists `inputs` and `mask` '
                        'should have the same length.')
-    if all([m is None for m in mask]):
+    if all(m is None for m in mask):
      return None
     masks = [array_ops.expand_dims(m, axis=0) for m in mask if m is not None]
     return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
@@ -378,7 +378,7 @@ class Concatenate(_Merge):
     if not isinstance(input_shape, list) or len(input_shape) < 2:
       raise ValueError('A `Concatenate` layer should be called '
                        'on a list of at least 2 inputs')
-    if all([shape is None for shape in input_shape]):
+    if all(shape is None for shape in input_shape):
      return
     reduced_inputs_shapes = [list(shape) for shape in input_shape]
     shape_set = set()
@@ -418,7 +418,7 @@ class Concatenate(_Merge):
     if len(mask) != len(inputs):
       raise ValueError('The lists `inputs` and `mask` '
                        'should have the same length.')
-    if all([m is None for m in mask]):
+    if all(m is None for m in mask):
      return None
     # Make a list of masks while making sure
     # the dimensionality of each mask
@@ -77,7 +77,7 @@ def count_params(weights):
   Returns:
     The total number of scalars composing the weights
   """
-  return int(np.sum([np.prod(p.get_shape().as_list()) for p in set(weights)]))
+  return int(sum(np.prod(p.get_shape().as_list()) for p in set(weights)))
 
 
 def print_summary(model, line_length=None, positions=None, print_fn=None):
@@ -863,13 +863,13 @@ class ControlFlowTest(test.TestCase):
       # Should just be [1, 1], but possibly a sparse representation
       gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
       dense_gv = [
-          sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
+          sum(y for (x, y) in zip(gi, gv) if x == i) for i in range(2)
       ]
       self.assertAllEqual(dense_gv, [1.0, 1.0])
       # Should be [0, 2], as the else forwards v1[1] twice
       gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
       dense_gv = [
-          sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
+          sum(y for (x, y) in zip(gi, gv) if x == i) for i in range(2)
       ]
       self.assertAllEqual(dense_gv, [0.0, 2.0])
 
@@ -2809,7 +2809,7 @@ class ControlFlowTest(test.TestCase):
      self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
      name = "gradients/while/stopped_grad"
      all_ops = x.graph.get_operations()
-     self.assertFalse(any([name in op.name for op in all_ops]))
+     self.assertFalse(any(name in op.name for op in all_ops))
 
   @test_util.disable_control_flow_v2("b/117954949")
   def testWhileGradGradFail(self):
@@ -59,7 +59,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase):
                   2**31 - 1, 2**31, 2**32 - 1, 2**32, -2**32 + 1, -2**32,
                   -2**63 + 1, 2**63 - 1]
     def count_bits(x):
-      return sum([bin(z).count("1") for z in six.iterbytes(x.tobytes())])
+      return sum(bin(z).count("1") for z in six.iterbytes(x.tobytes()))
     for dtype in dtype_list:
       with self.cached_session(use_gpu=True) as sess:
         print("PopulationCount test: ", dtype)
@@ -158,7 +158,7 @@ def Assert(condition, data, summarize=None, name=None):
 
   with ops.name_scope(name, "Assert", [condition, data]) as name:
     xs = ops.convert_n_to_tensor(data)
-    if all([x.dtype in {dtypes.string, dtypes.int32} for x in xs]):
+    if all(x.dtype in {dtypes.string, dtypes.int32} for x in xs):
       # As a simple heuristic, we assume that string and int32 are
       # on host to avoid the need to use cond. If it is not case,
       # we will pay the price copying the tensor to host memory.
@@ -457,19 +457,19 @@ def merge(inputs, name=None):
     ValueError: If any of the inputs is None, or inputs are IndexedSlices and
       some but not all have a dense_shape property.
   """
-  if any([inp is None for inp in inputs]):
+  if any(inp is None for inp in inputs):
     raise ValueError("At least one of the merge inputs is None: %s" % inputs)
   with ops.name_scope(name, "Merge", inputs) as name:
     inputs = [
         ops.internal_convert_to_tensor_or_indexed_slices(inp, as_ref=True)
        for inp in inputs
     ]
-    if all([isinstance(v, ops.Tensor) for v in inputs]):
-      if all([v.dtype._is_ref_dtype for v in inputs]):  # pylint: disable=protected-access
+    if all(isinstance(v, ops.Tensor) for v in inputs):
+      if all(v.dtype._is_ref_dtype for v in inputs):  # pylint: disable=protected-access
        return gen_control_flow_ops.ref_merge(inputs, name)
      else:
        return gen_control_flow_ops.merge(inputs, name)
-    elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):
+    elif all(isinstance(v, sparse_tensor.SparseTensor) for v in inputs):
      # Only handle the case when all inputs are SparseTensor.
      values, _ = merge([inp.values for inp in inputs], name=name)
      indices, chosen_index = gen_control_flow_ops.merge(
@@ -557,7 +557,7 @@ def _SetShapeInvariants(input_vars, enter_vars, shapes):
   if shapes is None:
     return
   flat_shapes = nest.flatten(shapes)
-  if not all([isinstance(s, tensor_shape.TensorShape) for s in flat_shapes]):
+  if not all(isinstance(s, tensor_shape.TensorShape) for s in flat_shapes):
    raise ValueError("`shapes` must be a (possibly nested) list of shapes.")
   # Check that the shapes of the inputs are less than the shape invariants,
   # and set the shapes of `enter_vars` to the shape invariants.
@@ -79,7 +79,7 @@ def _as_shape_list(shapes,
     shapes = [shapes]
   shapes = [tensor_shape.as_shape(shape) for shape in shapes]
   if not unknown_dim_allowed:
-    if any([not shape.is_fully_defined() for shape in shapes]):
+    if any(not shape.is_fully_defined() for shape in shapes):
      raise ValueError("All shapes must be fully defined: %s" % shapes)
   if not unknown_rank_allowed:
     if any([shape.dims is None for shape in shapes]):
@@ -198,11 +198,11 @@ class QueueBase(object):
       raise TypeError("A list of queues expected")
 
     dtypes = queues[0].dtypes
-    if not all([dtypes == q.dtypes for q in queues[1:]]):
+    if not all(dtypes == q.dtypes for q in queues[1:]):
      raise TypeError("Queues do not have matching component dtypes.")
 
     names = queues[0].names
-    if not all([names == q.names for q in queues[1:]]):
+    if not all(names == q.names for q in queues[1:]):
      raise TypeError("Queues do not have matching component names.")
 
     queue_shapes = [q.shapes for q in queues]
@@ -895,7 +895,7 @@ def _HasAnyNotNoneGrads(grads, op):
     if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
       return True
     if out_grad and isinstance(out_grad, collections.Sequence):
-      if any([g is not None for g in out_grad]):
+      if any(g is not None for g in out_grad):
        return True
   return False
 
@@ -1110,11 +1110,11 @@ def _AggregatedGrads(grads,
      assert control_flow_util.IsLoopSwitch(op)
      continue
     # Grads have to be Tensors or IndexedSlices
-    if (isinstance(out_grad, collections.Sequence) and not all([
+    if (isinstance(out_grad, collections.Sequence) and not all(
        isinstance(g, (ops.Tensor, ops.IndexedSlices))
        for g in out_grad
        if g is not None
-    ])):
+    )):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
     # Aggregate multiple gradients, and convert [] to None.
@@ -1122,7 +1122,7 @@ def _AggregatedGrads(grads,
     if len(out_grad) < 2:
       used = "nop"
       out_grads[i] = out_grad[0]
-    elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
+    elif all(isinstance(g, ops.Tensor) for g in out_grad if g is not None):
      tensor_shape = _AccumulatorShape(out_grad)
      if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
          and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
@@ -1152,9 +1152,8 @@ class PFor(object):
         continue
 
       converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
-      some_input_converted = any(
-          [self._was_converted(x) for x in y_op.inputs])
-      some_input_stacked = any([x.is_stacked for x in converted_inputs])
+      some_input_converted = any(self._was_converted(x) for x in y_op.inputs)
+      some_input_stacked = any(x.is_stacked for x in converted_inputs)
 
       converted_control_ops = set()
       some_control_input_converted = False
@@ -1198,7 +1197,7 @@ class PFor(object):
        # All inputs are unstacked or uncoverted but some control inputs are
        # converted.
        # TODO(rachelim): Handle the case where some inputs are sparsely
-       # stacked (i.e. any([x.is_sparse_stacked for x in converted_inputs]))
+       # stacked (i.e. any(x.is_sparse_stacked for x in converted_inputs))
        new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
                            [x.dtype for x in y_op.outputs],
                            y_op.node_def.attr)
@@ -117,7 +117,7 @@ def _infer_state_dtype(explicit_dtype, state):
     inferred_dtypes = [element.dtype for element in nest.flatten(state)]
     if not inferred_dtypes:
       raise ValueError("Unable to infer dtype from empty state.")
-    all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
+    all_same = all(x == inferred_dtypes[0] for x in inferred_dtypes)
     if not all_same:
       raise ValueError(
           "State has tensors of different inferred_dtypes. Unable to infer a "
@@ -1456,7 +1456,7 @@ class MultiRNNCell(RNNCell):
     if self._state_is_tuple:
       return tuple(cell.state_size for cell in self._cells)
     else:
-      return sum([cell.state_size for cell in self._cells])
+      return sum(cell.state_size for cell in self._cells)
 
   @property
   def output_size(self):
@@ -680,7 +680,7 @@ class _VariableStore(object):
            "Partitioner returned a partition list that does not match the "
            "Variable's rank: %s vs. %s" % (partitions, shape))
 
-      if any([p < 1 for p in partitions]):
+      if any(p < 1 for p in partitions):
        raise ValueError(
            "Partitioner returned zero partitions for some axes: %s" %
            partitions)
@@ -2482,21 +2482,21 @@ class PartitionedVariable(object):
           "variable_list is not a list or tuple: %s" % variable_list)
     if not isinstance(partitions, (list, tuple)):
       raise TypeError("partitions is not a list or tuple: %s" % partitions)
-    if not all([p >= 1 for p in partitions]):
+    if not all(p >= 1 for p in partitions):
       raise ValueError("partition values must be positive: %s" % partitions)
     if not variable_list:
       raise ValueError("variable_list may not be empty")
     # pylint: disable=protected-access
     for v in variable_list:
       # Sort the variable_list lexicographically according to var offset value.
-      if not all([v._get_save_slice_info() is not None for v in variable_list]):
+      if not all(v._get_save_slice_info() is not None for v in variable_list):
        raise ValueError(
            "All variables must have a save_slice_info available: %s"
            % [v.name for v in variable_list])
       if len(shape) != len(partitions):
        raise ValueError("len(shape) != len(partitions): %s vs. %s"
                         % (shape, partitions))
-      if not all([v._get_save_slice_info().full_shape == shape]):
+      if v._get_save_slice_info().full_shape != shape:
        raise ValueError(
            "All variables' full shapes must match shape: %s; "
            "but full shapes were: %s"
@@ -2523,7 +2523,7 @@ class PartitionedVariable(object):
     return len(self._variable_list)
 
   def _partition_axes(self):
-    if all([p == 1 for p in self._partitions]):
+    if all(p == 1 for p in self._partitions):
      return [0]
     else:
      return [i for i, p in enumerate(self._partitions) if p > 1]
@@ -509,7 +509,7 @@ def _grad_fn(ys, xs, args, func_graph):
 
   # TODO(b/118712257): Handle the case when grad_outs has None's e.g. when there
   # is a tf.StopGradient in the loop body.
-  assert all([g is not None for g in grad_outs])
+  assert all(g is not None for g in grad_outs)
   counter = args[0]
   total_iters = args[1]
   return [counter + 1, total_iters] + grad_outs
@@ -63,7 +63,7 @@ def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors,
      print("It's likely that your checkpoint file has been compressed "
            "with SNAPPY.")
     if ("Data loss" in str(e) and
-        (any([e in file_name for e in [".index", ".meta", ".data"]]))):
+        any(e in file_name for e in [".index", ".meta", ".data"])):
      proposed_file = ".".join(file_name.split(".")[0:-1])
      v2_file_error_template = """
 It's likely that this is a V2 checkpoint and you need to provide the filename
@@ -230,7 +230,7 @@ def _evaluate_once(checkpoint_path,
   hooks = list(hooks or [])
 
   if eval_ops is not None:
-    if any([isinstance(h, _MultiStepStopAfterNEvalsHook) for h in hooks]):
+    if any(isinstance(h, _MultiStepStopAfterNEvalsHook) for h in hooks):
      steps_per_run_variable = \
          basic_session_run_hooks.get_or_create_steps_per_run_variable()
      update_eval_step = state_ops.assign_add(
@@ -248,7 +248,7 @@ def _warm_start_var_with_vocab(var,
     prev_tensor_name = _infer_var_name(var)
 
   # TODO(eddz): Fix functionality for rank-1 Variables (like FC biases).
-  total_v_first_axis = sum([v.get_shape().as_list()[0] for v in var])
+  total_v_first_axis = sum(v.get_shape().as_list()[0] for v in var)
   for v in var:
     v_shape = v.get_shape().as_list()
     slice_info = v._get_save_slice_info()
@@ -333,12 +333,12 @@ def _get_grouped_variables(vars_to_warm_start):
        ops.GraphKeys.TRAINABLE_VARIABLES,
        scope=vars_to_warm_start)
   elif isinstance(vars_to_warm_start, list):
-    if all([isinstance(v, str) for v in vars_to_warm_start]):
+    if all(isinstance(v, str) for v in vars_to_warm_start):
      list_of_vars = []
      for v in vars_to_warm_start:
        list_of_vars += ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                           scope=v)
-    elif all([checkpoint_utils._is_variable(v) for v in vars_to_warm_start]):  # pylint: disable=protected-access
+    elif all(checkpoint_utils._is_variable(v) for v in vars_to_warm_start):  # pylint: disable=protected-access
      list_of_vars = vars_to_warm_start
     else:
      raise ValueError("If `vars_to_warm_start` is a list, it must be all "
@@ -126,9 +126,9 @@ def _FilterNonCoreGoldenFiles(golden_file_list):
   filtered_file_list = []
   filtered_package_prefixes = ['tensorflow.%s.' % p for p in _NON_CORE_PACKAGES]
   for f in golden_file_list:
-    if any([
+    if any(
        f.rsplit('/')[-1].startswith(pre) for pre in filtered_package_prefixes
-    ]):
+    ):
      continue
     filtered_file_list.append(f)
   return filtered_file_list