From bc99898e990a1f714732ec40a0a924aa0ee2fee5 Mon Sep 17 00:00:00 2001
From: Lukas Geiger
Date: Tue, 26 May 2020 22:15:07 +0100
Subject: [PATCH] Prefer generator expressions over list comprehensions

---
 tensorflow/lite/python/lite.py | 2 +-
 tensorflow/python/eager/function.py | 2 +-
 tensorflow/python/framework/subscribe.py | 3 +--
 tensorflow/python/framework/test_util.py | 2 +-
 tensorflow/python/keras/callbacks.py | 2 +-
 .../python/keras/layers/preprocessing/category_crossing.py | 4 ++--
 tensorflow/python/keras/layers/preprocessing/hashing.py | 2 +-
 tensorflow/python/keras/saving/hdf5_format.py | 2 +-
 tensorflow/tools/dockerfiles/assembler.py | 2 +-
 third_party/gpus/check_cuda_libs.py | 2 +-
 10 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/tensorflow/lite/python/lite.py b/tensorflow/lite/python/lite.py
index ce59c56a1d0..53814bb0c43 100644
--- a/tensorflow/lite/python/lite.py
+++ b/tensorflow/lite/python/lite.py
@@ -279,7 +279,7 @@ class QuantizationMode(object):
     })
 
     for node_def in self._graph_def.node:
-      if any([op in node_def.name for op in training_quant_ops]):
+      if any(op in node_def.name for op in training_quant_ops):
         return True
     return False
 
diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index ce495d772d0..37c802b9aa6 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -726,7 +726,7 @@ class _DelayedRewriteGradientFunctions(object):
     # pylint: enable=protected-access
 
     capture_mapping = dict(
-        zip([ops.tensor_id(t) for t in self._func_graph.outputs], op.outputs))
+        zip((ops.tensor_id(t) for t in self._func_graph.outputs), op.outputs))
     remapped_captures = [
         capture_mapping.get(ops.tensor_id(capture), capture)
         for capture in backwards_function.captured_inputs
diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py
index 8c3f91f62d8..c7cf8ce6070 100644
--- a/tensorflow/python/framework/subscribe.py
+++ b/tensorflow/python/framework/subscribe.py
@@ -58,8 +58,7 @@ def _recursive_apply(tensors, apply_fn):
       return tuple(tensors)
     return tensors_type(*tensors)  # collections.namedtuple
   elif tensors_type is dict:
-    return dict([(k, _recursive_apply(v, apply_fn)) for k, v in tensors.items()
-                ])
+    return dict((k, _recursive_apply(v, apply_fn)) for k, v in tensors.items())
   else:
     raise TypeError('_recursive_apply argument %r has invalid type %r' %
                     (tensors, tensors_type))
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index 4981e1b68fd..36b73c8ebc6 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -486,7 +486,7 @@ def skip_if_error(test_obj, error_type, messages=None):
   try:
     yield
   except error_type as e:
-    if not messages or any([message in str(e) for message in messages]):
+    if not messages or any(message in str(e) for message in messages):
       test_obj.skipTest("Skipping error: {}".format(str(e)))
     else:
       raise
diff --git a/tensorflow/python/keras/callbacks.py b/tensorflow/python/keras/callbacks.py
index db326ea32f0..58c24afbb61 100644
--- a/tensorflow/python/keras/callbacks.py
+++ b/tensorflow/python/keras/callbacks.py
@@ -2341,7 +2341,7 @@ class CSVLogger(Callback):
 
     if self.model.stop_training:
       # We set NA so that csv parsers do not fail for this last epoch.
-      logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
+      logs = dict((k, logs[k]) if k in logs else (k, 'NA') for k in self.keys)
 
     if not self.writer:
diff --git a/tensorflow/python/keras/layers/preprocessing/category_crossing.py b/tensorflow/python/keras/layers/preprocessing/category_crossing.py
index 79c27d9ec36..84e5332bea5 100644
--- a/tensorflow/python/keras/layers/preprocessing/category_crossing.py
+++ b/tensorflow/python/keras/layers/preprocessing/category_crossing.py
@@ -140,9 +140,9 @@ class CategoryCrossing(Layer):
   def call(self, inputs):
     depth_tuple = self._depth_tuple if self.depth else (len(inputs),)
     ragged_out = sparse_out = False
-    if any([ragged_tensor.is_ragged(inp) for inp in inputs]):
+    if any(ragged_tensor.is_ragged(inp) for inp in inputs):
       ragged_out = True
-    elif any([isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs]):
+    elif any(isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs):
       sparse_out = True
 
     outputs = []
diff --git a/tensorflow/python/keras/layers/preprocessing/hashing.py b/tensorflow/python/keras/layers/preprocessing/hashing.py
index 05b4445829a..f4a4ae0ccc8 100644
--- a/tensorflow/python/keras/layers/preprocessing/hashing.py
+++ b/tensorflow/python/keras/layers/preprocessing/hashing.py
@@ -168,7 +168,7 @@ class Hashing(Layer):
   def _process_input_list(self, inputs):
     # TODO(momernick): support ragged_cross_hashed with corrected fingerprint
     # and siphash.
-    if any([isinstance(inp, ragged_tensor.RaggedTensor) for inp in inputs]):
+    if any(isinstance(inp, ragged_tensor.RaggedTensor) for inp in inputs):
       raise ValueError('Hashing with ragged input is not supported yet.')
     sparse_inputs = [
         inp for inp in inputs if isinstance(inp, sparse_tensor.SparseTensor)
diff --git a/tensorflow/python/keras/saving/hdf5_format.py b/tensorflow/python/keras/saving/hdf5_format.py
index f3adb2d0695..800d609fe99 100644
--- a/tensorflow/python/keras/saving/hdf5_format.py
+++ b/tensorflow/python/keras/saving/hdf5_format.py
@@ -876,7 +876,7 @@ def _legacy_weights(layer):
     non_trainable_weights.
   """
   weights = layer.trainable_weights + layer.non_trainable_weights
-  if any([not isinstance(w, variables_module.Variable) for w in weights]):
+  if any(not isinstance(w, variables_module.Variable) for w in weights):
     raise NotImplementedError(
         'Save or restore weights that is not an instance of `tf.Variable` is '
         'not supported in h5, use `save_format=\'tf\'` instead. Got a model '
diff --git a/tensorflow/tools/dockerfiles/assembler.py b/tensorflow/tools/dockerfiles/assembler.py
index 7b3dcbd33c0..d2135f38ab4 100644
--- a/tensorflow/tools/dockerfiles/assembler.py
+++ b/tensorflow/tools/dockerfiles/assembler.py
@@ -558,7 +558,7 @@ def main(argv):
       # Only build images for host architecture
      proc_arch = platform.processor()
      is_x86 = proc_arch.startswith('x86')
-      if (is_x86 and any([arch in tag for arch in ['ppc64le']]) or
+      if (is_x86 and any(arch in tag for arch in ['ppc64le']) or
          not is_x86 and proc_arch not in tag):
        continue
 
diff --git a/third_party/gpus/check_cuda_libs.py b/third_party/gpus/check_cuda_libs.py
index 479380da975..686d36f5c77 100644
--- a/third_party/gpus/check_cuda_libs.py
+++ b/third_party/gpus/check_cuda_libs.py
@@ -62,7 +62,7 @@ def check_cuda_lib(path, check_soname=True):
     output = subprocess.check_output([objdump, "-p", path]).decode("utf-8")
    output = [line for line in output.splitlines() if "SONAME" in line]
    sonames = [line.strip().split(" ")[-1] for line in output]
-    if not any([soname == os.path.basename(path) for soname in sonames]):
+    if not any(soname == os.path.basename(path) for soname in sonames):
      raise ConfigError("None of the libraries match their SONAME: " + path)
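
Note: a minimal sketch of the idiom this patch applies, using hypothetical names rather than the TensorFlow sources. Passing a generator expression to any() lets it short-circuit on the first match instead of first building a complete list of booleans:

# Hypothetical stand-in for the checks above; names are illustrative only.
node_names = ["conv/FakeQuantWithMinMaxVars", "dense/BiasAdd", "softmax"]
training_quant_ops = ("FakeQuantWithMinMaxVars", "QuantizeAndDequantizeV2")

# List comprehension: evaluates every element and allocates a list before any() runs.
found_list = any([op in name for name in node_names for op in training_quant_ops])

# Generator expression: any() stops at the first True, with no intermediate list.
found_gen = any(op in name for name in node_names for op in training_quant_ops)

assert found_list == found_gen  # same result, less intermediate work

The same reasoning applies at the dict() and zip() call sites above: both consume any iterable, so dropping the surrounding brackets removes a temporary list without changing the result.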