From 75f12a50203ca31370c5edc02d650ee4a47fdf03 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Schoentgen?=
Date: Thu, 3 Jan 2019 22:36:43 +0100
Subject: [PATCH] Fix several DeprecationWarning: invalid escape sequence
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Mickaël Schoentgen
---
 .../contrib/kernel_methods/python/losses.py       |  2 +-
 .../python/learn/learn_io/generator_io_test.py    |  4 ++--
 .../opt/python/training/adam_gs_optimizer.py      |  2 +-
 tensorflow/contrib/optimizer_v2/adam.py           |  2 +-
 tensorflow/python/client/session_test.py          |  2 +-
 .../python/feature_column/feature_column_test.py  | 12 ++++++------
 .../feature_column/feature_column_v2_test.py      | 16 ++++++++--------
 .../python/kernel_tests/confusion_matrix_test.py  |  4 ++--
 tensorflow/python/training/adam.py                |  2 +-
 tensorflow/tools/ci_build/copy_binary.py          |  2 +-
 .../scripts_allreduce/k8s_generate_yaml_lib.py    |  2 +-
 .../windows/msvc_wrapper_for_nvcc.py              |  2 +-
 .../windows/msvc_wrapper_for_nvcc.py              |  2 +-
 .../gcc-nvcc/windows/msvc_wrapper_for_nvcc.py     |  2 +-
 14 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/tensorflow/contrib/kernel_methods/python/losses.py b/tensorflow/contrib/kernel_methods/python/losses.py
index 4ef0a66a524..294a7d69a70 100644
--- a/tensorflow/contrib/kernel_methods/python/losses.py
+++ b/tensorflow/contrib/kernel_methods/python/losses.py
@@ -34,7 +34,7 @@ def sparse_multiclass_hinge_loss(
     scope=None,
     loss_collection=ops.GraphKeys.LOSSES,
     reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
-  """Adds Ops for computing the multiclass hinge loss.
+  r"""Adds Ops for computing the multiclass hinge loss.
 
   The implementation is based on the following paper:
   On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines
diff --git a/tensorflow/contrib/learn/python/learn/learn_io/generator_io_test.py b/tensorflow/contrib/learn/python/learn/learn_io/generator_io_test.py
index 5e90d1fa205..318046733bf 100644
--- a/tensorflow/contrib/learn/python/learn/learn_io/generator_io_test.py
+++ b/tensorflow/contrib/learn/python/learn/learn_io/generator_io_test.py
@@ -174,7 +174,7 @@ class GeneratorIoTest(test.TestCase):
       return np.arange(32, 36)
 
     with self.cached_session():
-      with self.assertRaisesRegexp(TypeError, 'x\(\) must be generator'):
+      with self.assertRaisesRegexp(TypeError, r'x\(\) must be generator'):
         failing_input_fn = generator_io.generator_input_fn(
             generator, batch_size=2, shuffle=False, num_epochs=1)
         failing_input_fn()
@@ -185,7 +185,7 @@ class GeneratorIoTest(test.TestCase):
       yield np.arange(32, 36)
 
     with self.cached_session():
-      with self.assertRaisesRegexp(TypeError, 'x\(\) must yield dict'):
+      with self.assertRaisesRegexp(TypeError, r'x\(\) must yield dict'):
         failing_input_fn = generator_io.generator_input_fn(
             generator, batch_size=2, shuffle=False, num_epochs=1)
         failing_input_fn()
diff --git a/tensorflow/contrib/opt/python/training/adam_gs_optimizer.py b/tensorflow/contrib/opt/python/training/adam_gs_optimizer.py
index 3fb649ea82e..855fbf58bf4 100644
--- a/tensorflow/contrib/opt/python/training/adam_gs_optimizer.py
+++ b/tensorflow/contrib/opt/python/training/adam_gs_optimizer.py
@@ -41,7 +41,7 @@ class AdamGSOptimizer(optimizer.Optimizer):
 
   def __init__(self, global_step=0, learning_rate=0.001, beta1=0.9,
                beta2=0.999, epsilon=1e-8, use_locking=False, name="Adam"):
-    """Construct a new Adam optimizer.
+    r"""Construct a new Adam optimizer.
 
     Branched from tf.train.AdamOptimizer. The only difference is to pass
     global step for computing beta1 and beta2 accumulators, instead of having
diff --git a/tensorflow/contrib/optimizer_v2/adam.py b/tensorflow/contrib/optimizer_v2/adam.py
index 248ffb1f7eb..1b7800f324b 100644
--- a/tensorflow/contrib/optimizer_v2/adam.py
+++ b/tensorflow/contrib/optimizer_v2/adam.py
@@ -36,7 +36,7 @@ class AdamOptimizer(optimizer_v2.OptimizerV2):
 
   def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,
                epsilon=1e-8, use_locking=False, name="Adam"):
-    """Construct a new Adam optimizer.
+    r"""Construct a new Adam optimizer.
 
     Initialization:
 
diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py
index c4a118a4140..da6218663de 100644
--- a/tensorflow/python/client/session_test.py
+++ b/tensorflow/python/client/session_test.py
@@ -2036,7 +2036,7 @@ class SessionTest(test_util.TensorFlowTestCase):
     with self.cached_session() as sess:
       a = array_ops.placeholder(dtype=dtypes.string)
       with self.assertRaisesRegexp(
-          TypeError, 'Type of feed value 1 with type <(\w+) \'int\'> is not'):
+          TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
         sess.run(a, feed_dict={a: 1})
 
 
diff --git a/tensorflow/python/feature_column/feature_column_test.py b/tensorflow/python/feature_column/feature_column_test.py
index daa0a3b3a4b..0ded2bf8c9f 100644
--- a/tensorflow/python/feature_column/feature_column_test.py
+++ b/tensorflow/python/feature_column/feature_column_test.py
@@ -1832,7 +1832,7 @@ class LinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):
      fc.linear_model(features, [price1, price2])
 
   def test_subset_of_static_batch_size_mismatch(self):
@@ -1847,7 +1847,7 @@ class LinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc.linear_model(features, [price1, price2, price3])
 
   def test_runtime_batch_size_mismatch(self):
@@ -2467,7 +2467,7 @@ class _LinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      get_keras_linear_model_predictions(features, [price1, price2])
 
   def test_subset_of_static_batch_size_mismatch(self):
@@ -2482,7 +2482,7 @@ class _LinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      get_keras_linear_model_predictions(features, [price1, price2, price3])
 
   def test_runtime_batch_size_mismatch(self):
@@ -2974,7 +2974,7 @@ class FunctionalInputLayerTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc.input_layer(features, [price1, price2])
 
   def test_subset_of_static_batch_size_mismatch(self):
@@ -2989,7 +2989,7 @@ class FunctionalInputLayerTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc.input_layer(features, [price1, price2, price3])
 
   def test_runtime_batch_size_mismatch(self):
diff --git a/tensorflow/python/feature_column/feature_column_v2_test.py b/tensorflow/python/feature_column/feature_column_v2_test.py
index a2474253697..2a864a0573a 100644
--- a/tensorflow/python/feature_column/feature_column_v2_test.py
+++ b/tensorflow/python/feature_column/feature_column_v2_test.py
@@ -2052,7 +2052,7 @@ class LinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      model = fc.LinearModel([price1, price2])
      model(features)
 
@@ -2068,7 +2068,7 @@ class LinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      model = fc.LinearModel([price1, price2, price3])
      model(features)
 
@@ -2818,7 +2818,7 @@ class OldLinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc_old.linear_model(features, [price1, price2])
 
   def test_subset_of_static_batch_size_mismatch(self):
@@ -2833,7 +2833,7 @@ class OldLinearModelTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc_old.linear_model(features, [price1, price2, price3])
 
   def test_runtime_batch_size_mismatch(self):
@@ -3435,7 +3435,7 @@ class DenseFeaturesTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc.DenseFeatures([price1, price2])(features)
 
   def test_subset_of_static_batch_size_mismatch(self):
@@ -3450,7 +3450,7 @@ class DenseFeaturesTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc.DenseFeatures([price1, price2, price3])(features)
 
   def test_runtime_batch_size_mismatch(self):
@@ -4141,7 +4141,7 @@ class FunctionalInputLayerTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc_old.input_layer(features, [price1, price2])
 
   def test_subset_of_static_batch_size_mismatch(self):
@@ -4156,7 +4156,7 @@ class FunctionalInputLayerTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         ValueError,
-        'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+        r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
      fc_old.input_layer(features, [price1, price2, price3])
 
   def test_runtime_batch_size_mismatch(self):
diff --git a/tensorflow/python/kernel_tests/confusion_matrix_test.py b/tensorflow/python/kernel_tests/confusion_matrix_test.py
index ae13c8e32e5..6670f0326cd 100644
--- a/tensorflow/python/kernel_tests/confusion_matrix_test.py
+++ b/tensorflow/python/kernel_tests/confusion_matrix_test.py
@@ -472,7 +472,7 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
     }
     with self.assertRaisesRegexp(
         errors_impl.InvalidArgumentError,
-        "Can not squeeze dim\[2\]"):
+        r"Can not squeeze dim\[2\]"):
      dynamic_labels.eval(feed_dict=feed_dict)
     self.assertAllEqual(
         prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@@ -500,7 +500,7 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
         label_values, dynamic_labels.eval(feed_dict=feed_dict))
     with self.assertRaisesRegexp(
         errors_impl.InvalidArgumentError,
-        "Can not squeeze dim\[2\]"):
+        r"Can not squeeze dim\[2\]"):
      dynamic_predictions.eval(feed_dict=feed_dict)
 
 
diff --git a/tensorflow/python/training/adam.py b/tensorflow/python/training/adam.py
index 0c701f47122..c204a5c1ee5 100644
--- a/tensorflow/python/training/adam.py
+++ b/tensorflow/python/training/adam.py
@@ -39,7 +39,7 @@ class AdamOptimizer(optimizer.Optimizer):
 
   def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,
                epsilon=1e-8, use_locking=False, name="Adam"):
-    """Construct a new Adam optimizer.
+    r"""Construct a new Adam optimizer.
 
     Initialization:
 
diff --git a/tensorflow/tools/ci_build/copy_binary.py b/tensorflow/tools/ci_build/copy_binary.py
index 148526492d2..40a74437456 100755
--- a/tensorflow/tools/ci_build/copy_binary.py
+++ b/tensorflow/tools/ci_build/copy_binary.py
@@ -33,7 +33,7 @@ import tempfile
 import zipfile
 
 TF_NIGHTLY_REGEX = (r"(.+)tf_nightly(|_gpu)-(\d\.[\d]{1,2}"
-                    "\.\d.dev[\d]{0,8})-(.+)\.whl")
+                    r"\.\d.dev[\d]{0,8})-(.+)\.whl")
 BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"
 
 
diff --git a/tensorflow/tools/dist_test/scripts_allreduce/k8s_generate_yaml_lib.py b/tensorflow/tools/dist_test/scripts_allreduce/k8s_generate_yaml_lib.py
index c570d1a9f83..038a712d538 100644
--- a/tensorflow/tools/dist_test/scripts_allreduce/k8s_generate_yaml_lib.py
+++ b/tensorflow/tools/dist_test/scripts_allreduce/k8s_generate_yaml_lib.py
@@ -195,7 +195,7 @@ def generate_RSA(bits=2048, exponent=65537):
 
 def get_change_ssh_port(use_hostnet, port):
   if use_hostnet == 1:
-    return "sed -i '/Port 22/c\Port {}' /etc/ssh/sshd_config".format(port)
+    return r"sed -i '/Port 22/c\Port {}' /etc/ssh/sshd_config".format(port)
 
   return ''
 
diff --git a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
index 00483951af9..2ea20b8b530 100755
--- a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
+++ b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
@@ -104,7 +104,7 @@ def InvokeNvcc(argv, log=False):
   """
 
   src_files = [f for f in argv if
-               re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+               re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
   if len(src_files) == 0:
     raise Error('No source files found for cuda compilation.')
 
diff --git a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda9.0/windows/msvc_wrapper_for_nvcc.py b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda9.0/windows/msvc_wrapper_for_nvcc.py
index 859b3196d5d..2d0898e9cb5 100755
--- a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda9.0/windows/msvc_wrapper_for_nvcc.py
+++ b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda9.0/windows/msvc_wrapper_for_nvcc.py
@@ -104,7 +104,7 @@ def InvokeNvcc(argv, log=False):
   """
 
   src_files = [f for f in argv if
-               re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+               re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
   if len(src_files) == 0:
     raise Error('No source files found for cuda compilation.')
 
diff --git a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc/windows/msvc_wrapper_for_nvcc.py b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc/windows/msvc_wrapper_for_nvcc.py
index 859b3196d5d..2d0898e9cb5 100755
--- a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc/windows/msvc_wrapper_for_nvcc.py
+++ b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc/windows/msvc_wrapper_for_nvcc.py
@@ -104,7 +104,7 @@ def InvokeNvcc(argv, log=False):
   """
 
   src_files = [f for f in argv if
-               re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+               re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
   if len(src_files) == 0:
     raise Error('No source files found for cuda compilation.')
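
For reference, a minimal sketch of the warning these raw-string prefixes silence.
The snippet below is not part of the patch; it reuses one of the regex literals
touched above and assumes Python 3.6 or newer, where an unrecognized escape such
as \( in a plain string literal emits "DeprecationWarning: invalid escape
sequence" (newer releases escalate it to a SyntaxWarning):

    import re
    import warnings

    warnings.simplefilter("always")  # surface DeprecationWarning on stderr

    # Compiling source that contains '\(' in a plain (non-raw) string literal
    # triggers the deprecation warning at parse time.
    compile(r"pattern = 'x\(\) must be generator'", "<demo>", "exec")

    # The raw-string form used throughout this patch produces the same regex
    # text with no warning, and the pattern still matches as intended.
    pattern = r'x\(\) must be generator'
    assert re.search(pattern, 'x() must be generator')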