Fix several DeprecationWarning: invalid escape sequence

Signed-off-by: Mickaël Schoentgen <contact@tiger-222.fr>
Mickaël Schoentgen 2019-01-03 22:36:43 +01:00
parent 41c30afbfb
commit 75f12a5020
14 changed files with 28 additions and 28 deletions
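
For context (a minimal sketch, not part of the diff): since Python 3.6, an unrecognized escape sequence such as \( inside an ordinary string literal emits a compile-time DeprecationWarning ("invalid escape sequence \("). The backslash is still preserved in the resulting string, so the affected regular expressions keep matching; the fix is simply to mark the literals as raw strings, which leaves the pattern byte-for-byte identical. Before/after, using one of the patterns touched below:

    # Before: compiling this literal emits
    # DeprecationWarning: invalid escape sequence \(
    pattern = 'Batch size \(first dimension\) of each feature must be same.'

    # After: the raw-string prefix produces the exact same string,
    # without the warning.
    pattern = r'Batch size \(first dimension\) of each feature must be same.'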


@@ -34,7 +34,7 @@ def sparse_multiclass_hinge_loss(
 scope=None,
 loss_collection=ops.GraphKeys.LOSSES,
 reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
-"""Adds Ops for computing the multiclass hinge loss.
+r"""Adds Ops for computing the multiclass hinge loss.
 The implementation is based on the following paper:
 On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines


@@ -174,7 +174,7 @@ class GeneratorIoTest(test.TestCase):
 return np.arange(32, 36)
 with self.cached_session():
-with self.assertRaisesRegexp(TypeError, 'x\(\) must be generator'):
+with self.assertRaisesRegexp(TypeError, r'x\(\) must be generator'):
 failing_input_fn = generator_io.generator_input_fn(
 generator, batch_size=2, shuffle=False, num_epochs=1)
 failing_input_fn()
@@ -185,7 +185,7 @@ class GeneratorIoTest(test.TestCase):
 yield np.arange(32, 36)
 with self.cached_session():
-with self.assertRaisesRegexp(TypeError, 'x\(\) must yield dict'):
+with self.assertRaisesRegexp(TypeError, r'x\(\) must yield dict'):
 failing_input_fn = generator_io.generator_input_fn(
 generator, batch_size=2, shuffle=False, num_epochs=1)
 failing_input_fn()


@@ -41,7 +41,7 @@ class AdamGSOptimizer(optimizer.Optimizer):
 def __init__(self, global_step=0, learning_rate=0.001,
 beta1=0.9, beta2=0.999, epsilon=1e-8,
 use_locking=False, name="Adam"):
-"""Construct a new Adam optimizer.
+r"""Construct a new Adam optimizer.
 Branched from tf.train.AdamOptimizer. The only difference is to pass
 global step for computing beta1 and beta2 accumulators, instead of having


@@ -36,7 +36,7 @@ class AdamOptimizer(optimizer_v2.OptimizerV2):
 def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
 use_locking=False, name="Adam"):
-"""Construct a new Adam optimizer.
+r"""Construct a new Adam optimizer.
 Initialization:


@@ -2036,7 +2036,7 @@ class SessionTest(test_util.TensorFlowTestCase):
 with self.cached_session() as sess:
 a = array_ops.placeholder(dtype=dtypes.string)
 with self.assertRaisesRegexp(
-TypeError, 'Type of feed value 1 with type <(\w+) \'int\'> is not'):
+TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
 sess.run(a, feed_dict={a: 1})


@@ -1832,7 +1832,7 @@ class LinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):
 fc.linear_model(features, [price1, price2])
 def test_subset_of_static_batch_size_mismatch(self):
@@ -1847,7 +1847,7 @@ class LinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc.linear_model(features, [price1, price2, price3])
 def test_runtime_batch_size_mismatch(self):
@@ -2467,7 +2467,7 @@ class _LinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 get_keras_linear_model_predictions(features, [price1, price2])
 def test_subset_of_static_batch_size_mismatch(self):
@@ -2482,7 +2482,7 @@ class _LinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 get_keras_linear_model_predictions(features, [price1, price2, price3])
 def test_runtime_batch_size_mismatch(self):
@@ -2974,7 +2974,7 @@ class FunctionalInputLayerTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc.input_layer(features, [price1, price2])
 def test_subset_of_static_batch_size_mismatch(self):
@@ -2989,7 +2989,7 @@ class FunctionalInputLayerTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc.input_layer(features, [price1, price2, price3])
 def test_runtime_batch_size_mismatch(self):


@@ -2052,7 +2052,7 @@ class LinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 model = fc.LinearModel([price1, price2])
 model(features)
@@ -2068,7 +2068,7 @@ class LinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 model = fc.LinearModel([price1, price2, price3])
 model(features)
@@ -2818,7 +2818,7 @@ class OldLinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc_old.linear_model(features, [price1, price2])
 def test_subset_of_static_batch_size_mismatch(self):
@@ -2833,7 +2833,7 @@ class OldLinearModelTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc_old.linear_model(features, [price1, price2, price3])
 def test_runtime_batch_size_mismatch(self):
@@ -3435,7 +3435,7 @@ class DenseFeaturesTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc.DenseFeatures([price1, price2])(features)
 def test_subset_of_static_batch_size_mismatch(self):
@@ -3450,7 +3450,7 @@ class DenseFeaturesTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc.DenseFeatures([price1, price2, price3])(features)
 def test_runtime_batch_size_mismatch(self):
@@ -4141,7 +4141,7 @@ class FunctionalInputLayerTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc_old.input_layer(features, [price1, price2])
 def test_subset_of_static_batch_size_mismatch(self):
@@ -4156,7 +4156,7 @@ class FunctionalInputLayerTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 ValueError,
-'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
+r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
 fc_old.input_layer(features, [price1, price2, price3])
 def test_runtime_batch_size_mismatch(self):


@@ -472,7 +472,7 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
 }
 with self.assertRaisesRegexp(
 errors_impl.InvalidArgumentError,
-"Can not squeeze dim\[2\]"):
+r"Can not squeeze dim\[2\]"):
 dynamic_labels.eval(feed_dict=feed_dict)
 self.assertAllEqual(
 prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@@ -500,7 +500,7 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
 label_values, dynamic_labels.eval(feed_dict=feed_dict))
 with self.assertRaisesRegexp(
 errors_impl.InvalidArgumentError,
-"Can not squeeze dim\[2\]"):
+r"Can not squeeze dim\[2\]"):
 dynamic_predictions.eval(feed_dict=feed_dict)


@@ -39,7 +39,7 @@ class AdamOptimizer(optimizer.Optimizer):
 def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
 use_locking=False, name="Adam"):
-"""Construct a new Adam optimizer.
+r"""Construct a new Adam optimizer.
 Initialization:


@@ -33,7 +33,7 @@ import tempfile
 import zipfile
 TF_NIGHTLY_REGEX = (r"(.+)tf_nightly(|_gpu)-(\d\.[\d]{1,2}"
-"\.\d.dev[\d]{0,8})-(.+)\.whl")
+r"\.\d.dev[\d]{0,8})-(.+)\.whl")
 BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"


@@ -195,7 +195,7 @@ def generate_RSA(bits=2048, exponent=65537):
 def get_change_ssh_port(use_hostnet, port):
 if use_hostnet == 1:
-return "sed -i '/Port 22/c\Port {}' /etc/ssh/sshd_config".format(port)
+return r"sed -i '/Port 22/c\Port {}' /etc/ssh/sshd_config".format(port)
 return ''


@@ -104,7 +104,7 @@ def InvokeNvcc(argv, log=False):
 """
 src_files = [f for f in argv if
-re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
 if len(src_files) == 0:
 raise Error('No source files found for cuda compilation.')


@@ -104,7 +104,7 @@ def InvokeNvcc(argv, log=False):
 """
 src_files = [f for f in argv if
-re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
 if len(src_files) == 0:
 raise Error('No source files found for cuda compilation.')


@@ -104,7 +104,7 @@ def InvokeNvcc(argv, log=False):
 """
 src_files = [f for f in argv if
-re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
 if len(src_files) == 0:
 raise Error('No source files found for cuda compilation.')
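
A quick way to reproduce the warning outside the test suite (assuming only a plain CPython 3.6+ interpreter, no TensorFlow-specific tooling):

    $ python3 -W default -c "x = '\('"
    <string>:1: DeprecationWarning: invalid escape sequence \(

The warning is raised when the source is byte-compiled, so modules loaded from a cached .pyc will not re-emit it; forcing recompilation (for example by removing stale __pycache__ directories) makes it show up again.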