diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 2b3e7531162..085eb9f10ed 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -462,7 +462,7 @@ class FunctionTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testWhileLoopCallsFunc(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       @function.Defun(dtypes.float32)
       def Times2(x):
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index 893351bdaf0..e56064bf362 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -2289,7 +2289,7 @@ class TensorFlowTestCase(googletest.TestCase):
     ``` python
     class MyOperatorTest(test_util.TensorFlowTestCase):
       def testMyOperator(self):
-        with self.session(use_gpu=True):
+        with self.session():
           valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
           result = MyOperator(valid_input).eval()
           self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
@@ -2339,7 +2339,7 @@ class TensorFlowTestCase(googletest.TestCase):
     ```python
     class MyOperatorTest(test_util.TensorFlowTestCase):
       def testMyOperator(self):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
           valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
           result = MyOperator(valid_input).eval()
           self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
diff --git a/tensorflow/python/keras/keras_parameterized.py b/tensorflow/python/keras/keras_parameterized.py
index bc153dcff96..a5392e379f5 100644
--- a/tensorflow/python/keras/keras_parameterized.py
+++ b/tensorflow/python/keras/keras_parameterized.py
@@ -420,7 +420,7 @@ def run_all_keras_modes(test_or_class=None,
 def _v1_session_test(f, test_or_class, config, *args, **kwargs):
   with ops.get_default_graph().as_default():
     with testing_utils.run_eagerly_scope(False):
-      with test_or_class.test_session(use_gpu=True, config=config):
+      with test_or_class.test_session(config=config):
         f(test_or_class, *args, **kwargs)
diff --git a/tensorflow/python/keras/layers/convolutional_test.py b/tensorflow/python/keras/layers/convolutional_test.py
index 3c099639e08..0496f51841a 100644
--- a/tensorflow/python/keras/layers/convolutional_test.py
+++ b/tensorflow/python/keras/layers/convolutional_test.py
@@ -42,7 +42,7 @@ class Conv1DTest(keras_parameterized.TestCase):
     stack_size = 3
     length = 7
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Conv1D,
           kwargs=kwargs,
@@ -54,7 +54,7 @@ class Conv1DTest(keras_parameterized.TestCase):
     stack_size = 3
     length = 7
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       if expected_output_shape is not None:
         expected_output_shape = (None,) + expected_output_shape
@@ -112,7 +112,7 @@ class Conv1DTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -131,14 +131,14 @@ class Conv1DTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
       self.assertEqual(layer.bias.constraint, b_constraint)
   def test_conv1d_recreate_conv(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(filters=1,
                                   kernel_size=3,
                                   strides=1,
@@ -151,7 +151,7 @@ class Conv1DTest(keras_parameterized.TestCase):
     self.assertEqual(outp1_shape, layer(inpt1).shape)
   def test_conv1d_recreate_conv_unknown_dims(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(filters=1,
                                   kernel_size=3,
                                   strides=1,
@@ -184,7 +184,7 @@ class Conv2DTest(keras_parameterized.TestCase):
     input_data_shape = (num_samples, num_row or 7, num_col or 6, stack_size)
     input_data = 10 * np.random.random(input_data_shape).astype(np.float32)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Conv2D,
           kwargs=kwargs,
@@ -205,7 +205,7 @@ class Conv2DTest(keras_parameterized.TestCase):
     input_data_shape = batch_shape + (num_row or 7, num_col or 6, stack_size)
     input_data = 10 * np.random.random(input_data_shape).astype(np.float32)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       if expected_output_shape is not None:
         expected_output_shape = (None,) + expected_output_shape
       testing_utils.layer_test(
@@ -272,7 +272,7 @@ class Conv2DTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -291,7 +291,7 @@ class Conv2DTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -313,7 +313,7 @@ class Conv3DTest(keras_parameterized.TestCase):
     num_col = 6
     depth = 5
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Conv3D,
           kwargs=kwargs,
@@ -331,7 +331,7 @@ class Conv3DTest(keras_parameterized.TestCase):
     num_col = 6
     depth = 5
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       if expected_output_shape is not None:
         expected_output_shape = (None,) + expected_output_shape
@@ -387,7 +387,7 @@ class Conv3DTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv3D(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -407,7 +407,7 @@ class Conv3DTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv3D(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -415,7 +415,7 @@ class Conv3DTest(keras_parameterized.TestCase):
   def test_conv3d_dynamic_shape(self):
     input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # Won't raise error here.
       testing_utils.layer_test(
           keras.layers.Conv3D,
@@ -564,7 +564,7 @@ class ConvSequentialTest(keras_parameterized.TestCase):
     kwargs['filters'] = 1
     kwargs['kernel_size'] = 3
     kwargs['dilation_rate'] = 2
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = conv_layer_cls(**kwargs)
       output1 = layer(np.zeros(input_shape1))
       self.assertEqual(output1.shape, expected_output_shape1)
@@ -607,7 +607,7 @@ class ConvSequentialTest(keras_parameterized.TestCase):
                           expected_output_shape1, expected_output_shape2)
   def test_dynamic_shape(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv3D(2, 3)
       input_shape = (5, None, None, 2)
       inputs = keras.Input(shape=input_shape)
@@ -626,7 +626,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
     shape = (num_samples, num_steps, input_dim)
     inputs = np.ones(shape)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # basic test
       testing_utils.layer_test(
           keras.layers.ZeroPadding1D,
@@ -682,7 +682,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
     inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
     # basic test
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.ZeroPadding2D,
           kwargs={
@@ -699,7 +699,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
           input_shape=inputs.shape)
     # correctness test
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.ZeroPadding2D(
           padding=(2, 2), data_format=data_format)
       layer.build(inputs.shape)
@@ -770,7 +770,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
     inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
                       input_len_dim3, stack_size))
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # basic test
       testing_utils.layer_test(
          keras.layers.ZeroPadding3D,
@@ -787,7 +787,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
           },
           input_shape=inputs.shape)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # correctness test
       layer = keras.layers.ZeroPadding3D(
           padding=(2, 2, 2), data_format=data_format)
@@ -856,7 +856,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
 class UpSamplingTest(keras_parameterized.TestCase):
   def test_upsampling_1d(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
@@ -875,7 +875,7 @@ class UpSamplingTest(keras_parameterized.TestCase):
                          stack_size)
     # basic test
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.UpSampling2D,
           kwargs={'size': (2, 2),
@@ -960,7 +960,7 @@ class UpSamplingTest(keras_parameterized.TestCase):
                          input_len_dim3, stack_size)
     # basic test
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.UpSampling3D,
           kwargs={'size': (2, 2, 2),
@@ -1010,7 +1010,7 @@ class CroppingTest(keras_parameterized.TestCase):
     input_len_dim1 = 2
     inputs = np.random.rand(num_samples, time_length, input_len_dim1)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Cropping1D,
           kwargs={'cropping': (2, 2)},
@@ -1036,7 +1036,7 @@ class CroppingTest(keras_parameterized.TestCase):
     else:
       inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                               stack_size)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      # basic test
      testing_utils.layer_test(
          keras.layers.Cropping2D,
@@ -1069,7 +1069,7 @@ class CroppingTest(keras_parameterized.TestCase):
     inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                             stack_size)
     # another correctness test (no cropping)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       cropping = ((0, 0), (0, 0))
       layer = keras.layers.Cropping2D(
           cropping=cropping, data_format=data_format)
@@ -1105,7 +1105,7 @@ class CroppingTest(keras_parameterized.TestCase):
     inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                             input_len_dim3, stack_size)
     # basic test
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Cropping3D,
           kwargs={'cropping': cropping,
@@ -1114,7 +1114,7 @@ class CroppingTest(keras_parameterized.TestCase):
     if len(croppings) == 3 and len(croppings[0]) == 2:
       # correctness test
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        layer = keras.layers.Cropping3D(
            cropping=cropping, data_format=data_format)
        layer.build(inputs.shape)
@@ -1152,7 +1152,7 @@ class DepthwiseConv2DTest(keras_parameterized.TestCase):
     num_row = 7
     num_col = 6
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.DepthwiseConv2D,
           kwargs=kwargs,
diff --git a/tensorflow/python/keras/layers/convolutional_transpose_test.py b/tensorflow/python/keras/layers/convolutional_transpose_test.py
index 4326044458e..e9adef59e56 100644
--- a/tensorflow/python/keras/layers/convolutional_transpose_test.py
+++ b/tensorflow/python/keras/layers/convolutional_transpose_test.py
@@ -36,7 +36,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
     num_row = 7
     num_col = 6
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Conv2DTranspose,
           kwargs=kwargs,
@@ -67,7 +67,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv2DTranspose(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -86,7 +86,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv2DTranspose(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -127,7 +127,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
     num_col = 6
     depth = 5
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Conv3DTranspose,
           kwargs=kwargs,
@@ -159,7 +159,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv3DTranspose(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -178,7 +178,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv3DTranspose(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -186,7 +186,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
   def test_conv3d_transpose_dynamic_shape(self):
     input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # Won't raise error here.
       testing_utils.layer_test(
          keras.layers.Conv3DTranspose,
diff --git a/tensorflow/python/keras/layers/cudnn_recurrent_test.py b/tensorflow/python/keras/layers/cudnn_recurrent_test.py
index 3bb392c85ad..fcc9dd12b13 100644
--- a/tensorflow/python/keras/layers/cudnn_recurrent_test.py
+++ b/tensorflow/python/keras/layers/cudnn_recurrent_test.py
@@ -205,7 +205,7 @@ class CuDNNGraphOnlyTest(keras_parameterized.TestCase):
     units = 2
     num_samples = 32
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       model = keras.models.Sequential()
       model.add(
           keras.layers.Embedding(
diff --git a/tensorflow/python/keras/layers/normalization_test.py b/tensorflow/python/keras/layers/normalization_test.py
index d468e5d6db2..a9f6856972c 100644
--- a/tensorflow/python/keras/layers/normalization_test.py
+++ b/tensorflow/python/keras/layers/normalization_test.py
@@ -104,7 +104,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
   @keras_parameterized.run_all_keras_modes
   def test_batchnorm_convnet(self):
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
         model = keras.models.Sequential()
         norm = keras.layers.BatchNormalization(
             axis=1, input_shape=(3, 4, 4), momentum=0.8)
diff --git a/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py b/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
index 6a446924deb..1e474627453 100644
--- a/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
+++ b/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
@@ -412,7 +412,7 @@ class RandomFlipTest(keras_parameterized.TestCase):
       mock_random = np.reshape(mock_random, [2, 1, 1, 1])
       with test.mock.patch.object(
           random_ops, 'random_uniform', return_value=mock_random):
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
           layer = image_preprocessing.RandomFlip()
           actual_output = layer(input_images, training=1)
           self.assertAllClose(expected_output, actual_output)
@@ -698,7 +698,7 @@ class RandomTransformTest(keras_parameterized.TestCase):
                            fill_value=0.0,
                            interpolation='bilinear'):
     inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       output = image_preprocessing.transform(
           inp,
          transform_matrix,
diff --git a/tensorflow/python/keras/layers/separable_convolutional_test.py b/tensorflow/python/keras/layers/separable_convolutional_test.py
index 8234bfe704d..8fdaccc9119 100644
--- a/tensorflow/python/keras/layers/separable_convolutional_test.py
+++ b/tensorflow/python/keras/layers/separable_convolutional_test.py
@@ -35,7 +35,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
     stack_size = 3
     length = 7
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.SeparableConv1D,
           kwargs=kwargs,
@@ -66,7 +66,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.SeparableConv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(len(layer.losses), 3)
@@ -87,7 +87,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.SeparableConv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
@@ -104,7 +104,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
     num_row = 7
     num_col = 6
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.SeparableConv2D,
           kwargs=kwargs,
@@ -138,7 +138,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.SeparableConv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(len(layer.losses), 3)
@@ -159,7 +159,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.SeparableConv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
diff --git a/tensorflow/python/keras/legacy_tf_layers/normalization_test.py b/tensorflow/python/keras/legacy_tf_layers/normalization_test.py
index 6b8d4cab4ab..0386e1e2295 100644
--- a/tensorflow/python/keras/legacy_tf_layers/normalization_test.py
+++ b/tensorflow/python/keras/legacy_tf_layers/normalization_test.py
@@ -407,7 +407,7 @@ class BNTest(test.TestCase):
     training = array_ops.placeholder(dtype='bool')
     outputs = bn.apply(inputs, training=training)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
@@ -898,7 +898,7 @@ class BNTest(test.TestCase):
     moving_stddev = 1.
     renorm_mean = 0.
     renorm_stddev = 1.
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -948,7 +948,7 @@ class BNTest(test.TestCase):
     moving_stddev = 1.
     renorm_mean = 0.
     renorm_stddev = 1.
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for step in range(6):
         x = np.random.random(shape)
@@ -1002,7 +1002,7 @@ class BNTest(test.TestCase):
     moving_mean = 0.
     moving_variance = 1.
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1055,7 +1055,7 @@ class BNTest(test.TestCase):
     moving_stddev = 1.
     renorm_mean = 0.
     renorm_stddev = 1.
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1101,7 +1101,7 @@ class BNTest(test.TestCase):
     self.assertListEqual(
         out1.shape.as_list(), out2.shape.as_list())
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       x = np.random.random(shape)
@@ -1123,7 +1123,7 @@ class BNTest(test.TestCase):
     out = normalization_layers.batch_normalization(
         inp, virtual_batch_size=2)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       x = np.random.random(np_shape)
@@ -1154,7 +1154,7 @@ class BNTest(test.TestCase):
                                  shape[0] // virtual_batch_size,
                                  shape[1]])
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1207,7 +1207,7 @@ class BNTest(test.TestCase):
     ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
                    shape[1:])
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1261,7 +1261,7 @@ class BNTest(test.TestCase):
     ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
                    shape[1:])
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1413,7 +1413,7 @@ class BNTest(test.TestCase):
     ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
                    shape[1:])
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
diff --git a/tensorflow/python/keras/optimizer_v2/adam_test.py b/tensorflow/python/keras/optimizer_v2/adam_test.py
index 9cf58177446..85958bb6325 100644
--- a/tensorflow/python/keras/optimizer_v2/adam_test.py
+++ b/tensorflow/python/keras/optimizer_v2/adam_test.py
@@ -113,7 +113,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testSparse(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -203,7 +203,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def doTestBasic(self, use_callable_params=False):
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -261,7 +261,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicWithAmsgrad(self):
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -353,7 +353,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testBasicWithLearningRateDecay(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -398,7 +398,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testBasicWithLearningRateInverseTimeDecay(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -445,7 +445,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testTensorLearningRate(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -484,7 +484,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testSharing(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -565,7 +565,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testSparse(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -655,7 +655,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def doTestBasic(self, use_callable_params=False):
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -715,7 +715,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
   @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicWithAmsgrad(self):
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -809,7 +809,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testBasicWithLearningRateDecay(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -854,7 +854,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testBasicWithLearningRateInverseTimeDecay(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -901,7 +901,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testTensorLearningRate(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -940,7 +940,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testSharing(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
diff --git a/tensorflow/python/keras/optimizer_v2/adamax_test.py b/tensorflow/python/keras/optimizer_v2/adamax_test.py
index f955df863f1..9a73fad4c36 100644
--- a/tensorflow/python/keras/optimizer_v2/adamax_test.py
+++ b/tensorflow/python/keras/optimizer_v2/adamax_test.py
@@ -81,7 +81,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
   def testResourceSparse(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)  # pylint: disable=cell-var-from-loop
         m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
@@ -275,7 +275,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
   def testTensorLearningRate(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -312,7 +312,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
   def testSharing(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
diff --git a/tensorflow/python/keras/optimizer_v2/ftrl_test.py b/tensorflow/python/keras/optimizer_v2/ftrl_test.py
index 6627fc0df29..9e17462fda0 100644
--- a/tensorflow/python/keras/optimizer_v2/ftrl_test.py
+++ b/tensorflow/python/keras/optimizer_v2/ftrl_test.py
@@ -37,7 +37,7 @@ class FtrlOptimizerTest(test.TestCase):
   def doTestFtrlwithoutRegularization(self, use_resource=False):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         if use_resource:
           var0 = variables.Variable([0.0, 0.0], dtype=dtype)
           var1 = variables.Variable([0.0, 0.0], dtype=dtype)
@@ -77,7 +77,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testFtrlwithoutRegularization2(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([4.0, 3.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -107,7 +107,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testMinimizeSparseResourceVariable(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         var0 = variables.Variable([[1.0, 2.0]], dtype=dtype)
         x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
@@ -129,7 +129,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testFtrlWithL1(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([4.0, 3.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -159,7 +159,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testFtrlWithBeta(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([4.0, 3.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -185,7 +185,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testFtrlWithL2_Beta(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([4.0, 3.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -216,7 +216,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testFtrlWithL1_L2(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([4.0, 3.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -253,7 +253,7 @@ class FtrlOptimizerTest(test.TestCase):
     """
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([4.0, 3.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -286,7 +286,7 @@ class FtrlOptimizerTest(test.TestCase):
     """Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
         var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
         grads0 = ops.IndexedSlices(
@@ -321,7 +321,7 @@ class FtrlOptimizerTest(test.TestCase):
     """Verifies that l2 shrinkage in FTRL does not change lr schedule."""
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True) as sess:
+      with ops.Graph().as_default(), self.cached_session() as sess:
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([1.0, 2.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -404,7 +404,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testEquivAdagradwithoutRegularization(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         val0, val1 = self.applyOptimizer(
             ftrl.Ftrl(
                 3.0,
@@ -415,7 +415,7 @@ class FtrlOptimizerTest(test.TestCase):
                 l2_regularization_strength=0.0),
             dtype)
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         val2, val3 = self.applyOptimizer(
             adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
             dtype)
@@ -449,7 +449,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testEquivSparseGradientDescentwithoutRegularization(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         val0, val1 = self.applyOptimizer(
             ftrl.Ftrl(
                 3.0,
@@ -461,7 +461,7 @@ class FtrlOptimizerTest(test.TestCase):
             dtype,
             is_sparse=True)
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         val2, val3 = self.applyOptimizer(
             gradient_descent.GradientDescentOptimizer(3.0),
             dtype,
@@ -473,7 +473,7 @@ class FtrlOptimizerTest(test.TestCase):
   def testEquivGradientDescentwithoutRegularization(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         val0, val1 = self.applyOptimizer(
             ftrl.Ftrl(
                 3.0,
@@ -484,7 +484,7 @@ class FtrlOptimizerTest(test.TestCase):
                 l2_regularization_strength=0.0),
             dtype)
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
         val2, val3 = self.applyOptimizer(
             gradient_descent.GradientDescentOptimizer(3.0), dtype)
diff --git a/tensorflow/python/kernel_tests/aggregate_ops_test.py b/tensorflow/python/kernel_tests/aggregate_ops_test.py
index adb4f3a0f2f..d84fda49e5f 100644
--- a/tensorflow/python/kernel_tests/aggregate_ops_test.py
+++ b/tensorflow/python/kernel_tests/aggregate_ops_test.py
@@ -58,7 +58,7 @@ class AddNTest(test.TestCase):
   def testAddN(self):
     np.random.seed(12345)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       for dtype in self._supported_types():
         for count in range(1, self._MAX_N + 1):
           data = [self._buildData((2, 2), dtype) for _ in range(count)]
@@ -71,7 +71,7 @@ class AddNTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testUnknownShapes(self):
     np.random.seed(12345)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       for dtype in self._supported_types():
         data = self._buildData((2, 2), dtype)
         for count in range(1, self._MAX_N + 1):
diff --git a/tensorflow/python/kernel_tests/argmax_op_test.py b/tensorflow/python/kernel_tests/argmax_op_test.py
index 8a6ac74849c..2b4431af0ba 100644
--- a/tensorflow/python/kernel_tests/argmax_op_test.py
+++ b/tensorflow/python/kernel_tests/argmax_op_test.py
@@ -97,7 +97,7 @@ class ArgMaxTest(test.TestCase):
   def testFloatInt32Output(self):
     x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
     expected_values = x.argmax()
-    with self.session(use_gpu=True):
+    with self.session():
       ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
       tf_ans = self.evaluate(ans)
       self.assertEqual(np.int32, tf_ans.dtype)
@@ -105,7 +105,7 @@
       # the values don't have a range that exceeds 32-bit integers.
       self.assertAllEqual(tf_ans, expected_values)
     expected_values = x.argmin()
-    with self.session(use_gpu=True):
+    with self.session():
       ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
       tf_ans = self.evaluate(ans)
       self.assertEqual(np.int32, tf_ans.dtype)
diff --git a/tensorflow/python/kernel_tests/array_ops/batch_gather_op_test.py b/tensorflow/python/kernel_tests/array_ops/batch_gather_op_test.py
index e41053b3182..16ac4765b01 100644
--- a/tensorflow/python/kernel_tests/array_ops/batch_gather_op_test.py
+++ b/tensorflow/python/kernel_tests/array_ops/batch_gather_op_test.py
@@ -46,7 +46,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
   def testSimpleGather(self, indices_dtype):
     data = np.array([0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13])
     indices = [3, 4]
-    with self.session(use_gpu=True):
+    with self.session():
       for dtype in _TEST_TYPES:
         params_np = self._buildParams(data, dtype)
         params = constant_op.constant(params_np)
@@ -62,7 +62,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
   def test2DArray(self, indices_dtype):
     data = np.array([[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]])
     indices = [[3], [4]]
-    with self.session(use_gpu=True):
+    with self.session():
       for dtype in _TEST_TYPES:
         params_np = self._buildParams(data, dtype)
         params = constant_op.constant(params_np)
@@ -77,7 +77,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
   def testHigherRank(self):
     data = np.array([[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]])
     indices = [[[2, 0], [1, 2]], [[2, 0], [0, 1]]]
-    with self.session(use_gpu=True):
+    with self.session():
       for dtype in _TEST_TYPES:
         params_np = self._buildParams(data, dtype)
         params = constant_op.constant(params_np)
@@ -113,7 +113,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
       self.evaluate(array_ops.batch_gather(params, [7]))
   def testEmptySlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
       for dtype in _TEST_TYPES:
         for itype in np.int32, np.int64:
           params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
diff --git a/tensorflow/python/kernel_tests/array_ops/gather_op_test.py b/tensorflow/python/kernel_tests/array_ops/gather_op_test.py
index f0c762e0cba..f8050b71d24 100644
--- a/tensorflow/python/kernel_tests/array_ops/gather_op_test.py
+++ b/tensorflow/python/kernel_tests/array_ops/gather_op_test.py
@@ -59,7 +59,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
     return data
   def testScalar1D(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       data = np.array([0, 1, 2, 3, 7, 5])
       for dtype in _TEST_TYPES:
         for indices in 4, [1, 2, 2, 4, 5]:
@@ -74,7 +74,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
           self.assertEqual(np_val.shape, gather_t.get_shape())
   def testScalar2D(self):
-    with self.session(use_gpu=True):
+    with self.session():
       data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
                        [12, 13, 14]])
       for dtype in _TEST_TYPES:
@@ -90,7 +90,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
         self.assertEqual(expected_shape, gather_t.get_shape())
   def testSimpleTwoD32(self):
-    with self.session(use_gpu=True):
+    with self.session():
       data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
                        [12, 13, 14]])
       for dtype in _TEST_TYPES:
@@ -304,7 +304,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
     # On GPU the bad indices do not raise error but fetch 0 values
     if not test.is_gpu_available():
       return
-    with self.session(use_gpu=True):
+    with self.session():
       params = [[0, 1, 2], [3, 4, 5]]
       with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
         array_ops.gather(params, [[7]], axis=0).eval()
diff --git a/tensorflow/python/kernel_tests/array_ops/scatter_nd_ops_test.py b/tensorflow/python/kernel_tests/array_ops/scatter_nd_ops_test.py
index b3c566b882f..a8e36f0cd6d 100644
--- a/tensorflow/python/kernel_tests/array_ops/scatter_nd_ops_test.py
+++ b/tensorflow/python/kernel_tests/array_ops/scatter_nd_ops_test.py
@@ -211,7 +211,7 @@ class StatefulScatterNdTest(test.TestCase):
       scatter = state_ops.scatter_nd_update(ref, indices, updates)
       init = variables.global_variables_initializer()
-      with self.session(use_gpu=True) as sess:
+      with self.session() as sess:
         self.evaluate(init)
         result = self.evaluate(scatter)
         self.assertAllClose(result, expected)
@@ -225,7 +225,7 @@ class StatefulScatterNdTest(test.TestCase):
      scatter = state_ops.scatter_nd_update(ref, indices, updates)
      init = variables.global_variables_initializer()
-      with self.session(use_gpu=True) as sess:
+      with self.session() as sess:
        self.evaluate(init)
        result = self.evaluate(scatter)
        self.assertAllClose(result, expected)
diff --git a/tensorflow/python/kernel_tests/array_ops/slice_op_test.py b/tensorflow/python/kernel_tests/array_ops/slice_op_test.py
index d8097ad15d8..55cb164b642 100644
--- a/tensorflow/python/kernel_tests/array_ops/slice_op_test.py
+++ b/tensorflow/python/kernel_tests/array_ops/slice_op_test.py
@@ -40,7 +40,7 @@ class SliceTest(test.TestCase):
   def testEmpty(self):
     inp = np.random.rand(4, 4).astype("f")
     for k in xrange(4):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
         slice_t = a[2, k:k]
         slice_val = self.evaluate(slice_t)
@@ -49,7 +49,7 @@ class SliceTest(test.TestCase):
   def testInt32(self):
     inp = np.random.rand(4, 4).astype("i")
     for k in xrange(4):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
         slice_t = a[2, k:k]
         slice_val = self.evaluate(slice_t)
@@ -119,7 +119,7 @@ class SliceTest(test.TestCase):
   def testSelectAll(self):
     for _ in range(10):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         inp = np.random.rand(4, 4, 4, 4).astype("f")
         a = constant_op.constant(inp, shape=[4, 4, 4, 4],
                                  dtype=dtypes.float32)
@@ -133,7 +133,7 @@ class SliceTest(test.TestCase):
   def testSingleDimension(self):
     for _ in range(10):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         inp = np.random.rand(10).astype("f")
         a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
@@ -229,7 +229,7 @@ class SliceTest(test.TestCase):
   def testSingleElementAll(self):
     for _ in range(10):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inp = np.random.rand(4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
@@ -312,7 +312,7 @@ class SliceTest(test.TestCase):
     self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])
   def _testGradientSlice(self, input_shape, slice_begin, slice_size):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       num_inputs = np.prod(input_shape)
       num_grads = np.prod(slice_size)
       inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
@@ -362,7 +362,7 @@ class SliceTest(test.TestCase):
     self.assertAllClose(np_ans, result)
   def _testGradientVariableSize(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      out = array_ops.slice(inp, [1], [-1])
      grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
@@ -380,7 +380,7 @@ class SliceTest(test.TestCase):
     # Regression test for bug in slice. A low-level bug in Eigen was causing
     # incorrect results for negative indices in multi-dimensional tensors.
     # See b/114318298.
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
       loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
@@ -477,7 +477,7 @@ class SliceTest(test.TestCase):
     self.assertEqual([None, 2], c.get_shape().as_list())
   def testSliceOfSlice(self):
-    with self.session(use_gpu=True):
+    with self.session():
       a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
       b = a[1:, :]
       c = b[:-1, :]
diff --git a/tensorflow/python/kernel_tests/array_ops/stack_op_test.py b/tensorflow/python/kernel_tests/array_ops/stack_op_test.py
index ab1dd1d5125..f0e7db4a5ae 100644
--- a/tensorflow/python/kernel_tests/array_ops/stack_op_test.py
+++ b/tensorflow/python/kernel_tests/array_ops/stack_op_test.py
@@ -52,7 +52,7 @@ class StackOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testSimple(self):
     np.random.seed(7)
-    with self.session(use_gpu=True):
+    with self.session():
       for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
         rank = len(shape)
         for axis in range(-rank, rank):
@@ -90,7 +90,7 @@ class StackOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testConst(self):
     np.random.seed(7)
-    with self.session(use_gpu=True):
+    with self.session():
       # Verify that shape induction works with shapes produced via const stack
       a = constant_op.constant([1, 2, 3, 4, 5, 6])
       b = array_ops.reshape(a, array_ops.stack([2, 3]))
@@ -155,7 +155,7 @@ class StackOpTest(test.TestCase):
       data = np.random.randn(*shape)
      shapes = [shape[1:]] * shape[0]
      with self.subTest(shape=shape):
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          # TODO(irving): Remove list() once we handle maps correctly
          xs = list(map(constant_op.constant, data))
          c = array_ops.stack(xs)
@@ -171,7 +171,7 @@ class StackOpTest(test.TestCase):
      out_shape = list(shape[1:])
      out_shape.insert(1, shape[0])
      with self.subTest(shape=shape):
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          # TODO(irving): Remove list() once we handle maps correctly
          xs = list(map(constant_op.constant, data))
          c = array_ops.stack(xs, axis=1)
@@ -241,7 +241,7 @@ class StackOpTest(test.TestCase):
       for axis in range(-rank, rank):
         test_arrays = np_split_squeeze(expected, axis)
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          with self.subTest(shape=shape, dtype=dtype, axis=axis):
            actual_pack = array_ops.stack(test_arrays, axis=axis)
            self.assertEqual(expected.shape, actual_pack.get_shape())
@@ -265,7 +265,7 @@ class StackOpTest(test.TestCase):
   def testComplex(self):
     np.random.seed(7)
-    with self.session(use_gpu=True):
+    with self.session():
       for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
         for dtype in [np.complex64, np.complex128]:
           with self.subTest(shape=shape, dtype=dtype):
@@ -279,7 +279,7 @@ class AutomaticStackingTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testSimple(self):
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(
          [1, 0, 2],
          ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())
@@ -299,7 +299,7 @@ class AutomaticStackingTest(test.TestCase):
           ]).eval())
   def testWithNDArray(self):
-    with self.session(use_gpu=True):
+    with self.session():
       result = ops.convert_to_tensor([[[0., 0.],
                                        constant_op.constant([1., 1.])],
                                      np.array(
@@ -310,7 +310,7 @@ class AutomaticStackingTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testVariable(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = variables.Variable(17)
       result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
       self.evaluate(v.initializer)
@@ -364,7 +364,7 @@ class AutomaticStackingTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testPlaceholder(self):
-    with self.session(use_gpu=True):
+    with self.session():
       # Test using placeholder with a defined shape.
      ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
      result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
@@ -391,7 +391,7 @@ class AutomaticStackingTest(test.TestCase):
     # Dynamic shape error.
     ph_1 = array_ops.placeholder(dtypes.int32)
     result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
-    with self.session(use_gpu=True):
+    with self.session():
       with self.assertRaises(errors_impl.InvalidArgumentError):
         result_1.eval(feed_dict={ph_1: [1]})
diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py
index 006737f95d7..6a8ea4c0315 100644
--- a/tensorflow/python/kernel_tests/array_ops_test.py
+++ b/tensorflow/python/kernel_tests/array_ops_test.py
@@ -474,7 +474,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testReverseRowsOf3Channels(self):
     """Tests optimized code for reversing rows with last dim size = 3."""
-    with self.session(use_gpu=True):
+    with self.session():
       for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
        for outer_size in (1, 2):
          for middle_size in list(range(50)) + [100000]:
@@ -491,7 +491,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testReverseRowsOf4Channels(self):
-    with self.session(use_gpu=True):
+    with self.session():
       for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
        for outer_size in (1, 2):
          for middle_size in list(range(50)) + [100000]:
@@ -508,7 +508,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testReverseColumnsOf3Channels(self):
-    with self.session(use_gpu=True):
+    with self.session():
       for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
        for outer_size in list(range(50)) + [100000]:
          for middle_size in (1, 2):
@@ -641,7 +641,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   def test_basic_slice(self):
     for tensor_type in STRIDED_SLICE_TYPES:
       with self.subTest(tensor_type=tensor_type):
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          checker = StridedSliceChecker(
              self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
          _ = checker[:, :, :]
@@ -696,7 +696,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testDegenerateSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
       checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
       # degenerate by offering a forward interval with a negative stride
       _ = checker[0:-1:-1, :, :]
@@ -717,7 +717,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testEllipsis(self):
-    with self.session(use_gpu=True):
+    with self.session():
       raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
       checker = StridedSliceChecker(self, raw)
@@ -738,7 +738,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testShrink(self):
-    with self.session(use_gpu=True):
+    with self.session():
       raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
       checker = StridedSliceChecker(self, raw)
@@ -749,7 +749,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testBothNewAxisAndShrink(self):
-    with self.session(use_gpu=True):
+    with self.session():
       ones = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int16)
       self.assertAllEqual(
           ones[array_ops.newaxis, :,
@@ -757,7 +757,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testTensorIndexing(self):
-    with self.session(use_gpu=True):
+    with self.session():
       raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
             [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
       checker = StridedSliceChecker(self, raw, check_type_infer=False)
@@ -769,7 +769,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
       _ = checker[..., 2**64 // 2**63]  # Test longs in Python 2
   def testTensorIndexingTypeError(self):
-    with self.session(use_gpu=True):
+    with self.session():
       checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      expected = re.escape(array_ops._SLICE_TYPE_ERROR)
      with self.assertRaisesRegex(TypeError, expected):
@@ -787,7 +787,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testExpand(self):
-    with self.session(use_gpu=True):
+    with self.session():
       raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
             [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
       checker = StridedSliceChecker(self, raw)
@@ -805,7 +805,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testExpandVariable(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = variables.Variable(7, dtype=dtypes.int32)
       self.evaluate(x.initializer)
       y = x[None].eval()
@@ -814,7 +814,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testOptimizedCases(self):
-    with self.session(use_gpu=True):
+    with self.session():
       checker = StridedSliceChecker(self,
                                     StridedSliceChecker.REF_TENSOR_ALIGNED)
       # Identity
@@ -830,7 +830,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   @test_util.run_v1_only("currently failing on v2")
   def testMasks(self):
-    with self.session(use_gpu=True):
+    with self.session():
       scalar = np.array(0)
       # Test tensor type mask
       checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
@@ -870,7 +870,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testUnknown(self):
-    with self.session(use_gpu=True):
+    with self.session():
       uncertain_tensor = array_ops.placeholder(dtypes.float32)
       a = StridedSliceShapeChecker(uncertain_tensor)
       a_slice_shape = a[...]
@@ -882,7 +882,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testTensorShapeUncertain(self):
-    with self.session(use_gpu=True):
+    with self.session():
       uncertain_tensor = array_ops.placeholder(
           dtypes.float32, shape=(5, None, 7))
       a = StridedSliceShapeChecker(uncertain_tensor)
@@ -906,7 +906,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testTensorValuedIndexShape(self):
-    with self.session(use_gpu=True):
+    with self.session():
       defined_shape_tensor = array_ops.placeholder(
           dtypes.float32, shape=(5, 3, 7))
       index_value = array_ops.placeholder(dtypes.int32, shape=())
@@ -965,7 +965,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
   @test_util.run_v1_only("b/120545219")
   def testGradient(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       var = variables.Variable(
           array_ops.reshape(
               math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
@@ -992,7 +992,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
   @test_util.run_v1_only("b/120545219")
   def testGradientZero(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       var = variables.Variable(8.)
       init = variables.global_variables_initializer()
       sess.run(init)
@@ -1001,7 +1001,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testInt64Indices(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       a = math_ops.range(3, dtype=dtypes.float32)
       index = constant_op.constant(1, dtype=dtypes.int64)
       b = 2. * a[index]
@@ -1014,7 +1014,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testHostVsDevice(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       var2 = variables.Variable(
           array_ops.reshape(
               math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
@@ -1029,7 +1029,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testInt64Shape(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       original_dy = array_ops.reshape(
           math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
           shape=(4, 1, 1))
@@ -1044,7 +1044,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testMixedIndexTypes(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       original_dy = array_ops.reshape(
           math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
          shape=(4, 1, 1))
@@ -1133,7 +1133,7 @@ class StridedSliceAssignChecker(object):
     if self.tensor_type.is_complex:
       value -= 1j * value
-    with self.test.test_session(use_gpu=True) as sess:
+    with self.test.test_session() as sess:
       if self._use_resource:
         var = resource_variable_ops.ResourceVariable(self.x)
       else:
@@ -1514,7 +1514,7 @@ class InvertPermutationTest(test_util.TensorFlowTestCase):
   def testInvertPermutation(self):
     for dtype in [dtypes.int32, dtypes.int64]:
      with self.subTest(dtype=dtype):
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
          y = array_ops.invert_permutation(x)
          self.assertAllEqual(y.get_shape(), [5])
@@ -1597,7 +1597,7 @@ class SnapshotOpTest(test_util.TensorFlowTestCase):
   def testInvertPermutation(self):
    for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
      with self.subTest(dtype=dtype):
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
          y = gen_array_ops.snapshot(x)
          self.assertAllEqual(y, [0, 1, 2, 3])
diff --git a/tensorflow/python/kernel_tests/atrous_conv2d_test.py b/tensorflow/python/kernel_tests/atrous_conv2d_test.py
index e0cf7c2cc50..1aa0b0315f8 100644
--- a/tensorflow/python/kernel_tests/atrous_conv2d_test.py
+++ b/tensorflow/python/kernel_tests/atrous_conv2d_test.py
@@ -61,7 +61,7 @@ class AtrousConv2DTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testAtrousConv2DForward(self):
-    with self.session(use_gpu=True):
+    with self.session():
       # Input: [batch, height, width, input_depth]
       height = 9
       for width in [9, 10]:  # Test both odd and even width.
@@ -108,7 +108,7 @@ class AtrousConv2DTest(test.TestCase):
     padding = "SAME"  # The padding needs to be "SAME"
     np.random.seed(1)  # Make it reproducible.
-    with self.session(use_gpu=True):
+    with self.session():
       # Input: [batch, height, width, input_depth]
       for height in range(15, 17):
         for width in range(15, 17):
@@ -138,7 +138,7 @@ class AtrousConv2DTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testGradient(self):
-    with self.session(use_gpu=True):
+    with self.session():
       # Input: [batch, height, width, input_depth]
       x_shape = [2, 5, 6, 2]
       # Filter: [kernel_height, kernel_width, input_depth, output_depth]
@@ -166,7 +166,7 @@ class AtrousConv2DTransposeTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testAtrousConv2DTransposeForward(self):
-    with self.session(use_gpu=True):
+    with self.session():
       # Input: [batch, height, width, input_depth]
       height = 9
       for width in [9, 10]:  # Test both odd and even width.
@@ -206,7 +206,7 @@ class AtrousDepthwiseConv2DTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testAtrousDepthwiseConv2DForward(self):
     strides = [1, 1, 1, 1]
-    with self.session(use_gpu=True):
+    with self.session():
       # Input: [batch, height, width, input_depth]
       height = 9
       for width in [9, 10]:  # Test both odd and even width.
diff --git a/tensorflow/python/kernel_tests/banded_triangular_solve_op_test.py b/tensorflow/python/kernel_tests/banded_triangular_solve_op_test.py
index bd0fdae03c5..4545a2a442c 100644
--- a/tensorflow/python/kernel_tests/banded_triangular_solve_op_test.py
+++ b/tensorflow/python/kernel_tests/banded_triangular_solve_op_test.py
@@ -86,7 +86,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
       a_np = np.tile(a_np, batch_dims + [1, 1])
       b = np.tile(b, batch_dims + [1, 1])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       a_tf = a
       b_tf = b
       if use_placeholder:
@@ -199,7 +199,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
     # right-hand sides.
     matrix = np.array([[1., 1.], [1., 1.]])
     rhs = np.array([[1., 0.]])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       with self.assertRaises(ValueError):
         self._verifySolve(matrix, rhs)
       with self.assertRaises(ValueError):
@@ -208,7 +208,7 @@
     # Number of bands exceeds the dimension of the matrix.
     matrix = np.ones((6, 4))
     rhs = np.ones((4, 2))
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       with self.assertRaises(ValueError):
         self._verifySolve(matrix, rhs)
       with self.assertRaises(ValueError):
diff --git a/tensorflow/python/kernel_tests/basic_gpu_test.py b/tensorflow/python/kernel_tests/basic_gpu_test.py
index a64032ec216..4f4e9a9a7bb 100644
--- a/tensorflow/python/kernel_tests/basic_gpu_test.py
+++ b/tensorflow/python/kernel_tests/basic_gpu_test.py
@@ -40,7 +40,7 @@ from tensorflow.python.platform import test
 class GPUBinaryOpsTest(test.TestCase):
   def _compareGPU(self, x, y, np_func, tf_func):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       inx = ops.convert_to_tensor(x)
       iny = ops.convert_to_tensor(y)
       out = tf_func(inx, iny)
@@ -143,7 +143,7 @@ class MathBuiltinUnaryTest(test.TestCase):
     np_out = np.floor_divide(x, y + 0.1)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       inx = ops.convert_to_tensor(x)
       iny = ops.convert_to_tensor(y + 0.1)
       ofunc = inx / iny
@@ -167,7 +167,7 @@ class BroadcastSimpleTest(test.TestCase):
   def _compareGpu(self, x, y, np_func, tf_func):
     np_ans = np_func(x, y)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       inx = ops.convert_to_tensor(x)
       iny = ops.convert_to_tensor(y)
       out = tf_func(inx, iny)
diff --git a/tensorflow/python/kernel_tests/batch_matmul_op_test.py b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
index ac82a320bb6..331fdedbdd0 100644
--- a/tensorflow/python/kernel_tests/batch_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
@@ -166,7 +166,7 @@ class BatchMatmulGradientTest(test.TestCase):
     def Loss(x, y):
       return math_ops.reduce_sum(math_ops.matmul(x, y, adjoint_a, adjoint_b))
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       ((x_jacob_t, y_jacob_t),
        (x_jacob_n, y_jacob_n)) = gradient_checker_v2.compute_gradient(
            Loss, [x, y], delta=delta)
diff --git a/tensorflow/python/kernel_tests/bincount_op_test.py b/tensorflow/python/kernel_tests/bincount_op_test.py
index 133d33996f9..4ca81333ab3 100644
--- a/tensorflow/python/kernel_tests/bincount_op_test.py
+++ b/tensorflow/python/kernel_tests/bincount_op_test.py
@@ -36,7 +36,7 @@ from tensorflow.python.platform import googletest
 class BincountTest(test_util.TensorFlowTestCase):
   def test_empty(self):
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(
           self.evaluate(bincount_ops.bincount([], minlength=5)),
           [0, 0, 0, 0, 0])
@@ -54,7 +54,7 @@ class BincountTest(test_util.TensorFlowTestCase):
           np.float64)
   def test_values(self):
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(
           self.evaluate(bincount_ops.bincount([1, 1, 1, 2, 2, 3])),
           [0, 3, 2, 1])
@@ -74,7 +74,7 @@ class BincountTest(test_util.TensorFlowTestCase):
           np.ones(10000))
   def test_maxlength(self):
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(
           self.evaluate(bincount_ops.bincount([5], maxlength=3)), [0, 0, 0])
       self.assertAllEqual(
@@ -84,7 +84,7 @@ class BincountTest(test_util.TensorFlowTestCase):
   def test_random_with_weights(self):
     num_samples = 10000
-    with self.session(use_gpu=True):
+    with self.session():
       np.random.seed(42)
       for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
         arr = np.random.randint(0, 1000, num_samples)
@@ -98,7 +98,7 @@ class BincountTest(test_util.TensorFlowTestCase):
   def test_random_without_weights(self):
     num_samples = 10000
-    with self.session(use_gpu=True):
+    with self.session():
       np.random.seed(42)
       for dtype in [np.int32, np.float32]:
         arr = np.random.randint(0, 1000, num_samples)
@@ -108,7 +108,7 @@ class BincountTest(test_util.TensorFlowTestCase):
           np.bincount(arr, weights))
   def test_zero_weights(self):
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(
           self.evaluate(bincount_ops.bincount(np.arange(1000), np.zeros(1000))),
           np.zeros(1000))
diff --git a/tensorflow/python/kernel_tests/broadcast_to_ops_test.py b/tensorflow/python/kernel_tests/broadcast_to_ops_test.py
index f1e1ff1d86b..fd177c60762 100644
--- a/tensorflow/python/kernel_tests/broadcast_to_ops_test.py
+++ b/tensorflow/python/kernel_tests/broadcast_to_ops_test.py
@@ -33,21 +33,21 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
   def testBroadcastToBasic(self):
     for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
-      with self.session(use_gpu=True):
+      with self.session():
         x = np.array([1, 2, 3], dtype=dtype)
         v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
         v_np = np.broadcast_to(x, [3, 3])
         self.assertAllEqual(v_tf, v_np)
   def testBroadcastToString(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = np.array([b"1", b"2", b"3"])
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
       self.assertAllEqual(v_tf, v_np)
   def testBroadcastToBool(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = np.array([True, False, True], dtype=np.bool)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
@@ -56,7 +56,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
   def testBroadcastToShape(self):
     for input_dim in range(1, 6):
       for output_dim in range(input_dim, 6):
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          input_shape = [2] * input_dim
          output_shape = [2] * output_dim
          x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
@@ -67,7 +67,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
   def testBroadcastToShapeInnerDim(self):
     input_shape = [2, 1, 3]
     output_shape = [2, 5, 3]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
       v_np = np.broadcast_to(x, output_shape)
@@ -76,7 +76,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
   def testBroadcastToShapeLargerDim(self):
     input_shape = [2, 1, 3, 2, 2, 2]
     output_shape = [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 15, 3, 2, 2, 2]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
       v_np = np.broadcast_to(x, output_shape)
@@ -85,21 +85,21 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
   def testBroadcastToShapeLargerDim2(self):
     input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
     output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
       v_np = np.broadcast_to(x, output_shape)
       self.assertAllEqual(v_tf, v_np)
   def testBroadcastToScalar(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = np.array(1, dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
       self.assertAllEqual(v_tf, v_np)
   def testBroadcastScalarToNonScalar(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = np.array(1.0, dtype=np.float)
       v_tf = array_ops.broadcast_to(constant_op.constant(1.0),
                                     [2, 3, 4, 1, 1, 1])
@@ -108,7 +108,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
   def testBroadcastToShapeTypeAndInference(self):
     for dtype in [dtypes.int32, dtypes.int64]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        x = np.array([1, 2, 3])
        v_tf = array_ops.broadcast_to(
            constant_op.constant(x),
diff --git a/tensorflow/python/kernel_tests/bucketize_op_test.py b/tensorflow/python/kernel_tests/bucketize_op_test.py
index 59c30d8f2df..5f1eb453fac 100644
--- a/tensorflow/python/kernel_tests/bucketize_op_test.py
+++ b/tensorflow/python/kernel_tests/bucketize_op_test.py
@@ -36,14 +36,14 @@ class BucketizationOpTest(test.TestCase):
         constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
         boundaries=[0, 3, 8, 11])
     expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.assertAllEqual(expected_out, self.evaluate(op))
   def testEmptyFloat(self):
     op = math_ops._bucketize(
         array_ops.zeros([0, 3], dtype=dtypes.float32), boundaries=[])
     expected_out = np.zeros([0, 3], dtype=np.float32)
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(expected_out, self.evaluate(op))
   def testFloat(self):
@@ -51,7 +51,7 @@ class BucketizationOpTest(test.TestCase):
         constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
         boundaries=[0., 3., 8., 11.])
     expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.assertAllEqual(expected_out, self.evaluate(op))
   def test2DInput(self):
@@ -59,14 +59,14 @@ class BucketizationOpTest(test.TestCase):
         constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
         boundaries=[0, 3, 8, 11])
     expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.assertAllEqual(expected_out, self.evaluate(op))
   @test_util.run_deprecated_v1
   def testInvalidBoundariesOrder(self):
     op = math_ops._bucketize(
         constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                   "Expected sorted boundaries"):
         self.evaluate(op)
diff --git a/tensorflow/python/kernel_tests/cast_op_test.py b/tensorflow/python/kernel_tests/cast_op_test.py
index 7b794153ce6..c1f8cc371c4 100644
--- a/tensorflow/python/kernel_tests/cast_op_test.py
+++ b/tensorflow/python/kernel_tests/cast_op_test.py
@@ -108,7 +108,7 @@ class CastOpTest(test.TestCase):
     with self.cached_session(use_gpu=False):
       b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
       self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
       self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
diff --git a/tensorflow/python/kernel_tests/cholesky_op_test.py b/tensorflow/python/kernel_tests/cholesky_op_test.py
index 0697f7def1b..cc03e60a294 100644
--- a/tensorflow/python/kernel_tests/cholesky_op_test.py
+++ b/tensorflow/python/kernel_tests/cholesky_op_test.py
@@ -166,7 +166,7 @@ class CholeskyOpTest(test.TestCase):
   @test_util.disable_xla("b/123337890")
   def testNotInvertibleCPU(self):
     # The input should be invertible.
-    with self.session(use_gpu=True):
+    with self.session():
       with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Cholesky decomposition was not successful. The"
diff --git a/tensorflow/python/kernel_tests/clip_ops_test.py b/tensorflow/python/kernel_tests/clip_ops_test.py
index d0c805f96e3..73e3b8c48cb 100644
--- a/tensorflow/python/kernel_tests/clip_ops_test.py
+++ b/tensorflow/python/kernel_tests/clip_ops_test.py
@@ -52,7 +52,7 @@ class ClipTest(test.TestCase):
   # ClipByValue test
   def testClipByValue(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
       np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
       clip_value = 4.4
@@ -73,7 +73,7 @@ class ClipTest(test.TestCase):
         dtypes.int64,
         dtypes.uint8,
     ]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
        np_ans = [[2, 2, 3], [4, 4, 4]]
        clip_value_min = 2
@@ -95,7 +95,7 @@ class ClipTest(test.TestCase):
         dtypes.int64,
         dtypes.uint8,
     ]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
        np_ans = [[2, 2, 3], [4, 4, 4]]
        clip_value_min = constant_op.constant(
@@ -118,7 +118,7 @@ class ClipTest(test.TestCase):
         dtypes.int64,
         dtypes.uint8,
     ]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
        np_ans = [[4, 4, 4], [4, 5, 6]]
        clip_value_min = 4
@@ -141,7 +141,7 @@ class ClipTest(test.TestCase):
         dtypes.int64,
         dtypes.uint8,
     ]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
        np_ans = [[2, 2, 3], [5, 5, 6]]
        clip_value_min = constant_op.constant(
@@ -154,7 +154,7 @@ class ClipTest(test.TestCase):
     self.assertAllClose(np_ans, tf_ans)
   def testClipByValueBadShape(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
       # Use a nonsensical shape.
       clip = constant_op.constant([1.0, 2.0])
@@ -176,7 +176,7 @@ class ClipTest(test.TestCase):
   def _testClipIndexedSlicesByValue(self, values, indices, shape,
                                     clip_value_min, clip_value_max, expected):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       values = constant_op.constant(values)
       indices = constant_op.constant(indices)
       shape = constant_op.constant(shape)
@@ -211,7 +211,7 @@ class ClipTest(test.TestCase):
   # ClipByNorm tests
   def testClipByNormClipped(self):
     # Norm clipping when clip_norm < 5
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       # Norm of x = sqrt(3^2 + 4^2) = 5
       np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
@@ -227,14 +227,14 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByNormGradientZeros(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = array_ops.zeros([3])
       b = clip_ops.clip_by_norm(x, 1.)
       grad, = gradients_impl.gradients(b, x)
       self.assertAllEqual(grad, [1., 1., 1.])
   def testClipByNormBadShape(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
       # Use a nonsensical shape.
       clip = constant_op.constant([1.0, 2.0])
@@ -243,7 +243,7 @@ class ClipTest(test.TestCase):
   def testClipByNormNotClipped(self):
     # No norm clipping when clip_norm >= 5
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       # Norm of x = sqrt(3^2 + 4^2) = 5
       np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
@@ -255,7 +255,7 @@ class ClipTest(test.TestCase):
   def testClipByNormZero(self):
     # No norm clipping when norm = 0
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
       # Norm = 0, no changes
       np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
@@ -267,7 +267,7 @@ class ClipTest(test.TestCase):
   def testClipByNormClippedWithDim0(self):
     # Norm clipping when clip_norm < 5
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
       # Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
       np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
@@ -279,7 +279,7 @@ class ClipTest(test.TestCase):
   def testClipByNormClippedWithDim1(self):
     # Norm clipping when clip_norm < 5
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
       # Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
       np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
@@ -291,7 +291,7 @@ class ClipTest(test.TestCase):
   def testClipByNormNotClippedWithAxes(self):
     # No norm clipping when clip_norm >= 5
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
       # Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
       np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
@@ -305,7 +305,7 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByGlobalNormClipped(self):
     # Norm clipping when clip_norm < 5
-    with self.session(use_gpu=True):
+    with self.session():
       x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       x1 = constant_op.constant([1.0, -2.0])
       # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -327,7 +327,7 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByGlobalNormClippedTensor(self):
     # Norm clipping when clip_norm < 5
-    with self.session(use_gpu=True):
+    with self.session():
       x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       x1 = constant_op.constant([1.0, -2.0])
       # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -349,7 +349,7 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByGlobalNormSupportsNone(self):
     # Norm clipping when clip_norm < 5
-    with self.session(use_gpu=True):
+    with self.session():
       x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       x1 = constant_op.constant([1.0, -2.0])
       # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -373,7 +373,7 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByGlobalNormWithIndexedSlicesClipped(self):
     # Norm clipping when clip_norm < 5
-    with self.session(use_gpu=True):
+    with self.session():
       x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       x1 = ops.IndexedSlices(
           constant_op.constant([1.0, -2.0]),
           constant_op.constant([3, 4]))
@@ -407,7 +407,7 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByGlobalNormNotClipped(self):
     # No norm clipping when clip_norm >= 5
-    with self.session(use_gpu=True):
+    with self.session():
       x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       x1 = constant_op.constant([1.0, -2.0])
       # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -427,7 +427,7 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByGlobalNormZero(self):
     # No norm clipping when norm = 0
-    with self.session(use_gpu=True):
+    with self.session():
       x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
       x1 = constant_op.constant([0.0, 0.0])
       # Norm = 0, no changes
@@ -447,7 +447,7 @@ class ClipTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testClipByGlobalNormInf(self):
     # Expect all NaNs when global norm is inf.
-    with self.session(use_gpu=True):
+    with self.session():
       x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
                                 shape=[2, 3])
       x1 = constant_op.constant([1.0, -2.0])
@@ -463,7 +463,7 @@ class ClipTest(test.TestCase):
   def testClipByAverageNormClipped(self):
     # Norm clipping when average clip_norm < 0.83333333
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
       np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@@ -475,7 +475,7 @@ class ClipTest(test.TestCase):
   def testClipByAverageNormClippedTensor(self):
     # Norm clipping when average clip_norm < 0.83333333
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
       np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@@ -487,7 +487,7 @@ class ClipTest(test.TestCase):
   def testClipByAverageNormNotClipped(self):
     # No norm clipping when average clip_norm >= 0.83333333
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
       np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
@@ -499,7 +499,7 @@ class ClipTest(test.TestCase):
   def testClipByAverageNormZero(self):
     # No norm clipping when average clip_norm = 0
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
       # Average norm = 0, no changes
       np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
@@ -512,7 +512,7 @@ class ClipTest(test.TestCase):
   def testClipByAverageNormReplacedWithClipByNorm(self):
     # Check clip_by_average_norm(t) is the same as
     # clip_by_norm(t, clip_norm * tf.compat.v1.to_float(tf.size(t)))
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
       # expected answer [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@@ -532,7 +532,7 @@ class ClipTest(test.TestCase):
     y = clip_ops.clip_by_value(zero, 1.0, 1.0)
     z = clip_ops.clip_by_value(zero, zero, 1.0)
     w = clip_ops.clip_by_value(zero, 1.0, zero)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       sess.run([x, y, z, w], feed_dict={zero: np.zeros((7, 0))})
diff --git a/tensorflow/python/kernel_tests/concat_op_test.py b/tensorflow/python/kernel_tests/concat_op_test.py
index bcc31872027..da4f4f86b02 100644
--- a/tensorflow/python/kernel_tests/concat_op_test.py
+++ b/tensorflow/python/kernel_tests/concat_op_test.py
@@ -38,7 +38,7 @@ class ConcatOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testHStack(self):
-    with self.session(use_gpu=True):
+    with self.session():
       p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
       p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
       c = array_ops.concat([p1, p2], 0)
@@ -54,7 +54,7 @@ class ConcatOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testVStack(self):
-    with self.session(use_gpu=True):
+    with self.session():
       p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
       p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
       c = array_ops.concat([p1, p2], 1)
@@ -70,7 +70,7 @@ class ConcatOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def test4DStack(self):
-    with self.session(use_gpu=True):
+    with self.session():
       p1 = array_ops.placeholder(dtypes.float32, shape=[2, 3, 1, 1])
       p2 = array_ops.placeholder(dtypes.float32, shape=[2, 3, 4, 1])
       c = array_ops.concat([p1, p2], 2)
@@ -121,7 +121,7 @@ class ConcatOpTest(test.TestCase):
       dtype_feed = dtypes.float32
     else:
       dtype_feed = dtype
-    with self.session(use_gpu=True):
+    with self.session():
       p = []
       for i in np.arange(num_tensors):
         input_shape = shape
@@ -315,7 +315,7 @@ class ConcatOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testGradientWithUnknownInputDim(self):
-    with self.session(use_gpu=True):
+    with self.session():
       x = array_ops.placeholder(dtypes.float32)
       y = array_ops.placeholder(dtypes.float32)
       c = array_ops.concat([x, y], 2)
@@ -526,7 +526,7 @@ class ConcatOpTest(test.TestCase):
   # shared memory is not large for all the inputs
   @test_util.run_deprecated_v1
   def testConcatLargeNumberOfTensors(self):
-    with self.session(use_gpu=True):
+    with self.session():
       for concat_dim in range(2):
         params = {}
         p = []
diff --git a/tensorflow/python/kernel_tests/constant_op_test.py b/tensorflow/python/kernel_tests/constant_op_test.py
index cb014fcedd4..68d6cadc4aa 100644
--- a/tensorflow/python/kernel_tests/constant_op_test.py
+++ b/tensorflow/python/kernel_tests/constant_op_test.py
@@ -54,7 +54,7 @@ class ConstantTest(test.TestCase):
   def _testGpu(self, x):
     np_ans = np.array(x)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       tf_ans = ops.convert_to_tensor(x).eval()
       dtype = dtypes_lib.as_dtype(np_ans.dtype)
       if dtype.is_floating or dtype.is_complex:
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
index 9026100077d..8d70dc977df 100644
--- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -557,7 +557,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
   @test_util.run_v1_only("b/120545219")
   def testCondColocation(self):
-    with self.session(use_gpu=True):
+    with self.session():
       with ops.device("/cpu:0"):
         v = variables.Variable(7.0)
@@ -1224,7 +1224,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
   def testCondGradMultiDevice(self):
     config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                     allow_soft_placement=True)
-    with self.cached_session(use_gpu=True, config=config) as sess:
+    with self.cached_session(config=config) as sess:
       pred = array_ops.placeholder(dtypes.bool, [])
       x = array_ops.placeholder(dtypes.float32)
       y = array_ops.placeholder(dtypes.float32)
@@ -2621,7 +2621,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
   def testWhileCondGradMultiDevice(self):
     config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                     allow_soft_placement=True)
-    with self.cached_session(use_gpu=True, config=config) as sess:
+    with self.cached_session(config=config) as sess:
      pred = array_ops.placeholder(dtypes.bool, [])
      x_init = constant_op.constant(1.0)
@@ -4911,7 +4911,7 @@ class AssertTest(test.TestCase):
     if test_util.is_gpu_available():
       self.skipTest("b/128646478 fails in opensource")
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       with ops.device(test.gpu_device_name()):
         value = constant_op.constant(1.0)
       with ops.device("/cpu:0"):
diff --git a/tensorflow/python/kernel_tests/conv1d_transpose_test.py b/tensorflow/python/kernel_tests/conv1d_transpose_test.py
index 02ac5af7aae..f06823944fd 100644
--- a/tensorflow/python/kernel_tests/conv1d_transpose_test.py
+++ b/tensorflow/python/kernel_tests/conv1d_transpose_test.py
@@ -153,7 +153,7 @@ class Conv1DTransposeTest(test.TestCase):
   def testConv1DTransposeSingleStrideNCW(self):
     # `NCW` data format is only supported for CUDA device.
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        strides = [1, 1, 1]
        # Input, output: [batch, depth, width]
@@ -184,7 +184,7 @@ class Conv1DTransposeTest(test.TestCase):
   def testConv1DTransposeSameNCW(self):
     # `NCW` data format is only supported for CUDA device.
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        strides = [1, 1, 2]
        # Input, output: [batch, depth, width]
@@ -216,7 +216,7 @@ class Conv1DTransposeTest(test.TestCase):
   def testConv1DTransposeValidNCW(self):
     # `NCW` data format is only supported for CUDA device.
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        strides = [1, 1, 2]
        # Input, output: [batch, depth, width]
diff --git a/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py b/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
index e14a7191903..2a57d681d1a 100644
--- a/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
+++ b/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
@@ -77,7 +77,7 @@ class Conv2DBackpropFilterGradTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testGradientDilatedConv(self):
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        for padding in [
            "SAME",
            "VALID",
diff --git a/tensorflow/python/kernel_tests/conv2d_transpose_test.py b/tensorflow/python/kernel_tests/conv2d_transpose_test.py
index 96f1c059fa8..60f1650a2e4 100644
--- a/tensorflow/python/kernel_tests/conv2d_transpose_test.py
+++ b/tensorflow/python/kernel_tests/conv2d_transpose_test.py
@@ -186,7 +186,7 @@ class Conv2DTransposeTest(test.TestCase):
   def testConv2DTransposeSingleStrideNCHW(self):
     # `NCHW` data format is only supported for CUDA device.
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        strides = [1, 1, 1, 1]
        # Input, output: [batch, depth, height, width, depth]
@@ -221,7 +221,7 @@ class Conv2DTransposeTest(test.TestCase):
   def testConv2DTransposeSameNCHW(self):
     # `NCHW` data format is only supported for CUDA device.
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        strides = [1, 1, 2, 2]
        # Input, output: [batch, depth, height, width]
@@ -257,7 +257,7 @@ class Conv2DTransposeTest(test.TestCase):
   def testConv2DTransposeValidNCHW(self):
     # `NCHW` data format is only supported for CUDA device.
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        strides = [1, 1, 2, 2]
        # Input, output: [batch, depth, height, width]
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
index dd033121329..5cfb4408f5e 100644
--- a/tensorflow/python/kernel_tests/conv_ops_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -2787,7 +2787,7 @@ class SeparableConv2DTest(test.TestCase):
       expected: An array containing the expected operation outputs.
       data_format: string data format for input tensor.
     """
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       t1 = self._InitValues(tensor_in_sizes)
       f1 = self._InitValues(depthwise_filter_in_sizes)
       f1.set_shape(depthwise_filter_in_sizes)
@@ -2899,7 +2899,7 @@ class SeparableConv2DTest(test.TestCase):
     depthwise_filter_in_sizes = [2, 2, 2, 3]
     pointwise_filter_in_sizes = [1, 1, 6, 7]
     padding = [[0, 0], [1, 2], [3, 4], [0, 0]]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # Compute the 'expected' values by manually padding before calling
       # separable_conv2d
       t1 = self._InitValues(tensor_in_sizes)
diff --git a/tensorflow/python/kernel_tests/decode_image_op_test.py b/tensorflow/python/kernel_tests/decode_image_op_test.py
index a2c0c7f63a8..59781516652 100644
--- a/tensorflow/python/kernel_tests/decode_image_op_test.py
+++ b/tensorflow/python/kernel_tests/decode_image_op_test.py
@@ -37,7 +37,7 @@ class DecodeImageOpTest(test.TestCase):
   def testBmp(self):
     # Read a real bmp and verify shape
     path = os.path.join(prefix_path, "bmp", "testdata", "lena.bmp")
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       bmp0 = io_ops.read_file(path)
       image0 = image_ops.decode_image(bmp0)
       image1 = image_ops.decode_bmp(bmp0)
@@ -53,7 +53,7 @@ class DecodeImageOpTest(test.TestCase):
     stride = 5
     shape = (12, height, width, 3)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       gif0 = io_ops.read_file(path)
       image0 = image_ops.decode_image(gif0)
       image1 = image_ops.decode_gif(gif0)
@@ -82,7 +82,7 @@ class DecodeImageOpTest(test.TestCase):
   def testJpeg(self):
     # Read a real jpeg and verify shape
     path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       jpeg0 = io_ops.read_file(path)
       image0 = image_ops.decode_image(jpeg0)
       image1 = image_ops.decode_jpeg(jpeg0)
@@ -100,7 +100,7 @@ class DecodeImageOpTest(test.TestCase):
     inputs = [(1, "lena_gray.png")]
     for channels_in, filename in inputs:
       for channels in 0, 1, 3, 4:
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          path = os.path.join(prefix_path, "png", "testdata", filename)
          png0 = io_ops.read_file(path)
          image0 = image_ops.decode_image(png0, channels=channels)
diff --git a/tensorflow/python/kernel_tests/depthtospace_op_test.py b/tensorflow/python/kernel_tests/depthtospace_op_test.py
index 27461ac4a9d..04564d8d6e3 100644
--- a/tensorflow/python/kernel_tests/depthtospace_op_test.py
+++ b/tensorflow/python/kernel_tests/depthtospace_op_test.py
@@ -56,7 +56,7 @@ class DepthToSpaceTest(test.TestCase):
       self.evaluate(output_nhwc)
     if test.is_gpu_available():
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        # test NHWC (default) on GPU
        x_tf = array_ops.depth_to_space(input_nhwc, block_size)
        self.assertAllEqual(x_tf, outputs)
@@ -126,7 +126,7 @@ class DepthToSpaceTest(test.TestCase):
       self.assertAllEqual(x_tf.shape, x_out.shape)
       self.evaluate(x_tf)
     if test.is_gpu_available():
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        # test NHWC (default) on GPU
        x_tf = array_ops.depth_to_space(input_nhwc, block_size)
        self.assertAllEqual(x_tf.shape, x_out.shape)
@@ -343,7 +343,7 @@ class DepthToSpaceGradientTest(test.TestCase):
       return
     assert 4 == x.ndim
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       tf_x = ops.convert_to_tensor(x)
       tf_y = array_ops.depth_to_space(tf_x, block_size,
                                       data_format=data_format)
diff --git a/tensorflow/python/kernel_tests/depthwise_conv_op_test.py b/tensorflow/python/kernel_tests/depthwise_conv_op_test.py
index 266a0f8d0fb..e26de9b155e 100644
--- a/tensorflow/python/kernel_tests/depthwise_conv_op_test.py
+++ b/tensorflow/python/kernel_tests/depthwise_conv_op_test.py
@@ -425,7 +425,7 @@ class DepthwiseConv2DTest(test.TestCase):
     # GitHub issue 22110.
     if not test.is_gpu_available():
       return
-    with self.session(use_gpu=True):
+    with self.session():
       x = array_ops.placeholder(dtypes.float32)
       f = np.ones([1, 1, 1, 1], np.float32)
       v = nn_impl.depthwise_conv2d(
diff --git a/tensorflow/python/kernel_tests/determinant_op_test.py b/tensorflow/python/kernel_tests/determinant_op_test.py
index 4eb2be0a23d..59b754293e1 100644
--- a/tensorflow/python/kernel_tests/determinant_op_test.py
+++ b/tensorflow/python/kernel_tests/determinant_op_test.py
@@ -154,7 +154,7 @@ class DeterminantOpTest(test.TestCase):
   @test_util.run_v1_only("b/120545219")
   def testConcurrentExecutesWithoutError(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       matrix1 = random_ops.random_normal([5, 5], seed=42)
       matrix2 = random_ops.random_normal([5, 5], seed=42)
       det1 = linalg_ops.matrix_determinant(matrix1)
diff --git a/tensorflow/python/kernel_tests/diag_op_test.py b/tensorflow/python/kernel_tests/diag_op_test.py
index 8e8586b88d1..99b41336e4f 100644
--- a/tensorflow/python/kernel_tests/diag_op_test.py
+++ b/tensorflow/python/kernel_tests/diag_op_test.py
@@ -374,7 +374,7 @@ class MatrixDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testVector(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = np.array([1.0, 2.0, 3.0])
       mat = np.diag(v)
       v_diag = array_ops.matrix_diag(v)
@@ -397,7 +397,7 @@ class MatrixDiagTest(test.TestCase):
     self.assertAllEqual(v_diags, solution[0])
   def _testVectorBatch(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
       mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
                             [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
@@ -441,7 +441,7 @@ class MatrixDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testRectangularBatch(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # Stores expected num_rows and num_cols (when the other is given).
       # expected[d_lower, d_upper] = (expected_num_rows, expected_num_cols)
       test_list = list()
@@ -542,7 +542,7 @@ class MatrixDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testInvalidShapeAtEval(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       with self.assertRaisesOpError("diagonal must be at least 1-dim"):
         array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
@@ -550,7 +550,7 @@ class MatrixDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testGrad(self):
     shapes = ((3,), (7, 4))
-    with self.session(use_gpu=True):
+    with self.session():
       for shape in shapes:
         x = constant_op.constant(np.random.rand(*shape), np.float32)
         y = array_ops.matrix_diag(x)
@@ -564,7 +564,7 @@ class MatrixDiagTest(test.TestCase):
     tests = dict()  # tests[shape] = (d_lower, d_upper)
     tests[(3,)] = (-1, -1)
     tests[(7, 3, 4)] = (-1, 1)
-    with self.session(use_gpu=True):
+    with self.session():
      for shape, diags in tests.items():
        x = constant_op.constant(np.random.rand(*shape), np.float32)
        for align in alignment_list:
@@ -580,7 +580,7 @@ class MatrixSetDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testSquare(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = np.array([1.0, 2.0, 3.0])
       mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
       mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
@@ -603,7 +603,7 @@ class MatrixSetDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testRectangular(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = np.array([3.0, 4.0])
       mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
       expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
@@ -631,7 +631,7 @@ class MatrixSetDiagTest(test.TestCase):
     self.assertAllEqual(output, solution)
   def _testSquareBatch(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)
       mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
                             [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],
@@ -668,7 +668,7 @@ class MatrixSetDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testRectangularBatch(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
       mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
                             [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])
@@ -701,7 +701,7 @@ class MatrixSetDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testInvalidShapeAtEval(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       with self.assertRaisesOpError("input must be at least 2-dim"):
         array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
@@ -717,7 +717,7 @@ class MatrixSetDiagTest(test.TestCase):
     })
   def _testGrad(self, input_shape, diag_shape, diags, align):
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant(
           np.random.rand(*input_shape), dtype=dtypes_lib.float32)
       x_diag = constant_op.constant(
@@ -751,7 +751,7 @@ class MatrixSetDiagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testGradWithNoShapeInformation(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       mat = array_ops.placeholder(dtype=dtypes_lib.float32)
       grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
@@ -774,7 +774,7 @@ class MatrixDiagPartTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testSquare(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = np.array([1.0, 2.0, 3.0])
       mat = np.diag(v)
       mat_diag = array_ops.matrix_diag_part(mat)
@@ -798,7 +798,7 @@ class MatrixDiagPartTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testRectangular(self):
-    with self.session(use_gpu=True):
+    with self.session():
       mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
       mat_diag = array_ops.matrix_diag_part(mat)
       self.assertAllEqual(mat_diag, np.array([1.0, 5.0]))
@@ -817,7 +817,7 @@ class MatrixDiagPartTest(test.TestCase):
     self.assertAllEqual(mat_diag, solution[0])
   def _testSquareBatch(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
       mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
                             [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
@@ -853,7 +853,7 @@ class MatrixDiagPartTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testRectangularBatch(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
       mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
                             [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
@@ -880,7 +880,7 @@ class MatrixDiagPartTest(test.TestCase):
     matrix = array_ops.placeholder(dtypes_lib.int32, shape=[None, None])
     result = array_ops.matrix_diag_part(matrix, k=-1)
     input_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
-    with self.session(use_gpu=True):
+    with self.session():
       result_eval = result.eval(feed_dict={matrix: input_matrix})
     self.assertAllEqual([4, 8], result_eval)
@@ -891,7 +891,7 @@ class MatrixDiagPartTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testInvalidShapeAtEval(self):
-    with self.session(use_gpu=True):
+    with self.session():
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       with self.assertRaisesOpError("input must be at least 2-dim"):
         array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
@@ -899,7 +899,7 @@ class MatrixDiagPartTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testGrad(self):
     shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
-    with self.session(use_gpu=True):
+    with self.session():
      for shape in shapes:
        x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
        y = array_ops.matrix_diag_part(x)
@@ -913,7 +913,7 @@ class MatrixDiagPartTest(test.TestCase):
     tests = dict()  # tests[shape] = (d_lower, d_upper)
     tests[(3, 3)] = (-1, -1)
     tests[(7, 3, 4)] = (-1, 1)
-    with self.session(use_gpu=True):
+    with self.session():
      for align in alignment_list:
        for shape, diags in tests.items():
          x = constant_op.constant(np.random.rand(*shape), np.float32)
diff --git a/tensorflow/python/kernel_tests/dynamic_partition_op_test.py b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
index 2858f119a72..16b7eb4c40f 100644
--- a/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
+++ b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
@@ -39,7 +39,7 @@ class DynamicPartitionTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testSimpleOneDimensional(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
       indices = constant_op.constant([0, 0, 2, 3, 2, 1])
       partitions = data_flow_ops.dynamic_partition(
@@ -60,7 +60,7 @@ class DynamicPartitionTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testSimpleTwoDimensional(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                                    [9, 10, 11], [12, 13, 14], [15, 16, 17]],
                                   dtype=dtypes.float32)
@@ -87,7 +87,7 @@ class DynamicPartitionTest(test.TestCase):
     indices_list = [x % 2 for x in range(num)]
     part1 = [x for x in range(num) if x % 2 == 0]
     part2 = [x for x in range(num) if x % 2 == 1]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       data = constant_op.constant(data_list, dtype=dtypes.float32)
       indices = constant_op.constant(indices_list, dtype=dtypes.int32)
       partitions = data_flow_ops.dynamic_partition(
@@ -109,7 +109,7 @@ class DynamicPartitionTest(test.TestCase):
     parts = [[] for _ in range(num_partitions)]
     for i in range(rows):
       parts[(i ** 2) % num_partitions].append(data_list[i])
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       data = constant_op.constant(data_list, dtype=dtypes.float32)
       indices = constant_op.constant(indices_list, dtype=dtypes.int32)
       partitions = data_flow_ops.dynamic_partition(
@@ -125,7 +125,7 @@ class DynamicPartitionTest(test.TestCase):
   def testSimpleComplex(self):
     data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
     indices_list = [1, 0, 1, 0]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.complex64)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
@@ -138,7 +138,7 @@ class DynamicPartitionTest(test.TestCase):
   def testScalarPartitions(self):
     data_list = [10, 13, 12, 11]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float64)
      indices = 3
      partitions = data_flow_ops.dynamic_partition(
@@ -159,7 +159,7 @@ class DynamicPartitionTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testHigherRank(self):
     np.random.seed(7)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      for n in 2, 3:
        for shape in (4,), (4, 5), (4, 5, 2):
          partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
@@ -184,7 +184,7 @@ class DynamicPartitionTest(test.TestCase):
   def testEmptyParts(self):
     data_list = [1, 2, 3, 4]
     indices_list = [1, 3, 1, 3]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
@@ -200,7 +200,7 @@ class DynamicPartitionTest(test.TestCase):
   def testEmptyDataTwoDimensional(self):
     data_list = [[], []]
     indices_list = [0, 1]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
@@ -216,7 +216,7 @@ class DynamicPartitionTest(test.TestCase):
   def testEmptyPartitions(self):
     data_list = []
     indices_list = []
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
@@ -237,7 +237,7 @@ class DynamicPartitionTest(test.TestCase):
     data_list = [1, 2, 3, 4, 5, 6]
     indices_list = [6, 5, 4, 3, 1, 0]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
@@ -258,7 +258,7 @@ class DynamicPartitionTest(test.TestCase):
     data_list = [1, 2, 3, 4, 5, 6]
     indices_list = [10, 11, 2, 12, 0, 1000]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
@@ -282,7 +282,7 @@ class DynamicPartitionTest(test.TestCase):
     data_list = [1.1, 2.1, 3.1, 4.1, 5.1, 6.1]
     indices_list = [90, 70, 60, 100, 110, 40]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
diff --git a/tensorflow/python/kernel_tests/eig_op_test.py b/tensorflow/python/kernel_tests/eig_op_test.py
index b1c83959f27..bfa97997f05 100644
--- a/tensorflow/python/kernel_tests/eig_op_test.py
+++ b/tensorflow/python/kernel_tests/eig_op_test.py
@@ -55,7 +55,7 @@ class EigTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testConcurrentExecutesWithoutError(self):
     all_ops = []
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      for compute_v_ in True, False:
        matrix1 = random_ops.random_normal([5, 5], seed=42)
        matrix2 = random_ops.random_normal([5, 5], seed=42)
@@ -84,7 +84,7 @@ class EigTest(test.TestCase):
             "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
     self.assertEqual(matrix.shape, (32, 32))
     matrix_tensor = constant_op.constant(matrix)
-    with self.session(use_gpu=True) as _:
+    with self.session() as _:
      (e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
      self.assertEqual(e.size, 32)
      self.assertAllClose(
@@ -166,7 +166,7 @@ def _GetEigTest(dtype_, shape_, compute_v_):
       a = RandomInput()
       np_e, np_v = np.linalg.eig(a)
-    with self.session(use_gpu=True):
+    with self.session():
      if compute_v_:
        tf_e, tf_v = linalg_ops.eig(constant_op.constant(a))
@@ -222,7 +222,7 @@ def _GetEigGradTest(dtype_, shape_, compute_v_):
       tol = 1e-2
     else:
       tol = 1e-7
-    with self.session(use_gpu=True):
+    with self.session():
      def Compute(x):
        e, v = linalg_ops.eig(x)
diff --git a/tensorflow/python/kernel_tests/embedding_ops_test.py b/tensorflow/python/kernel_tests/embedding_ops_test.py
index e1a5086a670..917d7ae3f0e 100644
--- a/tensorflow/python/kernel_tests/embedding_ops_test.py
+++ b/tensorflow/python/kernel_tests/embedding_ops_test.py
@@ -1048,7 +1048,7 @@ class DynamicStitchOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testCint32Gpu(self):
-    with self.session(use_gpu=True):
+    with self.session():
       indices = [
           ops.convert_to_tensor([0, 1, 2]),
           ops.convert_to_tensor([2, 3])
@@ -1076,7 +1076,7 @@ class DynamicStitchOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testInt32Gpu(self):
-    with self.session(use_gpu=True):
+    with self.session():
       indices = [
           ops.convert_to_tensor([0, 1, 2]),
           ops.convert_to_tensor([2, 3])
diff --git a/tensorflow/python/kernel_tests/functional_ops_test.py b/tensorflow/python/kernel_tests/functional_ops_test.py
index 84a95934607..bbd955f19ac 100644
--- a/tensorflow/python/kernel_tests/functional_ops_test.py
+++ b/tensorflow/python/kernel_tests/functional_ops_test.py
@@ -340,7 +340,7 @@ class FunctionalOpsTest(test.TestCase):
         lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
     loss = l0 + array_ops.stop_gradient(l1)
     grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
-    with self.test_session(use_gpu=True) as sess:
+    with self.test_session() as sess:
       self.evaluate(variables.global_variables_initializer())
       self.evaluate(grad)
@@ -933,7 +933,7 @@ class FunctionalOpsTest(test.TestCase):
     def ReturnsTooManyArgs(unused_i, v):
       return v, v
-    with self.test_session(use_gpu=True):
+    with self.test_session():
       with self.assertRaisesRegex(errors.InvalidArgumentError,
                                   "must be a scalar"):
         functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
diff --git a/tensorflow/python/kernel_tests/gather_nd_op_test.py b/tensorflow/python/kernel_tests/gather_nd_op_test.py
index 026683d595b..15b1e21c0e9 100644
--- a/tensorflow/python/kernel_tests/gather_nd_op_test.py
+++ b/tensorflow/python/kernel_tests/gather_nd_op_test.py
@@ -39,7 +39,7 @@ from tensorflow.python.platform import test
 class GatherNdTest(test.TestCase):
   def _testSimpleDtype(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
       indices = constant_op.constant([[4], [4], [0]])
       gather_nd_t = array_ops.gather_nd(params, indices)
@@ -60,7 +60,7 @@ class GatherNdTest(test.TestCase):
   @test_util.run_deprecated_v1
   @test_util.disable_xla("b/123337890")  # Error messages differ
   def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
-    with self.session(use_gpu=True):
+    with self.session():
       params = np.ones((3, 3), dtype=np.float32)
       indices_empty = np.empty((0, 2), dtype=np.int32)
@@ -91,7 +91,7 @@ class GatherNdTest(test.TestCase):
     self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
   def testIndexScalar(self):
-    with self.session(use_gpu=True):
+    with self.session():
       params = np.array(
           [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
       indices = constant_op.constant([4, 1])
@@ -101,7 +101,7 @@ class GatherNdTest(test.TestCase):
     self.assertAllEqual(np.array(7), gather_nd_val)
   def testParamsRankLargerThanIndexIndexScalarSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
       params = np.array(
           [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
       indices = constant_op.constant([4])
@@ -111,7 +111,7 @@ class GatherNdTest(test.TestCase):
     self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
   def testParamsRankLargerThanIndexSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
       params = np.array(
           [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
       indices = constant_op.constant([[4], [4], [0]])
@@ -122,7 +122,7 @@ class GatherNdTest(test.TestCase):
     self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)
   def testHigherRankParamsLargerThanIndexSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
       params = np.array(
           [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
            [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
@@ -136,7 +136,7 @@ class GatherNdTest(test.TestCase):
     self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)
   def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
-    with self.session(use_gpu=True):
+    with self.session():
       params = np.array(
           [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
            [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
@@ -153,7 +153,7 @@ class GatherNdTest(test.TestCase):
         gather_nd_val)
   def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
       params = np.array(
           [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
            [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
@@ -168,7 +168,7 @@ class GatherNdTest(test.TestCase):
         gather_nd_val)
   def testHigherRankParams(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = (10, 20, 5, 1, 17)
       params = np.random.rand(*shape)
       indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
@@ -180,7 +180,7 @@ class GatherNdTest(test.TestCase):
     self.assertEqual([2000], gather_nd_t.get_shape())
   def testHigherRankParamsAndIndices(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = (10, 20, 5, 1, 17)
       params = np.random.rand(*shape)
       indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
@@ -220,7 +220,7 @@ class GatherNdTest(test.TestCase):
     # On GPU the bad indices do not raise error but fetch 0 values
     if not test.is_gpu_available():
       return
-    with self.session(use_gpu=True):
+    with self.session():
       params = [0, 1, 2]
       indices = [[[0], [7]]]  # Make this one higher rank
       gather_nd = array_ops.gather_nd(params, indices)
@@ -244,7 +244,7 @@ class GatherNdTest(test.TestCase):
     # On GPU the bad indices do not raise error but fetch 0 values
     if not test.is_gpu_available():
       return
-    with self.session(use_gpu=True):
+    with self.session():
       params = [[0, 1, 2]]
       indices = [[[0], [0], [1]]]  # Make this one higher rank
       gather_nd = array_ops.gather_nd(params, indices)
@@ -261,7 +261,7 @@ class GatherNdTest(test.TestCase):
     grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
       assert np.array_equal(expected_grads, self.evaluate(grads))
   @test_util.run_deprecated_v1
@@ -273,7 +273,7 @@ class GatherNdTest(test.TestCase):
     grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertIndexedSlices(grads)
       self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
@@ -290,7 +290,7 @@ class GatherNdTest(test.TestCase):
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array(
         [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(expected_grads, self.evaluate(grads))
   @test_util.run_deprecated_v1
@@ -320,7 +320,7 @@ class GatherNdTest(test.TestCase):
         [[[[5, 6], [1, 2]]]], [[[[3, 4], [7, 8]]]]
     ]]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(expected_grads, self.evaluate(grads))
   @test_util.run_deprecated_v1
@@ -336,7 +336,7 @@ class GatherNdTest(test.TestCase):
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array(
         [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(expected_grads, self.evaluate(grads))
   @test_util.run_deprecated_v1
@@ -358,7 +358,7 @@ class GatherNdTest(test.TestCase):
         [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
        dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertIndexedSlices(grads)
       self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
diff --git a/tensorflow/python/kernel_tests/in_topk_op_test.py b/tensorflow/python/kernel_tests/in_topk_op_test.py
index c636cee0dd5..be3fee3b057 100644
--- a/tensorflow/python/kernel_tests/in_topk_op_test.py
+++ b/tensorflow/python/kernel_tests/in_topk_op_test.py
@@ -29,7 +29,7 @@ class InTopKTest(test.TestCase):
   def _validateInTopK(self, predictions, target, k, expected):
     np_ans = np.array(expected, np.bool)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       precision = nn_ops.in_top_k(predictions, target, k)
       out = self.evaluate(precision)
       self.assertAllClose(np_ans, out)
diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py
index f2348c6c7ac..3a36b5293e3 100644
--- a/tensorflow/python/kernel_tests/init_ops_test.py
+++ b/tensorflow/python/kernel_tests/init_ops_test.py
@@ -102,7 +102,7 @@ def _init_sampler(tc, init, num):
   """
   def func():
-    with tc.test_session(use_gpu=True):
+    with tc.test_session():
       return init([num]).eval()
   return func
@@ -112,7 +112,7 @@ class ConstantInitializersTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testZerosInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.zeros_initializer())
@@ -121,7 +121,7 @@ class ConstantInitializersTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testOnesInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.ones_initializer())
@@ -130,7 +130,7 @@ class ConstantInitializersTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testConstantZeroInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.constant_initializer(0.0))
@@ -139,7 +139,7 @@ class ConstantInitializersTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testConstantOneInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.constant_initializer(1.0))
@@ -148,7 +148,7 @@ class ConstantInitializersTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testConstantIntInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x",
@@ -161,7 +161,7 @@ class ConstantInitializersTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testConstantTupleInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
       shape = [3]
       x = variable_scope.get_variable(
           "x",
@@ -173,7 +173,7 @@ class ConstantInitializersTest(test.TestCase):
     self.assertAllEqual(x, [10, 20, 30])
   def _testNDimConstantInitializer(self, name, value, shape, expected):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       init = init_ops.constant_initializer(value, dtype=dtypes.int32)
       x = variable_scope.get_variable(name, shape=shape, initializer=init)
       self.evaluate(x.initializer)
@@ -198,7 +198,7 @@ class ConstantInitializersTest(test.TestCase):
   def _testNDimConstantInitializerLessValues(self, name, value, shape,
                                              expected):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       init = init_ops.constant_initializer(value, dtype=dtypes.int32)
       x = variable_scope.get_variable(name, shape=shape, initializer=init)
       self.evaluate(x.initializer)
@@ -225,7 +225,7 @@ class ConstantInitializersTest(test.TestCase):
   def _testNDimConstantInitializerMoreValues(self, value, shape):
     ops.reset_default_graph()
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       init = init_ops.constant_initializer(value, dtype=dtypes.int32)
       self.assertRaises(
           ValueError,
@@ -398,7 +398,7 @@ class VarianceScalingInitializationTest(test.TestCase):
     init = init_ops.variance_scaling_initializer(
         distribution="truncated_normal")
-    with self.session(use_gpu=True), \
+    with self.session(), \
       test.mock.patch.object(
          random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
          as mock_truncated_normal:
@@ -415,7 +415,7 @@ class VarianceScalingInitializationTest(test.TestCase):
     expect_var = 1. / shape[0]
     init = init_ops.variance_scaling_initializer(distribution="normal")
-    with self.session(use_gpu=True), \
+    with self.session(), \
      test.mock.patch.object(
          random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
          as mock_truncated_normal:
@@ -433,7 +433,7 @@ class VarianceScalingInitializationTest(test.TestCase):
     init = init_ops.variance_scaling_initializer(
         distribution="untruncated_normal")
-    with self.session(use_gpu=True), \
+    with self.session(), \
      test.mock.patch.object(
          random_ops, "random_normal", wraps=random_ops.random_normal) \
          as mock_random_normal:
@@ -450,7 +450,7 @@ class VarianceScalingInitializationTest(test.TestCase):
     expect_var = 1. / shape[0]
     init = init_ops.variance_scaling_initializer(distribution="uniform")
-    with self.session(use_gpu=True):
+    with self.session():
       x = init(shape).eval()
     self.assertNear(np.mean(x), expect_mean, err=1e-2)
@@ -461,7 +461,7 @@ class RangeTest(test.TestCase):
   def _Range(self, start, limit, delta):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       tf_ans = math_ops.range(start, limit, delta, name="range")
       self.assertEqual([len(np.arange(start, limit, delta))],
                        tf_ans.get_shape())
@@ -481,7 +481,7 @@ class RangeTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testLimitOnly(self):
-    with self.session(use_gpu=True):
+    with self.session():
       self.assertAllEqual(np.arange(5), math_ops.range(5))
   def testEmpty(self):
@@ -910,7 +910,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
     outputs_2norm = linalg_ops.norm(outputs)
     ratio = outputs_2norm / inputs_2norm
     my_ops = variables.global_variables_initializer()
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(my_ops)
       # Check the shape of the outputs
       t = self.evaluate(outputs)
@@ -925,7 +925,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
     shape = [3, 3, 10, 10]
     count = 70
     tol = 1e-5
-    with self.session(use_gpu=True):
+    with self.session():
      for i in range(count):
        x = variable_scope.get_variable(
            "{}".format(i),
@@ -996,7 +996,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
     shape = [3, 10, 10]
     count = 70
     tol = 1e-5
-    with self.session(use_gpu=True):
+    with self.session():
      for i in range(count):
        x = variable_scope.get_variable(
            "{}".format(i),
@@ -1063,7 +1063,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
     outputs_2norm = linalg_ops.norm(outputs)
     ratio = outputs_2norm / inputs_2norm
     my_ops = variables.global_variables_initializer()
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       self.evaluate(my_ops)
       # Check the shape of the outputs
      t = self.evaluate(outputs)
@@ -1167,7 +1167,7 @@ class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
     outputs_2norm = linalg_ops.norm(outputs)
     ratio = outputs_2norm / inputs_2norm
     my_ops = variables.global_variables_initializer()
-    with self.session(use_gpu=True) as sess:
+    with
self.session() as sess: self.evaluate(my_ops) # Check the shape of the outputs t = self.evaluate(outputs) @@ -1227,7 +1227,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase): shape = [3, 3, 3, 5, 5] count = 20 tol = 1e-5 - with self.session(use_gpu=True): + with self.session(): for i in range(count): x = variable_scope.get_variable( "{}".format(i), @@ -1302,7 +1302,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase): outputs_2norm = linalg_ops.norm(outputs) ratio = outputs_2norm / inputs_2norm my_ops = variables.global_variables_initializer() - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: self.evaluate(my_ops) # Check the shape of the outputs t = self.evaluate(outputs) diff --git a/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py b/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py index 4841c18a78c..d39f2e9b904 100644 --- a/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py +++ b/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py @@ -78,7 +78,7 @@ class CSRSparseMatrixDenseMatMulGradTest(test.TestCase): b_mats_val = np.transpose(b_mats_val, (0, 2, 1)) if adjoint_b: b_mats_val = np.conj(b_mats_val) - with self.test_session(use_gpu=True): + with self.test_session(): a_mats = ops.convert_to_tensor(a_mats_val, dtype=datatype) b_mats = ops.convert_to_tensor(b_mats_val, dtype=datatype) a_sm = dense_to_csr_sparse_matrix(a_mats) diff --git a/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py b/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py index 0cda66a63ad..c548ced3056 100644 --- a/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py +++ b/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py @@ -64,7 +64,7 @@ class CSRSparseMatrixGradTest(test.TestCase): sparsify = lambda m: m * (m > 0) for dense_shape in ([53, 65, 127], [127, 65]): mats_val = sparsify(np.random.randn(*dense_shape)) - with self.test_session(use_gpu=True) as sess: + with self.test_session() as sess: mats = math_ops.cast(mats_val, dtype=dtypes.float32) sparse_mats = dense_to_csr_sparse_matrix(mats) dense_mats = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense( @@ -96,7 +96,7 @@ class CSRSparseMatrixGradTest(test.TestCase): grad_vals = np.random.randn(*dense_shape).astype(np.float32) expected_a_grad = alpha * grad_vals expected_b_grad = beta * grad_vals - with self.test_session(use_gpu=True) as sess: + with self.test_session() as sess: a_mats = math_ops.cast(a_mats_val, dtype=dtypes.float32) b_mats = math_ops.cast(b_mats_val, dtype=dtypes.float32) a_sm = dense_to_csr_sparse_matrix(a_mats) diff --git a/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py b/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py index 07d1e6a2a06..27bedc05e10 100644 --- a/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py +++ b/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py @@ -79,7 +79,7 @@ class CSRSparseMatrixGradTest(test.TestCase): b_mats_val = np.transpose(b_mats_val, (0, 2, 1)) if adjoint_b: b_mats_val = np.conj(b_mats_val) - with self.test_session(use_gpu=True): + with self.test_session(): a_mats = ops.convert_to_tensor(a_mats_val, dtype=datatype) b_mats = 
ops.convert_to_tensor(b_mats_val, dtype=datatype) a_sm = dense_to_csr_sparse_matrix(a_mats) diff --git a/tensorflow/python/kernel_tests/linalg_ops_test.py b/tensorflow/python/kernel_tests/linalg_ops_test.py index 2cddddae0dd..eb429179770 100644 --- a/tensorflow/python/kernel_tests/linalg_ops_test.py +++ b/tensorflow/python/kernel_tests/linalg_ops_test.py @@ -59,7 +59,7 @@ class CholeskySolveTest(test.TestCase): def test_works_with_five_different_random_pos_def_matrices(self): for n in range(1, 6): for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]: - with self.session(use_gpu=True): + with self.session(): # Create 2 x n x n matrix array = np.array( [_RandomPDMatrix(n, self.rng), @@ -85,7 +85,7 @@ class LogdetTest(test.TestCase): with self.subTest(n=n, np_dtype=np_dtype, atol=atol): matrix = _RandomPDMatrix(n, self.rng, np_dtype) _, logdet_np = np.linalg.slogdet(matrix) - with self.session(use_gpu=True): + with self.session(): # Create 2 x n x n matrix # matrix = np.array( # [_RandomPDMatrix(n, self.rng, np_dtype), @@ -99,7 +99,7 @@ class LogdetTest(test.TestCase): with self.subTest(np_dtype=np_dtype, atol=atol): matrix = (np.eye(20) * 1e-6).astype(np_dtype) _, logdet_np = np.linalg.slogdet(matrix) - with self.session(use_gpu=True): + with self.session(): logdet_tf = linalg.logdet(matrix) self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol) @@ -117,7 +117,7 @@ class SlogdetTest(test.TestCase): with self.subTest(n=n, np_dtype=np_dtype, atol=atol): matrix = _RandomPDMatrix(n, self.rng, np_dtype) sign_np, log_abs_det_np = np.linalg.slogdet(matrix) - with self.session(use_gpu=True): + with self.session(): sign_tf, log_abs_det_tf = linalg.slogdet(matrix) self.assertAllClose( log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol) @@ -129,7 +129,7 @@ class SlogdetTest(test.TestCase): with self.subTest(np_dtype=np_dtype, atol=atol): matrix = (np.eye(20) * 1e-6).astype(np_dtype) sign_np, log_abs_det_np = np.linalg.slogdet(matrix) - with self.session(use_gpu=True): + with self.session(): sign_tf, log_abs_det_tf = linalg.slogdet(matrix) self.assertAllClose( log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol) @@ -259,7 +259,7 @@ class EyeTest(parameterized.TestCase, test.TestCase): num_columns=num_columns_placeholder, batch_shape=batch_shape_placeholder, dtype=dtype) - with self.session(use_gpu=True) as sess: + with self.session() as sess: eye_tf = sess.run( eye, feed_dict={ diff --git a/tensorflow/python/kernel_tests/lrn_op_test.py b/tensorflow/python/kernel_tests/lrn_op_test.py index fbe628c3944..f5488048523 100644 --- a/tensorflow/python/kernel_tests/lrn_op_test.py +++ b/tensorflow/python/kernel_tests/lrn_op_test.py @@ -55,7 +55,7 @@ class LRNOpTest(test.TestCase): return output def _RunAndVerify(self, dtype): - with self.cached_session(use_gpu=True): + with self.cached_session(): # random shape shape = np.random.randint(1, 16, size=4) # Make depth at least 2 to make it meaningful @@ -103,7 +103,7 @@ class LRNOpTest(test.TestCase): @test_util.run_deprecated_v1 def testGradientsZeroInput(self): - with self.session(use_gpu=True): + with self.session(): shape = [4, 4, 4, 4] p = array_ops.placeholder(dtypes.float32, shape=shape) inp_array = np.zeros(shape).astype("f") @@ -116,7 +116,7 @@ class LRNOpTest(test.TestCase): self.assertShapeEqual(expected, grad) def _RunAndVerifyGradients(self, dtype): - with self.cached_session(use_gpu=True): + with self.cached_session(): # random shape shape = np.random.randint(1, 5, size=4) # Make depth at least 2 to make it meaningful 
diff --git a/tensorflow/python/kernel_tests/manip_ops_test.py b/tensorflow/python/kernel_tests/manip_ops_test.py index 2e43d4a8e32..1b8319a7b4a 100644 --- a/tensorflow/python/kernel_tests/manip_ops_test.py +++ b/tensorflow/python/kernel_tests/manip_ops_test.py @@ -42,12 +42,12 @@ class RollTest(test_util.TensorFlowTestCase): def _testRoll(self, np_input, shift, axis): expected_roll = np.roll(np_input, shift, axis) - with self.cached_session(use_gpu=True): + with self.cached_session(): roll = manip_ops.roll(np_input, shift, axis) self.assertAllEqual(roll, expected_roll) def _testGradient(self, np_input, shift, axis): - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = constant_op.constant(np_input.tolist()) xs = list(np_input.shape) y = manip_ops.roll(inx, shift, axis) @@ -98,7 +98,7 @@ class RollTest(test_util.TensorFlowTestCase): self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1) self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2) # Make sure negative axis should be 0 <= axis + dims < dims - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "is out of range"): manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), @@ -122,7 +122,7 @@ class RollTest(test_util.TensorFlowTestCase): tensor = array_ops.placeholder(dtype=dtypes.int32) shift = 1 axis = 0 - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "input must be 1-D or higher"): manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7}) @@ -140,7 +140,7 @@ class RollTest(test_util.TensorFlowTestCase): tensor = [[1, 2], [3, 4]] shift = 1 axis = array_ops.placeholder(dtype=dtypes.int32) - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "axis must be a scalar or a 1-D vector"): manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]}) @@ -158,7 +158,7 @@ class RollTest(test_util.TensorFlowTestCase): tensor = [[1, 2], [3, 4]] shift = array_ops.placeholder(dtype=dtypes.int32) axis = 1 - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "shift must be a scalar or a 1-D vector"): manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]}) @@ -175,7 +175,7 @@ class RollTest(test_util.TensorFlowTestCase): tensor = [[1, 2], [3, 4]] shift = array_ops.placeholder(dtype=dtypes.int32) axis = [0, 1] - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "shift and axis must have the same size"): manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]}) @@ -184,7 +184,7 @@ class RollTest(test_util.TensorFlowTestCase): tensor = [1, 2] shift = 1 axis = 1 - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "is out of range"): manip_ops.roll(tensor, shift, axis).eval() diff --git a/tensorflow/python/kernel_tests/map_stage_op_test.py b/tensorflow/python/kernel_tests/map_stage_op_test.py index dd16fad6904..516fc37517c 100644 --- a/tensorflow/python/kernel_tests/map_stage_op_test.py +++ b/tensorflow/python/kernel_tests/map_stage_op_test.py @@ -46,7 +46,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + 
with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -68,7 +68,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -96,7 +96,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1, pi: 0}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i}) @@ -146,7 +146,7 @@ class MapStageTest(test.TestCase): n = 10 - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: for i in range(n): sess.run(stage, feed_dict={x: i, pi: i}) @@ -174,7 +174,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1, pi: 3}) self.assertEqual(sess.run(size), 1) sess.run(stage, feed_dict={x: -1, pi: 1}) @@ -209,7 +209,7 @@ class MapStageTest(test.TestCase): queue = Queue.Queue() n = 8 - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens @@ -273,7 +273,7 @@ class MapStageTest(test.TestCase): queue = Queue.Queue() n = 8 - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens @@ -334,7 +334,7 @@ class MapStageTest(test.TestCase): n = 10 - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # Keys n-1..0 keys = list(reversed(six.moves.range(n))) @@ -372,7 +372,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # 0 complete and incomplete entries self.assertTrue(sess.run([size, isize]) == [0, 0]) # Stage key 0, x and f tuple entries @@ -430,7 +430,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # 0 complete and incomplete entries self.assertTrue(sess.run([size, isize]) == [0, 0]) # Stage key 0, x and f tuple entries @@ -482,7 +482,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # 0 complete and incomplete entries self.assertTrue(sess.run([size, isize]) == [0, 0]) # Stage key 0, x and f tuple entries @@ -574,7 +574,7 @@ class MapStageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # Stage complete tuple sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3}) diff --git a/tensorflow/python/kernel_tests/matrix_exponential_op_test.py b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py index 61e2610e595..830d2a2865a 100644 --- a/tensorflow/python/kernel_tests/matrix_exponential_op_test.py +++ b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py @@ -149,7 +149,7 @@ class ExponentialOpTest(test.TestCase): @test_util.run_deprecated_v1 def 
testDynamic(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: inp = array_ops.placeholder(ops.dtypes.float32) expm = linalg_impl.matrix_exponential(inp) matrix = np.array([[1., 2.], [3., 4.]]) @@ -157,7 +157,7 @@ class ExponentialOpTest(test.TestCase): @test_util.run_deprecated_v1 def testConcurrentExecutesWithoutError(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: matrix1 = random_ops.random_normal([5, 5], seed=42) matrix2 = random_ops.random_normal([5, 5], seed=42) expm1 = linalg_impl.matrix_exponential(matrix1) diff --git a/tensorflow/python/kernel_tests/matrix_inverse_op_test.py b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py index 9a5a467a5a1..eebd5688868 100644 --- a/tensorflow/python/kernel_tests/matrix_inverse_op_test.py +++ b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py @@ -37,7 +37,7 @@ class InverseOpTest(test.TestCase): def _verifyInverse(self, x, np_type): for adjoint in False, True: y = x.astype(np_type) - with self.cached_session(use_gpu=True): + with self.cached_session(): # Verify that x^{-1} * x == Identity matrix. inv = linalg_ops.matrix_inverse(y, adjoint=adjoint) tf_ans = test_util.matmul_without_tf32(inv, y, adjoint_b=adjoint) @@ -139,7 +139,7 @@ class InverseOpTest(test.TestCase): @test_util.deprecated_graph_mode_only def testConcurrentExecutesWithoutError(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: all_ops = [] for adjoint_ in True, False: matrix1 = random_ops.random_normal([5, 5], seed=42) diff --git a/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py b/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py index d2e9c7c737b..e75d0df94e4 100644 --- a/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py +++ b/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py @@ -124,7 +124,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase): feed_dict = None self.assertEqual(np_ans.shape, tf_ans.get_shape()) if feed_dict: - with self.session(use_gpu=True) as sess: + with self.session() as sess: tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict) else: tf_ans_val = self.evaluate(tf_ans) @@ -137,7 +137,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase): tf_r = math_ops.matmul(a, tf_r, adjoint_a=True) tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1]) if feed_dict: - with self.session(use_gpu=True) as sess: + with self.session() as sess: tf_ans_val, tf_r_norm_val = sess.run([tf_ans, tf_r_norm], feed_dict=feed_dict) else: @@ -147,7 +147,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase): @test_util.run_in_graph_and_eager_modes(use_gpu=True) def testWrongDimensions(self): # The matrix and right-hand sides should have the same number of rows. 
- with self.session(use_gpu=True): + with self.session(): matrix = constant_op.constant([[1., 0.], [0., 1.]]) rhs = constant_op.constant([[1., 0.]]) with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)): diff --git a/tensorflow/python/kernel_tests/matrix_solve_op_test.py b/tensorflow/python/kernel_tests/matrix_solve_op_test.py index 209e60417da..0d149de2acb 100644 --- a/tensorflow/python/kernel_tests/matrix_solve_op_test.py +++ b/tensorflow/python/kernel_tests/matrix_solve_op_test.py @@ -63,7 +63,7 @@ class MatrixSolveOpTest(test.TestCase): a_ph = array_ops.placeholder(dtypes.as_dtype(np_type)) b_ph = array_ops.placeholder(dtypes.as_dtype(np_type)) tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: out = sess.run(tf_ans, {a_ph: a, b_ph: b}) else: tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint) diff --git a/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py b/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py index a497a0d0df8..2c85b1d3109 100644 --- a/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py +++ b/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py @@ -195,7 +195,7 @@ class MatrixTriangularSolveOpTest(test.TestCase): def testNonSquareMatrix(self): # A non-square matrix should cause an error. matrix = np.array([[1., 2., 3.], [3., 4., 5.]]) - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaises(ValueError): self._verifySolve(matrix, matrix) with self.assertRaises(ValueError): @@ -207,7 +207,7 @@ class MatrixTriangularSolveOpTest(test.TestCase): # right-hand sides. matrix = np.array([[1., 0.], [0., 1.]]) rhs = np.array([[1., 0.]]) - with self.cached_session(use_gpu=True): + with self.cached_session(): with self.assertRaises(ValueError): self._verifySolve(matrix, rhs) with self.assertRaises(ValueError): diff --git a/tensorflow/python/kernel_tests/norm_op_test.py b/tensorflow/python/kernel_tests/norm_op_test.py index f3787190426..ff32a58f212 100644 --- a/tensorflow/python/kernel_tests/norm_op_test.py +++ b/tensorflow/python/kernel_tests/norm_op_test.py @@ -68,7 +68,7 @@ def _GetNormOpTest(dtype_, shape_, ord_, axis_, keep_dims_, use_static_shape_): def _CompareNorm(self, matrix): np_norm = np.linalg.norm(matrix, ord=ord_, axis=axis_, keepdims=keep_dims_) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: if use_static_shape_: tf_matrix = constant_op.constant(matrix) tf_norm = linalg_ops.norm( diff --git a/tensorflow/python/kernel_tests/pad_op_test.py b/tensorflow/python/kernel_tests/pad_op_test.py index 30abf9a758c..6372188d0b4 100644 --- a/tensorflow/python/kernel_tests/pad_op_test.py +++ b/tensorflow/python/kernel_tests/pad_op_test.py @@ -372,7 +372,7 @@ class PadOpTest(test.TestCase): for dtype in [dtypes.int32, dtypes.int64]: paddings = np.zeros((0, 2)) inp = np.asarray(7) - with self.cached_session(use_gpu=True): + with self.cached_session(): tf_val = array_ops.pad(inp, constant_op.constant(paddings, dtype=dtype)) out = self.evaluate(tf_val) self.assertAllEqual(inp, out) @@ -397,7 +397,7 @@ class PadOpTest(test.TestCase): padded, [paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)], [-1, -1, -1, -1]) - with self.cached_session(use_gpu=True): + with self.cached_session(): self.assertAllEqual(inp, self.evaluate(middle)) self.assertAllEqual( np.zeros([row[0] for row in paddings_value]), self.evaluate(left)) diff 
--git a/tensorflow/python/kernel_tests/pool_test.py b/tensorflow/python/kernel_tests/pool_test.py index 0e6bbebdf50..cb408ae479c 100644 --- a/tensorflow/python/kernel_tests/pool_test.py +++ b/tensorflow/python/kernel_tests/pool_test.py @@ -248,7 +248,7 @@ class PoolingTest(test.TestCase): def testPoolNC(self): if test.is_gpu_available(cuda_only=True): # "NC*" format is currently only supported on CUDA. - with self.session(use_gpu=True): + with self.session(): for padding in ["SAME", "VALID"]: self._test( input_shape=[2, 2, 9], diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py index 98e043e8a80..73dca4f157a 100644 --- a/tensorflow/python/kernel_tests/pooling_ops_test.py +++ b/tensorflow/python/kernel_tests/pooling_ops_test.py @@ -906,7 +906,7 @@ class PoolingTest(test.TestCase): self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3], [1, 1, 1, 3], "evenly divide") if test.is_gpu_available(): - with self.session(use_gpu=True): + with self.session(): t = variables.Variable(np.ones([1, 2, 2, 4])) self.evaluate(variables.global_variables_initializer()) with self.assertRaisesOpError("for CPU devices"): @@ -922,7 +922,7 @@ class PoolingTest(test.TestCase): for dtype in [np.float32, np.float16] \ + [np.float64] if not test.is_built_with_rocm() else []: tensor_input = np.random.rand(*input_shape).astype(dtype) - with self.cached_session(use_gpu=True): + with self.cached_session(): t = constant_op.constant(tensor_input, shape=input_shape) out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding) gpu_val = self.evaluate(out_op) @@ -942,7 +942,7 @@ class PoolingTest(test.TestCase): # in the input. tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype) tensor_output = np.random.rand(*output_shape).astype(dtype) - with self.cached_session(use_gpu=True): + with self.cached_session(): t = constant_op.constant(tensor_input, shape=input_shape) _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding) argmax = self.evaluate(argmax_op) diff --git a/tensorflow/python/kernel_tests/py_func_test.py b/tensorflow/python/kernel_tests/py_func_test.py index b374119bceb..c8a02e6bdb5 100644 --- a/tensorflow/python/kernel_tests/py_func_test.py +++ b/tensorflow/python/kernel_tests/py_func_test.py @@ -755,7 +755,7 @@ class EagerPyFuncTest(PyFuncTestBase): y = script_ops.eager_py_func(func=f, inp=[x], Tout=dtypes.float32) z = script_ops.eager_py_func(func=g, inp=[y], Tout=dtypes.float32) - with self.session(use_gpu=True) as sess: + with self.session() as sess: output = sess.run(z, feed_dict={x: 3.0}) self.assertEqual(output, 18.0) diff --git a/tensorflow/python/kernel_tests/qr_op_test.py b/tensorflow/python/kernel_tests/qr_op_test.py index 7804aa7bf53..720a4d7dc0b 100644 --- a/tensorflow/python/kernel_tests/qr_op_test.py +++ b/tensorflow/python/kernel_tests/qr_op_test.py @@ -145,7 +145,7 @@ def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_): if use_static_shape_: q_tf_val, r_tf_val = self.evaluate([q_tf, r_tf]) else: - with self.session(use_gpu=True) as sess: + with self.session() as sess: q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np}) q_dims = q_tf_val.shape diff --git a/tensorflow/python/kernel_tests/random/multinomial_op_big_test.py b/tensorflow/python/kernel_tests/random/multinomial_op_big_test.py index 576720528e2..4a3b0cecdc5 100644 --- a/tensorflow/python/kernel_tests/random/multinomial_op_big_test.py +++ 
b/tensorflow/python/kernel_tests/random/multinomial_op_big_test.py @@ -34,7 +34,7 @@ class MultinomialTest(test.TestCase): def testLargeDynamicRange(self): random_seed.set_random_seed(10) counts_by_indices = {} - with self.test_session(use_gpu=True) as sess: + with self.test_session() as sess: samples = random_ops.multinomial( constant_op.constant([[-30, 0]], dtype=dtypes.float32), num_samples=1000000, @@ -52,7 +52,7 @@ class MultinomialTest(test.TestCase): def testLargeDynamicRange2(self): random_seed.set_random_seed(10) counts_by_indices = {} - with self.test_session(use_gpu=True) as sess: + with self.test_session() as sess: samples = random_ops.multinomial( constant_op.constant([[0, -30]], dtype=dtypes.float32), num_samples=1000000, @@ -72,7 +72,7 @@ class MultinomialTest(test.TestCase): random_seed.set_random_seed(10) counts_by_indices = {} # here the cpu undersamples and won't pass this test either - with self.test_session(use_gpu=True) as sess: + with self.test_session() as sess: samples = random_ops.multinomial( constant_op.constant([[0, -17]], dtype=dtypes.float32), num_samples=1000000, diff --git a/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py b/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py index 309c3e404db..5ec054f6bae 100644 --- a/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py +++ b/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py @@ -129,7 +129,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase): # TruncatedNormalMoments requires scipy.stats. # Give up early if we are unable to import it. random_seed.set_random_seed(seed) - with self.cached_session(use_gpu=True): + with self.cached_session(): if use_stateless: # Generate a seed that stateless ops can use. new_seed = random_ops.random_uniform([2], @@ -163,7 +163,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase): try: import scipy.stats # pylint: disable=g-import-not-at-top random_seed.set_random_seed(seed) - with self.cached_session(use_gpu=True): + with self.cached_session(): if use_stateless: new_seed = random_ops.random_uniform([2], seed=seed, @@ -298,7 +298,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase): minvals=-1., maxvals=1.) - with self.session(use_gpu=True) as sess: + with self.session() as sess: samples, samples_stateless = sess.run([sample_op, sample_op_stateless]) # 0. is more than 16 standard deviations from the mean, and # should have a likelihood < 1e-57. @@ -313,7 +313,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase): minval = variables.Variable(-1.) maxval = variables.Variable(1.) 
- with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: with backprop.GradientTape(persistent=True) as tape: samples = stateless.stateless_parameterized_truncated_normal( [1], [1, 2], mean, stddev, minval, maxval) diff --git a/tensorflow/python/kernel_tests/random/random_ops_test.py b/tensorflow/python/kernel_tests/random/random_ops_test.py index 135e4406c82..0063c7fc2b9 100644 --- a/tensorflow/python/kernel_tests/random/random_ops_test.py +++ b/tensorflow/python/kernel_tests/random/random_ops_test.py @@ -230,7 +230,7 @@ class TruncatedNormalTest(test.TestCase): @test_util.run_deprecated_v1 def testLargeShape(self): - with self.session(use_gpu=True): + with self.session(): v = variables.Variable( array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1])) n = random_ops.truncated_normal(v.shape) @@ -238,7 +238,7 @@ class TruncatedNormalTest(test.TestCase): @test_util.run_deprecated_v1 def testNoCSE(self): - with self.session(use_gpu=True): + with self.session(): shape = [2, 3, 4] rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32) rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32) @@ -371,7 +371,7 @@ class RandomUniformTest(RandomOpTestCommon): def testNoCSE(self): shape = [2, 3, 4] for dtype in dtypes.float16, dtypes.float32, dtypes.int32: - with self.session(use_gpu=True): + with self.session(): rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype) rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype) diff = (rnd2 - rnd1).eval() diff --git a/tensorflow/python/kernel_tests/random/random_poisson_test.py b/tensorflow/python/kernel_tests/random/random_poisson_test.py index eafa1d9382c..2d94533078d 100644 --- a/tensorflow/python/kernel_tests/random/random_poisson_test.py +++ b/tensorflow/python/kernel_tests/random/random_poisson_test.py @@ -104,7 +104,7 @@ class RandomPoissonTest(test.TestCase): merged. 
""" for dtype in dtypes.float16, dtypes.float32, dtypes.float64: - with self.cached_session(use_gpu=True): + with self.cached_session(): rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype) rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype) diff = rnd2 - rnd1 diff --git a/tensorflow/python/kernel_tests/random/stateless_random_ops_test.py b/tensorflow/python/kernel_tests/random/stateless_random_ops_test.py index 24b5a36d1cb..f60f5c46837 100644 --- a/tensorflow/python/kernel_tests/random/stateless_random_ops_test.py +++ b/tensorflow/python/kernel_tests/random/stateless_random_ops_test.py @@ -240,7 +240,7 @@ class StatelessOpsTest(test.TestCase, parameterized.TestCase): def _test_determinism(self, case, seed_type): # Stateless values should be equal iff the seeds are equal (roughly) seeds = [(x, y) for x in range(5) for y in range(5)] * 3 # pylint: disable=g-complex-comprehension - with self.test_session(use_gpu=True), ops.device(get_device().name): + with self.test_session(), ops.device(get_device().name): _, stateless_op, _ = case if context.executing_eagerly(): values = [ diff --git a/tensorflow/python/kernel_tests/reduction_ops_test.py b/tensorflow/python/kernel_tests/reduction_ops_test.py index 601c542eb6e..d1e23c7aa7d 100644 --- a/tensorflow/python/kernel_tests/reduction_ops_test.py +++ b/tensorflow/python/kernel_tests/reduction_ops_test.py @@ -156,7 +156,7 @@ class BaseReductionTest(test.TestCase): def _compare(self, x, reduction_axes, keepdims, feed_dict=None): np_ans = self._np_reduce(x, reduction_axes, keepdims) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: tf_ans = self._tf_reduce(x, reduction_axes, keepdims) out = sess.run(tf_ans, feed_dict) self.assertAllClose(np_ans, out) @@ -178,7 +178,7 @@ class BaseReductionTest(test.TestCase): if reduction_axes is not None and np.shape(reduction_axes) == (1,): # Test scalar reduction_axes argument self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol) - with self.cached_session(use_gpu=True): + with self.cached_session(): t = ops.convert_to_tensor(x) su = self._tf_reduce(t, reduction_axes, False) jacob_t, jacob_n = gradient_checker.compute_gradient( @@ -208,7 +208,7 @@ class SumReductionTest(BaseReductionTest): def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) self.assertAllEqual(tf_v, 0) @@ -403,7 +403,7 @@ class SumReductionTest(BaseReductionTest): @test_util.run_deprecated_v1 def testEmptyGradients(self): - with self.session(use_gpu=True): + with self.session(): x = array_ops.zeros([0, 3]) y = math_ops.reduce_sum(x, [1]) error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0]) @@ -411,7 +411,7 @@ class SumReductionTest(BaseReductionTest): @test_util.run_deprecated_v1 def testDegenerate(self): - with self.session(use_gpu=True): + with self.session(): for dtype in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128): # A large number is needed to get Eigen to die @@ -446,7 +446,7 @@ class MeanReductionTest(BaseReductionTest): def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) self.assertAllEqual(tf_v, 0) @@ -525,7 +525,7 @@ class 
MeanReductionTest(BaseReductionTest): @test_util.run_deprecated_v1 def testEmptyGradients(self): - with self.session(use_gpu=True): + with self.session(): x = array_ops.zeros([0, 3]) y = math_ops.reduce_mean(x, [1]) error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0]) @@ -533,7 +533,7 @@ class MeanReductionTest(BaseReductionTest): @test_util.run_deprecated_v1 def testDegenerate(self): - with self.session(use_gpu=True): + with self.session(): for dtype in (dtypes.float16, dtypes.float32, dtypes.float64): # A large number is needed to get Eigen to die x = array_ops.zeros((0, 9938), dtype=dtype) @@ -560,7 +560,7 @@ class EuclideanNormReductionTest(BaseReductionTest): @test_util.run_deprecated_v1 def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True): + with self.cached_session(): v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) self.assertAllEqual(tf_v, 0) @@ -609,7 +609,7 @@ class EuclideanNormReductionTest(BaseReductionTest): np_arr = self._makeIncremental((2,) * rank, dtypes.complex128) self._compareAllAxes(np_arr) - with self.session(use_gpu=True): + with self.session(): for dtype in (dtypes.float16, dtypes.float32, dtypes.float64): # A large number is needed to get Eigen to die x = array_ops.zeros((0, 9938), dtype=dtype) @@ -640,7 +640,7 @@ class ProdReductionTest(BaseReductionTest): def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) self.assertAllEqual(tf_v, 0) @@ -711,7 +711,7 @@ class ProdReductionTest(BaseReductionTest): @test_util.run_deprecated_v1 def testEmptyGradients(self): - with self.session(use_gpu=True): + with self.session(): x = array_ops.zeros([0, 3]) y = math_ops.reduce_prod(x, [1]) error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0]) @@ -719,7 +719,7 @@ class ProdReductionTest(BaseReductionTest): @test_util.run_deprecated_v1 def testDegenerate(self): - with self.session(use_gpu=True): + with self.session(): for dtype in (dtypes.float16, dtypes.float32, dtypes.float64): # A large number is needed to get Eigen to die x = array_ops.zeros((0, 9938), dtype=dtype) @@ -750,7 +750,7 @@ class MinReductionTest(test.TestCase): def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) self.assertAllEqual(tf_v, 0) @@ -866,7 +866,7 @@ class MaxReductionTest(test.TestCase): def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) self.assertAllEqual(tf_v, 0) @@ -998,7 +998,7 @@ class AllReductionTest(test.TestCase): def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.session(use_gpu=True) as sess: + with self.session() as sess: v = math_ops.reduce_all([True, True], constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) @@ -1047,7 +1047,7 @@ class AnyReductionTest(test.TestCase): def testAxesType(self): for dtype in [dtypes.int64, dtypes.int32]: - with self.session(use_gpu=True) as sess: + with self.session() as sess: v = math_ops.reduce_any([True, True], 
constant_op.constant(0, dtype=dtype)) tf_v = self.evaluate(v) diff --git a/tensorflow/python/kernel_tests/rnn_cell_test.py b/tensorflow/python/kernel_tests/rnn_cell_test.py index bb47d605941..c096357d2fb 100644 --- a/tensorflow/python/kernel_tests/rnn_cell_test.py +++ b/tensorflow/python/kernel_tests/rnn_cell_test.py @@ -223,7 +223,7 @@ class RNNTest(test.TestCase): self.assertEqual(out.get_shape(), inp.get_shape()) self.assertEqual(out.dtype, inp.dtype) - with self.session(use_gpu=True) as sess: + with self.session() as sess: input_value = np.random.randn(batch_size, input_size) values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value}) @@ -260,7 +260,7 @@ class RNNTest(test.TestCase): self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list()) self.assertEqual(out.dtype, inp.dtype) - with self.session(use_gpu=True) as sess: + with self.session() as sess: input_value = np.random.randn(batch_size, input_size) values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value}) full_dropout_values = sess.run( @@ -288,7 +288,7 @@ class RNNTest(test.TestCase): cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32) self.assertEqual(len(dynamic_outputs), len(inputs)) - with self.session(use_gpu=True) as sess: + with self.session() as sess: input_value = np.random.randn(batch_size, input_size) dynamic_values = sess.run( dynamic_outputs, @@ -324,7 +324,7 @@ class RNNTest(test.TestCase): 1.0 * (2 + 1) * np.ones((input_size))))) def _testScope(self, factory, prefix="prefix", use_outer_scope=True): - with self.session(use_gpu=True, graph=ops.Graph()): + with self.session(graph=ops.Graph()): if use_outer_scope: with variable_scope.variable_scope(prefix) as scope: factory(scope) @@ -388,7 +388,7 @@ class LSTMTest(test.TestCase): input_size = 5 batch_size = 2 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) cell = rnn_cell.LSTMCell( @@ -411,7 +411,7 @@ class LSTMTest(test.TestCase): input_size = 5 batch_size = 2 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) cell = rnn_cell.LSTMCell( @@ -442,7 +442,7 @@ class LSTMTest(test.TestCase): input_size = 5 batch_size = 2 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) state_saver = TestStateSaver(batch_size, 2 * num_units) @@ -583,7 +583,7 @@ class LSTMTest(test.TestCase): batch_size = 2 num_proj = 4 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) inputs = max_length * [ @@ -681,7 +681,7 @@ class LSTMTest(test.TestCase): num_proj_shards = 3 num_unit_shards = 2 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) @@ -715,7 +715,7 @@ class LSTMTest(test.TestCase): num_proj_shards = 3 num_unit_shards = 2 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: initializer = 
init_ops.random_uniform_initializer(-1, 1, seed=self._seed) inputs = max_length * [ array_ops.placeholder(dtypes.float64, shape=(None, input_size)) @@ -752,7 +752,7 @@ class LSTMTest(test.TestCase): num_proj_shards = 3 num_unit_shards = 2 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: inputs = max_length * [ array_ops.placeholder(dtypes.float32, shape=(None, input_size)) ] @@ -809,7 +809,7 @@ class LSTMTest(test.TestCase): num_proj_shards = 3 num_unit_shards = 2 max_length = 8 - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: sequence_length = array_ops.placeholder(dtypes.int64) initializer = init_ops.random_uniform_initializer( -0.01, 0.01, seed=self._seed) @@ -1151,7 +1151,7 @@ class LSTMTest(test.TestCase): state_is_tuple=False) ########### Step 1: Run static graph and generate readouts - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: if in_graph_mode: concat_inputs = array_ops.placeholder( dtypes.float32, shape=(time_steps, batch_size, input_size)) @@ -1211,7 +1211,7 @@ class LSTMTest(test.TestCase): static_individual_variable_gradients, feed_dict=feeds) ########## Step 2: Run dynamic graph and generate readouts - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: if in_graph_mode: concat_inputs = array_ops.placeholder( dtypes.float32, shape=(time_steps, batch_size, input_size)) @@ -1372,7 +1372,7 @@ class BidirectionalRNNTest(test.TestCase): return input_value, inputs, outputs, state_fw, state_bw, sequence_length def _testBidirectionalRNN(self, use_shape): - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: input_value, inputs, outputs, state_fw, state_bw, sequence_length = ( self._createBidirectionalRNN(use_shape, True)) variables_lib.global_variables_initializer().run() @@ -1419,7 +1419,7 @@ class BidirectionalRNNTest(test.TestCase): self.assertAllClose(s_fw, s_bw) def _testBidirectionalRNNWithoutSequenceLength(self, use_shape): - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: input_value, inputs, outputs, state_fw, state_bw, _ = ( self._createBidirectionalRNN(use_shape, False)) variables_lib.global_variables_initializer().run() @@ -1504,7 +1504,7 @@ class BidirectionalRNNTest(test.TestCase): def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple, use_time_major, use_sequence_length): - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: input_value, inputs, outputs, state_fw, state_bw, sequence_length = ( self._createBidirectionalDynamicRNN( use_shape, use_state_tuple, use_time_major, use_sequence_length)) @@ -1582,7 +1582,7 @@ class BidirectionalRNNTest(test.TestCase): # REMARKS: factory(scope) is a function accepting a scope # as an argument, such scope can be None, a string # or a VariableScope instance. 
- with self.session(use_gpu=True, graph=ops.Graph()): + with self.session(graph=ops.Graph()): if use_outer_scope: with variable_scope.variable_scope(prefix) as scope: factory(scope) @@ -1905,7 +1905,7 @@ class StateSaverRNNTest(test.TestCase): batch_size = 2 state_saver = TestStateSaver(batch_size, 2 * num_units) - with self.session(use_gpu=True, graph=ops.Graph()): + with self.session(graph=ops.Graph()): if use_outer_scope: with variable_scope.variable_scope(prefix) as scope: self._factory(scope=scope, state_saver=state_saver) @@ -1984,7 +1984,7 @@ class GRUTest(test.TestCase): sequence_length = np.random.randint(0, time_steps, size=batch_size) - with self.session(use_gpu=True, graph=ops.Graph()) as sess: + with self.session(graph=ops.Graph()) as sess: concat_inputs = array_ops.placeholder( dtypes.float32, shape=(time_steps, batch_size, input_size)) @@ -2006,7 +2006,7 @@ class GRUTest(test.TestCase): sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds) def _testScope(self, factory, prefix="prefix", use_outer_scope=True): - with self.session(use_gpu=True, graph=ops.Graph()): + with self.session(graph=ops.Graph()): if use_outer_scope: with variable_scope.variable_scope(prefix) as scope: factory(scope) @@ -2298,7 +2298,7 @@ class RawRNNTest(test.TestCase): np.ones((max_time, batch_size, 1), np.int64), output_vals[1]) def _testScope(self, factory, prefix="prefix", use_outer_scope=True): - with self.session(use_gpu=True, graph=ops.Graph()): + with self.session(graph=ops.Graph()): if use_outer_scope: with variable_scope.variable_scope(prefix) as scope: factory(scope) @@ -2416,7 +2416,7 @@ class TensorArrayOnCorrectDeviceTest(test.TestCase): sequence_length=sequence_length, dtype=dtypes.float32) - with self.session(use_gpu=True) as sess: + with self.session() as sess: opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() variables_lib.global_variables_initializer().run() @@ -2903,7 +2903,7 @@ class RNNCellTest(test.TestCase, parameterized.TestCase): return gpu_dev = test.gpu_device_name() - with self.session(use_gpu=True) as sess: + with self.session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): x = array_ops.zeros([1, 1, 3]) diff --git a/tensorflow/python/kernel_tests/rnn_test.py b/tensorflow/python/kernel_tests/rnn_test.py index 27732de19d1..7bf99400bcb 100644 --- a/tensorflow/python/kernel_tests/rnn_test.py +++ b/tensorflow/python/kernel_tests/rnn_test.py @@ -212,7 +212,7 @@ class RNNTest(test.TestCase): else: inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1)) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: outputs, state = rnn.dynamic_rnn( cell, inputs, dtype=dtypes.float32, sequence_length=[4]) if not in_eager_mode: @@ -232,7 +232,7 @@ class RNNTest(test.TestCase): else: inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1)) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: outputs, state = rnn.dynamic_rnn( cell, inputs, dtype=dtypes.float32, sequence_length=[4]) if not in_eager_mode: @@ -262,7 +262,7 @@ class RNNTest(test.TestCase): else: inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1)) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: outputs, state = rnn.dynamic_rnn( cell, inputs, dtype=dtypes.float32, sequence_length=[4]) state = (state[0], state[1].stack()) diff --git 
a/tensorflow/python/kernel_tests/scan_ops_test.py b/tensorflow/python/kernel_tests/scan_ops_test.py index b0161b8d232..e802d5b0eb7 100644 --- a/tensorflow/python/kernel_tests/scan_ops_test.py +++ b/tensorflow/python/kernel_tests/scan_ops_test.py @@ -79,7 +79,7 @@ class CumsumTest(test.TestCase): def _compare(self, x, axis, exclusive, reverse): np_out = handle_options(np.cumsum, x, axis, exclusive, reverse) - with self.cached_session(use_gpu=True): + with self.cached_session(): tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval() self.assertAllClose(np_out, tf_out) @@ -101,7 +101,7 @@ class CumsumTest(test.TestCase): for dtype in self.valid_dtypes: x = np.arange(1, 6).reshape([5]).astype(dtype) for axis_dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True): + with self.cached_session(): axis = constant_op.constant(0, axis_dtype) tf_out = math_ops.cumsum(x, axis).eval() @@ -152,7 +152,7 @@ class CumsumTest(test.TestCase): def testInvalidAxis(self): x = np.arange(0, 10).reshape([2, 5]).astype(np.float32) input_tensor = ops.convert_to_tensor(x) - with self.session(use_gpu=True): + with self.session(): with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, lambda e: "Expected scan axis in the range [-2, 2)" in str(e)): @@ -168,7 +168,7 @@ class CumsumTest(test.TestCase): def _compareGradient(self, shape, axis, exclusive, reverse): x = np.arange(0, 50).reshape(shape).astype(np.float64) - with self.cached_session(use_gpu=True): + with self.cached_session(): t = ops.convert_to_tensor(x) result = math_ops.cumsum(t, axis, exclusive, reverse) jacob_t, jacob_n = gradient_checker.compute_gradient( @@ -212,7 +212,7 @@ class CumprodTest(test.TestCase): def _compare(self, x, axis, exclusive, reverse): np_out = handle_options(np.cumprod, x, axis, exclusive, reverse) - with self.cached_session(use_gpu=True): + with self.cached_session(): tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval() self.assertAllClose(np_out, tf_out) @@ -234,7 +234,7 @@ class CumprodTest(test.TestCase): for dtype in self.valid_dtypes: x = np.arange(1, 6).reshape([5]).astype(dtype) for axis_dtype in [dtypes.int64, dtypes.int32]: - with self.cached_session(use_gpu=True): + with self.cached_session(): axis = constant_op.constant(0, axis_dtype) tf_out = math_ops.cumprod(x, axis).eval() @@ -278,7 +278,7 @@ class CumprodTest(test.TestCase): def testInvalidAxis(self): x = np.arange(0, 10).reshape([2, 5]).astype(np.float32) input_tensor = ops.convert_to_tensor(x) - with self.session(use_gpu=True): + with self.session(): with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, lambda e: "Expected scan axis in the range [-2, 2)" in str(e)): @@ -294,7 +294,7 @@ class CumprodTest(test.TestCase): def _compareGradient(self, shape, axis, exclusive, reverse): x = np.arange(1, 9).reshape(shape).astype(np.float64) - with self.cached_session(use_gpu=True): + with self.cached_session(): t = ops.convert_to_tensor(x) result = math_ops.cumprod(t, axis, exclusive, reverse) jacob_t, jacob_n = gradient_checker.compute_gradient( diff --git a/tensorflow/python/kernel_tests/scatter_ops_test.py b/tensorflow/python/kernel_tests/scatter_ops_test.py index b9206bf3221..5787098eb48 100644 --- a/tensorflow/python/kernel_tests/scatter_ops_test.py +++ b/tensorflow/python/kernel_tests/scatter_ops_test.py @@ -134,7 +134,7 @@ class ScatterTest(test.TestCase): repeat_indices=False, updates_are_scalar=False): np.random.seed(8) - with self.cached_session(use_gpu=True): + with 
self.cached_session(): for indices_shape in (), (2,), (3, 7), (3, 4, 7): for extra_shape in (), (5,), (5, 9): # Generate random indices with no duplicates for easy numpy comparison diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py index 6a9350bd3da..d4ff43b8341 100644 --- a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py +++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py @@ -307,7 +307,7 @@ class UnsortedSegmentTest(SegmentReductionHelper): ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list tf_x, np_x = self._input(shape, dtype=dtype) for use_gpu in [True, False]: - with self.cached_session(use_gpu=True): + with self.cached_session(): for np_op1, np_op2, tf_op, init_op in ops_list: # sqrt_n doesn't support integers if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer): @@ -333,7 +333,7 @@ class UnsortedSegmentTest(SegmentReductionHelper): for indices in indices_flat, indices_flat.reshape(5, 2): shape = indices.shape + (2,) for dtype in dtypes: - with self.cached_session(use_gpu=True): + with self.cached_session(): tf_x, np_x = self._input(shape) num_segments_constant = constant_op.constant( num_segments, dtype=dtype) @@ -433,7 +433,7 @@ class UnsortedSegmentTest(SegmentReductionHelper): shape = [n, num_cols] num_segments = max(indices) + 1 for dtype in self.differentiable_dtypes: - with self.cached_session(use_gpu=True): + with self.cached_session(): tf_x, np_x = self._input(shape, dtype=dtype) # Results from UnsortedSegmentSum unsorted_s = math_ops.unsorted_segment_sum( @@ -470,7 +470,7 @@ class UnsortedSegmentTest(SegmentReductionHelper): def testEmptySecondDimension(self): dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32, np.complex64, np.complex128] - with self.session(use_gpu=True): + with self.session(): for dtype in dtypes: for itype in (np.int32, np.int64): data = np.zeros((2, 0), dtype=dtype) @@ -486,7 +486,7 @@ class UnsortedSegmentTest(SegmentReductionHelper): for indices in indices_flat, indices_flat.reshape(5, 2): shape = indices.shape + (2,) for dtype in self.all_dtypes: - with self.session(use_gpu=True): + with self.session(): tf_x, np_x = self._input(shape, dtype=dtype) np_ans = self._segmentReduce( indices, np_x, np.add, op2=None, num_segments=num_segments) diff --git a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py index 40f8b31b7c2..4879a928a21 100644 --- a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py +++ b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py @@ -55,7 +55,7 @@ class SelfAdjointEigTest(test.TestCase): @test_util.run_deprecated_v1 def testConcurrentExecutesWithoutError(self): all_ops = [] - with self.session(use_gpu=True) as sess: + with self.session() as sess: for compute_v_ in True, False: matrix1 = random_ops.random_normal([5, 5], seed=42) matrix2 = random_ops.random_normal([5, 5], seed=42) @@ -84,7 +84,7 @@ class SelfAdjointEigTest(test.TestCase): "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32) self.assertEqual(matrix.shape, (32, 32)) matrix_tensor = constant_op.constant(matrix) - with self.session(use_gpu=True) as sess: + with self.session() as sess: (e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor)) self.assertEqual(e.size, 32) self.assertAllClose( @@ -156,7 +156,7 @@ def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_): else: atol = 1e-12 np_e, np_v = 
np.linalg.eigh(a) - with self.session(use_gpu=True): + with self.session(): if compute_v_: tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a)) @@ -211,7 +211,8 @@ def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_): tol = 1e-2 else: tol = 1e-7 - with self.session(use_gpu=True): + with self.session(): + def Compute(x): e, v = linalg_ops.self_adjoint_eig(x) # (complex) Eigenvectors are only unique up to an arbitrary phase diff --git a/tensorflow/python/kernel_tests/shape_ops_test.py b/tensorflow/python/kernel_tests/shape_ops_test.py index 5a165c94542..c5f6d02da64 100644 --- a/tensorflow/python/kernel_tests/shape_ops_test.py +++ b/tensorflow/python/kernel_tests/shape_ops_test.py @@ -267,7 +267,7 @@ class ShapeOpsTest(test.TestCase): for dtype in [dtypes.int32, dtypes.int64]: x = np.zeros([2]) np_ans = np.expand_dims(x, axis=0) - with self.cached_session(use_gpu=True): + with self.cached_session(): tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype)) tf_ans = self.evaluate(tensor) self.assertShapeEqual(np_ans, tensor) @@ -433,7 +433,7 @@ class TileTest(test.TestCase, parameterized.TestCase): def testSimple(self): # multiples could be int32 or int64 for dtype in [dtypes.int32, dtypes.int64]: - with self.cached_session(use_gpu=True): + with self.cached_session(): inp = np.random.rand(4, 1).astype(np.float32) a = constant_op.constant(inp) tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype)) @@ -505,7 +505,7 @@ class TileTest(test.TestCase, parameterized.TestCase): bytes: (dtypes.string, bytes) } for dtype_np, (dtype_tf, cast) in types_to_test.items(): - with self.cached_session(use_gpu=True): + with self.cached_session(): inp = np.random.rand(4, 1).astype(dtype_np) a = constant_op.constant( [cast(x) for x in inp.ravel(order="C")], @@ -601,7 +601,7 @@ class TileTest(test.TestCase, parameterized.TestCase): @test_util.run_deprecated_v1 def testGradientSimpleReductionOnGPU(self): - with self.session(use_gpu=True): + with self.session(): inp = np.random.rand(4, 1).astype("f") a = constant_op.constant( [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32) @@ -616,7 +616,7 @@ class TileTest(test.TestCase, parameterized.TestCase): @test_util.run_deprecated_v1 def testGradientStridedReductionOnGPU(self): - with self.session(use_gpu=True): + with self.session(): inp = np.random.rand(4, 2).astype("f") a = constant_op.constant( [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32) diff --git a/tensorflow/python/kernel_tests/signal/dct_ops_test.py b/tensorflow/python/kernel_tests/signal/dct_ops_test.py index d4f9e39590d..737952658ef 100644 --- a/tensorflow/python/kernel_tests/signal/dct_ops_test.py +++ b/tensorflow/python/kernel_tests/signal/dct_ops_test.py @@ -190,7 +190,7 @@ class DCTOpsTest(parameterized.TestCase, test.TestCase): # "ortho" normalization is not implemented for type I. 
if dct_type == 1 and norm == "ortho": return - with self.session(use_gpu=True): + with self.session(): tol = 5e-4 if dtype == np.float32 else 1e-7 signals = np.random.rand(*shape).astype(dtype) n = np.random.randint(1, 2 * signals.shape[-1]) diff --git a/tensorflow/python/kernel_tests/signal/fft_ops_test.py b/tensorflow/python/kernel_tests/signal/fft_ops_test.py index 762bdc57461..7563a408b3a 100644 --- a/tensorflow/python/kernel_tests/signal/fft_ops_test.py +++ b/tensorflow/python/kernel_tests/signal/fft_ops_test.py @@ -87,7 +87,8 @@ class BaseFFTOpsTest(test.TestCase): if test.is_built_with_rocm(): self.skipTest("Complex datatype not yet supported in ROCm.") return - with self.cached_session(use_gpu=True): + with self.cached_session(): + def f(inx, iny): inx.set_shape(x.shape) iny.set_shape(y.shape) @@ -123,12 +124,12 @@ class FFTOpsTest(BaseFFTOpsTest, parameterized.TestCase): def _tf_fft(self, x, rank, fft_length=None, feed_dict=None): # fft_length unused for complex FFTs. - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: return sess.run(self._tf_fft_for_rank(rank)(x), feed_dict=feed_dict) def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None): # fft_length unused for complex FFTs. - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: return sess.run(self._tf_ifft_for_rank(rank)(x), feed_dict=feed_dict) def _np_fft(self, x, rank, fft_length=None): @@ -299,12 +300,12 @@ class FFTOpsTest(BaseFFTOpsTest, parameterized.TestCase): class RFFTOpsTest(BaseFFTOpsTest, parameterized.TestCase): def _tf_fft(self, x, rank, fft_length=None, feed_dict=None): - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: return sess.run( self._tf_fft_for_rank(rank)(x, fft_length), feed_dict=feed_dict) def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None): - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: return sess.run( self._tf_ifft_for_rank(rank)(x, fft_length), feed_dict=feed_dict) diff --git a/tensorflow/python/kernel_tests/signal/shape_ops_test.py b/tensorflow/python/kernel_tests/signal/shape_ops_test.py index 6d9c77a0136..dc993903065 100644 --- a/tensorflow/python/kernel_tests/signal/shape_ops_test.py +++ b/tensorflow/python/kernel_tests/signal/shape_ops_test.py @@ -327,7 +327,7 @@ class FrameTest(test.TestCase): def test_gradient_numerical(self): if context.executing_eagerly(): return - with self.session(use_gpu=True): + with self.session(): signal_shape = (2, 128) signal = array_ops.ones(signal_shape) frame_length = 33 diff --git a/tensorflow/python/kernel_tests/signal/spectral_ops_test.py b/tensorflow/python/kernel_tests/signal/spectral_ops_test.py index f7844c60746..920d7751481 100644 --- a/tensorflow/python/kernel_tests/signal/spectral_ops_test.py +++ b/tensorflow/python/kernel_tests/signal/spectral_ops_test.py @@ -266,7 +266,7 @@ class SpectralOpsTest(test.TestCase, parameterized.TestCase): # TODO(rjryan): Update gradient tests for Eager. 
if context.executing_eagerly(): return - with self.session(use_gpu=True) as sess: + with self.session() as sess: signal_length = 512 # An all-zero signal has all zero gradients with respect to the sum of the diff --git a/tensorflow/python/kernel_tests/spacetobatch_op_test.py b/tensorflow/python/kernel_tests/spacetobatch_op_test.py index 0147f2b70f3..97b23b86ae8 100644 --- a/tensorflow/python/kernel_tests/spacetobatch_op_test.py +++ b/tensorflow/python/kernel_tests/spacetobatch_op_test.py @@ -101,7 +101,7 @@ class SpaceToBatchTest(test.TestCase, PythonOpImpl): """ def _testPad(self, inputs, paddings, block_size, outputs): - with self.cached_session(use_gpu=True): + with self.cached_session(): # outputs = space_to_batch(inputs) x_tf = self.space_to_batch( math_ops.cast(inputs, dtypes.float32), @@ -327,7 +327,7 @@ class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl): array_ops.space_to_depth( array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size), [3, 1, 2, 0]) - with self.session(use_gpu=True): + with self.session(): self.assertAllEqual(y1, y2) @@ -526,7 +526,7 @@ class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl): # Check the gradients. def _checkGrad(self, x, paddings, block_size): assert 4 == x.ndim - with self.cached_session(use_gpu=True): + with self.cached_session(): tf_x = ops.convert_to_tensor(x) tf_y = self.space_to_batch(tf_x, paddings, block_size) epsilon = 1e-5 diff --git a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py index 8e2115f9bfc..0e000c1b5cc 100644 --- a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py +++ b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py @@ -73,7 +73,7 @@ class SparseTensorDenseMatMulGradientTest(test.TestCase): matmul = sparse_ops.sparse_tensor_dense_matmul( sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name) - with self.cached_session(use_gpu=True): + with self.cached_session(): dense_t_shape = [m, k] if adjoint_b else [k, m] sp_t_val_shape = [nnz] err = gradient_checker.compute_gradient_error( diff --git a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py index 8ec1756c154..2abc4e2bd23 100644 --- a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py @@ -66,7 +66,7 @@ class SparseTensorDenseMatMulTest(test.TestCase): x_values = x[np.where(x)] x_shape = x.shape - with self.cached_session(use_gpu=True): + with self.cached_session(): sp_x_value = sparse_tensor.SparseTensorValue( indices=x_indices, values=x_values, dense_shape=x_shape) tf_value_ans = sparse_ops.sparse_tensor_dense_matmul( diff --git a/tensorflow/python/kernel_tests/sparse_xent_op_test.py b/tensorflow/python/kernel_tests/sparse_xent_op_test.py index c53f196ecb9..52dea44cb3e 100644 --- a/tensorflow/python/kernel_tests/sparse_xent_op_test.py +++ b/tensorflow/python/kernel_tests/sparse_xent_op_test.py @@ -64,7 +64,7 @@ class SparseXentTest(test.TestCase): def _testXent(self, np_features, np_labels): np_loss, np_backprop = self._npXent(np_features, np_labels) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( np_features, np_labels) tf_loss, tf_backprop = self.evaluate([loss, backprop]) @@ -73,7 +73,7 @@ class 
SparseXentTest(test.TestCase): def testSingleClass(self): for label_dtype in np.int32, np.int64: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( np.array([[1.], [-1.], [0.]]).astype(np.float32), np.array([0, 0, 0]).astype(label_dtype)) @@ -145,19 +145,19 @@ class SparseXentTest(test.TestCase): np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3) def testShapeMismatch(self): - with self.session(use_gpu=True): + with self.session(): with self.assertRaisesRegex(ValueError, ".*Rank mismatch:*"): nn_ops.sparse_softmax_cross_entropy_with_logits( labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]]) def testScalar(self): - with self.session(use_gpu=True): + with self.session(): with self.assertRaisesRegex(ValueError, ".*Logits cannot be scalars*"): nn_ops.sparse_softmax_cross_entropy_with_logits( labels=constant_op.constant(0), logits=constant_op.constant(1.0)) def testLabelsPlaceholderScalar(self): - with ops_lib.Graph().as_default(), self.session(use_gpu=True): + with ops_lib.Graph().as_default(), self.session(): labels = array_ops.placeholder(np.int32) y = nn_ops.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=[[7.]]) @@ -165,7 +165,7 @@ class SparseXentTest(test.TestCase): y.eval(feed_dict={labels: 0}) def testVector(self): - with self.session(use_gpu=True): + with self.session(): loss = nn_ops.sparse_softmax_cross_entropy_with_logits( labels=constant_op.constant(0), logits=constant_op.constant([1.0])) self.assertAllClose(0.0, self.evaluate(loss)) @@ -193,7 +193,7 @@ class SparseXentTest(test.TestCase): @test_util.run_in_graph_and_eager_modes(use_gpu=True) def testGradient(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: l = constant_op.constant([3, 0, 1], name="l") f = constant_op.constant( [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4], diff --git a/tensorflow/python/kernel_tests/split_op_test.py b/tensorflow/python/kernel_tests/split_op_test.py index 16f92dbd875..58674abd144 100644 --- a/tensorflow/python/kernel_tests/split_op_test.py +++ b/tensorflow/python/kernel_tests/split_op_test.py @@ -55,13 +55,13 @@ class SplitOpTest(test.TestCase): model_input = array_ops.placeholder(dtypes.float32) inp = np.zeros((1, 10)) # check that we still fail at runtime if the shapes were unknown - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: with self.assertRaises(errors_impl.InvalidArgumentError): sess.run(array_ops.split(model_input, [4]), {model_input: inp}) # scalar Tensors are not permitted as num_splits for axis in [0, -2]: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: with self.assertRaises(ValueError): # pylint: disable=expression-not-assigned sess.run( @@ -83,7 +83,7 @@ class SplitOpTest(test.TestCase): model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2]) result = array_ops.split(model_input2, [2, 2], axis=0)[0] - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: sess.run(result, feed_dict={model_input2: np.ones([4, 2])}) @test_util.run_deprecated_v1 @@ -92,7 +92,7 @@ class SplitOpTest(test.TestCase): value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - with self.session(use_gpu=True) as sess: + with self.session() as sess: with self.assertRaises(ValueError) as context: sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]}) self.assertTrue("Cannot infer num from shape" in 
str(context.exception)) @@ -214,7 +214,7 @@ class SplitOpTest(test.TestCase): @test_util.run_deprecated_v1 def testOutputShape(self): for axis in [1, -1]: - with self.cached_session(use_gpu=True): + with self.cached_session(): tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12]) size_splits = [3, 7, 2] outputs = array_ops.split(tensor, size_splits, axis) @@ -315,7 +315,7 @@ class SplitOpTest(test.TestCase): def _testGradientsSimple(self, dtype): inp = self._makeData((4, 4), dtype) - with self.cached_session(use_gpu=True): + with self.cached_session(): inp_tensor = ops.convert_to_tensor(inp) s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1) inp_grads = [self._makeData((4, 1), dtype) for _ in range(4)] @@ -382,7 +382,7 @@ class SplitOpTest(test.TestCase): splits = array_ops.placeholder(dtypes.int32, [3]) y = array_ops.split(values, splits, axis=x) - with self.session(use_gpu=True) as sess: + with self.session() as sess: with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "must have exactly one element"): sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]}) diff --git a/tensorflow/python/kernel_tests/stage_op_test.py b/tensorflow/python/kernel_tests/stage_op_test.py index 29cd00b7892..8ea4c5daa2e 100644 --- a/tensorflow/python/kernel_tests/stage_op_test.py +++ b/tensorflow/python/kernel_tests/stage_op_test.py @@ -43,7 +43,7 @@ class StageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i}) @@ -63,7 +63,7 @@ class StageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i}) @@ -89,7 +89,7 @@ class StageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1}) for i in range(10): _, yval = sess.run([stage, y], feed_dict={x: i}) @@ -131,7 +131,7 @@ class StageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: for i in range(10): sess.run(stage, feed_dict={x: i}) @@ -156,7 +156,7 @@ class StageTest(test.TestCase): G.finalize() - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: sess.run(stage, feed_dict={x: -1}) self.assertEqual(sess.run(size), 1) sess.run(stage, feed_dict={x: -1}) @@ -189,7 +189,7 @@ class StageTest(test.TestCase): queue = Queue.Queue() n = 8 - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens @@ -254,7 +254,7 @@ class StageTest(test.TestCase): queue = Queue.Queue() n = 8 - with self.session(use_gpu=True, graph=G) as sess: + with self.session(graph=G) as sess: # Stage data in a separate thread which will block # when it hits the staging area's capacity and thus # not fill the queue with n tokens diff --git a/tensorflow/python/kernel_tests/svd_op_test.py b/tensorflow/python/kernel_tests/svd_op_test.py index 8bbfc517857..d64697b41bb 100644 --- a/tensorflow/python/kernel_tests/svd_op_test.py +++ b/tensorflow/python/kernel_tests/svd_op_test.py @@ -163,7 +163,7 @@ def _GetSvdOpTest(dtype_, shape_,
use_static_shape_, compute_uv_, if use_static_shape_: s_tf_val, u_tf_val, v_tf_val = self.evaluate([s_tf, u_tf, v_tf]) else: - with self.session(use_gpu=True) as sess: + with self.session() as sess: s_tf_val, u_tf_val, v_tf_val = sess.run( [s_tf, u_tf, v_tf], feed_dict={x_tf: x_np}) else: @@ -172,7 +172,7 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_, if use_static_shape_: s_tf_val = self.evaluate(s_tf) else: - with self.session(use_gpu=True) as sess: + with self.session() as sess: s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np}) if compute_uv_: @@ -284,7 +284,7 @@ def _GetSvdGradGradOpTest(dtype_, shape_, compute_uv_, full_matrices_): epsilon = np.finfo(dtype_).eps delta = 0.1 * epsilon**(1.0 / 3.0) tol = 1e-5 - with self.session(use_gpu=True): + with self.session(): tf_a = constant_op.constant(a) if compute_uv_: tf_s, tf_u, tf_v = _NormalizingSvd(tf_a, full_matrices_) diff --git a/tensorflow/python/kernel_tests/tensor_array_ops_test.py b/tensorflow/python/kernel_tests/tensor_array_ops_test.py index 892c5855e58..995f4c787f9 100644 --- a/tensorflow/python/kernel_tests/tensor_array_ops_test.py +++ b/tensorflow/python/kernel_tests/tensor_array_ops_test.py @@ -83,7 +83,7 @@ class TensorArrayTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testTensorArrayWriteRead(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -104,7 +104,7 @@ class TensorArrayTest(test.TestCase): self.assertAllEqual(-3.0, d2) def _testTensorArrayWritePack(self, tf_dtype): - with self.cached_session(use_gpu=True): + with self.cached_session(): ta = tensor_array_ops.TensorArray( dtype=tf_dtype, tensor_array_name="foo", size=3) @@ -133,7 +133,7 @@ class TensorArrayTest(test.TestCase): self._testTensorArrayWritePackMaybeLegacy() def testEmptyTensorArrayPack(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=3) @@ -148,7 +148,7 @@ class TensorArrayTest(test.TestCase): self.assertAllEqual([3, 0, 1], c0.shape) def testTensorArrayWriteConcatInParallel(self): - with self.session(use_gpu=True): + with self.session(): def _concat_1(): ta = tensor_array_ops.TensorArray( @@ -189,7 +189,7 @@ class TensorArrayTest(test.TestCase): self.assertAllEqual([1, 1, 1, 8, 9, 8, 9, 8, 9], c0) def _testTensorArrayWriteConcat(self, tf_dtype): - with self.cached_session(use_gpu=True): + with self.cached_session(): ta = tensor_array_ops.TensorArray( dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False) @@ -217,7 +217,7 @@ class TensorArrayTest(test.TestCase): self._testTensorArrayWriteConcat(dtypes.string) def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self): - with self.cached_session(use_gpu=True): + with self.cached_session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -251,7 +251,7 @@ class TensorArrayTest(test.TestCase): @test_util.run_v1_only("Uses placeholders") def testSkipEagerTensorArrayReadUninitializedInferShapeFillsZeros(self): - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -261,7 +261,7 @@ class TensorArrayTest(test.TestCase): [[0.0, 0.0]], sess.run(ta.write(1, val).read(0), {val: [[4.0, 5.0]]})) def _testTensorArrayUnpackRead(self, tf_dtype): - with self.cached_session(use_gpu=True): + with 
self.cached_session(): convert = _make_converter(tf_dtype) ta = _make_ta(3, "foo", dtype=tf_dtype) @@ -311,7 +311,7 @@ class TensorArrayTest(test.TestCase): self._testTensorArrayUnpackReadMaybeLegacy() def _testTensorArraySplitRead(self, tf_dtype): - with self.cached_session(use_gpu=True): + with self.cached_session(): convert = _make_converter(tf_dtype) # Split an empty vector @@ -365,7 +365,7 @@ class TensorArrayTest(test.TestCase): @test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.") @test_util.run_v1_only("v2 does not support TensorArray.grad.") def testSkipEagerTensorGradArrayWriteRead(self): - with self.session(use_gpu=True) as session: + with self.session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -401,7 +401,7 @@ class TensorArrayTest(test.TestCase): def testSkipEagerTensorArrayGradGrad(self): if not control_flow_util.ENABLE_CONTROL_FLOW_V2: self.skipTest("Legacy TensorArray does not support double derivatives.") - with self.test_session(use_gpu=True) as session: + with self.test_session() as session: x = constant_op.constant(4.0) ta = tensor_array_ops.TensorArray( @@ -420,7 +420,7 @@ class TensorArrayTest(test.TestCase): @test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.") @test_util.run_v1_only("v2 does not support TensorArray.grad.") def testSkipEagerTensorGradArrayDynamicWriteRead(self): - with self.session(use_gpu=True) as session: + with self.session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -463,7 +463,7 @@ class TensorArrayTest(test.TestCase): @test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.") @test_util.run_v1_only("v2 does not support TensorArray.grad.") def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self): - with self.session(use_gpu=True) as session: + with self.session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=3) g_ta_0 = ta.grad("grad") @@ -479,7 +479,7 @@ class TensorArrayTest(test.TestCase): self.assertAllEqual([[4.0, 5.0]], d_r1_0) def testTensorArrayWriteWrongIndexOrDataTypeFails(self): - with self.session(use_gpu=True): + with self.session(): ta = _make_ta(3, "foo", dtype=dtypes.float32) # TODO(b/129870929): Remove the last 2 checks (runtime checks) after # switching back from preferred_dtype= to dtype= in convert_to_tensor.
Also @@ -518,7 +518,7 @@ class TensorArrayTest(test.TestCase): self.evaluate(ta.write(3, 3.0).flow) def testTensorArrayReadWrongIndexOrDataTypeFails(self): - with self.session(use_gpu=True): + with self.session(): ta = _make_ta(3, "foo", dtype=dtypes.float32) w0 = ta.write(0, [[4.0, 5.0]]) @@ -553,7 +553,7 @@ class TensorArrayTest(test.TestCase): @test_util.disable_control_flow_v2("v2 allows multiple writes.") @test_util.run_v1_only("v2 allows multiple writes.") def testSkipEagerTensorArrayWriteMultipleFails(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=3) @@ -563,7 +563,7 @@ class TensorArrayTest(test.TestCase): self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow) def testTensorArrayConcatIncompatibleShapesFails(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -597,7 +597,7 @@ class TensorArrayTest(test.TestCase): self.evaluate(w3.concat()) def testTensorArraySplitIncompatibleShapesFails(self): - with self.session(use_gpu=True): + with self.session(): in_eager_mode = context.executing_eagerly() ta = _make_ta(3, "foo") with self.assertRaisesOpError( @@ -636,7 +636,7 @@ class TensorArrayTest(test.TestCase): self.evaluate(ta.split([1.0], [1]).flow) def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype): - with self.cached_session(use_gpu=True): + with self.cached_session(): ta = tensor_array_ops.TensorArray( dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False) ta_grad = ta.grad("grad") @@ -679,7 +679,7 @@ class TensorArrayTest(test.TestCase): @test_util.disable_control_flow_v2("Low level legacy TA op test.") @test_util.run_v1_only("Low level legacy TA op test.") def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: ta = tensor_array_ops.TensorArray( size=3, dtype=dtypes.float32, @@ -710,7 +710,7 @@ class TensorArrayTest(test.TestCase): @test_util.disable_control_flow_v2("Low level legacy TA op test.") @test_util.run_v1_only("Low level legacy TA op test.") def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: ta = tensor_array_ops.TensorArray( size=3, dtype=dtypes.float32, element_shape=None) # Note that element_shape is unknown @@ -733,7 +733,7 @@ class TensorArrayTest(test.TestCase): sess.run(read_value, feed_dict={value: fed_value})) def testMultiTensorArray(self): - with self.session(use_gpu=True): + with self.session(): h1 = tensor_array_ops.TensorArray( size=1, dtype=dtypes.float32, tensor_array_name="foo") w1 = h1.write(0, 4.0) @@ -749,7 +749,7 @@ class TensorArrayTest(test.TestCase): self.assertAllClose(9.0, val) def _testTensorArrayGradientWriteReadType(self, dtype): - with self.cached_session(use_gpu=True) as session: + with self.cached_session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.as_dtype(dtype), tensor_array_name="foo", @@ -801,7 +801,7 @@ class TensorArrayTest(test.TestCase): self._testTensorArrayGradientWriteReadType(dtype) def _testTensorArrayGradientWritePackConcatAndRead(self): - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -839,7 +839,7 @@ class TensorArrayTest(test.TestCase): @test_util.disable_control_flow_v2("v2 does not support 
clear_after_read.") @test_util.run_v1_only("v2 does not support clear_after_read.") def testTensorArrayReadTwice(self): - with self.session(use_gpu=True): + with self.session(): value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]]) ta_readonce = tensor_array_ops.TensorArray( @@ -867,7 +867,7 @@ class TensorArrayTest(test.TestCase): self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice)) def _testTensorArrayGradientUnpackRead(self): - with self.cached_session(use_gpu=True) as session: + with self.cached_session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -897,7 +897,7 @@ class TensorArrayTest(test.TestCase): @test_util.deprecated_graph_mode_only def testSkipEagerTensorArrayGradientSplitConcat(self): - with self.session(use_gpu=True) as session: + with self.session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=2, infer_shape=False) @@ -920,7 +920,7 @@ class TensorArrayTest(test.TestCase): grad_vals[0]) def _testTensorArrayGradientDynamicUnpackRead(self): - with self.cached_session(use_gpu=True) as session: + with self.cached_session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -946,20 +946,20 @@ class TensorArrayTest(test.TestCase): self._testTensorArrayGradientDynamicUnpackRead() def testCloseTensorArray(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=3) self.evaluate(ta.close()) def testSizeTensorArray(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=3) s = ta.size() self.assertAllEqual(3, self.evaluate(s)) def testWriteCloseTensorArray(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -971,7 +971,8 @@ class TensorArrayTest(test.TestCase): def _testWhileLoopWritePackGradients(self, dynamic_size, dtype): np_dtype = dtype.as_numpy_dtype - with self.cached_session(use_gpu=True): + with self.cached_session(): + def func(v0, state0, var): ta = tensor_array_ops.TensorArray( dtype=dtype, @@ -1068,7 +1069,8 @@ class TensorArrayTest(test.TestCase): dynamic_size=True, dtype=dtypes.float32) def testGradSerialTwoLoops(self): - with self.session(use_gpu=True): + with self.session(): + def loop(x): num_steps = 100 acc = tensor_array_ops.TensorArray( @@ -1117,7 +1119,7 @@ class TensorArrayTest(test.TestCase): @test_util.deprecated_graph_mode_only def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self): - with self.session(use_gpu=True) as session: + with self.session() as session: a = array_ops.identity( np.arange( 3 * 5, dtype=np.float32).reshape(3, 5) + 1) @@ -1195,7 +1197,7 @@ class TensorArrayTest(test.TestCase): @test_util.deprecated_graph_mode_only def testSkipEagerWriteShape(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=3) c0 = constant_op.constant([4.0, 5.0]) @@ -1220,7 +1222,7 @@ class TensorArrayTest(test.TestCase): @test_util.deprecated_graph_mode_only def testSkipEagerPartlyUnknownShape(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", size=6) @@ -1260,7 +1262,7 @@ class TensorArrayTest(test.TestCase): 
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list()) def _testUnpackShape(self): - with self.cached_session(use_gpu=True): + with self.cached_session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -1297,7 +1299,7 @@ class TensorArrayTest(test.TestCase): @test_util.deprecated_graph_mode_only def testSplitShape(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -1329,7 +1331,7 @@ class TensorArrayTest(test.TestCase): @test_util.deprecated_graph_mode_only def testSkipEagerWriteUnknownShape(self): - with self.session(use_gpu=True): + with self.session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -1341,7 +1343,7 @@ class TensorArrayTest(test.TestCase): self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape()) def _testGradientWhenNotAllComponentsRead(self): - with self.cached_session(use_gpu=True) as session: + with self.cached_session() as session: ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2) x = constant_op.constant([2.0, 3.0]) w = ta.unstack(x) @@ -1357,7 +1359,7 @@ class TensorArrayTest(test.TestCase): @test_util.deprecated_graph_mode_only def testSkipEagerWriteButNotAllComponentsReadGrad(self): - with self.cached_session(use_gpu=True) as session: + with self.cached_session() as session: x0 = constant_op.constant(5.0) x1 = constant_op.constant(10.0) ta = tensor_array_ops.TensorArray( @@ -1369,7 +1371,7 @@ class TensorArrayTest(test.TestCase): self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0]) def _testTensorArrayUnpackDynamic(self): - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=3, dynamic_size=True) x = constant_op.constant([1.0, 2.0, 3.0]) @@ -1386,7 +1388,7 @@ class TensorArrayTest(test.TestCase): @test_util.run_deprecated_v1 def testSkipEagerTensorArraySplitDynamic(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=3, dynamic_size=True) x = constant_op.constant([1.0, 2.0, 3.0]) @@ -1449,7 +1451,7 @@ class TensorArrayTest(test.TestCase): ta_gather_with_unknown_indices_shape([0]) def _testTensorArrayEvalEmpty(self): - with self.cached_session(use_gpu=True): + with self.cached_session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False) v2_msg = ("Tried to stack elements of an empty list with " @@ -1469,7 +1471,7 @@ class TensorArrayTest(test.TestCase): # this test is ill-defined for Eager mode --- unpacking an empty tensor # gives an empty list / there is no equivalent of "mark_used" in Eager def _testTensorArrayEvalEmptyWithDefault(self): - with self.cached_session(use_gpu=True): + with self.cached_session(): ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True) self.assertEqual(0, ta.size().eval()) @@ -1491,7 +1493,7 @@ class TensorArrayTest(test.TestCase): @test_util.run_deprecated_v1 def testSkipEagerTensorArrayScatterReadAndGradients(self): - with self.session(use_gpu=True) as session: + with self.session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -1518,7 +1520,7 @@ class TensorArrayTest(test.TestCase): @test_util.run_deprecated_v1 def testSkipEagerTensorArrayScatterPartialReadAndGradients(self): - with
self.session(use_gpu=True) as session: + with self.session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -1554,7 +1556,7 @@ class TensorArrayTest(test.TestCase): @test_util.run_v1_only("b/118890905") def testTensorArrayWriteGatherAndGradients(self): - with self.session(use_gpu=True) as session: + with self.session() as session: ta = tensor_array_ops.TensorArray( dtype=dtypes.float32, tensor_array_name="foo", @@ -1703,7 +1705,7 @@ class TensorArrayTest(test.TestCase): [s for s in dev_stats[d] if "TensorArray" == s.node_name]) def testTensorArrayIdentity(self): - with self.session(use_gpu=True): + with self.session(): ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2, infer_shape=False) ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4, @@ -1769,7 +1771,7 @@ class TensorArrayTest(test.TestCase): # dy is outside of the gradients name scope; tf.gradients must # wrap it in the correct name scope. dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy]) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: vdx, vdy = self.evaluate([dx, dy]) self.assertAllClose(vdx, vdy) @@ -1777,7 +1779,7 @@ class TensorArrayTest(test.TestCase): def testSkipEagerTensorArrayInt64GPU(self): if not test.is_gpu_available(): return - with self.session(use_gpu=True, force_gpu=True) as sess: + with self.session(force_gpu=True) as sess: value = array_ops.placeholder(dtypes.int64) ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2) ta = ta.scatter([0, 1], value) diff --git a/tensorflow/python/kernel_tests/tensordot_op_test.py b/tensorflow/python/kernel_tests/tensordot_op_test.py index 268f6891d4e..845b6347401 100644 --- a/tensorflow/python/kernel_tests/tensordot_op_test.py +++ b/tensorflow/python/kernel_tests/tensordot_op_test.py @@ -179,7 +179,7 @@ def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_): for _ in range(num_trials): a_np, b_np, a_dims_np, b_dims_np = _generate_random_tensors_and_dims() np_ans = np.tensordot(a_np, b_np, axes=(a_dims_np, b_dims_np)) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: if dynamic_shape_: a = array_ops.placeholder(dtype_) b = array_ops.placeholder(dtype_) @@ -219,7 +219,7 @@ def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_): all_axes.append(a_np.ndim - 1) for axes in all_axes: np_ans = np.tensordot(a_np, b_np, axes=axes) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: if dynamic_shape_: a = array_ops.placeholder(dtype_) b = array_ops.placeholder(dtype_) diff --git a/tensorflow/python/kernel_tests/topk_op_test.py b/tensorflow/python/kernel_tests/topk_op_test.py index b17a8f02594..106f2064224 100644 --- a/tensorflow/python/kernel_tests/topk_op_test.py +++ b/tensorflow/python/kernel_tests/topk_op_test.py @@ -47,7 +47,7 @@ class TopKTest(test.TestCase): sorted=True): # pylint: disable=redefined-builtin np_expected_values = np.array(expected_values) np_expected_indices = np.array(expected_indices) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted) values, indices = self.evaluate([values_op, indices_op]) @@ -196,7 +196,7 @@ class TopKTest(test.TestCase): @test_util.run_deprecated_v1 def testKNegative(self): inputs = [[0.1, 0.2], [0.3, 0.4]] - with self.session(use_gpu=True): + with self.session(): k = 
array_ops.placeholder(dtypes.int32) values, _ = nn_ops.top_k(inputs, k) with self.assertRaisesOpError("Need k >= 0, got -7"): @@ -211,7 +211,7 @@ class TopKTest(test.TestCase): @test_util.run_deprecated_v1 def testTopKGradients(self): - with self.session(use_gpu=True) as sess: + with self.session() as sess: inputs = array_ops.placeholder(dtypes.float32, shape=[2, 5]) values, _ = nn_ops.top_k(inputs, 3) grad = sess.run( diff --git a/tensorflow/python/kernel_tests/trace_op_test.py b/tensorflow/python/kernel_tests/trace_op_test.py index 52640c02c22..681203096f9 100644 --- a/tensorflow/python/kernel_tests/trace_op_test.py +++ b/tensorflow/python/kernel_tests/trace_op_test.py @@ -31,7 +31,7 @@ class TraceTest(test.TestCase): def compare(self, x): np_ans = np.trace(x, axis1=-2, axis2=-1) - with self.cached_session(use_gpu=True): + with self.cached_session(): tf_ans = math_ops.trace(x).eval() self.assertAllClose(tf_ans, np_ans) diff --git a/tensorflow/python/kernel_tests/transpose_op_test.py b/tensorflow/python/kernel_tests/transpose_op_test.py index 87096211a01..2c6f5ea0b79 100644 --- a/tensorflow/python/kernel_tests/transpose_op_test.py +++ b/tensorflow/python/kernel_tests/transpose_op_test.py @@ -79,7 +79,7 @@ class TransposeTest(test.TestCase): np_ans = self._np_transpose(x, perm) if conjugate: np_ans = np.conj(np_ans) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(x) y = array_ops.transpose(inx, p, conjugate=conjugate) tf_ans = self.evaluate(y) @@ -170,7 +170,7 @@ class TransposeTest(test.TestCase): inp = np.arange( 1, total_size + 1, dtype=datatype).reshape(input_shape) np_ans = self._np_transpose(inp, perm) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(inp) y = array_ops.transpose(inx, perm) tf_ans = self.evaluate(y) @@ -193,7 +193,7 @@ class TransposeTest(test.TestCase): inp = np.arange( 1, total_size + 1, dtype=np.float32).reshape(input_shape) np_ans = self._np_transpose(inp, perm) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(inp) y = array_ops.transpose(inx, perm) tf_ans = self.evaluate(y) @@ -230,7 +230,7 @@ class TransposeTest(test.TestCase): inp = np.arange( 1, total_size + 1, dtype=np.float32).reshape(input_shape) np_ans = self._np_transpose(inp, perm) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(inp) y = array_ops.transpose(inx, perm) tf_ans = self.evaluate(y) @@ -255,7 +255,7 @@ class TransposeTest(test.TestCase): inp = np.arange( 1, total_size + 1, dtype=datatype).reshape(input_shape) np_ans = self._np_transpose(inp, perm) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(inp) y = array_ops.transpose(inx, perm) tf_ans = self.evaluate(y) @@ -278,7 +278,7 @@ class TransposeTest(test.TestCase): inp = np.arange( 1, total_size + 1, dtype=np.float32).reshape(input_shape) np_ans = self._np_transpose(inp, perm) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(inp) y = array_ops.transpose(inx, perm) tf_ans = self.evaluate(y) @@ -331,7 +331,7 @@ class TransposeTest(test.TestCase): with self.subTest(input_shape=input_shape, perm=perm): inp = np.random.randint(10, size=input_shape) np_ans = self._np_transpose(inp, perm) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(inp) y = array_ops.transpose(inx, perm) tf_ans = 
self.evaluate(y) @@ -355,7 +355,7 @@ class TransposeTest(test.TestCase): x = np.arange(0, 8).reshape([2, 4]).astype(np.float32) p = np.array([1, 0]).astype(perm_dtype) np_ans = np.copy(x).transpose(p) - with self.cached_session(use_gpu=True): + with self.cached_session(): inx = ops.convert_to_tensor(x) inp = constant_op.constant(p) y = array_ops.transpose(inx, inp) diff --git a/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py b/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py index 456f13e86a7..38544000902 100644 --- a/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py +++ b/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py @@ -80,7 +80,7 @@ class TridiagonalMulOpTest(test.TestCase): diags_matrix_batch, rhs_batch, diagonals_format='matrix') ] - with self.cached_session(use_gpu=True): + with self.cached_session(): results = self.evaluate(results) results_batch = self.evaluate(results_batch) @@ -114,7 +114,7 @@ class TridiagonalMulOpTest(test.TestCase): diags = constant_op.constant(diags, dtype=dtype) rhs = constant_op.constant(rhs, dtype=dtype) - with self.cached_session(use_gpu=True): + with self.cached_session(): grad_reference, _ = gradient_checker_v2.compute_gradient( reference_matmul, [diags, rhs]) grad_theoretical, grad_numerical = gradient_checker_v2.compute_gradient( @@ -155,7 +155,7 @@ class TridiagonalMulOpTest(test.TestCase): constant_op.constant(rhs, dtype=dtypes.complex128), diagonals_format='matrix') - with self.cached_session(use_gpu=True): + with self.cached_session(): result = self.evaluate(result) self.assertAllClose(result, expected_result) diff --git a/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py b/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py index 3045461ab4d..c278fedce1b 100644 --- a/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py +++ b/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py @@ -77,7 +77,7 @@ class TridiagonalSolveOpTest(test.TestCase): diags_format="compact", transpose_rhs=False, conjugate_rhs=False): - with self.cached_session(use_gpu=True): + with self.cached_session(): pivoting = True if hasattr(self, "pivoting"): pivoting = self.pivoting @@ -412,7 +412,7 @@ class TridiagonalSolveOpTest(test.TestCase): transpose_rhs=transpose_rhs, conjugate_rhs=conjugate_rhs) res = math_ops.reduce_sum(x * y) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: actual_grad_diags = sess.run( tape_diags.gradient(res, diags), feed_dict=feed_dict) actual_rhs_diags = sess.run( @@ -563,7 +563,7 @@ class TridiagonalSolveOpTest(test.TestCase): return x = linalg_impl.tridiagonal_solve( diags, rhs, diags_format, partial_pivoting=self.pivoting) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: result = sess.run(x, feed_dict={diags: diags_feed, rhs: rhs_feed}) self.assertAllClose(result, expected) @@ -648,7 +648,7 @@ class TridiagonalSolveOpTest(test.TestCase): rhs, diagonals_format="sequence", partial_pivoting=self.pivoting) - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: result = sess.run( x, feed_dict={ diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py index e04bf9798eb..63966955227 100644 --- a/tensorflow/python/kernel_tests/variables_test.py +++ b/tensorflow/python/kernel_tests/variables_test.py @@ -150,7 +150,7 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase): @test_util.run_deprecated_v1 def 
testResourceAssignments(self): - with self.session(use_gpu=True): + with self.session(): var = resource_variable_ops.ResourceVariable(0.0) plus_one = var.assign_add(1.0) minus_one = var.assign_sub(2.0) diff --git a/tensorflow/python/kernel_tests/where_op_test.py b/tensorflow/python/kernel_tests/where_op_test.py index c16d016f5e3..b54cb7e3b01 100644 --- a/tensorflow/python/kernel_tests/where_op_test.py +++ b/tensorflow/python/kernel_tests/where_op_test.py @@ -38,7 +38,7 @@ from tensorflow.python.platform import test class WhereOpTest(test.TestCase): def _testWhere(self, x, truth, expected_err_re=None, fn=array_ops.where): - with self.cached_session(use_gpu=True): + with self.cached_session(): ans = fn(x) self.assertTrue(ans.get_shape().is_compatible_with([None, x.ndim])) if expected_err_re is None: @@ -49,7 +49,7 @@ class WhereOpTest(test.TestCase): self.evaluate(ans) def _testWrongNumbers(self, fn=array_ops.where): - with self.session(use_gpu=True): + with self.session(): with self.assertRaises(ValueError): fn([False, True], [1, 2], None) with self.assertRaises(ValueError): @@ -103,7 +103,7 @@ class WhereOpTest(test.TestCase): def _testThreeArgument(self, fn=array_ops.where): x = np.array([[-2, 3, -1], [1, -3, -3]]) np_val = np.where(x > 0, x * x, -x) - with self.test_session(use_gpu=True): + with self.test_session(): tf_val = self.evaluate(fn(constant_op.constant(x) > 0, x * x, -x)) self.assertAllEqual(tf_val, np_val) @@ -223,7 +223,7 @@ class WhereOpTest(test.TestCase): x = np.zeros((7, 11)) y = np.ones((7, 11)) np_val = np.where(f < 0, x, y) - with self.test_session(use_gpu=True): + with self.test_session(): tf_val = self.evaluate( array_ops.where_v2(constant_op.constant(f) < 0, x, y)) self.assertAllEqual(tf_val, np_val) @@ -232,7 +232,7 @@ class WhereOpTest(test.TestCase): x = np.zeros((7, 11)) y = np.ones((7, 11)) np_val = np.where(True, x, y) - with self.test_session(use_gpu=True): + with self.test_session(): tf_val = self.evaluate( array_ops.where_v2( constant_op.constant(True, dtype=dtypes.bool), x, y)) @@ -242,7 +242,7 @@ class WhereOpTest(test.TestCase): x = np.zeros(7) y = np.ones(7) np_val = np.where([True], x, y) - with self.test_session(use_gpu=True): + with self.test_session(): tf_val = self.evaluate( array_ops.where_v2( constant_op.constant([True], dtype=dtypes.bool), x, y)) @@ -253,7 +253,7 @@ class WhereOpTest(test.TestCase): x = np.random.randn(3, 4) y = np.random.randn(3, 4) np_val = np.where(pred, x, y) - with self.test_session(use_gpu=True): + with self.test_session(): tf_val = self.evaluate(array_ops.where_v2(pred, x, y)) self.assertAllClose(tf_val, np_val) @@ -263,7 +263,7 @@ class WhereOpTest(test.TestCase): c_mat = np.array([[False] * 192, [True] * 192] * 8192) # [16384, 192] c_vec = np.array([False, True] * 8192) # [16384] np_val = np.where(c_mat, x * x, -x) - with self.session(use_gpu=True): + with self.session(): tf_val = array_ops.where(c_vec, x * x, -x).eval() self.assertAllEqual(tf_val, np_val) diff --git a/tensorflow/python/kernel_tests/xent_op_test.py b/tensorflow/python/kernel_tests/xent_op_test.py index 6e60a935e93..215820ea5da 100644 --- a/tensorflow/python/kernel_tests/xent_op_test.py +++ b/tensorflow/python/kernel_tests/xent_op_test.py @@ -319,7 +319,7 @@ class XentTest(test.TestCase): features = np.zeros([0, 2, 4]).astype(np.float32) labels = np.zeros([0, 2, 4]).astype(np.float32) np_loss, _ = self._npXent(features, labels) - with self.session(use_gpu=True) as sess: + with self.session() as sess: loss = nn_ops.softmax_cross_entropy_with_logits( 
labels=labels, logits=features) tf_loss = self.evaluate(loss) diff --git a/tensorflow/python/ops/batch_ops_test.py b/tensorflow/python/ops/batch_ops_test.py index fb8746e318f..e54e69a2366 100644 --- a/tensorflow/python/ops/batch_ops_test.py +++ b/tensorflow/python/ops/batch_ops_test.py @@ -56,7 +56,7 @@ class BatchOpsTest(test.TestCase): """Tests that a single batched tensor executes together and only once.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, _ = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=2, @@ -98,7 +98,7 @@ class BatchOpsTest(test.TestCase): """Test that batching with padding up to an allowed batch size works.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2]) batched, index, _ = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=10, @@ -130,7 +130,7 @@ class BatchOpsTest(test.TestCase): """Tests that multiple batched tensors execute together.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, _, _ = batch_ops.batch( @@ -171,7 +171,7 @@ class BatchOpsTest(test.TestCase): """Tests illegally feeding tensors with different dim0 sizes.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2]) batched, index, _ = batch_ops.batch( @@ -187,7 +187,7 @@ class BatchOpsTest(test.TestCase): """Tests that batch and unbatch work together.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, id_t = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=10, @@ -213,7 +213,7 @@ class BatchOpsTest(test.TestCase): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: # TODO(apassos): Removing this line causes test flakiness! Ideally should # be investigated. default_inp = array_ops.placeholder_with_default(2, shape=[]) # pylint: disable=unused-variable @@ -241,7 +241,7 @@ class BatchOpsTest(test.TestCase): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: captured_inp0 = array_ops.placeholder_with_default(2., shape=[]) captured_inp1 = resource_variable_ops.ResourceVariable(3.) 
with ops.device("/cpu:0"): @@ -270,7 +270,7 @@ class BatchOpsTest(test.TestCase): def testBatchDecoratedGpu(self): if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: @batch_ops.batch_function(1, 10, 100000) def computation(in_t): @@ -324,7 +324,7 @@ class BatchOpsTest(test.TestCase): """Tests that the batch_function op works.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: @function.Defun(dtypes.int32) def computation(in_t): @@ -355,7 +355,7 @@ class BatchOpsTest(test.TestCase): """Tests that batch_function op works with captured input.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: captured_inp0 = array_ops.placeholder_with_default(2, shape=[]) captured_inp1 = array_ops.placeholder_with_default(1, shape=[]) inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) @@ -391,7 +391,7 @@ class BatchOpsTest(test.TestCase): """Tests that batch_function op works with error in the inputs.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) @function.Defun(dtypes.int32, dtypes.int32) @@ -421,7 +421,7 @@ class BatchOpsTest(test.TestCase): if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: @function.Defun(dtypes.int32) def computation(in_t): @@ -475,7 +475,7 @@ class BatchOpsTest(test.TestCase): """Tests that the batch_function decorator works.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: @batch_ops.batch_function(1, 10, 100000) def computation(in_t): @@ -499,7 +499,7 @@ class BatchOpsTest(test.TestCase): """Tests that the unbatch timeout works.""" if context.executing_eagerly(): return - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1]) batched, index, id_t = batch_ops.batch( [inp], num_batch_threads=1, max_batch_size=2, diff --git a/tensorflow/python/ops/bitwise_ops_test.py b/tensorflow/python/ops/bitwise_ops_test.py index d154b6759bf..b716ca4a881 100644 --- a/tensorflow/python/ops/bitwise_ops_test.py +++ b/tensorflow/python/ops/bitwise_ops_test.py @@ -39,7 +39,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase): dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] - with self.session(use_gpu=True) as sess: + with self.session() as sess: for dtype in dtype_list: lhs = constant_op.constant([0, 5, 3, 14], dtype=dtype) rhs = constant_op.constant([5, 0, 7, 11], dtype=dtype) @@ -62,7 +62,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase): def count_bits(x): return sum(bin(z).count("1") for z in six.iterbytes(x.tobytes())) for dtype in dtype_list: - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: print("PopulationCount test: ", dtype) inputs = np.array(raw_inputs, dtype=dtype.as_numpy_dtype) truth = [count_bits(x) for x in inputs] @@ -76,7 +76,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase): dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] inputs = [0, 5, 3, 14] - with 
+    with self.session() as sess:
       for dtype in dtype_list:
         # Because of issues with negative numbers, let's test this indirectly.
         # 1. invert(a) and a = 0
@@ -101,7 +101,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase):
     dtype_list = [np.int8, np.int16, np.int32, np.int64,
                   np.uint8, np.uint16, np.uint32, np.uint64]

-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       for dtype in dtype_list:
         lhs = np.array([0, 5, 3, 14], dtype=dtype)
         rhs = np.array([5, 0, 7, 3], dtype=dtype)
@@ -115,7 +115,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase):
   def testShiftsWithNegativeLHS(self):
     dtype_list = [np.int8, np.int16, np.int32, np.int64]

-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       for dtype in dtype_list:
         lhs = np.array([-1, -5, -3, -14], dtype=dtype)
         rhs = np.array([5, 0, 7, 11], dtype=dtype)
@@ -129,7 +129,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase):
   def testImplementationDefinedShiftsDoNotCrash(self):
     dtype_list = [np.int8, np.int16, np.int32, np.int64]

-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       for dtype in dtype_list:
         lhs = np.array([-1, -5, -3, -14], dtype=dtype)
         rhs = np.array([-2, 64, 101, 32], dtype=dtype)
@@ -146,7 +146,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase):
     dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
                   dtypes.uint8, dtypes.uint16]

-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       for dtype in dtype_list:
         lhs = constant_op.constant([[0], [3], [5]], dtype=dtype)
         rhs = constant_op.constant([[1, 2, 4]], dtype=dtype)
diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py
index 5bd31aa8c73..0d49bb7fe55 100644
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
@@ -749,7 +749,7 @@ class HessianTest(test_util.TensorFlowTestCase):
     mat_value = rng.randn(m, m).astype("float32")
     x_value = rng.randn(m).astype("float32")
     hess_value = mat_value + mat_value.T
-    with self.session(use_gpu=True):
+    with self.session():
       mat = constant_op.constant(mat_value)
       x = constant_op.constant(x_value)
       x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :])
@@ -766,7 +766,7 @@ class HessianTest(test_util.TensorFlowTestCase):
     mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
     x_values = [rng.randn(m).astype("float32") for _ in range(n)]
     hess_values = [mat_value + mat_value.T for mat_value in mat_values]
-    with self.session(use_gpu=True):
+    with self.session():
       mats = [constant_op.constant(mat_value) for mat_value in mat_values]
       xs = [constant_op.constant(x_value) for x_value in x_values]
       xs_mats_xs = [
@@ -781,7 +781,7 @@ class HessianTest(test_util.TensorFlowTestCase):
   @test_util.run_v1_only("b/120545219")
   def testHessianInvalidDimension(self):
     for shape in [(10, 10), None]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         x = array_ops.placeholder(dtypes.float32, shape)
         # Expect a ValueError because the dimensions are wrong
         with self.assertRaises(ValueError):
@@ -795,7 +795,7 @@ class HessianTest(test_util.TensorFlowTestCase):
     m = 3
     rng = np.random.RandomState([1, 2, 3])
     x_value = rng.randn(m, m).astype("float32")
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant(x_value)
       x_square = math_ops.reduce_sum(
           math_ops.matmul(array_ops.transpose(x), x) * 0.5
@@ -815,7 +815,7 @@ class HessianTest(test_util.TensorFlowTestCase):
     n = 4
     rng = np.random.RandomState([1, 2, 3])
     x_value = rng.randn(m, n).astype("float32")
-    with self.session(use_gpu=True):
+    with self.session():
       x = constant_op.constant(x_value)
       x_square = math_ops.reduce_sum(
           math_ops.matmul(array_ops.transpose(x), x) * 0.5
diff --git a/tensorflow/python/ops/histogram_ops_test.py b/tensorflow/python/ops/histogram_ops_test.py
index 94217d931d8..da72e3be71b 100644
--- a/tensorflow/python/ops/histogram_ops_test.py
+++ b/tensorflow/python/ops/histogram_ops_test.py
@@ -109,7 +109,7 @@ class HistogramFixedWidthTest(test.TestCase):
     value_range = [0.0, 5.0]
     values = []
     expected_bin_counts = [0, 0, 0, 0, 0]
-    with self.session(use_gpu=True):
+    with self.session():
       hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
       self.assertEqual(dtypes.int32, hist.dtype)
       self.assertAllClose(expected_bin_counts, self.evaluate(hist))
@@ -120,7 +120,7 @@ class HistogramFixedWidthTest(test.TestCase):
     value_range = [0.0, 5.0]
     values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
     expected_bin_counts = [2, 1, 1, 0, 2]
-    with self.session(use_gpu=True):
+    with self.session():
       hist = histogram_ops.histogram_fixed_width(
           values, value_range, nbins=5, dtype=dtypes.int64)
       self.assertEqual(dtypes.int64, hist.dtype)
@@ -132,7 +132,7 @@ class HistogramFixedWidthTest(test.TestCase):
     value_range = np.float64([0.0, 5.0])
     values = np.float64([-1.0, 0.0, 1.5, 2.0, 5.0, 15])
     expected_bin_counts = [2, 1, 1, 0, 2]
-    with self.session(use_gpu=True):
+    with self.session():
       hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
       self.assertEqual(dtypes.int32, hist.dtype)
       self.assertAllClose(expected_bin_counts, self.evaluate(hist))
@@ -143,7 +143,7 @@ class HistogramFixedWidthTest(test.TestCase):
     value_range = [0.0, 5.0]
     values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]]
     expected_bin_counts = [2, 1, 1, 0, 2]
-    with self.session(use_gpu=True):
+    with self.session():
       hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
       self.assertEqual(dtypes.int32, hist.dtype)
       self.assertAllClose(expected_bin_counts, self.evaluate(hist))
@@ -154,7 +154,7 @@ class HistogramFixedWidthTest(test.TestCase):
     values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]]
     expected_bin_counts = [2, 1, 1, 0, 2]
     placeholder = array_ops.placeholder(dtypes.int32)
-    with self.session(use_gpu=True):
+    with self.session():
       hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
       self.assertAllEqual(hist.shape.as_list(), (5,))
       self.assertEqual(dtypes.int32, hist.dtype)
diff --git a/tensorflow/python/ops/image_grad_test_base.py b/tensorflow/python/ops/image_grad_test_base.py
index 92cdf5d9783..f328f7a7a58 100644
--- a/tensorflow/python/ops/image_grad_test_base.py
+++ b/tensorflow/python/ops/image_grad_test_base.py
@@ -50,7 +50,7 @@ class ResizeNearestNeighborOpTestBase(test.TestCase):
       input_tensor = constant_op.constant(x, shape=in_shape)
       resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                      out_shape[1:3])
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        self.assertEqual(out_shape, list(resize_out.get_shape()))
         resize_out = self.evaluate(resize_out)
       self.assertEqual(out_shape, list(resize_out.shape))
@@ -65,7 +65,7 @@ class ResizeNearestNeighborOpTestBase(test.TestCase):
     def resize_nn(t, shape=out_shape):
       return image_ops.resize_nearest_neighbor(t, shape[1:3])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       input_tensor = constant_op.constant(x, shape=in_shape)
       err = gradient_checker_v2.max_error(
           *gradient_checker_v2.compute_gradient(
@@ -82,7 +82,7 @@ class ResizeNearestNeighborOpTestBase(test.TestCase):
    def resize_nn(t, shape=out_shape):
       return image_ops.resize_nearest_neighbor(t, shape[1:3])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       input_tensor = constant_op.constant(x, shape=in_shape)
       err = gradient_checker_v2.max_error(
           *gradient_checker_v2.compute_gradient(
@@ -106,7 +106,7 @@ class ResizeNearestNeighborOpTestBase(test.TestCase):
       grad_cpu = gradient_checker_v2.compute_gradient(
           resize_nn, [input_tensor], delta=1 / 8)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       input_tensor = constant_op.constant(x, shape=in_shape)
       grad_gpu = gradient_checker_v2.compute_gradient(
           resize_nn, [input_tensor], delta=1 / 8)
@@ -444,7 +444,7 @@ class CropAndResizeOpTestBase(test.TestCase):
           constant_op.constant(boxes, shape=[num_boxes, 4]),
           constant_op.constant(box_ind, shape=[num_boxes]),
           constant_op.constant(crop_size, shape=[2]))
-      with self.session(use_gpu=True) as sess:
+      with self.session() as sess:
        self.assertEqual(crops_shape, list(crops.get_shape()))
         crops = self.evaluate(crops)
         self.assertEqual(crops_shape, list(crops.shape))
@@ -561,7 +561,7 @@ class RGBToHSVOpTestBase(test.TestCase):
       x = np.random.randint(0, high=255, size=[2, 20, 30, 3]).astype(nptype)
       rgb_input_tensor = constant_op.constant(x, shape=in_shape)
       hsv_out = gen_image_ops.rgb_to_hsv(rgb_input_tensor)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        self.assertEqual(out_shape, list(hsv_out.get_shape()))
         hsv_out = self.evaluate(hsv_out)
         self.assertEqual(out_shape, list(hsv_out.shape))
diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py
index 7b477aab796..df40608eb09 100644
--- a/tensorflow/python/ops/image_ops_test.py
+++ b/tensorflow/python/ops/image_ops_test.py
@@ -71,7 +71,7 @@ class RGBToHSVTest(test_util.TensorFlowTestCase):
       inp = np.random.rand(*shape).astype(nptype)

       # Convert to HSV and back, as a batch and individually
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
         batch0 = constant_op.constant(inp)
         batch1 = image_ops.rgb_to_hsv(batch0)
         batch2 = image_ops.hsv_to_rgb(batch1)
@@ -92,7 +92,7 @@ class RGBToHSVTest(test_util.TensorFlowTestCase):
     data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
     for nptype in [np.float32, np.float64]:
       rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         hsv = image_ops.rgb_to_hsv(rgb_np)
         rgb = image_ops.hsv_to_rgb(hsv)
         rgb_tf = self.evaluate(rgb)
@@ -113,7 +113,7 @@ class RGBToYIQTest(test_util.TensorFlowTestCase):
       inp = np.random.rand(*shape).astype(nptype)

       # Convert to YIQ and back, as a batch and individually
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
         batch0 = constant_op.constant(inp)
         batch1 = image_ops.rgb_to_yiq(batch0)
         batch2 = image_ops.yiq_to_rgb(batch1)
@@ -145,7 +145,7 @@ class RGBToYUVTest(test_util.TensorFlowTestCase):
       inp = np.random.rand(*shape).astype(nptype)

       # Convert to YUV and back, as a batch and individually
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
         batch0 = constant_op.constant(inp)
         batch1 = image_ops.rgb_to_yuv(batch0)
         batch2 = image_ops.yuv_to_rgb(batch1)
@@ -187,7 +187,7 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
   def _TestRGBToGrayscale(self, x_np):
     y_np = self._RGBToGrayscale(x_np)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.rgb_to_grayscale(x_tf)
       y_tf = self.evaluate(y)
@@ -209,7 +209,7 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
     y_np = np.array(
         [[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.grayscale_to_rgb(x_tf)
       y_tf = self.evaluate(y)
@@ -219,7 +219,7 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
     x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
     y_np = np.array([[1, 1, 1], [2, 2, 2]],
                     dtype=np.uint8).reshape([1, 2, 3])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.grayscale_to_rgb(x_tf)
       y_tf = self.evaluate(y)
@@ -233,7 +233,7 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
     # tests if an exception is raised if a three dimensional
     # input is used, i.e. the images have shape [batch size, height, width]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():

       # 3-D input with batch dimension.
       x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
@@ -246,7 +246,7 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
     # tests if an exception is raised if a two dimensional
     # input is used, i.e. the images have shape [height, width]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():

       # 1-D input without batch dimension.
       x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
@@ -263,23 +263,23 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
     # Shape inference works and produces expected output where possible
     rgb_shape = [7, None, 19, 3]
     gray_shape = rgb_shape[:-1] + [1]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
       gray = image_ops.rgb_to_grayscale(rgb_tf)
       self.assertEqual(gray_shape, gray.get_shape().as_list())

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
       rgb = image_ops.grayscale_to_rgb(gray_tf)
       self.assertEqual(rgb_shape, rgb.get_shape().as_list())

     # Shape inference does not break for unknown shapes
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
       gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
       self.assertFalse(gray_unknown.get_shape())

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
       rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
       self.assertFalse(rgb_unknown.get_shape())
@@ -424,7 +424,7 @@ class AdjustHueTest(test_util.TensorFlowTestCase):
     y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
     y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_shape)
       y = image_ops.adjust_hue(x, delta)
       y_tf = self.evaluate(y)
@@ -439,7 +439,7 @@ class AdjustHueTest(test_util.TensorFlowTestCase):
     y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
     y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_shape)
       y = image_ops.adjust_hue(x, delta)
       y_tf = self.evaluate(y)
@@ -454,7 +454,7 @@ class AdjustHueTest(test_util.TensorFlowTestCase):
     y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
     y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_shape)
       y = image_ops.adjust_hue(x, delta)
       y_tf = self.evaluate(y)
@@ -479,7 +479,7 @@ class AdjustHueTest(test_util.TensorFlowTestCase):
     return y_v.reshape(x_np.shape)

   def _adjustHueTf(self, x_np, delta_h):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np)
       y = image_ops.adjust_hue(x, delta_h)
       y_tf = self.evaluate(y)
@@ -910,7 +910,7 @@ class AdjustSaturationTest(test_util.TensorFlowTestCase):
     y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
     y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_shape)
       y = image_ops.adjust_saturation(x, saturation_factor)
       y_tf = self.evaluate(y)
@@ -925,7 +925,7 @@ class AdjustSaturationTest(test_util.TensorFlowTestCase):
     y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
     y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_shape)
       y = image_ops.adjust_saturation(x, saturation_factor)
       y_tf = self.evaluate(y)
@@ -940,7 +940,7 @@ class AdjustSaturationTest(test_util.TensorFlowTestCase):
     y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
     y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_shape)
       y = image_ops.adjust_saturation(x, saturation_factor)
       y_tf = self.evaluate(y)
@@ -979,7 +979,7 @@ class AdjustSaturationTest(test_util.TensorFlowTestCase):
         "gb_same",
         "rgb_same",
     ]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       for x_shape in x_shapes:
         for test_style in test_styles:
           x_np = np.random.rand(*x_shape) * 255.
@@ -1007,7 +1007,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,

   def testInvolutionLeftRight(self):
     x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
       y_tf = self.evaluate(y)
@@ -1017,7 +1017,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.array(
         [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
         dtype=np.uint8).reshape([2, 2, 3, 1])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
       y_tf = self.evaluate(y)
@@ -1027,7 +1027,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
     y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_left_right(x_tf)
       y_tf = self.evaluate(y)
@@ -1041,7 +1041,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
         [[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
         dtype=np.uint8).reshape([2, 2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_left_right(x_tf)
       y_tf = self.evaluate(y)
@@ -1054,7 +1054,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
     seed = 42

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.random_flip_left_right(x_tf, seed=seed)
       self.assertTrue(y.op.name.startswith("random_flip_left_right"))
@@ -1081,7 +1081,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
     y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       count_flipped = 0
       count_unflipped = 0
@@ -1216,7 +1216,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.vstack([x_np_raw for _ in range(batch_size)])
     y_np = np.vstack([y_np_raw for _ in range(batch_size)])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       count_flipped = 0
       count_unflipped = 0
@@ -1238,7 +1238,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,

   def testInvolutionUpDown(self):
     x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
       y_tf = self.evaluate(y)
@@ -1249,7 +1249,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
         [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
         dtype=np.uint8).reshape([2, 2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
       y_tf = self.evaluate(y)
@@ -1259,7 +1259,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
     y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_up_down(x_tf)
       y_tf = self.evaluate(y)
@@ -1273,7 +1273,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
         [[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
         dtype=np.uint8).reshape([2, 2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.flip_up_down(x_tf)
       y_tf = self.evaluate(y)
@@ -1286,7 +1286,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
     seed = 42

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.random_flip_up_down(x_tf, seed=seed)
       self.assertTrue(y.op.name.startswith("random_flip_up_down"))
@@ -1312,7 +1312,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
     y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       count_flipped = 0
       count_unflipped = 0
@@ -1344,7 +1344,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.vstack([x_np_raw for _ in range(batch_size)])
     y_np = np.vstack([y_np_raw for _ in range(batch_size)])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       count_flipped = 0
       count_unflipped = 0
@@ -1366,7 +1366,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,

   def testInvolutionTranspose(self):
     x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.transpose(image_ops.transpose(x_tf))
       y_tf = self.evaluate(y)
@@ -1377,7 +1377,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
         [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
         dtype=np.uint8).reshape([2, 2, 3, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.transpose(image_ops.transpose(x_tf))
       y_tf = self.evaluate(y)
@@ -1387,7 +1387,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
     x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
     y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.transpose(x_tf)
       y_tf = self.evaluate(y)
@@ -1402,7 +1402,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
         [[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
         dtype=np.uint8).reshape([2, 3, 2, 1])

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.transpose(x_tf)
       y_tf = self.evaluate(y)
@@ -1454,7 +1454,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,

   def testRot90GroupOrder(self):
     image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       rotated = image
       for _ in xrange(4):
         rotated = image_ops.rot90(rotated)
@@ -1462,7 +1462,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,

   def testRot90GroupOrderWithBatch(self):
     image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       rotated = image
       for _ in xrange(4):
         rotated = image_ops.rot90(rotated)
@@ -1470,7 +1470,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,

   def testRot90NumpyEquivalence(self):
     image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       for k in xrange(4):
         y_np = np.rot90(image, k=k)
         self.assertAllEqual(
@@ -1478,7 +1478,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,

   def testRot90NumpyEquivalenceWithBatch(self):
     image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       for k in xrange(4):
         y_np = np.rot90(image, k=k, axes=(1, 2))
         self.assertAllEqual(
@@ -1507,7 +1507,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
 class AdjustContrastTest(test_util.TensorFlowTestCase):

   def _testContrast(self, x_np, y_np, contrast_factor):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.adjust_contrast(x, contrast_factor)
       y_tf = self.evaluate(y)
@@ -1562,7 +1562,7 @@ class AdjustContrastTest(test_util.TensorFlowTestCase):
     return y_np

   def _adjustContrastTf(self, x_np, contrast_factor):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np)
       y = image_ops.adjust_contrast(x, contrast_factor)
       y_tf = self.evaluate(y)
@@ -1596,7 +1596,7 @@ class AdjustContrastTest(test_util.TensorFlowTestCase):
 class AdjustBrightnessTest(test_util.TensorFlowTestCase):

   def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_np.shape)
       y = image_ops.adjust_brightness(x, delta)
       y_tf = self.evaluate(y)
@@ -1668,7 +1668,7 @@ class PerImageWhiteningTest(test_util.TensorFlowTestCase,
     x_np = np.arange(0, np.prod(x_shape), dtype=data_type).reshape(x_shape)
     y_np = self._NumpyPerImageWhitening(x_np)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, dtype=data_type, shape=x_shape)
       y = image_ops.per_image_standardization(x)
       y_tf = self.evaluate(y)
@@ -1678,14 +1678,14 @@ class PerImageWhiteningTest(test_util.TensorFlowTestCase,
     im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
     im = constant_op.constant(im_np)
     whiten = image_ops.per_image_standardization(im)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       whiten_np = self.evaluate(whiten)
       self.assertFalse(np.any(np.isnan(whiten_np)))

   def testBatchWhitening(self):
     imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
     whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       imgs = constant_op.constant(imgs_np)
       whiten = image_ops.per_image_standardization(imgs)
       whiten_tf = self.evaluate(whiten)
@@ -1709,7 +1709,7 @@ class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
     y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
                                        target_height, target_width)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       return self.evaluate(y)

   def _assertReturns(self,
@@ -1910,7 +1910,7 @@ class CentralCropTest(test_util.TensorFlowTestCase):
                     dtype=np.int32).reshape(x_shape)
     y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
                      [[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       x = constant_op.constant(x_np, shape=x_shape)
       y = image_ops.central_crop(x, 0.5)
       y_tf = self.evaluate(y)
@@ -2022,7 +2022,7 @@ class PadToBoundingBoxTest(test_util.TensorFlowTestCase,
     def pad_bbox(*args):
       return image_ops.pad_to_bounding_box(*args)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       return self.evaluate(pad_bbox(x_tensor, offset_height, offset_width,
                                     target_height, target_width))

@@ -2079,7 +2079,7 @@ class PadToBoundingBoxTest(test_util.TensorFlowTestCase,

     i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
     y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(y, self.evaluate(y_tf))

   def testNoOp(self):
@@ -2259,7 +2259,7 @@ class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
     fraction_object_covered = []

     num_iter = 1000
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       image_tf = constant_op.constant(image, shape=image.shape)
       image_size_tf = constant_op.constant(
           image_size_np, shape=image_size_np.shape)
@@ -2386,7 +2386,7 @@ class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
   def testSampleDistortedBoundingBoxShape(self):
     # Shape function requires placeholders and a graph.
     with ops.Graph().as_default():
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         image_size = constant_op.constant(
             [40, 50, 1], shape=[3], dtype=dtypes.int32)
         bounding_box = constant_op.constant(
@@ -2424,7 +2424,7 @@ class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):

   def testDefaultMinObjectCovered(self):
     # By default min_object_covered=0.1 if not provided
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       image_size = constant_op.constant(
           [40, 50, 1], shape=[3], dtype=dtypes.int32)
       bounding_box = constant_op.constant(
@@ -2651,7 +2651,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
       img_np = np.array(data, dtype=nptype).reshape(img_shape)

       for method in self.METHODS:
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          image = constant_op.constant(img_np, shape=img_shape)
           y = image_ops.resize_images_v2(image, [target_height, target_width],
                                          method)
@@ -2662,7 +2662,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
         self.assertAllClose(resized, img_np, atol=1e-5)

       # Resizing with a single image must leave the shape unchanged also.
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         img_single = img_np.reshape(single_shape)
         image = constant_op.constant(img_single, shape=single_shape)
         y = image_ops.resize_images_v2(image, [target_height, target_width],
@@ -2688,7 +2688,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
     img_np = np.array(data, dtype=np.uint8).reshape(img_shape)

     for method in self.METHODS:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         image = constant_op.constant(img_np, shape=img_shape)
         y = resize_func(image, [6, 4], method)
         yshape = array_ops.shape(y)
@@ -2698,7 +2698,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
       self.assertAllClose(resized, img_np, atol=1e-5)

     # Resizing with a single image must leave the shape unchanged also.
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       img_single = img_np.reshape(single_shape)
       image = constant_op.constant(img_single, shape=single_shape)
       y = resize_func(image, [6, 4], self.METHODS[0])
@@ -2831,7 +2831,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):

       for method in self.METHODS:
         if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
-          with self.cached_session(use_gpu=True):
+          with self.cached_session():
             image = constant_op.constant(img_np, shape=img_shape)
             y = image_ops.resize_images_v2(
                 image, [target_height, target_width], method)
@@ -2888,7 +2888,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
     ]
     for nptype in self.TYPES:
       for method in expected_data:
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          img_np = np.array(data, dtype=nptype).reshape(img_shape)
           image = constant_op.constant(img_np, shape=img_shape)
           y = image_ops.resize_images_v2(image, [target_height, target_width],
@@ -2908,7 +2908,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
     methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
                        (gen_image_ops.resize_bicubic, "keyscubic"))
     for legacy_method, new_method in methods_to_test:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         img_np = np.array(data, dtype=np.float32).reshape(img_shape)
         image = constant_op.constant(img_np, shape=img_shape)
         legacy_result = legacy_method(
@@ -2945,7 +2945,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
         73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
     ]

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       image = constant_op.constant(img_np, shape=img_shape)
       y = image_ops.resize_images_v2(image, [target_height, target_width],
                                      image_ops.ResizeMethod.AREA)
@@ -2963,7 +2963,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
     for nptype in [np.float32, np.float64]:
       img_np = np.arange(
           0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         image = constant_op.constant(img_np, shape=input_shape)
         new_size = constant_op.constant([target_height, target_width])
         out_op = image_ops.resize_images_v2(
@@ -3039,7 +3039,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testNameScope(self):
     # Testing name scope requires placeholders and a graph.
     with ops.Graph().as_default():
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
         y = image_ops.resize_images(single_image, [55, 66])
         self.assertTrue(y.op.name.startswith("resize"))
@@ -3060,7 +3060,7 @@ class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
           t,
           ops.convert_to_tensor(target_max),
           preserve_aspect_ratio=preserve_aspect_ratio)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       return self.evaluate(resize_func(x_tensor))

   def _assertResizeEqual(self,
@@ -3199,7 +3199,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
       img_np = np.array(data, dtype=nptype).reshape(img_shape)

       for method in self.METHODS:
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          image = constant_op.constant(img_np, shape=img_shape)
           y = image_ops.resize_images(image, [target_height, target_width],
                                       method)
@@ -3209,7 +3209,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
         self.assertAllClose(resized, img_np, atol=1e-5)

       # Resizing with a single image must leave the shape unchanged also.
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         img_single = img_np.reshape(single_shape)
         image = constant_op.constant(img_single, shape=single_shape)
         y = image_ops.resize_images(image, [target_height, target_width],
@@ -3234,7 +3234,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
     img_np = np.array(data, dtype=np.uint8).reshape(img_shape)

     for method in self.METHODS:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         image = constant_op.constant(img_np, shape=img_shape)
         y = resize_func(image, [6, 4], method)
         yshape = array_ops.shape(y)
@@ -3243,7 +3243,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
       self.assertAllClose(resized, img_np, atol=1e-5)

     # Resizing with a single image must leave the shape unchanged also.
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       img_single = img_np.reshape(single_shape)
       image = constant_op.constant(img_single, shape=single_shape)
       y = resize_func(image, [6, 4], self.METHODS[0])
@@ -3374,7 +3374,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,

       for method in self.METHODS:
         if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
-          with self.cached_session(use_gpu=True):
+          with self.cached_session():
             image = constant_op.constant(img_np, shape=img_shape)
             y = image_ops.resize_images(image, [target_height, target_width],
                                         method)
@@ -3411,7 +3411,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
         image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
         image_ops.ResizeMethodV1.AREA
     ]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         img_np = np.array(data, dtype=nptype).reshape(img_shape)
         image = constant_op.constant(img_np, shape=img_shape)
         y = image_ops.resize_images(
@@ -3448,7 +3448,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
         image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
         image_ops.ResizeMethodV1.AREA
     ]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         img_np = np.array(data, dtype=nptype).reshape(img_shape)
         image = constant_op.constant(img_np, shape=img_shape)
         y = image_ops.resize_images(
@@ -3476,7 +3476,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
         75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
     ]

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       image = constant_op.constant(img_np, shape=img_shape)
       y = image_ops.resize_images(image, [target_height, target_width],
                                   image_ops.ResizeMethodV1.BICUBIC)
@@ -3499,7 +3499,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
         73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
     ]

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       image = constant_op.constant(img_np, shape=img_shape)
       y = image_ops.resize_images(image, [target_height, target_width],
                                   image_ops.ResizeMethodV1.AREA)
@@ -3518,7 +3518,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
      for align_corners in [True, False]:
        img_np = np.arange(
            0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
         image = constant_op.constant(img_np, shape=input_shape)
          new_size = constant_op.constant([target_height, target_width])
          out_op = image_ops.resize_images(
@@ -3586,7 +3586,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
     # Testing name scope requires placeholders and a graph.
     with ops.Graph().as_default():
       img_shape = [1, 3, 2, 1]
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
         y = image_ops.resize_images(single_image, [55, 66])
         self.assertTrue(y.op.name.startswith("resize"))
@@ -3603,7 +3603,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
       y = image_ops.resize_images(
           x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       return self.evaluate(y)

   def _assertResizeEqual(self, x, x_shape, y, y_shape,
@@ -3687,7 +3687,7 @@ class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
     else:
       x_tensor = x

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       return self.evaluate(
           image_ops.resize_image_with_pad_v1(x_tensor, target_height,
                                              target_width))
@@ -3807,7 +3807,7 @@ class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
     else:
       x_tensor = x

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       return self.evaluate(
           image_ops.resize_image_with_pad_v2(x_tensor, target_height,
                                              target_width))
@@ -3929,7 +3929,7 @@ class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
     def resize_crop_or_pad(*args):
       return image_ops.resize_image_with_crop_or_pad(*args)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       return self.evaluate(
           resize_crop_or_pad(x_tensor, target_height, target_width))

@@ -4176,7 +4176,7 @@ class JpegTest(test_util.TensorFlowTestCase):
     # Read a real jpeg and verify shape
     path = ("tensorflow/core/lib/jpeg/testdata/"
             "jpeg_merge_test1.jpg")
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       jpeg0 = io_ops.read_file(path)
       image0 = image_ops.decode_jpeg(jpeg0)
       image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
@@ -4192,7 +4192,7 @@ class JpegTest(test_util.TensorFlowTestCase):
     cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
     shape = 256, 128, 3
     for channels in 3, 0:
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
         rgb = image_ops.decode_jpeg(
             io_ops.read_file(rgb_path), channels=channels)
         cmyk = image_ops.decode_jpeg(
@@ -4248,7 +4248,7 @@ class JpegTest(test_util.TensorFlowTestCase):
       self.evaluate(result)

   def testSynthetic(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Encode it, then decode it, then encode it
       image0 = constant_op.constant(simple_color_ramp())
       jpeg0 = image_ops.encode_jpeg(image0)
@@ -4269,7 +4269,7 @@ class JpegTest(test_util.TensorFlowTestCase):
     self.assertLessEqual(len(jpeg0), 6000)

   def testSyntheticFasterAlgorithm(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Encode it, then decode it, then encode it
       image0 = constant_op.constant(simple_color_ramp())
       jpeg0 = image_ops.encode_jpeg(image0)
@@ -4293,7 +4293,7 @@ class JpegTest(test_util.TensorFlowTestCase):
     self.assertLessEqual(len(jpeg0), 6000)

   def testDefaultDCTMethodIsIntegerFast(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Compare decoding with both dct_option=INTEGER_FAST and
       # default.  They should be the same.
       image0 = constant_op.constant(simple_color_ramp())
@@ -4308,7 +4308,7 @@ class JpegTest(test_util.TensorFlowTestCase):

   def testShape(self):
     # Shape function requires placeholders and a graph.
     with ops.Graph().as_default():
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
         jpeg = constant_op.constant("nonsense")
         for channels in 0, 1, 3:
           image = image_ops.decode_jpeg(jpeg, channels=channels)
@@ -4319,7 +4319,7 @@ class JpegTest(test_util.TensorFlowTestCase):
     # Read a real jpeg and verify shape.
     path = ("tensorflow/core/lib/jpeg/testdata/"
             "jpeg_merge_test1.jpg")
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       jpeg = io_ops.read_file(path)
       # Extract shape without decoding.
       image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
@@ -4329,7 +4329,7 @@ class JpegTest(test_util.TensorFlowTestCase):
     # Read a cmyk jpeg image, and verify its shape.
     path = ("tensorflow/core/lib/jpeg/testdata/"
             "jpeg_merge_test1_cmyk.jpg")
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       jpeg = io_ops.read_file(path)
       image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
       # Cmyk jpeg image has 4 channels.
@@ -4346,7 +4346,7 @@ class JpegTest(test_util.TensorFlowTestCase):
     jpeg = io_ops.read_file(path)
     image = image_ops.decode_jpeg(jpeg)
     random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Test randomization.
       random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
       are_images_equal = []
@@ -4398,11 +4398,11 @@ class JpegTest(test_util.TensorFlowTestCase):
     image = image_ops.decode_jpeg(jpeg)
     adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
         image, jpeg_quality)
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       sess.run(adjust_jpeg_quality_image)

   def testAdjustJpegQualityShape(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       image = constant_op.constant(
           np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))
       adjusted_image = image_ops.adjust_jpeg_quality(image, 80)
@@ -4418,7 +4418,7 @@ class PngTest(test_util.TensorFlowTestCase):
               (3, "lena_palette.png"), (4, "lena_palette_trns.png"))
     for channels_in, filename in inputs:
       for channels in 0, 1, 3, 4:
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          png0 = io_ops.read_file(prefix + filename)
           image0 = image_ops.decode_png(png0, channels=channels)
           png0, image0 = self.evaluate([png0, image0])
@@ -4428,7 +4428,7 @@ class PngTest(test_util.TensorFlowTestCase):
             self.assertAllEqual(image0, self.evaluate(image1))

   def testSynthetic(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Encode it, then decode it
       image0 = constant_op.constant(simple_color_ramp())
       png0 = image_ops.encode_png(image0, compression=7)
@@ -4443,7 +4443,7 @@ class PngTest(test_util.TensorFlowTestCase):
     self.assertLessEqual(len(png0), 750)

   def testSyntheticUint16(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Encode it, then decode it
       image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)
       png0 = image_ops.encode_png(image0, compression=7)
@@ -4458,7 +4458,7 @@ class PngTest(test_util.TensorFlowTestCase):
     self.assertLessEqual(len(png0), 1500)

   def testSyntheticTwoChannel(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Strip the b channel from an rgb image to get a two-channel image.
       gray_alpha = simple_color_ramp()[:, :, 0:2]
       image0 = constant_op.constant(gray_alpha)
@@ -4469,7 +4469,7 @@ class PngTest(test_util.TensorFlowTestCase):
       self.assertAllEqual(image0, image1)

   def testSyntheticTwoChannelUint16(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       # Strip the b channel from an rgb image to get a two-channel image.
       gray_alpha = simple_color_ramp()[:, :, 0:2]
       image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
@@ -4482,7 +4482,7 @@ class PngTest(test_util.TensorFlowTestCase):
   def testShape(self):
     # Shape function requires placeholders and a graph.
     with ops.Graph().as_default():
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         png = constant_op.constant("nonsense")
         for channels in 0, 1, 3:
           image = image_ops.decode_png(png, channels=channels)
@@ -4500,7 +4500,7 @@ class GifTest(test_util.TensorFlowTestCase):
     STRIDE = 5

     shape = (12, HEIGHT, WIDTH, 3)
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       gif0 = io_ops.read_file(prefix + filename)
       image0 = image_ops.decode_gif(gif0)
       gif0, image0 = self.evaluate([gif0, image0])
@@ -4528,14 +4528,14 @@ class GifTest(test_util.TensorFlowTestCase):
   def testShape(self):
     # Shape function requires placeholders and a graph.
     with ops.Graph().as_default():
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
         gif = constant_op.constant("nonsense")
         image = image_ops.decode_gif(gif)
         self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])

   def testAnimatedGif(self):
     # Test if all frames in the animated GIF file is properly decoded.
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       base = "tensorflow/core/lib/gif/testdata"
       gif = io_ops.read_file(os.path.join(base, "pendulum_sm.gif"))
       gt_frame0 = io_ops.read_file(os.path.join(base, "pendulum_sm_frame0.png"))
@@ -4560,7 +4560,7 @@ class ConvertImageTest(test_util.TensorFlowTestCase):
     x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
     y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       image = constant_op.constant(x_np)
       y = image_ops.convert_image_dtype(image, output_dtype)
       self.assertTrue(y.dtype == output_dtype)
@@ -4577,7 +4577,7 @@ class ConvertImageTest(test_util.TensorFlowTestCase):
     # Tests with Tensor.op requires a graph.
     with ops.Graph().as_default():
       # Make sure converting to the same data type creates only an identity op
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         image = constant_op.constant([1], dtype=dtypes.uint8)
         image_ops.convert_image_dtype(image, dtypes.uint8)
         y = image_ops.convert_image_dtype(image, dtypes.uint8)
@@ -4586,7 +4586,7 @@ class ConvertImageTest(test_util.TensorFlowTestCase):

   def testConvertBetweenInteger(self):
     # Make sure converting to between integer types scales appropriately
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
       self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
       self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
@@ -4594,7 +4594,7 @@ class ConvertImageTest(test_util.TensorFlowTestCase):

   def testConvertBetweenFloat(self):
     # Make sure converting to between float types does nothing interesting
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
                     [-1.0, 0, 1.0, 200000])
       self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
@@ -4602,14 +4602,14 @@ class ConvertImageTest(test_util.TensorFlowTestCase):

   def testConvertBetweenIntegerAndFloat(self):
     # Make sure converting from and to a float type scales appropriately
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
                     [0, 1.0 / 255.0, 1])
       self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
                     [0, 1, 255])

   def testConvertBetweenInt16AndInt8(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # uint8, uint16
       self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
       self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
@@ -4640,7 +4640,7 @@ class TotalVariationTest(test_util.TensorFlowTestCase):
     """

     # Create a TensorFlow session.
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       # Add a constant to the TensorFlow graph that holds the input.
       x_tf = constant_op.constant(x_np, shape=x_np.shape)
@@ -5256,7 +5256,7 @@ class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
     img = array_ops.placeholder(dtype=dtypes.float32)
     img_np = np.array((2, 2))

-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       _, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
       with self.assertRaises(errors.InvalidArgumentError):
         sess.run(checks, {img: img_np})
@@ -5270,7 +5270,7 @@ class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
     img1_np = np.array([1, 2, 2, 1])
     img2_np = np.array([1, 3, 3, 1])

-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       _, _, checks = image_ops_impl._verify_compatible_image_shapes(
           img1, img2)
       with self.assertRaises(errors.InvalidArgumentError):
@@ -5289,7 +5289,7 @@ class PSNRTest(test_util.TensorFlowTestCase):
     return np.expand_dims(im, axis=0)

   def _LoadTestImages(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       q20 = self._LoadTestImage(sess, "cat_q20.jpg")
       q72 = self._LoadTestImage(sess, "cat_q72.jpg")
       q95 = self._LoadTestImage(sess, "cat_q95.jpg")
@@ -5309,7 +5309,7 @@ class PSNRTest(test_util.TensorFlowTestCase):
     image2 = self._RandomImage((8, 8, 1), 1)
     psnr = self._PSNR_NumPy(image1, image2, 1)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       tf_image1 = constant_op.constant(image1, shape=image1.shape,
                                        dtype=dtypes.float32)
       tf_image2 = constant_op.constant(image2, shape=image2.shape,
@@ -5322,7 +5322,7 @@ class PSNRTest(test_util.TensorFlowTestCase):
     image2 = self._RandomImage((10, 8, 8, 1), 1)
     psnr = self._PSNR_NumPy(image1, image2, 1)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       tf_image1 = constant_op.constant(image1, shape=image1.shape,
                                        dtype=dtypes.float32)
       tf_image2 = constant_op.constant(image2, shape=image2.shape,
@@ -5343,7 +5343,7 @@ class PSNRTest(test_util.TensorFlowTestCase):
     self.assertNear(35.302, psnr3, 0.001)

     # Test TensorFlow implementation.
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
       tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
       tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
@@ -5357,7 +5357,7 @@ class PSNRTest(test_util.TensorFlowTestCase):
   def testInfinity(self):
     q20, _, _ = self._LoadTestImages()
     psnr = self._PSNR_NumPy(q20, q20, 1)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
       tf_psnr = self.evaluate(image_ops.psnr(tf_q20, tf_q20, 1, "psnr"))
       self.assertAllClose(psnr, tf_psnr, atol=0.001)
@@ -5371,7 +5371,7 @@ class PSNRTest(test_util.TensorFlowTestCase):
     img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
     img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
     psnr_float32 = image_ops.psnr(img1, img2, 1.0)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(
           self.evaluate(psnr_uint8), self.evaluate(psnr_float32), atol=0.001)
@@ -5396,7 +5396,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
     return np.expand_dims(im, axis=0)

   def _LoadTestImages(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       return [self._LoadTestImage(sess, f) for f in self._filenames]

   def _RandomImage(self, shape, max_val):
@@ -5412,7 +5412,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
       return image_ops.ssim(
           *x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       scores = [
           self.evaluate(ssim_func(t))
           for t in itertools.combinations_with_replacement(img, 2)
@@ -5436,7 +5436,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
         filter_sigma=1.5,
         k1=0.01,
         k2=0.03)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)

   def testBatchNumpyInputs(self):
@@ -5447,7 +5447,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
     img1 = np.concatenate(img1)
     img2 = np.concatenate(img2)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       img1 = self.evaluate(constant_op.constant(img1))
       img2 = self.evaluate(constant_op.constant(img2))

@@ -5459,7 +5459,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
         filter_sigma=1.5,
         k1=0.01,
         k2=0.03)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)

   def testBroadcast(self):
@@ -5472,7 +5472,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
     ssim = image_ops.ssim(
         img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)

   def testNegative(self):
@@ -5492,7 +5492,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
         filter_sigma=1.5,
         k1=0.01,
         k2=0.03)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertLess(self.evaluate(ssim), 0)

   def testInt(self):
@@ -5506,7 +5506,7 @@ class SSIMTest(test_util.TensorFlowTestCase):
     img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
     ssim_float32 = image_ops.ssim(
         img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(
           self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
@@ -5531,7 +5531,7 @@ class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
    return np.expand_dims(im, axis=0)

   def _LoadTestImages(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       return [self._LoadTestImage(sess, f) for f in self._filenames]

   def _RandomImage(self, shape, max_val):
@@ -5550,7 +5550,7 @@ class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
       return image_ops.ssim_multiscale(
           *x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       scores = [
           self.evaluate(ssim_func(t))
           for t in itertools.combinations_with_replacement(img, 2)
@@ -5627,7 +5627,7 @@ class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
         filter_sigma=1.5,
         k1=0.01,
         k2=0.03)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(expected, self.evaluate(msssim), 1e-4)

   def testBroadcast(self):
@@ -5641,7 +5641,7 @@ class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
     score_tensor = image_ops.ssim_multiscale(
         img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)

   def testRange(self):
@@ -5651,7 +5651,7 @@ class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
     If any of the value is negative so that the geometric mean is not
    well-defined, then treat the MS-SSIM score as zero.
     """
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
       img1 = self._LoadTestImage(sess, "checkerboard1.png")
       img2 = self._LoadTestImage(sess, "checkerboard3.png")
       images = [img1, img2, np.zeros_like(img1),
@@ -5680,7 +5680,7 @@ class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
     img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
     ssim_float32 = image_ops.ssim_multiscale(
         img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       self.assertAllClose(
           self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
@@ -5688,7 +5688,7 @@ class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
     """Test case for GitHub issue 28241."""
     image = np.random.random([512, 512, 1])
     score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       _ = self.evaluate(score_tensor)

@@ -5728,7 +5728,7 @@ class ImageGradientsTest(test_util.TensorFlowTestCase):
     batch = constant_op.constant(batch)
     assert batch.get_shape().as_list() == [2, 2, 3, 2]
     dy, dx = image_ops.image_gradients(batch)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       actual_dy = self.evaluate(dy)
       actual_dx = self.evaluate(dx)
       self.assertAllClose(expected_dy, actual_dy)
@@ -5749,7 +5749,7 @@ class SobelEdgesTest(test_util.TensorFlowTestCase):
     expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
                           [[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
     sobel = image_ops.sobel_edges(img)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       actual_sobel = self.evaluate(sobel)
       self.assertAllClose(expected, actual_sobel)

@@ -5771,7 +5771,7 @@ class SobelEdgesTest(test_util.TensorFlowTestCase):
     expected_batch = np.concatenate([expected_two_channel] * batch_size,
                                     axis=0)
     sobel = image_ops.sobel_edges(img)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       actual_sobel = self.evaluate(sobel)
       self.assertAllClose(expected_batch, actual_sobel)

@@ -5842,7 +5842,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testJpegUint16(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          base = "tensorflow/core/lib/jpeg/testdata"
           jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
           image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
@@ -5854,7 +5854,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testPngUint16(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          base = "tensorflow/core/lib/png/testdata"
           png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
           image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
@@ -5873,7 +5873,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testGifUint16(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          base = "tensorflow/core/lib/gif/testdata"
           gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
           image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
@@ -5885,7 +5885,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testBmpUint16(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          base = "tensorflow/core/lib/bmp/testdata"
           bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
           image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
@@ -5897,7 +5897,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testJpegFloat32(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          base = "tensorflow/core/lib/jpeg/testdata"
           jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
           image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
@@ -5909,7 +5909,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testPngFloat32(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          base = "tensorflow/core/lib/png/testdata"
           png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
           image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
@@ -5921,7 +5921,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testGifFloat32(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
          base = "tensorflow/core/lib/gif/testdata"
           gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
           image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
@@ -5933,7 +5933,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testBmpFloat32(self):
     for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
       with compat.forward_compatibility_horizon(*horizon):
compat.forward_compatibility_horizon(*horizon): - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: base = "tensorflow/core/lib/bmp/testdata" bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp")) image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32) @@ -5945,7 +5945,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testExpandAnimations(self): for horizon in self._FORWARD_COMPATIBILITY_HORIZONS: with compat.forward_compatibility_horizon(*horizon): - with self.cached_session(use_gpu=True) as sess: + with self.cached_session() as sess: base = "tensorflow/core/lib/gif/testdata" gif0 = io_ops.read_file(os.path.join(base, "scan.gif")) diff --git a/tensorflow/python/ops/math_grad_test.py b/tensorflow/python/ops/math_grad_test.py index bbd30ef5537..773084ccdc8 100644 --- a/tensorflow/python/ops/math_grad_test.py +++ b/tensorflow/python/ops/math_grad_test.py @@ -46,7 +46,7 @@ class SquaredDifferenceOpTest(test.TestCase): l = np.random.randn(*left_shape) r = np.random.randn(*right_shape) - with self.cached_session(use_gpu=True): + with self.cached_session(): left_tensor = constant_op.constant(l, shape=left_shape) right_tensor = constant_op.constant(r, shape=right_shape) output = math_ops.squared_difference(left_tensor, right_tensor) @@ -83,7 +83,7 @@ class AbsOpTest(test.TestCase): self._biasedRandN( shape, bias=bias), dtype=dtype) - with self.cached_session(use_gpu=True): + with self.cached_session(): output = math_ops.abs(value) error = gradient_checker.compute_gradient_error( value, shape, output, output.get_shape().as_list()) diff --git a/tensorflow/python/ops/nccl_ops_test.py b/tensorflow/python/ops/nccl_ops_test.py index 5b3e3e68921..b3d88c921e1 100644 --- a/tensorflow/python/ops/nccl_ops_test.py +++ b/tensorflow/python/ops/nccl_ops_test.py @@ -76,7 +76,7 @@ class NcclTestCase(test.TestCase): for dtype in [np.float16, np.float32, np.int32, np.int64, np.float64]: # Create session inside outer loop to test use of # same communicator across multiple sessions. - with self.test_session(use_gpu=True) as sess: + with self.test_session() as sess: for devices in device_sets: shape = (3, 4) diff --git a/tensorflow/python/ops/special_math_ops_test.py b/tensorflow/python/ops/special_math_ops_test.py index ba184b222ca..6caeb6b2a39 100644 --- a/tensorflow/python/ops/special_math_ops_test.py +++ b/tensorflow/python/ops/special_math_ops_test.py @@ -48,7 +48,7 @@ class LBetaTest(test.TestCase): # Should evaluate to 1 and 1/2. x_one = [1, 1.] x_one_half = [2, 1.] - with self.session(use_gpu=True): + with self.session(): self.assertAllClose( 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one)))) self.assertAllClose( @@ -60,7 +60,7 @@ class LBetaTest(test.TestCase): # Should evaluate to 1 and 1/2. x_one = [1, 1.] x_one_half = [2, 1.] 
- with self.session(use_gpu=True): + with self.session(): ph = array_ops.placeholder(dtypes.float32) beta_ph = math_ops.exp(special_math_ops.lbeta(ph)) self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one})) @@ -76,7 +76,7 @@ class LBetaTest(test.TestCase): # = Gamma(1) * Gamma(1) * Gamma(1) * Gamma(1) / Gamma(1 + 1 + 1 + 1) # = 1 / 6 expected_beta_x = 1 / 6 * np.ones((3, 2, 3)) - with self.session(use_gpu=True): + with self.session(): x_ph = array_ops.placeholder(dtypes.float32, [3, 2, 3, None]) beta_ph = math_ops.exp(special_math_ops.lbeta(x_ph)) self.assertAllClose(expected_beta_x, @@ -86,7 +86,7 @@ class LBetaTest(test.TestCase): def test_two_dimensional_arg(self): # Should evaluate to 1/2. x_one_half = [[2, 1.], [2, 1.]] - with self.session(use_gpu=True): + with self.session(): self.assertAllClose( [0.5, 0.5], self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half)))) @@ -96,7 +96,7 @@ class LBetaTest(test.TestCase): def test_two_dimensional_arg_dynamic(self): # Should evaluate to 1/2. x_one_half = [[2, 1.], [2, 1.]] - with self.session(use_gpu=True): + with self.session(): ph = array_ops.placeholder(dtypes.float32) beta_ph = math_ops.exp(special_math_ops.lbeta(ph)) self.assertAllClose([0.5, 0.5], @@ -106,7 +106,7 @@ class LBetaTest(test.TestCase): def test_two_dimensional_proper_shape(self): # Should evaluate to 1/2. x_one_half = [[2, 1.], [2, 1.]] - with self.session(use_gpu=True): + with self.session(): self.assertAllClose( [0.5, 0.5], self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half)))) @@ -119,7 +119,7 @@ class LBetaTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_complicated_shape(self): - with self.session(use_gpu=True): + with self.session(): x = ops.convert_to_tensor(np.random.rand(3, 2, 2)) self.assertAllEqual( (3, 2), self.evaluate(array_ops.shape(special_math_ops.lbeta(x)))) @@ -133,7 +133,7 @@ class LBetaTest(test.TestCase): # as the answer, always. 
x_a = [5.5] x_b = [0.1] - with self.session(use_gpu=True): + with self.session(): self.assertAllClose( 1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_a))), @@ -144,7 +144,7 @@ class LBetaTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_empty_rank1_returns_negative_infinity(self): - with self.session(use_gpu=True): + with self.session(): x = constant_op.constant([], shape=[0]) lbeta_x = special_math_ops.lbeta(x) expected_result = constant_op.constant(-np.inf, shape=()) @@ -155,7 +155,7 @@ class LBetaTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self): - with self.session(use_gpu=True): + with self.session(): event_size = 0 for batch_size in [0, 1, 2]: x = constant_op.constant([], shape=[batch_size, event_size]) @@ -168,7 +168,7 @@ class LBetaTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_empty_rank2_with_zero_batch_dim_returns_empty(self): - with self.session(use_gpu=True): + with self.session(): batch_size = 0 for event_size in [0, 1, 2]: x = constant_op.constant([], shape=[batch_size, event_size]) diff --git a/tensorflow/python/ops/v1_compat_tests/gradient_checker_test.py b/tensorflow/python/ops/v1_compat_tests/gradient_checker_test.py index 7ecad0a2a8e..607af4712cd 100644 --- a/tensorflow/python/ops/v1_compat_tests/gradient_checker_test.py +++ b/tensorflow/python/ops/v1_compat_tests/gradient_checker_test.py @@ -65,7 +65,7 @@ class GradientCheckerTest(test.TestCase): @test_util.run_deprecated_v1 def testAddSimpleGPU(self): np.random.seed(2) # Fix seed to avoid flakiness - with self.session(use_gpu=True): + with self.session(): # a test case for Add operation size = (2, 3) x1 = constant_op.constant(2.0, shape=size, name="x1") @@ -225,7 +225,7 @@ class MiniMNISTTest(test.TestCase): s = label_data.sum(axis=1) label_data /= s[:, None] - with self.session(use_gpu=True): + with self.session(): # We treat the inputs as "parameters" here inp = constant_op.constant( inp_data.tolist(),