Remove redundant use_gpu=True params
use_gpu is True by default in test utils starting with CL 356906251. I will wait a bit before checking this in, since once this is checked in it would be harder to roll back CL 356906251.

PiperOrigin-RevId: 357322055
Change-Id: Ibbeb900d93f9fb43c2dc61285ee38e582b29dcfc
This commit is contained in:
parent 8a1c8335ed
commit 5bfc37ef25
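The change applied throughout the diff below is mechanical: drop the now-redundant `use_gpu=True` argument from `self.session()`, `self.cached_session()` and `test_session()` calls. A minimal sketch of the before/after pattern, assuming a hypothetical operator test built on `tf.test.TestCase` (the session helpers are the standard test-util ones; the test class, names and assertions are illustrative only):

```python
# Illustrative sketch only; MyOperatorTest and its assertions are made up.
import tensorflow as tf


class MyOperatorTest(tf.test.TestCase):

  def testOldStyle(self):
    # Before CL 356906251, GPU placement had to be requested explicitly.
    with self.session(use_gpu=True):
      self.assertAllEqual(tf.constant([1, 2]) + 1, [2, 3])

  def testNewStyle(self):
    # With use_gpu defaulting to True, the argument is redundant.
    with self.session():
      self.assertAllEqual(tf.constant([1, 2]) + 1, [2, 3])


if __name__ == "__main__":
  tf.test.main()
```

Per the commit message, behavior is unchanged: tests still see a GPU when one is available, exactly as they did with the explicit argument.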
@@ -462,7 +462,7 @@ class FunctionTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testWhileLoopCallsFunc(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:

       @function.Defun(dtypes.float32)
       def Times2(x):
@@ -2289,7 +2289,7 @@ class TensorFlowTestCase(googletest.TestCase):
     ``` python
     class MyOperatorTest(test_util.TensorFlowTestCase):
       def testMyOperator(self):
-        with self.session(use_gpu=True):
+        with self.session():
           valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
           result = MyOperator(valid_input).eval()
           self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
@@ -2339,7 +2339,7 @@ class TensorFlowTestCase(googletest.TestCase):
     ```python
     class MyOperatorTest(test_util.TensorFlowTestCase):
       def testMyOperator(self):
-        with self.cached_session(use_gpu=True) as sess:
+        with self.cached_session() as sess:
           valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
           result = MyOperator(valid_input).eval()
           self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
@@ -420,7 +420,7 @@ def run_all_keras_modes(test_or_class=None,

 def _v1_session_test(f, test_or_class, config, *args, **kwargs):
   with ops.get_default_graph().as_default():
     with testing_utils.run_eagerly_scope(False):
-      with test_or_class.test_session(use_gpu=True, config=config):
+      with test_or_class.test_session(config=config):
         f(test_or_class, *args, **kwargs)
@@ -42,7 +42,7 @@ class Conv1DTest(keras_parameterized.TestCase):
     stack_size = 3
     length = 7

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Conv1D,
           kwargs=kwargs,
@@ -54,7 +54,7 @@ class Conv1DTest(keras_parameterized.TestCase):
     stack_size = 3
     length = 7

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       if expected_output_shape is not None:
         expected_output_shape = (None,) + expected_output_shape

@@ -112,7 +112,7 @@ class Conv1DTest(keras_parameterized.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -131,14 +131,14 @@ class Conv1DTest(keras_parameterized.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
       self.assertEqual(layer.bias.constraint, b_constraint)

   def test_conv1d_recreate_conv(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(filters=1,
                                   kernel_size=3,
                                   strides=1,
@@ -151,7 +151,7 @@ class Conv1DTest(keras_parameterized.TestCase):
       self.assertEqual(outp1_shape, layer(inpt1).shape)

   def test_conv1d_recreate_conv_unknown_dims(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       layer = keras.layers.Conv1D(filters=1,
                                   kernel_size=3,
                                   strides=1,
@@ -184,7 +184,7 @@ class Conv2DTest(keras_parameterized.TestCase):
     input_data_shape = (num_samples, num_row or 7, num_col or 6, stack_size)
     input_data = 10 * np.random.random(input_data_shape).astype(np.float32)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
       testing_utils.layer_test(
           keras.layers.Conv2D,
           kwargs=kwargs,
@ -205,7 +205,7 @@ class Conv2DTest(keras_parameterized.TestCase):
|
||||
input_data_shape = batch_shape + (num_row or 7, num_col or 6, stack_size)
|
||||
input_data = 10 * np.random.random(input_data_shape).astype(np.float32)
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
if expected_output_shape is not None:
|
||||
expected_output_shape = (None,) + expected_output_shape
|
||||
testing_utils.layer_test(
|
||||
@ -272,7 +272,7 @@ class Conv2DTest(keras_parameterized.TestCase):
|
||||
'activity_regularizer': 'l2',
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv2D(**kwargs)
|
||||
layer.build((None, 5, 5, 2))
|
||||
self.assertEqual(len(layer.losses), 2)
|
||||
@ -291,7 +291,7 @@ class Conv2DTest(keras_parameterized.TestCase):
|
||||
'bias_constraint': b_constraint,
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv2D(**kwargs)
|
||||
layer.build((None, 5, 5, 2))
|
||||
self.assertEqual(layer.kernel.constraint, k_constraint)
|
||||
@ -313,7 +313,7 @@ class Conv3DTest(keras_parameterized.TestCase):
|
||||
num_col = 6
|
||||
depth = 5
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Conv3D,
|
||||
kwargs=kwargs,
|
||||
@ -331,7 +331,7 @@ class Conv3DTest(keras_parameterized.TestCase):
|
||||
num_col = 6
|
||||
depth = 5
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
if expected_output_shape is not None:
|
||||
expected_output_shape = (None,) + expected_output_shape
|
||||
|
||||
@ -387,7 +387,7 @@ class Conv3DTest(keras_parameterized.TestCase):
|
||||
'activity_regularizer': 'l2',
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv3D(**kwargs)
|
||||
layer.build((None, 5, 5, 5, 2))
|
||||
self.assertEqual(len(layer.losses), 2)
|
||||
@ -407,7 +407,7 @@ class Conv3DTest(keras_parameterized.TestCase):
|
||||
'bias_constraint': b_constraint,
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv3D(**kwargs)
|
||||
layer.build((None, 5, 5, 5, 2))
|
||||
self.assertEqual(layer.kernel.constraint, k_constraint)
|
||||
@ -415,7 +415,7 @@ class Conv3DTest(keras_parameterized.TestCase):
|
||||
|
||||
def test_conv3d_dynamic_shape(self):
|
||||
input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Won't raise error here.
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Conv3D,
|
||||
@ -564,7 +564,7 @@ class ConvSequentialTest(keras_parameterized.TestCase):
|
||||
kwargs['filters'] = 1
|
||||
kwargs['kernel_size'] = 3
|
||||
kwargs['dilation_rate'] = 2
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = conv_layer_cls(**kwargs)
|
||||
output1 = layer(np.zeros(input_shape1))
|
||||
self.assertEqual(output1.shape, expected_output_shape1)
|
||||
@ -607,7 +607,7 @@ class ConvSequentialTest(keras_parameterized.TestCase):
|
||||
expected_output_shape1, expected_output_shape2)
|
||||
|
||||
def test_dynamic_shape(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv3D(2, 3)
|
||||
input_shape = (5, None, None, 2)
|
||||
inputs = keras.Input(shape=input_shape)
|
||||
@ -626,7 +626,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
|
||||
shape = (num_samples, num_steps, input_dim)
|
||||
inputs = np.ones(shape)
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# basic test
|
||||
testing_utils.layer_test(
|
||||
keras.layers.ZeroPadding1D,
|
||||
@ -682,7 +682,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
|
||||
inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
|
||||
|
||||
# basic test
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.ZeroPadding2D,
|
||||
kwargs={
|
||||
@ -699,7 +699,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
|
||||
input_shape=inputs.shape)
|
||||
|
||||
# correctness test
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.ZeroPadding2D(
|
||||
padding=(2, 2), data_format=data_format)
|
||||
layer.build(inputs.shape)
|
||||
@ -770,7 +770,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
|
||||
inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
|
||||
input_len_dim3, stack_size))
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# basic test
|
||||
testing_utils.layer_test(
|
||||
keras.layers.ZeroPadding3D,
|
||||
@ -787,7 +787,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
|
||||
},
|
||||
input_shape=inputs.shape)
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# correctness test
|
||||
layer = keras.layers.ZeroPadding3D(
|
||||
padding=(2, 2, 2), data_format=data_format)
|
||||
@ -856,7 +856,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
|
||||
class UpSamplingTest(keras_parameterized.TestCase):
|
||||
|
||||
def test_upsampling_1d(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
|
||||
|
||||
@ -875,7 +875,7 @@ class UpSamplingTest(keras_parameterized.TestCase):
|
||||
stack_size)
|
||||
|
||||
# basic test
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.UpSampling2D,
|
||||
kwargs={'size': (2, 2),
|
||||
@ -960,7 +960,7 @@ class UpSamplingTest(keras_parameterized.TestCase):
|
||||
input_len_dim3, stack_size)
|
||||
|
||||
# basic test
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.UpSampling3D,
|
||||
kwargs={'size': (2, 2, 2),
|
||||
@ -1010,7 +1010,7 @@ class CroppingTest(keras_parameterized.TestCase):
|
||||
input_len_dim1 = 2
|
||||
inputs = np.random.rand(num_samples, time_length, input_len_dim1)
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Cropping1D,
|
||||
kwargs={'cropping': (2, 2)},
|
||||
@ -1036,7 +1036,7 @@ class CroppingTest(keras_parameterized.TestCase):
|
||||
else:
|
||||
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
|
||||
stack_size)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# basic test
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Cropping2D,
|
||||
@ -1069,7 +1069,7 @@ class CroppingTest(keras_parameterized.TestCase):
|
||||
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
|
||||
stack_size)
|
||||
# another correctness test (no cropping)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
cropping = ((0, 0), (0, 0))
|
||||
layer = keras.layers.Cropping2D(
|
||||
cropping=cropping, data_format=data_format)
|
||||
@ -1105,7 +1105,7 @@ class CroppingTest(keras_parameterized.TestCase):
|
||||
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
|
||||
input_len_dim3, stack_size)
|
||||
# basic test
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Cropping3D,
|
||||
kwargs={'cropping': cropping,
|
||||
@ -1114,7 +1114,7 @@ class CroppingTest(keras_parameterized.TestCase):
|
||||
|
||||
if len(croppings) == 3 and len(croppings[0]) == 2:
|
||||
# correctness test
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Cropping3D(
|
||||
cropping=cropping, data_format=data_format)
|
||||
layer.build(inputs.shape)
|
||||
@ -1152,7 +1152,7 @@ class DepthwiseConv2DTest(keras_parameterized.TestCase):
|
||||
num_row = 7
|
||||
num_col = 6
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.DepthwiseConv2D,
|
||||
kwargs=kwargs,
|
||||
|
@ -36,7 +36,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
|
||||
num_row = 7
|
||||
num_col = 6
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Conv2DTranspose,
|
||||
kwargs=kwargs,
|
||||
@ -67,7 +67,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
|
||||
'activity_regularizer': 'l2',
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv2DTranspose(**kwargs)
|
||||
layer.build((None, 5, 5, 2))
|
||||
self.assertEqual(len(layer.losses), 2)
|
||||
@ -86,7 +86,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
|
||||
'bias_constraint': b_constraint,
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv2DTranspose(**kwargs)
|
||||
layer.build((None, 5, 5, 2))
|
||||
self.assertEqual(layer.kernel.constraint, k_constraint)
|
||||
@ -127,7 +127,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
|
||||
num_col = 6
|
||||
depth = 5
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Conv3DTranspose,
|
||||
kwargs=kwargs,
|
||||
@ -159,7 +159,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
|
||||
'activity_regularizer': 'l2',
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv3DTranspose(**kwargs)
|
||||
layer.build((None, 5, 5, 5, 2))
|
||||
self.assertEqual(len(layer.losses), 2)
|
||||
@ -178,7 +178,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
|
||||
'bias_constraint': b_constraint,
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.Conv3DTranspose(**kwargs)
|
||||
layer.build((None, 5, 5, 5, 2))
|
||||
self.assertEqual(layer.kernel.constraint, k_constraint)
|
||||
@ -186,7 +186,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
|
||||
|
||||
def test_conv3d_transpose_dynamic_shape(self):
|
||||
input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Won't raise error here.
|
||||
testing_utils.layer_test(
|
||||
keras.layers.Conv3DTranspose,
|
||||
|
@ -205,7 +205,7 @@ class CuDNNGraphOnlyTest(keras_parameterized.TestCase):
|
||||
units = 2
|
||||
num_samples = 32
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
model = keras.models.Sequential()
|
||||
model.add(
|
||||
keras.layers.Embedding(
|
||||
|
@@ -104,7 +104,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
   @keras_parameterized.run_all_keras_modes
   def test_batchnorm_convnet(self):
     if test.is_gpu_available(cuda_only=True):
-      with self.session(use_gpu=True):
+      with self.session():
        model = keras.models.Sequential()
        norm = keras.layers.BatchNormalization(
            axis=1, input_shape=(3, 4, 4), momentum=0.8)
@ -412,7 +412,7 @@ class RandomFlipTest(keras_parameterized.TestCase):
|
||||
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
|
||||
with test.mock.patch.object(
|
||||
random_ops, 'random_uniform', return_value=mock_random):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = image_preprocessing.RandomFlip()
|
||||
actual_output = layer(input_images, training=1)
|
||||
self.assertAllClose(expected_output, actual_output)
|
||||
@ -698,7 +698,7 @@ class RandomTransformTest(keras_parameterized.TestCase):
|
||||
fill_value=0.0,
|
||||
interpolation='bilinear'):
|
||||
inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
output = image_preprocessing.transform(
|
||||
inp,
|
||||
transform_matrix,
|
||||
|
@ -35,7 +35,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
|
||||
stack_size = 3
|
||||
length = 7
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.SeparableConv1D,
|
||||
kwargs=kwargs,
|
||||
@ -66,7 +66,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
|
||||
'activity_regularizer': 'l2',
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.SeparableConv1D(**kwargs)
|
||||
layer.build((None, 5, 2))
|
||||
self.assertEqual(len(layer.losses), 3)
|
||||
@ -87,7 +87,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
|
||||
'bias_constraint': b_constraint,
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.SeparableConv1D(**kwargs)
|
||||
layer.build((None, 5, 2))
|
||||
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
|
||||
@ -104,7 +104,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
|
||||
num_row = 7
|
||||
num_col = 6
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
testing_utils.layer_test(
|
||||
keras.layers.SeparableConv2D,
|
||||
kwargs=kwargs,
|
||||
@ -138,7 +138,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
|
||||
'activity_regularizer': 'l2',
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.SeparableConv2D(**kwargs)
|
||||
layer.build((None, 5, 5, 2))
|
||||
self.assertEqual(len(layer.losses), 3)
|
||||
@ -159,7 +159,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
|
||||
'bias_constraint': b_constraint,
|
||||
'strides': 1
|
||||
}
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
layer = keras.layers.SeparableConv2D(**kwargs)
|
||||
layer.build((None, 5, 5, 2))
|
||||
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
|
||||
|
@@ -407,7 +407,7 @@ class BNTest(test.TestCase):
     training = array_ops.placeholder(dtype='bool')
     outputs = bn.apply(inputs, training=training)

-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       # Test training with placeholder learning phase.
       self.evaluate(variables.global_variables_initializer())
       np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
@ -898,7 +898,7 @@ class BNTest(test.TestCase):
|
||||
moving_stddev = 1.
|
||||
renorm_mean = 0.
|
||||
renorm_stddev = 1.
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for _ in range(5):
|
||||
x = np.random.random(shape)
|
||||
@ -948,7 +948,7 @@ class BNTest(test.TestCase):
|
||||
moving_stddev = 1.
|
||||
renorm_mean = 0.
|
||||
renorm_stddev = 1.
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for step in range(6):
|
||||
x = np.random.random(shape)
|
||||
@ -1002,7 +1002,7 @@ class BNTest(test.TestCase):
|
||||
|
||||
moving_mean = 0.
|
||||
moving_variance = 1.
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for _ in range(5):
|
||||
x = np.random.random(shape)
|
||||
@ -1055,7 +1055,7 @@ class BNTest(test.TestCase):
|
||||
moving_stddev = 1.
|
||||
renorm_mean = 0.
|
||||
renorm_stddev = 1.
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for _ in range(5):
|
||||
x = np.random.random(shape)
|
||||
@ -1101,7 +1101,7 @@ class BNTest(test.TestCase):
|
||||
self.assertListEqual(
|
||||
out1.shape.as_list(), out2.shape.as_list())
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
x = np.random.random(shape)
|
||||
@ -1123,7 +1123,7 @@ class BNTest(test.TestCase):
|
||||
out = normalization_layers.batch_normalization(
|
||||
inp, virtual_batch_size=2)
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
x = np.random.random(np_shape)
|
||||
@ -1154,7 +1154,7 @@ class BNTest(test.TestCase):
|
||||
shape[0] // virtual_batch_size,
|
||||
shape[1]])
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for _ in range(5):
|
||||
x = np.random.random(shape)
|
||||
@ -1207,7 +1207,7 @@ class BNTest(test.TestCase):
|
||||
ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
|
||||
shape[1:])
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for _ in range(5):
|
||||
x = np.random.random(shape)
|
||||
@ -1261,7 +1261,7 @@ class BNTest(test.TestCase):
|
||||
ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
|
||||
shape[1:])
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for _ in range(5):
|
||||
x = np.random.random(shape)
|
||||
@ -1413,7 +1413,7 @@ class BNTest(test.TestCase):
|
||||
ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
|
||||
shape[1:])
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for _ in range(5):
|
||||
x = np.random.random(shape)
|
||||
|
@@ -113,7 +113,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
   def testSparse(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
@ -203,7 +203,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
|
||||
def doTestBasic(self, use_callable_params=False):
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -261,7 +261,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
|
||||
def testBasicWithAmsgrad(self):
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -353,7 +353,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testBasicWithLearningRateDecay(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -398,7 +398,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testBasicWithLearningRateInverseTimeDecay(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -445,7 +445,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testTensorLearningRate(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -484,7 +484,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testSharing(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -565,7 +565,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testSparse(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -655,7 +655,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
|
||||
def doTestBasic(self, use_callable_params=False):
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -715,7 +715,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
|
||||
def testBasicWithAmsgrad(self):
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -809,7 +809,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testBasicWithLearningRateDecay(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -854,7 +854,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testBasicWithLearningRateInverseTimeDecay(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -901,7 +901,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testTensorLearningRate(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -940,7 +940,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testSharing(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
|
@ -81,7 +81,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testResourceSparse(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype) # pylint: disable=cell-var-from-loop
|
||||
m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
|
||||
@ -275,7 +275,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testTensorLearningRate(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
@ -312,7 +312,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
|
||||
def testSharing(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
# Initialize variables for numpy implementation.
|
||||
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
|
||||
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
|
||||
|
@@ -37,7 +37,7 @@ class FtrlOptimizerTest(test.TestCase):
   def doTestFtrlwithoutRegularization(self, use_resource=False):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.float32]:
-      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+      with ops.Graph().as_default(), self.cached_session():
        if use_resource:
          var0 = variables.Variable([0.0, 0.0], dtype=dtype)
          var1 = variables.Variable([0.0, 0.0], dtype=dtype)
@ -77,7 +77,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testFtrlwithoutRegularization2(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
|
||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||
@ -107,7 +107,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testMinimizeSparseResourceVariable(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([[1.0, 2.0]], dtype=dtype)
|
||||
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
|
||||
|
||||
@ -129,7 +129,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testFtrlWithL1(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
|
||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||
@ -159,7 +159,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testFtrlWithBeta(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
|
||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||
@ -185,7 +185,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testFtrlWithL2_Beta(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
|
||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||
@ -216,7 +216,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testFtrlWithL1_L2(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
|
||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||
@ -253,7 +253,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
"""
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
|
||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||
@ -286,7 +286,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
"""Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
|
||||
var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
|
||||
grads0 = ops.IndexedSlices(
|
||||
@ -321,7 +321,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True) as sess:
|
||||
with ops.Graph().as_default(), self.cached_session() as sess:
|
||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
var1 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||
@ -404,7 +404,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testEquivAdagradwithoutRegularization(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
val0, val1 = self.applyOptimizer(
|
||||
ftrl.Ftrl(
|
||||
3.0,
|
||||
@ -415,7 +415,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l2_regularization_strength=0.0),
|
||||
dtype)
|
||||
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
val2, val3 = self.applyOptimizer(
|
||||
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
|
||||
|
||||
@ -449,7 +449,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testEquivSparseGradientDescentwithoutRegularization(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
val0, val1 = self.applyOptimizer(
|
||||
ftrl.Ftrl(
|
||||
3.0,
|
||||
@ -461,7 +461,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
dtype,
|
||||
is_sparse=True)
|
||||
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
val2, val3 = self.applyOptimizer(
|
||||
gradient_descent.GradientDescentOptimizer(3.0),
|
||||
dtype,
|
||||
@ -473,7 +473,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
def testEquivGradientDescentwithoutRegularization(self):
|
||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||
for dtype in [dtypes.half, dtypes.float32]:
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
val0, val1 = self.applyOptimizer(
|
||||
ftrl.Ftrl(
|
||||
3.0,
|
||||
@ -484,7 +484,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l2_regularization_strength=0.0),
|
||||
dtype)
|
||||
|
||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
||||
with ops.Graph().as_default(), self.cached_session():
|
||||
val2, val3 = self.applyOptimizer(
|
||||
gradient_descent.GradientDescentOptimizer(3.0), dtype)
|
||||
|
||||
|
@@ -58,7 +58,7 @@ class AddNTest(test.TestCase):

   def testAddN(self):
     np.random.seed(12345)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
       for dtype in self._supported_types():
         for count in range(1, self._MAX_N + 1):
           data = [self._buildData((2, 2), dtype) for _ in range(count)]
@ -71,7 +71,7 @@ class AddNTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testUnknownShapes(self):
|
||||
np.random.seed(12345)
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
for dtype in self._supported_types():
|
||||
data = self._buildData((2, 2), dtype)
|
||||
for count in range(1, self._MAX_N + 1):
|
||||
|
@ -97,7 +97,7 @@ class ArgMaxTest(test.TestCase):
|
||||
def testFloatInt32Output(self):
|
||||
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
|
||||
expected_values = x.argmax()
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
|
||||
tf_ans = self.evaluate(ans)
|
||||
self.assertEqual(np.int32, tf_ans.dtype)
|
||||
@ -105,7 +105,7 @@ class ArgMaxTest(test.TestCase):
|
||||
# the values don't have a range that exceeds 32-bit integers.
|
||||
self.assertAllEqual(tf_ans, expected_values)
|
||||
expected_values = x.argmin()
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
|
||||
tf_ans = self.evaluate(ans)
|
||||
self.assertEqual(np.int32, tf_ans.dtype)
|
||||
|
@ -46,7 +46,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
def testSimpleGather(self, indices_dtype):
|
||||
data = np.array([0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13])
|
||||
indices = [3, 4]
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for dtype in _TEST_TYPES:
|
||||
params_np = self._buildParams(data, dtype)
|
||||
params = constant_op.constant(params_np)
|
||||
@ -62,7 +62,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
def test2DArray(self, indices_dtype):
|
||||
data = np.array([[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]])
|
||||
indices = [[3], [4]]
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for dtype in _TEST_TYPES:
|
||||
params_np = self._buildParams(data, dtype)
|
||||
params = constant_op.constant(params_np)
|
||||
@ -77,7 +77,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
def testHigherRank(self):
|
||||
data = np.array([[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]])
|
||||
indices = [[[2, 0], [1, 2]], [[2, 0], [0, 1]]]
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for dtype in _TEST_TYPES:
|
||||
params_np = self._buildParams(data, dtype)
|
||||
params = constant_op.constant(params_np)
|
||||
@ -113,7 +113,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
self.evaluate(array_ops.batch_gather(params, [7]))
|
||||
|
||||
def testEmptySlices(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for dtype in _TEST_TYPES:
|
||||
for itype in np.int32, np.int64:
|
||||
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
|
||||
|
@ -59,7 +59,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
return data
|
||||
|
||||
def testScalar1D(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
data = np.array([0, 1, 2, 3, 7, 5])
|
||||
for dtype in _TEST_TYPES:
|
||||
for indices in 4, [1, 2, 2, 4, 5]:
|
||||
@ -74,7 +74,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
self.assertEqual(np_val.shape, gather_t.get_shape())
|
||||
|
||||
def testScalar2D(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
|
||||
[9, 10, 11], [12, 13, 14]])
|
||||
for dtype in _TEST_TYPES:
|
||||
@ -90,7 +90,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
self.assertEqual(expected_shape, gather_t.get_shape())
|
||||
|
||||
def testSimpleTwoD32(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
|
||||
[9, 10, 11], [12, 13, 14]])
|
||||
for dtype in _TEST_TYPES:
|
||||
@ -304,7 +304,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
|
||||
# On GPU the bad indices do not raise error but fetch 0 values
|
||||
if not test.is_gpu_available():
|
||||
return
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
params = [[0, 1, 2], [3, 4, 5]]
|
||||
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
|
||||
array_ops.gather(params, [[7]], axis=0).eval()
|
||||
|
@ -211,7 +211,7 @@ class StatefulScatterNdTest(test.TestCase):
|
||||
scatter = state_ops.scatter_nd_update(ref, indices, updates)
|
||||
init = variables.global_variables_initializer()
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(init)
|
||||
result = self.evaluate(scatter)
|
||||
self.assertAllClose(result, expected)
|
||||
@ -225,7 +225,7 @@ class StatefulScatterNdTest(test.TestCase):
|
||||
scatter = state_ops.scatter_nd_update(ref, indices, updates)
|
||||
init = variables.global_variables_initializer()
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
self.evaluate(init)
|
||||
result = self.evaluate(scatter)
|
||||
self.assertAllClose(result, expected)
|
||||
|
@@ -40,7 +40,7 @@ class SliceTest(test.TestCase):
   def testEmpty(self):
     inp = np.random.rand(4, 4).astype("f")
     for k in xrange(4):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
@ -49,7 +49,7 @@ class SliceTest(test.TestCase):
|
||||
def testInt32(self):
|
||||
inp = np.random.rand(4, 4).astype("i")
|
||||
for k in xrange(4):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
|
||||
slice_t = a[2, k:k]
|
||||
slice_val = self.evaluate(slice_t)
|
||||
@ -119,7 +119,7 @@ class SliceTest(test.TestCase):
|
||||
|
||||
def testSelectAll(self):
|
||||
for _ in range(10):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inp = np.random.rand(4, 4, 4, 4).astype("f")
|
||||
a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
|
||||
|
||||
@ -133,7 +133,7 @@ class SliceTest(test.TestCase):
|
||||
|
||||
def testSingleDimension(self):
|
||||
for _ in range(10):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inp = np.random.rand(10).astype("f")
|
||||
a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
|
||||
|
||||
@ -229,7 +229,7 @@ class SliceTest(test.TestCase):
|
||||
|
||||
def testSingleElementAll(self):
|
||||
for _ in range(10):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inp = np.random.rand(4, 4).astype("f")
|
||||
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
|
||||
|
||||
@ -312,7 +312,7 @@ class SliceTest(test.TestCase):
|
||||
self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])
|
||||
|
||||
def _testGradientSlice(self, input_shape, slice_begin, slice_size):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
num_inputs = np.prod(input_shape)
|
||||
num_grads = np.prod(slice_size)
|
||||
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
|
||||
@ -362,7 +362,7 @@ class SliceTest(test.TestCase):
|
||||
self.assertAllClose(np_ans, result)
|
||||
|
||||
def _testGradientVariableSize(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
|
||||
out = array_ops.slice(inp, [1], [-1])
|
||||
grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
|
||||
@ -380,7 +380,7 @@ class SliceTest(test.TestCase):
|
||||
# Regression test for bug in slice. A low-level bug in Eigen was causing
|
||||
# incorrect results for negative indices in multi-dimensional tensors.
|
||||
# See b/114318298.
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
|
||||
loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
|
||||
loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
|
||||
@ -477,7 +477,7 @@ class SliceTest(test.TestCase):
|
||||
self.assertEqual([None, 2], c.get_shape().as_list())
|
||||
|
||||
def testSliceOfSlice(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
|
||||
b = a[1:, :]
|
||||
c = b[:-1, :]
|
||||
|
@@ -52,7 +52,7 @@ class StackOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testSimple(self):
     np.random.seed(7)
-    with self.session(use_gpu=True):
+    with self.session():
       for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
         rank = len(shape)
         for axis in range(-rank, rank):
@ -90,7 +90,7 @@ class StackOpTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testConst(self):
|
||||
np.random.seed(7)
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
# Verify that shape induction works with shapes produced via const stack
|
||||
a = constant_op.constant([1, 2, 3, 4, 5, 6])
|
||||
b = array_ops.reshape(a, array_ops.stack([2, 3]))
|
||||
@ -155,7 +155,7 @@ class StackOpTest(test.TestCase):
|
||||
data = np.random.randn(*shape)
|
||||
shapes = [shape[1:]] * shape[0]
|
||||
with self.subTest(shape=shape):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# TODO(irving): Remove list() once we handle maps correctly
|
||||
xs = list(map(constant_op.constant, data))
|
||||
c = array_ops.stack(xs)
|
||||
@ -171,7 +171,7 @@ class StackOpTest(test.TestCase):
|
||||
out_shape = list(shape[1:])
|
||||
out_shape.insert(1, shape[0])
|
||||
with self.subTest(shape=shape):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# TODO(irving): Remove list() once we handle maps correctly
|
||||
xs = list(map(constant_op.constant, data))
|
||||
c = array_ops.stack(xs, axis=1)
|
||||
@ -241,7 +241,7 @@ class StackOpTest(test.TestCase):
|
||||
for axis in range(-rank, rank):
|
||||
test_arrays = np_split_squeeze(expected, axis)
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
with self.subTest(shape=shape, dtype=dtype, axis=axis):
|
||||
actual_pack = array_ops.stack(test_arrays, axis=axis)
|
||||
self.assertEqual(expected.shape, actual_pack.get_shape())
|
||||
@ -265,7 +265,7 @@ class StackOpTest(test.TestCase):
|
||||
|
||||
def testComplex(self):
|
||||
np.random.seed(7)
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
|
||||
for dtype in [np.complex64, np.complex128]:
|
||||
with self.subTest(shape=shape, dtype=dtype):
|
||||
@ -279,7 +279,7 @@ class AutomaticStackingTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSimple(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
self.assertAllEqual(
|
||||
[1, 0, 2],
|
||||
ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())
|
||||
@ -299,7 +299,7 @@ class AutomaticStackingTest(test.TestCase):
|
||||
]).eval())
|
||||
|
||||
def testWithNDArray(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
result = ops.convert_to_tensor([[[0., 0.],
|
||||
constant_op.constant([1., 1.])],
|
||||
np.array(
|
||||
@ -310,7 +310,7 @@ class AutomaticStackingTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testVariable(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = variables.Variable(17)
|
||||
result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
|
||||
self.evaluate(v.initializer)
|
||||
@ -364,7 +364,7 @@ class AutomaticStackingTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testPlaceholder(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
# Test using placeholder with a defined shape.
|
||||
ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
|
||||
result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
|
||||
@ -391,7 +391,7 @@ class AutomaticStackingTest(test.TestCase):
|
||||
# Dynamic shape error.
|
||||
ph_1 = array_ops.placeholder(dtypes.int32)
|
||||
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
with self.assertRaises(errors_impl.InvalidArgumentError):
|
||||
result_1.eval(feed_dict={ph_1: [1]})
|
||||
|
||||
|
@@ -474,7 +474,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testReverseRowsOf3Channels(self):
     """Tests optimized code for reversing rows with last dim size = 3."""
-    with self.session(use_gpu=True):
+    with self.session():
       for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
         for outer_size in (1, 2):
           for middle_size in list(range(50)) + [100000]:
@ -491,7 +491,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testReverseRowsOf4Channels(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
|
||||
for outer_size in (1, 2):
|
||||
for middle_size in list(range(50)) + [100000]:
|
||||
@ -508,7 +508,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testReverseColumnsOf3Channels(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
|
||||
for outer_size in list(range(50)) + [100000]:
|
||||
for middle_size in (1, 2):
|
||||
@ -641,7 +641,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
def test_basic_slice(self):
|
||||
for tensor_type in STRIDED_SLICE_TYPES:
|
||||
with self.subTest(tensor_type=tensor_type):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
checker = StridedSliceChecker(
|
||||
self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
|
||||
_ = checker[:, :, :]
|
||||
@ -696,7 +696,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testDegenerateSlices(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
|
||||
# degenerate by offering a forward interval with a negative stride
|
||||
_ = checker[0:-1:-1, :, :]
|
||||
@ -717,7 +717,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testEllipsis(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
|
||||
checker = StridedSliceChecker(self, raw)
|
||||
|
||||
@ -738,7 +738,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testShrink(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
|
||||
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
|
||||
checker = StridedSliceChecker(self, raw)
|
||||
@ -749,7 +749,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testBothNewAxisAndShrink(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ones = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int16)
|
||||
self.assertAllEqual(
|
||||
ones[array_ops.newaxis, :,
|
||||
@ -757,7 +757,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testTensorIndexing(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
|
||||
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
|
||||
checker = StridedSliceChecker(self, raw, check_type_infer=False)
|
||||
@ -769,7 +769,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
_ = checker[..., 2**64 // 2**63] # Test longs in Python 2
|
||||
|
||||
def testTensorIndexingTypeError(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
|
||||
expected = re.escape(array_ops._SLICE_TYPE_ERROR)
|
||||
with self.assertRaisesRegex(TypeError, expected):
|
||||
@ -787,7 +787,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testExpand(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
|
||||
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
|
||||
checker = StridedSliceChecker(self, raw)
|
||||
@ -805,7 +805,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testExpandVariable(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
x = variables.Variable(7, dtype=dtypes.int32)
|
||||
self.evaluate(x.initializer)
|
||||
y = x[None].eval()
|
||||
@ -814,7 +814,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testOptimizedCases(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
checker = StridedSliceChecker(self,
|
||||
StridedSliceChecker.REF_TENSOR_ALIGNED)
|
||||
# Identity
|
||||
@ -830,7 +830,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_v1_only("currently failing on v2")
|
||||
def testMasks(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
scalar = np.array(0)
|
||||
# Test tensor type mask
|
||||
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
|
||||
@ -870,7 +870,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testUnknown(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
uncertain_tensor = array_ops.placeholder(dtypes.float32)
|
||||
a = StridedSliceShapeChecker(uncertain_tensor)
|
||||
a_slice_shape = a[...]
|
||||
@ -882,7 +882,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testTensorShapeUncertain(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
uncertain_tensor = array_ops.placeholder(
|
||||
dtypes.float32, shape=(5, None, 7))
|
||||
a = StridedSliceShapeChecker(uncertain_tensor)
|
||||
@ -906,7 +906,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testTensorValuedIndexShape(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
defined_shape_tensor = array_ops.placeholder(
|
||||
dtypes.float32, shape=(5, 3, 7))
|
||||
index_value = array_ops.placeholder(dtypes.int32, shape=())
|
||||
@ -965,7 +965,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_v1_only("b/120545219")
|
||||
def testGradient(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
var = variables.Variable(
|
||||
array_ops.reshape(
|
||||
math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
|
||||
@ -992,7 +992,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_v1_only("b/120545219")
|
||||
def testGradientZero(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
var = variables.Variable(8.)
|
||||
init = variables.global_variables_initializer()
|
||||
sess.run(init)
|
||||
@ -1001,7 +1001,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testInt64Indices(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
a = math_ops.range(3, dtype=dtypes.float32)
|
||||
index = constant_op.constant(1, dtype=dtypes.int64)
|
||||
b = 2. * a[index]
|
||||
@ -1014,7 +1014,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testHostVsDevice(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
var2 = variables.Variable(
|
||||
array_ops.reshape(
|
||||
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
|
||||
@ -1029,7 +1029,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testInt64Shape(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
original_dy = array_ops.reshape(
|
||||
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
|
||||
shape=(4, 1, 1))
|
||||
@ -1044,7 +1044,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testMixedIndexTypes(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
original_dy = array_ops.reshape(
|
||||
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
|
||||
shape=(4, 1, 1))
|
||||
@ -1133,7 +1133,7 @@ class StridedSliceAssignChecker(object):
|
||||
if self.tensor_type.is_complex:
|
||||
value -= 1j * value
|
||||
|
||||
with self.test.test_session(use_gpu=True) as sess:
|
||||
with self.test.test_session() as sess:
|
||||
if self._use_resource:
|
||||
var = resource_variable_ops.ResourceVariable(self.x)
|
||||
else:
|
||||
@ -1514,7 +1514,7 @@ class InvertPermutationTest(test_util.TensorFlowTestCase):
|
||||
def testInvertPermutation(self):
|
||||
for dtype in [dtypes.int32, dtypes.int64]:
|
||||
with self.subTest(dtype=dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
|
||||
y = array_ops.invert_permutation(x)
|
||||
self.assertAllEqual(y.get_shape(), [5])
|
||||
@ -1597,7 +1597,7 @@ class SnapshotOpTest(test_util.TensorFlowTestCase):
|
||||
def testInvertPermutation(self):
|
||||
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
|
||||
with self.subTest(dtype=dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
|
||||
y = gen_array_ops.snapshot(x)
|
||||
self.assertAllEqual(y, [0, 1, 2, 3])
|
||||
|
@ -61,7 +61,7 @@ class AtrousConv2DTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousConv2DForward(self):
with self.session(use_gpu=True):
with self.session():
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
@ -108,7 +108,7 @@ class AtrousConv2DTest(test.TestCase):
padding = "SAME" # The padding needs to be "SAME"
np.random.seed(1) # Make it reproducible.
with self.session(use_gpu=True):
with self.session():
# Input: [batch, height, width, input_depth]
for height in range(15, 17):
for width in range(15, 17):
@ -138,7 +138,7 @@ class AtrousConv2DTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradient(self):
with self.session(use_gpu=True):
with self.session():
# Input: [batch, height, width, input_depth]
x_shape = [2, 5, 6, 2]
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
@ -166,7 +166,7 @@ class AtrousConv2DTransposeTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousConv2DTransposeForward(self):
with self.session(use_gpu=True):
with self.session():
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
@ -206,7 +206,7 @@ class AtrousDepthwiseConv2DTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousDepthwiseConv2DForward(self):
strides = [1, 1, 1, 1]
with self.session(use_gpu=True):
with self.session():
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
@ -86,7 +86,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
with self.cached_session(use_gpu=True):
with self.cached_session():
a_tf = a
b_tf = b
if use_placeholder:
@ -199,7 +199,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
# right-hand sides.
matrix = np.array([[1., 1.], [1., 1.]])
rhs = np.array([[1., 0.]])
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
@ -208,7 +208,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
# Number of bands exceeds the dimension of the matrix.
matrix = np.ones((6, 4))
rhs = np.ones((4, 2))
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
@ -40,7 +40,7 @@ from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
@ -143,7 +143,7 @@ class MathBuiltinUnaryTest(test.TestCase):
np_out = np.floor_divide(x, y + 0.1)
with self.session(use_gpu=True) as sess:
with self.session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
@ -167,7 +167,7 @@ class BroadcastSimpleTest(test.TestCase):
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.cached_session(use_gpu=True):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
@ -166,7 +166,7 @@ class BatchMatmulGradientTest(test.TestCase):
def Loss(x, y):
return math_ops.reduce_sum(math_ops.matmul(x, y, adjoint_a, adjoint_b))
with self.cached_session(use_gpu=True):
with self.cached_session():
((x_jacob_t, y_jacob_t),
(x_jacob_n, y_jacob_n)) = gradient_checker_v2.compute_gradient(
Loss, [x, y], delta=delta)
@ -36,7 +36,7 @@ from tensorflow.python.platform import googletest
class BincountTest(test_util.TensorFlowTestCase):
def test_empty(self):
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([], minlength=5)),
[0, 0, 0, 0, 0])
@ -54,7 +54,7 @@ class BincountTest(test_util.TensorFlowTestCase):
np.float64)
def test_values(self):
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([1, 1, 1, 2, 2, 3])),
[0, 3, 2, 1])
@ -74,7 +74,7 @@ class BincountTest(test_util.TensorFlowTestCase):
np.ones(10000))
def test_maxlength(self):
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([5], maxlength=3)), [0, 0, 0])
self.assertAllEqual(
@ -84,7 +84,7 @@ class BincountTest(test_util.TensorFlowTestCase):
def test_random_with_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
with self.session():
np.random.seed(42)
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
arr = np.random.randint(0, 1000, num_samples)
@ -98,7 +98,7 @@ class BincountTest(test_util.TensorFlowTestCase):
def test_random_without_weights(self):
num_samples = 10000
with self.session(use_gpu=True):
with self.session():
np.random.seed(42)
for dtype in [np.int32, np.float32]:
arr = np.random.randint(0, 1000, num_samples)
@ -108,7 +108,7 @@ class BincountTest(test_util.TensorFlowTestCase):
np.bincount(arr, weights))
def test_zero_weights(self):
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount(np.arange(1000), np.zeros(1000))),
np.zeros(1000))
@ -33,21 +33,21 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToBasic(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
with self.session(use_gpu=True):
with self.session():
x = np.array([1, 2, 3], dtype=dtype)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf, v_np)
def testBroadcastToString(self):
with self.session(use_gpu=True):
with self.session():
x = np.array([b"1", b"2", b"3"])
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf, v_np)
def testBroadcastToBool(self):
with self.session(use_gpu=True):
with self.session():
x = np.array([True, False, True], dtype=np.bool)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
@ -56,7 +56,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShape(self):
for input_dim in range(1, 6):
for output_dim in range(input_dim, 6):
with self.cached_session(use_gpu=True):
with self.cached_session():
input_shape = [2] * input_dim
output_shape = [2] * output_dim
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
@ -67,7 +67,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShapeInnerDim(self):
input_shape = [2, 1, 3]
output_shape = [2, 5, 3]
with self.cached_session(use_gpu=True):
with self.cached_session():
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
@ -76,7 +76,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShapeLargerDim(self):
input_shape = [2, 1, 3, 2, 2, 2]
output_shape = [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 15, 3, 2, 2, 2]
with self.cached_session(use_gpu=True):
with self.cached_session():
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
@ -85,21 +85,21 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShapeLargerDim2(self):
input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
with self.cached_session(use_gpu=True):
with self.cached_session():
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf, v_np)
def testBroadcastToScalar(self):
with self.session(use_gpu=True):
with self.session():
x = np.array(1, dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf, v_np)
def testBroadcastScalarToNonScalar(self):
with self.session(use_gpu=True):
with self.session():
x = np.array(1.0, dtype=np.float)
v_tf = array_ops.broadcast_to(constant_op.constant(1.0), [2, 3, 4,
1, 1, 1])
@ -108,7 +108,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShapeTypeAndInference(self):
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(use_gpu=True):
with self.cached_session():
x = np.array([1, 2, 3])
v_tf = array_ops.broadcast_to(
constant_op.constant(x),
@ -36,14 +36,14 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session(use_gpu=True) as sess:
with self.session() as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
def testEmptyFloat(self):
op = math_ops._bucketize(
array_ops.zeros([0, 3], dtype=dtypes.float32), boundaries=[])
expected_out = np.zeros([0, 3], dtype=np.float32)
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
def testFloat(self):
@ -51,7 +51,7 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.session(use_gpu=True) as sess:
with self.session() as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
def test2DInput(self):
@ -59,14 +59,14 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
with self.session(use_gpu=True) as sess:
with self.session() as sess:
self.assertAllEqual(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def testInvalidBoundariesOrder(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
with self.session(use_gpu=True) as sess:
with self.session() as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
self.evaluate(op)
@ -108,7 +108,7 @@ class CastOpTest(test.TestCase):
with self.cached_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
with self.cached_session(use_gpu=True):
with self.cached_session():
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
@ -166,7 +166,7 @@ class CholeskyOpTest(test.TestCase):
@test_util.disable_xla("b/123337890")
def testNotInvertibleCPU(self):
# The input should be invertible.
with self.session(use_gpu=True):
with self.session():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful. The"
@ -52,7 +52,7 @@ class ClipTest(test.TestCase):
# ClipByValue test
def testClipByValue(self):
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
@ -73,7 +73,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session(use_gpu=True):
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = 2
@ -95,7 +95,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session(use_gpu=True):
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = constant_op.constant(
@ -118,7 +118,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session(use_gpu=True):
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[4, 4, 4], [4, 5, 6]]
clip_value_min = 4
@ -141,7 +141,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session(use_gpu=True):
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [5, 5, 6]]
clip_value_min = constant_op.constant(
@ -154,7 +154,7 @@ class ClipTest(test.TestCase):
self.assertAllClose(np_ans, tf_ans)
def testClipByValueBadShape(self):
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
@ -176,7 +176,7 @@ class ClipTest(test.TestCase):
def _testClipIndexedSlicesByValue(self, values, indices, shape,
clip_value_min, clip_value_max, expected):
with self.session(use_gpu=True) as sess:
with self.session() as sess:
values = constant_op.constant(values)
indices = constant_op.constant(indices)
shape = constant_op.constant(shape)
@ -211,7 +211,7 @@ class ClipTest(test.TestCase):
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
@ -227,14 +227,14 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByNormGradientZeros(self):
with self.session(use_gpu=True):
with self.session():
x = array_ops.zeros([3])
b = clip_ops.clip_by_norm(x, 1.)
grad, = gradients_impl.gradients(b, x)
self.assertAllEqual(grad, [1., 1., 1.])
def testClipByNormBadShape(self):
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
@ -243,7 +243,7 @@ class ClipTest(test.TestCase):
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
@ -255,7 +255,7 @@ class ClipTest(test.TestCase):
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
@ -267,7 +267,7 @@ class ClipTest(test.TestCase):
def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
@ -279,7 +279,7 @@ class ClipTest(test.TestCase):
def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
@ -291,7 +291,7 @@ class ClipTest(test.TestCase):
def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
@ -305,7 +305,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@ -327,7 +327,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormClippedTensor(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@ -349,7 +349,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@ -373,7 +373,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = ops.IndexedSlices(
constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
@ -407,7 +407,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@ -427,7 +427,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.session(use_gpu=True):
with self.session():
x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([0.0, 0.0])
# Norm = 0, no changes
@ -447,7 +447,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormInf(self):
# Expect all NaNs when global norm is inf.
with self.session(use_gpu=True):
with self.session():
x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
@ -463,7 +463,7 @@ class ClipTest(test.TestCase):
def testClipByAverageNormClipped(self):
# Norm clipping when average clip_norm < 0.83333333
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@ -475,7 +475,7 @@ class ClipTest(test.TestCase):
def testClipByAverageNormClippedTensor(self):
# Norm clipping when average clip_norm < 0.83333333
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@ -487,7 +487,7 @@ class ClipTest(test.TestCase):
def testClipByAverageNormNotClipped(self):
# No norm clipping when average clip_norm >= 0.83333333
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
@ -499,7 +499,7 @@ class ClipTest(test.TestCase):
def testClipByAverageNormZero(self):
# No norm clipping when average clip_norm = 0
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
@ -512,7 +512,7 @@ class ClipTest(test.TestCase):
def testClipByAverageNormReplacedWithClipByNorm(self):
# Check clip_by_average_norm(t) is the same as
# clip_by_norm(t, clip_norm * tf.compat.v1.to_float(tf.size(t)))
with self.session(use_gpu=True):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
# expected answer [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@ -532,7 +532,7 @@ class ClipTest(test.TestCase):
y = clip_ops.clip_by_value(zero, 1.0, 1.0)
z = clip_ops.clip_by_value(zero, zero, 1.0)
w = clip_ops.clip_by_value(zero, 1.0, zero)
with self.session(use_gpu=True) as sess:
with self.session() as sess:
sess.run([x, y, z, w], feed_dict={zero: np.zeros((7, 0))})
@ -38,7 +38,7 @@ class ConcatOpTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testHStack(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
|
||||
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
|
||||
c = array_ops.concat([p1, p2], 0)
|
||||
@ -54,7 +54,7 @@ class ConcatOpTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testVStack(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
|
||||
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
|
||||
c = array_ops.concat([p1, p2], 1)
|
||||
@ -70,7 +70,7 @@ class ConcatOpTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def test4DStack(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
p1 = array_ops.placeholder(dtypes.float32, shape=[2, 3, 1, 1])
|
||||
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 3, 4, 1])
|
||||
c = array_ops.concat([p1, p2], 2)
|
||||
@ -121,7 +121,7 @@ class ConcatOpTest(test.TestCase):
|
||||
dtype_feed = dtypes.float32
|
||||
else:
|
||||
dtype_feed = dtype
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
p = []
|
||||
for i in np.arange(num_tensors):
|
||||
input_shape = shape
|
||||
@ -315,7 +315,7 @@ class ConcatOpTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testGradientWithUnknownInputDim(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
x = array_ops.placeholder(dtypes.float32)
|
||||
y = array_ops.placeholder(dtypes.float32)
|
||||
c = array_ops.concat([x, y], 2)
|
||||
@ -526,7 +526,7 @@ class ConcatOpTest(test.TestCase):
|
||||
# shared memory is not large for all the inputs
|
||||
@test_util.run_deprecated_v1
|
||||
def testConcatLargeNumberOfTensors(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for concat_dim in range(2):
|
||||
params = {}
|
||||
p = []
|
||||
|
@ -54,7 +54,7 @@ class ConstantTest(test.TestCase):
|
||||
|
||||
def _testGpu(self, x):
|
||||
np_ans = np.array(x)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tf_ans = ops.convert_to_tensor(x).eval()
|
||||
dtype = dtypes_lib.as_dtype(np_ans.dtype)
|
||||
if dtype.is_floating or dtype.is_complex:
|
||||
|
@ -557,7 +557,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
|
||||
|
||||
@test_util.run_v1_only("b/120545219")
|
||||
def testCondColocation(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
with ops.device("/cpu:0"):
|
||||
v = variables.Variable(7.0)
|
||||
|
||||
@ -1224,7 +1224,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
|
||||
def testCondGradMultiDevice(self):
|
||||
config = config_pb2.ConfigProto(device_count={"CPU": 2},
|
||||
allow_soft_placement=True)
|
||||
with self.cached_session(use_gpu=True, config=config) as sess:
|
||||
with self.cached_session(config=config) as sess:
|
||||
pred = array_ops.placeholder(dtypes.bool, [])
|
||||
x = array_ops.placeholder(dtypes.float32)
|
||||
y = array_ops.placeholder(dtypes.float32)
|
||||
@ -2621,7 +2621,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
|
||||
def testWhileCondGradMultiDevice(self):
|
||||
config = config_pb2.ConfigProto(device_count={"CPU": 2},
|
||||
allow_soft_placement=True)
|
||||
with self.cached_session(use_gpu=True, config=config) as sess:
|
||||
with self.cached_session(config=config) as sess:
|
||||
pred = array_ops.placeholder(dtypes.bool, [])
|
||||
x_init = constant_op.constant(1.0)
|
||||
|
||||
@ -4911,7 +4911,7 @@ class AssertTest(test.TestCase):
|
||||
if test_util.is_gpu_available():
|
||||
self.skipTest("b/128646478 fails in opensource")
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
with ops.device(test.gpu_device_name()):
|
||||
value = constant_op.constant(1.0)
|
||||
with ops.device("/cpu:0"):
|
||||
|
@ -153,7 +153,7 @@ class Conv1DTransposeTest(test.TestCase):
|
||||
def testConv1DTransposeSingleStrideNCW(self):
|
||||
# `NCW` data format is only supported for CUDA device.
|
||||
if test.is_gpu_available(cuda_only=True):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
strides = [1, 1, 1]
|
||||
|
||||
# Input, output: [batch, depth, width]
|
||||
@ -184,7 +184,7 @@ class Conv1DTransposeTest(test.TestCase):
|
||||
def testConv1DTransposeSameNCW(self):
|
||||
# `NCW` data format is only supported for CUDA device.
|
||||
if test.is_gpu_available(cuda_only=True):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
strides = [1, 1, 2]
|
||||
|
||||
# Input, output: [batch, depth, width]
|
||||
@ -216,7 +216,7 @@ class Conv1DTransposeTest(test.TestCase):
|
||||
def testConv1DTransposeValidNCW(self):
|
||||
# `NCW` data format is only supported for CUDA device.
|
||||
if test.is_gpu_available(cuda_only=True):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
strides = [1, 1, 2]
|
||||
|
||||
# Input, output: [batch, depth, width]
|
||||
|
@ -77,7 +77,7 @@ class Conv2DBackpropFilterGradTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testGradientDilatedConv(self):
|
||||
if test.is_gpu_available(cuda_only=True):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for padding in [
|
||||
"SAME",
|
||||
"VALID",
|
||||
|
@ -186,7 +186,7 @@ class Conv2DTransposeTest(test.TestCase):
|
||||
def testConv2DTransposeSingleStrideNCHW(self):
|
||||
# `NCHW` data format is only supported for CUDA device.
|
||||
if test.is_gpu_available(cuda_only=True):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
strides = [1, 1, 1, 1]
|
||||
|
||||
# Input, output: [batch, depth, height, width, depth]
|
||||
@ -221,7 +221,7 @@ class Conv2DTransposeTest(test.TestCase):
|
||||
def testConv2DTransposeSameNCHW(self):
|
||||
# `NCHW` data format is only supported for CUDA device.
|
||||
if test.is_gpu_available(cuda_only=True):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
strides = [1, 1, 2, 2]
|
||||
|
||||
# Input, output: [batch, depth, height, width]
|
||||
@ -257,7 +257,7 @@ class Conv2DTransposeTest(test.TestCase):
|
||||
def testConv2DTransposeValidNCHW(self):
|
||||
# `NCHW` data format is only supported for CUDA device.
|
||||
if test.is_gpu_available(cuda_only=True):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
strides = [1, 1, 2, 2]
|
||||
|
||||
# Input, output: [batch, depth, height, width]
|
||||
|
@ -2787,7 +2787,7 @@ class SeparableConv2DTest(test.TestCase):
|
||||
expected: An array containing the expected operation outputs.
|
||||
data_format: string data format for input tensor.
|
||||
"""
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
t1 = self._InitValues(tensor_in_sizes)
|
||||
f1 = self._InitValues(depthwise_filter_in_sizes)
|
||||
f1.set_shape(depthwise_filter_in_sizes)
|
||||
@ -2899,7 +2899,7 @@ class SeparableConv2DTest(test.TestCase):
|
||||
depthwise_filter_in_sizes = [2, 2, 2, 3]
|
||||
pointwise_filter_in_sizes = [1, 1, 6, 7]
|
||||
padding = [[0, 0], [1, 2], [3, 4], [0, 0]]
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Compute the 'expected' values by manually padding before calling
|
||||
# separable_conv2d
|
||||
t1 = self._InitValues(tensor_in_sizes)
|
||||
|
@ -37,7 +37,7 @@ class DecodeImageOpTest(test.TestCase):
|
||||
def testBmp(self):
|
||||
# Read a real bmp and verify shape
|
||||
path = os.path.join(prefix_path, "bmp", "testdata", "lena.bmp")
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
bmp0 = io_ops.read_file(path)
|
||||
image0 = image_ops.decode_image(bmp0)
|
||||
image1 = image_ops.decode_bmp(bmp0)
|
||||
@ -53,7 +53,7 @@ class DecodeImageOpTest(test.TestCase):
|
||||
stride = 5
|
||||
shape = (12, height, width, 3)
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
gif0 = io_ops.read_file(path)
|
||||
image0 = image_ops.decode_image(gif0)
|
||||
image1 = image_ops.decode_gif(gif0)
|
||||
@ -82,7 +82,7 @@ class DecodeImageOpTest(test.TestCase):
|
||||
def testJpeg(self):
|
||||
# Read a real jpeg and verify shape
|
||||
path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
jpeg0 = io_ops.read_file(path)
|
||||
image0 = image_ops.decode_image(jpeg0)
|
||||
image1 = image_ops.decode_jpeg(jpeg0)
|
||||
@ -100,7 +100,7 @@ class DecodeImageOpTest(test.TestCase):
|
||||
inputs = [(1, "lena_gray.png")]
|
||||
for channels_in, filename in inputs:
|
||||
for channels in 0, 1, 3, 4:
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
path = os.path.join(prefix_path, "png", "testdata", filename)
|
||||
png0 = io_ops.read_file(path)
|
||||
image0 = image_ops.decode_image(png0, channels=channels)
|
||||
|
@ -56,7 +56,7 @@ class DepthToSpaceTest(test.TestCase):
|
||||
self.evaluate(output_nhwc)
|
||||
|
||||
if test.is_gpu_available():
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# test NHWC (default) on GPU
|
||||
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
|
||||
self.assertAllEqual(x_tf, outputs)
|
||||
@ -126,7 +126,7 @@ class DepthToSpaceTest(test.TestCase):
|
||||
self.assertAllEqual(x_tf.shape, x_out.shape)
|
||||
self.evaluate(x_tf)
|
||||
if test.is_gpu_available():
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# test NHWC (default) on GPU
|
||||
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
|
||||
self.assertAllEqual(x_tf.shape, x_out.shape)
|
||||
@ -343,7 +343,7 @@ class DepthToSpaceGradientTest(test.TestCase):
|
||||
return
|
||||
|
||||
assert 4 == x.ndim
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tf_x = ops.convert_to_tensor(x)
|
||||
tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)
|
||||
|
||||
|
@ -425,7 +425,7 @@ class DepthwiseConv2DTest(test.TestCase):
|
||||
# GitHub issue 22110.
|
||||
if not test.is_gpu_available():
|
||||
return
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
x = array_ops.placeholder(dtypes.float32)
|
||||
f = np.ones([1, 1, 1, 1], np.float32)
|
||||
v = nn_impl.depthwise_conv2d(
|
||||
|
@ -154,7 +154,7 @@ class DeterminantOpTest(test.TestCase):
|
||||
|
||||
@test_util.run_v1_only("b/120545219")
|
||||
def testConcurrentExecutesWithoutError(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
matrix1 = random_ops.random_normal([5, 5], seed=42)
|
||||
matrix2 = random_ops.random_normal([5, 5], seed=42)
|
||||
det1 = linalg_ops.matrix_determinant(matrix1)
|
||||
|
@ -374,7 +374,7 @@ class MatrixDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testVector(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = np.array([1.0, 2.0, 3.0])
|
||||
mat = np.diag(v)
|
||||
v_diag = array_ops.matrix_diag(v)
|
||||
@ -397,7 +397,7 @@ class MatrixDiagTest(test.TestCase):
|
||||
self.assertAllEqual(v_diags, solution[0])
|
||||
|
||||
def _testVectorBatch(self, dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
|
||||
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
|
||||
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
|
||||
@ -441,7 +441,7 @@ class MatrixDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testRectangularBatch(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# Stores expected num_rows and num_cols (when the other is given).
|
||||
# expected[d_lower, d_upper] = (expected_num_rows, expected_num_cols)
|
||||
test_list = list()
|
||||
@ -542,7 +542,7 @@ class MatrixDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testInvalidShapeAtEval(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = array_ops.placeholder(dtype=dtypes_lib.float32)
|
||||
with self.assertRaisesOpError("diagonal must be at least 1-dim"):
|
||||
array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
|
||||
@ -550,7 +550,7 @@ class MatrixDiagTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testGrad(self):
|
||||
shapes = ((3,), (7, 4))
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for shape in shapes:
|
||||
x = constant_op.constant(np.random.rand(*shape), np.float32)
|
||||
y = array_ops.matrix_diag(x)
|
||||
@ -564,7 +564,7 @@ class MatrixDiagTest(test.TestCase):
|
||||
tests = dict() # tests[shape] = (d_lower, d_upper)
|
||||
tests[(3,)] = (-1, -1)
|
||||
tests[(7, 3, 4)] = (-1, 1)
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for shape, diags in tests.items():
|
||||
x = constant_op.constant(np.random.rand(*shape), np.float32)
|
||||
for align in alignment_list:
|
||||
@ -580,7 +580,7 @@ class MatrixSetDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSquare(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = np.array([1.0, 2.0, 3.0])
|
||||
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
|
||||
mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
|
||||
@ -603,7 +603,7 @@ class MatrixSetDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testRectangular(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = np.array([3.0, 4.0])
|
||||
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
|
||||
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
|
||||
@ -631,7 +631,7 @@ class MatrixSetDiagTest(test.TestCase):
|
||||
self.assertAllEqual(output, solution)
|
||||
|
||||
def _testSquareBatch(self, dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)
|
||||
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
|
||||
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],
|
||||
@ -668,7 +668,7 @@ class MatrixSetDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testRectangularBatch(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
|
||||
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
|
||||
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])
|
||||
@ -701,7 +701,7 @@ class MatrixSetDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testInvalidShapeAtEval(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = array_ops.placeholder(dtype=dtypes_lib.float32)
|
||||
with self.assertRaisesOpError("input must be at least 2-dim"):
|
||||
array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
|
||||
@ -717,7 +717,7 @@ class MatrixSetDiagTest(test.TestCase):
|
||||
})
|
||||
|
||||
def _testGrad(self, input_shape, diag_shape, diags, align):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
x = constant_op.constant(
|
||||
np.random.rand(*input_shape), dtype=dtypes_lib.float32)
|
||||
x_diag = constant_op.constant(
|
||||
@ -751,7 +751,7 @@ class MatrixSetDiagTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testGradWithNoShapeInformation(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
v = array_ops.placeholder(dtype=dtypes_lib.float32)
|
||||
mat = array_ops.placeholder(dtype=dtypes_lib.float32)
|
||||
grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
|
||||
@ -774,7 +774,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSquare(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = np.array([1.0, 2.0, 3.0])
|
||||
mat = np.diag(v)
|
||||
mat_diag = array_ops.matrix_diag_part(mat)
|
||||
@ -798,7 +798,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testRectangular(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
|
||||
mat_diag = array_ops.matrix_diag_part(mat)
|
||||
self.assertAllEqual(mat_diag, np.array([1.0, 5.0]))
|
||||
@ -817,7 +817,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
self.assertAllEqual(mat_diag, solution[0])
|
||||
|
||||
def _testSquareBatch(self, dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
|
||||
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
|
||||
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
|
||||
@ -853,7 +853,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testRectangularBatch(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
|
||||
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
|
||||
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
|
||||
@ -880,7 +880,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
matrix = array_ops.placeholder(dtypes_lib.int32, shape=[None, None])
|
||||
result = array_ops.matrix_diag_part(matrix, k=-1)
|
||||
input_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
result_eval = result.eval(feed_dict={matrix: input_matrix})
|
||||
self.assertAllEqual([4, 8], result_eval)
|
||||
|
||||
@ -891,7 +891,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testInvalidShapeAtEval(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
v = array_ops.placeholder(dtype=dtypes_lib.float32)
|
||||
with self.assertRaisesOpError("input must be at least 2-dim"):
|
||||
array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
|
||||
@ -899,7 +899,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testGrad(self):
|
||||
shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for shape in shapes:
|
||||
x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
|
||||
y = array_ops.matrix_diag_part(x)
|
||||
@ -913,7 +913,7 @@ class MatrixDiagPartTest(test.TestCase):
|
||||
tests = dict() # tests[shape] = (d_lower, d_upper)
|
||||
tests[(3, 3)] = (-1, -1)
|
||||
tests[(7, 3, 4)] = (-1, 1)
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for align in alignment_list:
|
||||
for shape, diags in tests.items():
|
||||
x = constant_op.constant(np.random.rand(*shape), np.float32)
|
||||
|
@ -39,7 +39,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSimpleOneDimensional(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
|
||||
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -60,7 +60,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSimpleTwoDimensional(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
|
||||
[12, 13, 14], [15, 16, 17]],
|
||||
dtype=dtypes.float32)
|
||||
@ -87,7 +87,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
indices_list = [x % 2 for x in range(num)]
|
||||
part1 = [x for x in range(num) if x % 2 == 0]
|
||||
part2 = [x for x in range(num) if x % 2 == 1]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -109,7 +109,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
parts = [[] for _ in range(num_partitions)]
|
||||
for i in range(rows):
|
||||
parts[(i ** 2) % num_partitions].append(data_list[i])
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -125,7 +125,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
def testSimpleComplex(self):
|
||||
data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
|
||||
indices_list = [1, 0, 1, 0]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.complex64)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -138,7 +138,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
|
||||
def testScalarPartitions(self):
|
||||
data_list = [10, 13, 12, 11]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float64)
|
||||
indices = 3
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -159,7 +159,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testHigherRank(self):
|
||||
np.random.seed(7)
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
for n in 2, 3:
|
||||
for shape in (4,), (4, 5), (4, 5, 2):
|
||||
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
|
||||
@ -184,7 +184,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
def testEmptyParts(self):
|
||||
data_list = [1, 2, 3, 4]
|
||||
indices_list = [1, 3, 1, 3]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -200,7 +200,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
def testEmptyDataTwoDimensional(self):
|
||||
data_list = [[], []]
|
||||
indices_list = [0, 1]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -216,7 +216,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
def testEmptyPartitions(self):
|
||||
data_list = []
|
||||
indices_list = []
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -237,7 +237,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
|
||||
data_list = [1, 2, 3, 4, 5, 6]
|
||||
indices_list = [6, 5, 4, 3, 1, 0]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -258,7 +258,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
|
||||
data_list = [1, 2, 3, 4, 5, 6]
|
||||
indices_list = [10, 11, 2, 12, 0, 1000]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
@ -282,7 +282,7 @@ class DynamicPartitionTest(test.TestCase):
|
||||
|
||||
data_list = [1.1, 2.1, 3.1, 4.1, 5.1, 6.1]
|
||||
indices_list = [90, 70, 60, 100, 110, 40]
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
data = constant_op.constant(data_list, dtype=dtypes.float32)
|
||||
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
|
@ -55,7 +55,7 @@ class EigTest(test.TestCase):
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
all_ops = []
with self.session(use_gpu=True) as sess:
with self.session() as sess:
for compute_v_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
@ -84,7 +84,7 @@ class EigTest(test.TestCase):
"self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
self.assertEqual(matrix.shape, (32, 32))
matrix_tensor = constant_op.constant(matrix)
with self.session(use_gpu=True) as _:
with self.session() as _:
(e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
self.assertEqual(e.size, 32)
self.assertAllClose(
@ -166,7 +166,7 @@ def _GetEigTest(dtype_, shape_, compute_v_):

a = RandomInput()
np_e, np_v = np.linalg.eig(a)
with self.session(use_gpu=True):
with self.session():
if compute_v_:
tf_e, tf_v = linalg_ops.eig(constant_op.constant(a))

@ -222,7 +222,7 @@ def _GetEigGradTest(dtype_, shape_, compute_v_):
tol = 1e-2
else:
tol = 1e-7
with self.session(use_gpu=True):
with self.session():

def Compute(x):
e, v = linalg_ops.eig(x)
@ -1048,7 +1048,7 @@ class DynamicStitchOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testCint32Gpu(self):
with self.session(use_gpu=True):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
@ -1076,7 +1076,7 @@ class DynamicStitchOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testInt32Gpu(self):
with self.session(use_gpu=True):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
@ -340,7 +340,7 @@ class FunctionalOpsTest(test.TestCase):
lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
with self.test_session(use_gpu=True) as sess:
with self.test_session() as sess:
self.evaluate(variables.global_variables_initializer())
self.evaluate(grad)

@ -933,7 +933,7 @@ class FunctionalOpsTest(test.TestCase):
def ReturnsTooManyArgs(unused_i, v):
return v, v

with self.test_session(use_gpu=True):
with self.test_session():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must be a scalar"):
functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
@ -39,7 +39,7 @@ from tensorflow.python.platform import test
class GatherNdTest(test.TestCase):

def _testSimpleDtype(self, dtype):
with self.cached_session(use_gpu=True):
with self.cached_session():
params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
@ -60,7 +60,7 @@ class GatherNdTest(test.TestCase):
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
with self.session(use_gpu=True):
with self.session():
params = np.ones((3, 3), dtype=np.float32)

indices_empty = np.empty((0, 2), dtype=np.int32)
@ -91,7 +91,7 @@ class GatherNdTest(test.TestCase):
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)

def testIndexScalar(self):
with self.session(use_gpu=True):
with self.session():
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4, 1])
@ -101,7 +101,7 @@ class GatherNdTest(test.TestCase):
self.assertAllEqual(np.array(7), gather_nd_val)

def testParamsRankLargerThanIndexIndexScalarSlices(self):
with self.session(use_gpu=True):
with self.session():
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4])
@ -111,7 +111,7 @@ class GatherNdTest(test.TestCase):
self.assertAllEqual(np.array([-7, 7]), gather_nd_val)

def testParamsRankLargerThanIndexSlices(self):
with self.session(use_gpu=True):
with self.session():
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([[4], [4], [0]])
@ -122,7 +122,7 @@ class GatherNdTest(test.TestCase):
self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)

def testHigherRankParamsLargerThanIndexSlices(self):
with self.session(use_gpu=True):
with self.session():
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
@ -136,7 +136,7 @@ class GatherNdTest(test.TestCase):
self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)

def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
with self.session(use_gpu=True):
with self.session():
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
@ -153,7 +153,7 @@ class GatherNdTest(test.TestCase):
gather_nd_val)

def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
with self.session(use_gpu=True):
with self.session():
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
@ -168,7 +168,7 @@ class GatherNdTest(test.TestCase):
gather_nd_val)

def testHigherRankParams(self):
with self.session(use_gpu=True):
with self.session():
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
@ -180,7 +180,7 @@ class GatherNdTest(test.TestCase):
self.assertEqual([2000], gather_nd_t.get_shape())

def testHigherRankParamsAndIndices(self):
with self.session(use_gpu=True):
with self.session():
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
@ -220,7 +220,7 @@ class GatherNdTest(test.TestCase):
# On GPU the bad indices do not raise error but fetch 0 values
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
with self.session():
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
@ -244,7 +244,7 @@ class GatherNdTest(test.TestCase):
# On GPU the bad indices do not raise error but fetch 0 values
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
with self.session():
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
@ -261,7 +261,7 @@ class GatherNdTest(test.TestCase):
grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
with self.session(use_gpu=True):
with self.session():
assert np.array_equal(expected_grads, self.evaluate(grads))

@test_util.run_deprecated_v1
@ -273,7 +273,7 @@ class GatherNdTest(test.TestCase):
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
with self.session(use_gpu=True):
with self.session():
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))

@ -290,7 +290,7 @@ class GatherNdTest(test.TestCase):
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(expected_grads, self.evaluate(grads))

@test_util.run_deprecated_v1
@ -320,7 +320,7 @@ class GatherNdTest(test.TestCase):
[[[[5, 6], [1, 2]]]],
[[[[3, 4], [7, 8]]]]
]]], dtype=np.float64)
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(expected_grads, self.evaluate(grads))

@test_util.run_deprecated_v1
@ -336,7 +336,7 @@ class GatherNdTest(test.TestCase):
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(expected_grads, self.evaluate(grads))

@test_util.run_deprecated_v1
@ -358,7 +358,7 @@ class GatherNdTest(test.TestCase):
[1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=np.float64)
with self.session(use_gpu=True):
with self.session():
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
@ -29,7 +29,7 @@ class InTopKTest(test.TestCase):

def _validateInTopK(self, predictions, target, k, expected):
np_ans = np.array(expected, np.bool)
with self.cached_session(use_gpu=True):
with self.cached_session():
precision = nn_ops.in_top_k(predictions, target, k)
out = self.evaluate(precision)
self.assertAllClose(np_ans, out)
@ -102,7 +102,7 @@ def _init_sampler(tc, init, num):
"""

def func():
with tc.test_session(use_gpu=True):
with tc.test_session():
return init([num]).eval()

return func
@ -112,7 +112,7 @@ class ConstantInitializersTest(test.TestCase):

@test_util.run_deprecated_v1
def testZerosInitializer(self):
with self.session(use_gpu=True):
with self.session():
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.zeros_initializer())
@ -121,7 +121,7 @@ class ConstantInitializersTest(test.TestCase):

@test_util.run_deprecated_v1
def testOnesInitializer(self):
with self.session(use_gpu=True):
with self.session():
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.ones_initializer())
@ -130,7 +130,7 @@ class ConstantInitializersTest(test.TestCase):

@test_util.run_deprecated_v1
def testConstantZeroInitializer(self):
with self.session(use_gpu=True):
with self.session():
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(0.0))
@ -139,7 +139,7 @@ class ConstantInitializersTest(test.TestCase):

@test_util.run_deprecated_v1
def testConstantOneInitializer(self):
with self.session(use_gpu=True):
with self.session():
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(1.0))
@ -148,7 +148,7 @@ class ConstantInitializersTest(test.TestCase):

@test_util.run_deprecated_v1
def testConstantIntInitializer(self):
with self.session(use_gpu=True):
with self.session():
shape = [2, 3]
x = variable_scope.get_variable(
"x",
@ -161,7 +161,7 @@ class ConstantInitializersTest(test.TestCase):

@test_util.run_deprecated_v1
def testConstantTupleInitializer(self):
with self.session(use_gpu=True):
with self.session():
shape = [3]
x = variable_scope.get_variable(
"x",
@ -173,7 +173,7 @@ class ConstantInitializersTest(test.TestCase):
self.assertAllEqual(x, [10, 20, 30])

def _testNDimConstantInitializer(self, name, value, shape, expected):
with self.cached_session(use_gpu=True):
with self.cached_session():
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
x = variable_scope.get_variable(name, shape=shape, initializer=init)
self.evaluate(x.initializer)
@ -198,7 +198,7 @@ class ConstantInitializersTest(test.TestCase):

def _testNDimConstantInitializerLessValues(self, name, value, shape,
expected):
with self.cached_session(use_gpu=True):
with self.cached_session():
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
x = variable_scope.get_variable(name, shape=shape, initializer=init)
self.evaluate(x.initializer)
@ -225,7 +225,7 @@ class ConstantInitializersTest(test.TestCase):

def _testNDimConstantInitializerMoreValues(self, value, shape):
ops.reset_default_graph()
with self.cached_session(use_gpu=True):
with self.cached_session():
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
self.assertRaises(
ValueError,
@ -398,7 +398,7 @@ class VarianceScalingInitializationTest(test.TestCase):
init = init_ops.variance_scaling_initializer(
distribution="truncated_normal")

with self.session(use_gpu=True), \
with self.session(), \
test.mock.patch.object(
random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
as mock_truncated_normal:
@ -415,7 +415,7 @@ class VarianceScalingInitializationTest(test.TestCase):
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(distribution="normal")

with self.session(use_gpu=True), \
with self.session(), \
test.mock.patch.object(
random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
as mock_truncated_normal:
@ -433,7 +433,7 @@ class VarianceScalingInitializationTest(test.TestCase):
init = init_ops.variance_scaling_initializer(
distribution="untruncated_normal")

with self.session(use_gpu=True), \
with self.session(), \
test.mock.patch.object(
random_ops, "random_normal", wraps=random_ops.random_normal) \
as mock_random_normal:
@ -450,7 +450,7 @@ class VarianceScalingInitializationTest(test.TestCase):
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(distribution="uniform")

with self.session(use_gpu=True):
with self.session():
x = init(shape).eval()

self.assertNear(np.mean(x), expect_mean, err=1e-2)
@ -461,7 +461,7 @@ class VarianceScalingInitializationTest(test.TestCase):
class RangeTest(test.TestCase):

def _Range(self, start, limit, delta):
with self.cached_session(use_gpu=True):
with self.cached_session():
tf_ans = math_ops.range(start, limit, delta, name="range")
self.assertEqual([len(np.arange(start, limit, delta))],
tf_ans.get_shape())
@ -481,7 +481,7 @@ class RangeTest(test.TestCase):

@test_util.run_deprecated_v1
def testLimitOnly(self):
with self.session(use_gpu=True):
with self.session():
self.assertAllEqual(np.arange(5), math_ops.range(5))

def testEmpty(self):
@ -910,7 +910,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
with self.session() as sess:
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
@ -925,7 +925,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
shape = [3, 3, 10, 10]
count = 70
tol = 1e-5
with self.session(use_gpu=True):
with self.session():
for i in range(count):
x = variable_scope.get_variable(
"{}".format(i),
@ -996,7 +996,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
shape = [3, 10, 10]
count = 70
tol = 1e-5
with self.session(use_gpu=True):
with self.session():
for i in range(count):
x = variable_scope.get_variable(
"{}".format(i),
@ -1063,7 +1063,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
with self.session() as sess:
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
@ -1167,7 +1167,7 @@ class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
with self.session() as sess:
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
@ -1227,7 +1227,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
shape = [3, 3, 3, 5, 5]
count = 20
tol = 1e-5
with self.session(use_gpu=True):
with self.session():
for i in range(count):
x = variable_scope.get_variable(
"{}".format(i),
@ -1302,7 +1302,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
@ -78,7 +78,7 @@ class CSRSparseMatrixDenseMatMulGradTest(test.TestCase):
b_mats_val = np.transpose(b_mats_val, (0, 2, 1))
if adjoint_b:
b_mats_val = np.conj(b_mats_val)
with self.test_session(use_gpu=True):
with self.test_session():
a_mats = ops.convert_to_tensor(a_mats_val, dtype=datatype)
b_mats = ops.convert_to_tensor(b_mats_val, dtype=datatype)
a_sm = dense_to_csr_sparse_matrix(a_mats)
@ -64,7 +64,7 @@ class CSRSparseMatrixGradTest(test.TestCase):
sparsify = lambda m: m * (m > 0)
for dense_shape in ([53, 65, 127], [127, 65]):
mats_val = sparsify(np.random.randn(*dense_shape))
with self.test_session(use_gpu=True) as sess:
with self.test_session() as sess:
mats = math_ops.cast(mats_val, dtype=dtypes.float32)
sparse_mats = dense_to_csr_sparse_matrix(mats)
dense_mats = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
@ -96,7 +96,7 @@ class CSRSparseMatrixGradTest(test.TestCase):
grad_vals = np.random.randn(*dense_shape).astype(np.float32)
expected_a_grad = alpha * grad_vals
expected_b_grad = beta * grad_vals
with self.test_session(use_gpu=True) as sess:
with self.test_session() as sess:
a_mats = math_ops.cast(a_mats_val, dtype=dtypes.float32)
b_mats = math_ops.cast(b_mats_val, dtype=dtypes.float32)
a_sm = dense_to_csr_sparse_matrix(a_mats)
@ -79,7 +79,7 @@ class CSRSparseMatrixGradTest(test.TestCase):
b_mats_val = np.transpose(b_mats_val, (0, 2, 1))
if adjoint_b:
b_mats_val = np.conj(b_mats_val)
with self.test_session(use_gpu=True):
with self.test_session():
a_mats = ops.convert_to_tensor(a_mats_val, dtype=datatype)
b_mats = ops.convert_to_tensor(b_mats_val, dtype=datatype)
a_sm = dense_to_csr_sparse_matrix(a_mats)
@ -59,7 +59,7 @@ class CholeskySolveTest(test.TestCase):
def test_works_with_five_different_random_pos_def_matrices(self):
for n in range(1, 6):
for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
with self.session(use_gpu=True):
with self.session():
# Create 2 x n x n matrix
array = np.array(
[_RandomPDMatrix(n, self.rng),
@ -85,7 +85,7 @@ class LogdetTest(test.TestCase):
with self.subTest(n=n, np_dtype=np_dtype, atol=atol):
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
with self.session():
# Create 2 x n x n matrix
# matrix = np.array(
# [_RandomPDMatrix(n, self.rng, np_dtype),
@ -99,7 +99,7 @@ class LogdetTest(test.TestCase):
with self.subTest(np_dtype=np_dtype, atol=atol):
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
with self.session():
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)

@ -117,7 +117,7 @@ class SlogdetTest(test.TestCase):
with self.subTest(n=n, np_dtype=np_dtype, atol=atol):
matrix = _RandomPDMatrix(n, self.rng, np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
with self.session():
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
@ -129,7 +129,7 @@ class SlogdetTest(test.TestCase):
with self.subTest(np_dtype=np_dtype, atol=atol):
matrix = (np.eye(20) * 1e-6).astype(np_dtype)
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
with self.session():
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
@ -259,7 +259,7 @@ class EyeTest(parameterized.TestCase, test.TestCase):
num_columns=num_columns_placeholder,
batch_shape=batch_shape_placeholder,
dtype=dtype)
with self.session(use_gpu=True) as sess:
with self.session() as sess:
eye_tf = sess.run(
eye,
feed_dict={
@ -55,7 +55,7 @@ class LRNOpTest(test.TestCase):
return output

def _RunAndVerify(self, dtype):
with self.cached_session(use_gpu=True):
with self.cached_session():
# random shape
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
@ -103,7 +103,7 @@ class LRNOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testGradientsZeroInput(self):
with self.session(use_gpu=True):
with self.session():
shape = [4, 4, 4, 4]
p = array_ops.placeholder(dtypes.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
@ -116,7 +116,7 @@ class LRNOpTest(test.TestCase):
self.assertShapeEqual(expected, grad)

def _RunAndVerifyGradients(self, dtype):
with self.cached_session(use_gpu=True):
with self.cached_session():
# random shape
shape = np.random.randint(1, 5, size=4)
# Make depth at least 2 to make it meaningful
@ -42,12 +42,12 @@ class RollTest(test_util.TensorFlowTestCase):

def _testRoll(self, np_input, shift, axis):
expected_roll = np.roll(np_input, shift, axis)
with self.cached_session(use_gpu=True):
with self.cached_session():
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll, expected_roll)

def _testGradient(self, np_input, shift, axis):
with self.cached_session(use_gpu=True):
with self.cached_session():
inx = constant_op.constant(np_input.tolist())
xs = list(np_input.shape)
y = manip_ops.roll(inx, shift, axis)
@ -98,7 +98,7 @@ class RollTest(test_util.TensorFlowTestCase):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
# Make sure negative axis should be 0 <= axis + dims < dims
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
@ -122,7 +122,7 @@ class RollTest(test_util.TensorFlowTestCase):
tensor = array_ops.placeholder(dtype=dtypes.int32)
shift = 1
axis = 0
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"input must be 1-D or higher"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})
@ -140,7 +140,7 @@ class RollTest(test_util.TensorFlowTestCase):
tensor = [[1, 2], [3, 4]]
shift = 1
axis = array_ops.placeholder(dtype=dtypes.int32)
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"axis must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})
@ -158,7 +158,7 @@ class RollTest(test_util.TensorFlowTestCase):
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = 1
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"shift must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})
@ -175,7 +175,7 @@ class RollTest(test_util.TensorFlowTestCase):
tensor = [[1, 2], [3, 4]]
shift = array_ops.placeholder(dtype=dtypes.int32)
axis = [0, 1]
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"shift and axis must have the same size"):
manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})
@ -184,7 +184,7 @@ class RollTest(test_util.TensorFlowTestCase):
tensor = [1, 2]
shift = 1
axis = 1
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(tensor, shift, axis).eval()
@ -46,7 +46,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
@ -68,7 +68,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
@ -96,7 +96,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
@ -146,7 +146,7 @@ class MapStageTest(test.TestCase):

n = 10

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})

@ -174,7 +174,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
sess.run(stage, feed_dict={x: -1, pi: 3})
self.assertEqual(sess.run(size), 1)
sess.run(stage, feed_dict={x: -1, pi: 1})
@ -209,7 +209,7 @@ class MapStageTest(test.TestCase):
queue = Queue.Queue()
n = 8

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
@ -273,7 +273,7 @@ class MapStageTest(test.TestCase):
queue = Queue.Queue()
n = 8

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
@ -334,7 +334,7 @@ class MapStageTest(test.TestCase):

n = 10

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
# Keys n-1..0
keys = list(reversed(six.moves.range(n)))

@ -372,7 +372,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
# 0 complete and incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
@ -430,7 +430,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
# 0 complete and incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
@ -482,7 +482,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
# 0 complete and incomplete entries
self.assertTrue(sess.run([size, isize]) == [0, 0])
# Stage key 0, x and f tuple entries
@ -574,7 +574,7 @@ class MapStageTest(test.TestCase):

G.finalize()

with self.session(use_gpu=True, graph=G) as sess:
with self.session(graph=G) as sess:
# Stage complete tuple
sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3})
@ -149,7 +149,7 @@ class ExponentialOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testDynamic(self):
with self.session(use_gpu=True) as sess:
with self.session() as sess:
inp = array_ops.placeholder(ops.dtypes.float32)
expm = linalg_impl.matrix_exponential(inp)
matrix = np.array([[1., 2.], [3., 4.]])
@ -157,7 +157,7 @@ class ExponentialOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
with self.session() as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
expm1 = linalg_impl.matrix_exponential(matrix1)
@ -37,7 +37,7 @@ class InverseOpTest(test.TestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.cached_session(use_gpu=True):
with self.cached_session():
# Verify that x^{-1} * x == Identity matrix.
inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
tf_ans = test_util.matmul_without_tf32(inv, y, adjoint_b=adjoint)
@ -139,7 +139,7 @@ class InverseOpTest(test.TestCase):

@test_util.deprecated_graph_mode_only
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
with self.session() as sess:
all_ops = []
for adjoint_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
@ -124,7 +124,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
feed_dict = None
self.assertEqual(np_ans.shape, tf_ans.get_shape())
if feed_dict:
with self.session(use_gpu=True) as sess:
with self.session() as sess:
tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict)
else:
tf_ans_val = self.evaluate(tf_ans)
@ -137,7 +137,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
tf_r = math_ops.matmul(a, tf_r, adjoint_a=True)
tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1])
if feed_dict:
with self.session(use_gpu=True) as sess:
with self.session() as sess:
tf_ans_val, tf_r_norm_val = sess.run([tf_ans, tf_r_norm],
feed_dict=feed_dict)
else:
@ -147,7 +147,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.session(use_gpu=True):
with self.session():
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
@ -63,7 +63,7 @@ class MatrixSolveOpTest(test.TestCase):
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
@ -195,7 +195,7 @@ class MatrixTriangularSolveOpTest(test.TestCase):
def testNonSquareMatrix(self):
# A non-square matrix should cause an error.
matrix = np.array([[1., 2., 3.], [3., 4., 5.]])
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, matrix)
with self.assertRaises(ValueError):
@ -207,7 +207,7 @@ class MatrixTriangularSolveOpTest(test.TestCase):
# right-hand sides.
matrix = np.array([[1., 0.], [0., 1.]])
rhs = np.array([[1., 0.]])
with self.cached_session(use_gpu=True):
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
@ -68,7 +68,7 @@ def _GetNormOpTest(dtype_, shape_, ord_, axis_, keep_dims_, use_static_shape_):

def _CompareNorm(self, matrix):
np_norm = np.linalg.norm(matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
if use_static_shape_:
tf_matrix = constant_op.constant(matrix)
tf_norm = linalg_ops.norm(
@ -372,7 +372,7 @@ class PadOpTest(test.TestCase):
for dtype in [dtypes.int32, dtypes.int64]:
paddings = np.zeros((0, 2))
inp = np.asarray(7)
with self.cached_session(use_gpu=True):
with self.cached_session():
tf_val = array_ops.pad(inp, constant_op.constant(paddings, dtype=dtype))
out = self.evaluate(tf_val)
self.assertAllEqual(inp, out)
@ -397,7 +397,7 @@ class PadOpTest(test.TestCase):
padded,
[paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)],
[-1, -1, -1, -1])
with self.cached_session(use_gpu=True):
with self.cached_session():
self.assertAllEqual(inp, self.evaluate(middle))
self.assertAllEqual(
np.zeros([row[0] for row in paddings_value]), self.evaluate(left))
@ -248,7 +248,7 @@ class PoolingTest(test.TestCase):
def testPoolNC(self):
if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.session(use_gpu=True):
with self.session():
for padding in ["SAME", "VALID"]:
self._test(
input_shape=[2, 2, 9],
@ -906,7 +906,7 @@ class PoolingTest(test.TestCase):
self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if test.is_gpu_available():
with self.session(use_gpu=True):
with self.session():
t = variables.Variable(np.ones([1, 2, 2, 4]))
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesOpError("for CPU devices"):
@ -922,7 +922,7 @@ class PoolingTest(test.TestCase):
for dtype in [np.float32, np.float16] \
+ [np.float64] if not test.is_built_with_rocm() else []:
tensor_input = np.random.rand(*input_shape).astype(dtype)
with self.cached_session(use_gpu=True):
with self.cached_session():
t = constant_op.constant(tensor_input, shape=input_shape)
out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = self.evaluate(out_op)
@ -942,7 +942,7 @@ class PoolingTest(test.TestCase):
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
tensor_output = np.random.rand(*output_shape).astype(dtype)
with self.cached_session(use_gpu=True):
with self.cached_session():
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = self.evaluate(argmax_op)
@ -755,7 +755,7 @@ class EagerPyFuncTest(PyFuncTestBase):
y = script_ops.eager_py_func(func=f, inp=[x], Tout=dtypes.float32)
z = script_ops.eager_py_func(func=g, inp=[y], Tout=dtypes.float32)

with self.session(use_gpu=True) as sess:
with self.session() as sess:
output = sess.run(z, feed_dict={x: 3.0})
self.assertEqual(output, 18.0)
@ -145,7 +145,7 @@ def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):
if use_static_shape_:
q_tf_val, r_tf_val = self.evaluate([q_tf, r_tf])
else:
with self.session(use_gpu=True) as sess:
with self.session() as sess:
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})

q_dims = q_tf_val.shape
@ -34,7 +34,7 @@ class MultinomialTest(test.TestCase):
def testLargeDynamicRange(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session(use_gpu=True) as sess:
with self.test_session() as sess:
samples = random_ops.multinomial(
constant_op.constant([[-30, 0]], dtype=dtypes.float32),
num_samples=1000000,
@ -52,7 +52,7 @@ class MultinomialTest(test.TestCase):
def testLargeDynamicRange2(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session(use_gpu=True) as sess:
with self.test_session() as sess:
samples = random_ops.multinomial(
constant_op.constant([[0, -30]], dtype=dtypes.float32),
num_samples=1000000,
@ -72,7 +72,7 @@ class MultinomialTest(test.TestCase):
random_seed.set_random_seed(10)
counts_by_indices = {}
# here the cpu undersamples and won't pass this test either
with self.test_session(use_gpu=True) as sess:
with self.test_session() as sess:
samples = random_ops.multinomial(
constant_op.constant([[0, -17]], dtype=dtypes.float32),
num_samples=1000000,
@ -129,7 +129,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
# TruncatedNormalMoments requires scipy.stats.
# Give up early if we are unable to import it.
random_seed.set_random_seed(seed)
with self.cached_session(use_gpu=True):
with self.cached_session():
if use_stateless:
# Generate a seed that stateless ops can use.
new_seed = random_ops.random_uniform([2],
@ -163,7 +163,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
try:
import scipy.stats # pylint: disable=g-import-not-at-top
random_seed.set_random_seed(seed)
with self.cached_session(use_gpu=True):
with self.cached_session():
if use_stateless:
new_seed = random_ops.random_uniform([2],
seed=seed,
@ -298,7 +298,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
minvals=-1.,
maxvals=1.)

with self.session(use_gpu=True) as sess:
with self.session() as sess:
samples, samples_stateless = sess.run([sample_op, sample_op_stateless])
# 0. is more than 16 standard deviations from the mean, and
# should have a likelihood < 1e-57.
@ -313,7 +313,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
minval = variables.Variable(-1.)
maxval = variables.Variable(1.)

with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
with backprop.GradientTape(persistent=True) as tape:
samples = stateless.stateless_parameterized_truncated_normal(
[1], [1, 2], mean, stddev, minval, maxval)
@ -230,7 +230,7 @@ class TruncatedNormalTest(test.TestCase):

@test_util.run_deprecated_v1
def testLargeShape(self):
with self.session(use_gpu=True):
with self.session():
v = variables.Variable(
array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1]))
n = random_ops.truncated_normal(v.shape)
@ -238,7 +238,7 @@ class TruncatedNormalTest(test.TestCase):

@test_util.run_deprecated_v1
def testNoCSE(self):
with self.session(use_gpu=True):
with self.session():
shape = [2, 3, 4]
rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
@ -371,7 +371,7 @@ class RandomUniformTest(RandomOpTestCommon):
def testNoCSE(self):
shape = [2, 3, 4]
for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
with self.session(use_gpu=True):
with self.session():
rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
diff = (rnd2 - rnd1).eval()
@ -104,7 +104,7 @@ class RandomPoissonTest(test.TestCase):
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
with self.cached_session(use_gpu=True):
with self.cached_session():
rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype)
rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype)
diff = rnd2 - rnd1
@ -240,7 +240,7 @@ class StatelessOpsTest(test.TestCase, parameterized.TestCase):
def _test_determinism(self, case, seed_type):
# Stateless values should be equal iff the seeds are equal (roughly)
seeds = [(x, y) for x in range(5) for y in range(5)] * 3 # pylint: disable=g-complex-comprehension
with self.test_session(use_gpu=True), ops.device(get_device().name):
with self.test_session(), ops.device(get_device().name):
_, stateless_op, _ = case
if context.executing_eagerly():
values = [
@ -156,7 +156,7 @@ class BaseReductionTest(test.TestCase):

def _compare(self, x, reduction_axes, keepdims, feed_dict=None):
np_ans = self._np_reduce(x, reduction_axes, keepdims)
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
@ -178,7 +178,7 @@ class BaseReductionTest(test.TestCase):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)
with self.cached_session(use_gpu=True):
with self.cached_session():
t = ops.convert_to_tensor(x)
su = self._tf_reduce(t, reduction_axes, False)
jacob_t, jacob_n = gradient_checker.compute_gradient(
@ -208,7 +208,7 @@ class SumReductionTest(BaseReductionTest):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -403,7 +403,7 @@ class SumReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
@ -411,7 +411,7 @@ class SumReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
@ -446,7 +446,7 @@ class MeanReductionTest(BaseReductionTest):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -525,7 +525,7 @@ class MeanReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
@ -533,7 +533,7 @@ class MeanReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
@ -560,7 +560,7 @@ class EuclideanNormReductionTest(BaseReductionTest):
@test_util.run_deprecated_v1
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
with self.cached_session():
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -609,7 +609,7 @@ class EuclideanNormReductionTest(BaseReductionTest):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)

with self.session(use_gpu=True):
with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
@ -640,7 +640,7 @@ class ProdReductionTest(BaseReductionTest):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -711,7 +711,7 @@ class ProdReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
@ -719,7 +719,7 @@ class ProdReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
@ -750,7 +750,7 @@ class MinReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -866,7 +866,7 @@ class MaxReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -998,7 +998,7 @@ class AllReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
with self.session() as sess:
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
@ -1047,7 +1047,7 @@ class AnyReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
with self.session() as sess:
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
@ -223,7 +223,7 @@ class RNNTest(test.TestCase):
self.assertEqual(out.get_shape(), inp.get_shape())
self.assertEqual(out.dtype, inp.dtype)

with self.session(use_gpu=True) as sess:
with self.session() as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})

@ -260,7 +260,7 @@ class RNNTest(test.TestCase):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(out.dtype, inp.dtype)

with self.session(use_gpu=True) as sess:
with self.session() as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
full_dropout_values = sess.run(
@ -288,7 +288,7 @@ class RNNTest(test.TestCase):
cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
self.assertEqual(len(dynamic_outputs), len(inputs))

with self.session(use_gpu=True) as sess:
with self.session() as sess:
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(
dynamic_outputs,
@ -324,7 +324,7 @@ class RNNTest(test.TestCase):
1.0 * (2 + 1) * np.ones((input_size)))))

def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.session(use_gpu=True, graph=ops.Graph()):
with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
@ -388,7 +388,7 @@ class LSTMTest(test.TestCase):
input_size = 5
batch_size = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
@ -411,7 +411,7 @@ class LSTMTest(test.TestCase):
input_size = 5
batch_size = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
@ -442,7 +442,7 @@ class LSTMTest(test.TestCase):
input_size = 5
batch_size = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
@ -583,7 +583,7 @@ class LSTMTest(test.TestCase):
batch_size = 2
num_proj = 4
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
@ -681,7 +681,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)

@ -715,7 +715,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float64, shape=(None, input_size))
@ -752,7 +752,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
@ -809,7 +809,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
sequence_length = array_ops.placeholder(dtypes.int64)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
@ -1151,7 +1151,7 @@ class LSTMTest(test.TestCase):
state_is_tuple=False)

########### Step 1: Run static graph and generate readouts
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
@ -1211,7 +1211,7 @@ class LSTMTest(test.TestCase):
static_individual_variable_gradients, feed_dict=feeds)

########## Step 2: Run dynamic graph and generate readouts
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
@ -1372,7 +1372,7 @@ class BidirectionalRNNTest(test.TestCase):
return input_value, inputs, outputs, state_fw, state_bw, sequence_length

def _testBidirectionalRNN(self, use_shape):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalRNN(use_shape, True))
variables_lib.global_variables_initializer().run()
@ -1419,7 +1419,7 @@ class BidirectionalRNNTest(test.TestCase):
self.assertAllClose(s_fw, s_bw)

def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, _ = (
self._createBidirectionalRNN(use_shape, False))
variables_lib.global_variables_initializer().run()
@ -1504,7 +1504,7 @@ class BidirectionalRNNTest(test.TestCase):

def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
use_time_major, use_sequence_length):
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalDynamicRNN(
use_shape, use_state_tuple, use_time_major, use_sequence_length))
@ -1582,7 +1582,7 @@ class BidirectionalRNNTest(test.TestCase):
|
||||
# REMARKS: factory(scope) is a function accepting a scope
|
||||
# as an argument, such scope can be None, a string
|
||||
# or a VariableScope instance.
|
||||
with self.session(use_gpu=True, graph=ops.Graph()):
|
||||
with self.session(graph=ops.Graph()):
|
||||
if use_outer_scope:
|
||||
with variable_scope.variable_scope(prefix) as scope:
|
||||
factory(scope)
|
||||
@ -1905,7 +1905,7 @@ class StateSaverRNNTest(test.TestCase):
|
||||
batch_size = 2
|
||||
state_saver = TestStateSaver(batch_size, 2 * num_units)
|
||||
|
||||
with self.session(use_gpu=True, graph=ops.Graph()):
|
||||
with self.session(graph=ops.Graph()):
|
||||
if use_outer_scope:
|
||||
with variable_scope.variable_scope(prefix) as scope:
|
||||
self._factory(scope=scope, state_saver=state_saver)
|
||||
@ -1984,7 +1984,7 @@ class GRUTest(test.TestCase):
|
||||
|
||||
sequence_length = np.random.randint(0, time_steps, size=batch_size)
|
||||
|
||||
with self.session(use_gpu=True, graph=ops.Graph()) as sess:
|
||||
with self.session(graph=ops.Graph()) as sess:
|
||||
concat_inputs = array_ops.placeholder(
|
||||
dtypes.float32, shape=(time_steps, batch_size, input_size))
|
||||
|
||||
@ -2006,7 +2006,7 @@ class GRUTest(test.TestCase):
|
||||
sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
|
||||
|
||||
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
|
||||
with self.session(use_gpu=True, graph=ops.Graph()):
|
||||
with self.session(graph=ops.Graph()):
|
||||
if use_outer_scope:
|
||||
with variable_scope.variable_scope(prefix) as scope:
|
||||
factory(scope)
|
||||
@ -2298,7 +2298,7 @@ class RawRNNTest(test.TestCase):
|
||||
np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
|
||||
|
||||
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
|
||||
with self.session(use_gpu=True, graph=ops.Graph()):
|
||||
with self.session(graph=ops.Graph()):
|
||||
if use_outer_scope:
|
||||
with variable_scope.variable_scope(prefix) as scope:
|
||||
factory(scope)
|
||||
@ -2416,7 +2416,7 @@ class TensorArrayOnCorrectDeviceTest(test.TestCase):
|
||||
sequence_length=sequence_length,
|
||||
dtype=dtypes.float32)
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
|
||||
run_metadata = config_pb2.RunMetadata()
|
||||
variables_lib.global_variables_initializer().run()
|
||||
@ -2903,7 +2903,7 @@ class RNNCellTest(test.TestCase, parameterized.TestCase):
|
||||
return
|
||||
|
||||
gpu_dev = test.gpu_device_name()
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
with variable_scope.variable_scope(
|
||||
"root", initializer=init_ops.constant_initializer(0.5)):
|
||||
x = array_ops.zeros([1, 1, 3])
|
||||
|
@ -212,7 +212,7 @@ class RNNTest(test.TestCase):
|
||||
else:
|
||||
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
|
||||
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
outputs, state = rnn.dynamic_rnn(
|
||||
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
|
||||
if not in_eager_mode:
|
||||
@ -232,7 +232,7 @@ class RNNTest(test.TestCase):
|
||||
else:
|
||||
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
|
||||
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
outputs, state = rnn.dynamic_rnn(
|
||||
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
|
||||
if not in_eager_mode:
|
||||
@ -262,7 +262,7 @@ class RNNTest(test.TestCase):
|
||||
else:
|
||||
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
|
||||
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
outputs, state = rnn.dynamic_rnn(
|
||||
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
|
||||
state = (state[0], state[1].stack())
|
||||
|
@ -79,7 +79,7 @@ class CumsumTest(test.TestCase):

def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.cached_session(use_gpu=True):
with self.cached_session():
tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()

self.assertAllClose(np_out, tf_out)
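The comparison this helper performs, as a standalone sketch against NumPy (assumption: `handle_options` merely applies the `exclusive`/`reverse` options; the sketch uses the public `tf.math.cumsum` in eager mode):

```python
import numpy as np
import tensorflow as tf

x = np.arange(1, 6, dtype=np.float32)               # [1. 2. 3. 4. 5.]
np.testing.assert_allclose(np.cumsum(x), tf.math.cumsum(x).numpy())

# The exclusive/reverse variants the test also exercises.
print(tf.math.cumsum(x, exclusive=True).numpy())    # [ 0.  1.  3.  6. 10.]
print(tf.math.cumsum(x, reverse=True).numpy())      # [15. 14. 12.  9.  5.]
```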
@ -101,7 +101,7 @@ class CumsumTest(test.TestCase):
|
||||
for dtype in self.valid_dtypes:
|
||||
x = np.arange(1, 6).reshape([5]).astype(dtype)
|
||||
for axis_dtype in [dtypes.int64, dtypes.int32]:
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
axis = constant_op.constant(0, axis_dtype)
|
||||
tf_out = math_ops.cumsum(x, axis).eval()
|
||||
|
||||
@ -152,7 +152,7 @@ class CumsumTest(test.TestCase):
|
||||
def testInvalidAxis(self):
|
||||
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
|
||||
input_tensor = ops.convert_to_tensor(x)
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
with self.assertRaisesWithPredicateMatch(
|
||||
errors_impl.InvalidArgumentError,
|
||||
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
|
||||
@ -168,7 +168,7 @@ class CumsumTest(test.TestCase):
|
||||
|
||||
def _compareGradient(self, shape, axis, exclusive, reverse):
|
||||
x = np.arange(0, 50).reshape(shape).astype(np.float64)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
t = ops.convert_to_tensor(x)
|
||||
result = math_ops.cumsum(t, axis, exclusive, reverse)
|
||||
jacob_t, jacob_n = gradient_checker.compute_gradient(
|
||||
@ -212,7 +212,7 @@ class CumprodTest(test.TestCase):
|
||||
|
||||
def _compare(self, x, axis, exclusive, reverse):
|
||||
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()
|
||||
|
||||
self.assertAllClose(np_out, tf_out)
|
||||
@ -234,7 +234,7 @@ class CumprodTest(test.TestCase):
|
||||
for dtype in self.valid_dtypes:
|
||||
x = np.arange(1, 6).reshape([5]).astype(dtype)
|
||||
for axis_dtype in [dtypes.int64, dtypes.int32]:
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
axis = constant_op.constant(0, axis_dtype)
|
||||
tf_out = math_ops.cumprod(x, axis).eval()
|
||||
|
||||
@ -278,7 +278,7 @@ class CumprodTest(test.TestCase):
|
||||
def testInvalidAxis(self):
|
||||
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
|
||||
input_tensor = ops.convert_to_tensor(x)
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
with self.assertRaisesWithPredicateMatch(
|
||||
errors_impl.InvalidArgumentError,
|
||||
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
|
||||
@ -294,7 +294,7 @@ class CumprodTest(test.TestCase):
|
||||
|
||||
def _compareGradient(self, shape, axis, exclusive, reverse):
|
||||
x = np.arange(1, 9).reshape(shape).astype(np.float64)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
t = ops.convert_to_tensor(x)
|
||||
result = math_ops.cumprod(t, axis, exclusive, reverse)
|
||||
jacob_t, jacob_n = gradient_checker.compute_gradient(
|
||||
|
@ -134,7 +134,7 @@ class ScatterTest(test.TestCase):
|
||||
repeat_indices=False,
|
||||
updates_are_scalar=False):
|
||||
np.random.seed(8)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
|
||||
for extra_shape in (), (5,), (5, 9):
|
||||
# Generate random indices with no duplicates for easy numpy comparison
|
||||
|
@ -307,7 +307,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
tf_x, np_x = self._input(shape, dtype=dtype)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=True):
with self.cached_session():
for np_op1, np_op2, tf_op, init_op in ops_list:
# sqrt_n doesn't support integers
if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
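A standalone example of the reduction family these tests cover, via the public API (illustrative only, not taken from the test file):

```python
import tensorflow as tf

data = tf.constant([1.0, 2.0, 3.0, 4.0])
segment_ids = tf.constant([0, 1, 0, 1])

# Entries with the same segment id are summed together.
out = tf.math.unsorted_segment_sum(data, segment_ids, num_segments=2)
print(out.numpy())  # [4. 6.]
```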
@ -333,7 +333,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
|
||||
for indices in indices_flat, indices_flat.reshape(5, 2):
|
||||
shape = indices.shape + (2,)
|
||||
for dtype in dtypes:
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tf_x, np_x = self._input(shape)
|
||||
num_segments_constant = constant_op.constant(
|
||||
num_segments, dtype=dtype)
|
||||
@ -433,7 +433,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
|
||||
shape = [n, num_cols]
|
||||
num_segments = max(indices) + 1
|
||||
for dtype in self.differentiable_dtypes:
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tf_x, np_x = self._input(shape, dtype=dtype)
|
||||
# Results from UnsortedSegmentSum
|
||||
unsorted_s = math_ops.unsorted_segment_sum(
|
||||
@ -470,7 +470,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
|
||||
def testEmptySecondDimension(self):
|
||||
dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
|
||||
np.complex64, np.complex128]
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
for dtype in dtypes:
|
||||
for itype in (np.int32, np.int64):
|
||||
data = np.zeros((2, 0), dtype=dtype)
|
||||
@ -486,7 +486,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
|
||||
for indices in indices_flat, indices_flat.reshape(5, 2):
|
||||
shape = indices.shape + (2,)
|
||||
for dtype in self.all_dtypes:
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
tf_x, np_x = self._input(shape, dtype=dtype)
|
||||
np_ans = self._segmentReduce(
|
||||
indices, np_x, np.add, op2=None, num_segments=num_segments)
|
||||
|
@ -55,7 +55,7 @@ class SelfAdjointEigTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testConcurrentExecutesWithoutError(self):
|
||||
all_ops = []
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
for compute_v_ in True, False:
|
||||
matrix1 = random_ops.random_normal([5, 5], seed=42)
|
||||
matrix2 = random_ops.random_normal([5, 5], seed=42)
|
||||
@ -84,7 +84,7 @@ class SelfAdjointEigTest(test.TestCase):
|
||||
"self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
|
||||
self.assertEqual(matrix.shape, (32, 32))
|
||||
matrix_tensor = constant_op.constant(matrix)
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
(e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
|
||||
self.assertEqual(e.size, 32)
|
||||
self.assertAllClose(
|
||||
@ -156,7 +156,7 @@ def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_):
else:
atol = 1e-12
np_e, np_v = np.linalg.eigh(a)
with self.session(use_gpu=True):
with self.session():
if compute_v_:
tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))
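The check performed by this test, sketched with the public `tf.linalg.eigh` (the exported counterpart of `linalg_ops.self_adjoint_eig`); eigenvectors are only defined up to sign, so the sketch compares the reconstruction rather than the vectors themselves:

```python
import numpy as np
import tensorflow as tf

m = np.random.default_rng(0).standard_normal((4, 4)).astype(np.float32)
a = (m + m.T) / 2.0                       # symmetric input

np_e, _ = np.linalg.eigh(a)
tf_e, tf_v = tf.linalg.eigh(tf.constant(a))

np.testing.assert_allclose(np_e, tf_e.numpy(), atol=1e-4)
recon = (tf_v * tf_e) @ tf.transpose(tf_v)        # V diag(e) V^T
np.testing.assert_allclose(a, recon.numpy(), atol=1e-4)
```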
@ -211,7 +211,8 @@ def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):
|
||||
tol = 1e-2
|
||||
else:
|
||||
tol = 1e-7
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
|
||||
def Compute(x):
|
||||
e, v = linalg_ops.self_adjoint_eig(x)
|
||||
# (complex) Eigenvectors are only unique up to an arbitrary phase
|
||||
|
@ -267,7 +267,7 @@ class ShapeOpsTest(test.TestCase):
|
||||
for dtype in [dtypes.int32, dtypes.int64]:
|
||||
x = np.zeros([2])
|
||||
np_ans = np.expand_dims(x, axis=0)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
|
||||
tf_ans = self.evaluate(tensor)
|
||||
self.assertShapeEqual(np_ans, tensor)
|
||||
@ -433,7 +433,7 @@ class TileTest(test.TestCase, parameterized.TestCase):
|
||||
def testSimple(self):
|
||||
# multiples could be int32 or int64
|
||||
for dtype in [dtypes.int32, dtypes.int64]:
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inp = np.random.rand(4, 1).astype(np.float32)
|
||||
a = constant_op.constant(inp)
|
||||
tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
|
||||
@ -505,7 +505,7 @@ class TileTest(test.TestCase, parameterized.TestCase):
|
||||
bytes: (dtypes.string, bytes)
|
||||
}
|
||||
for dtype_np, (dtype_tf, cast) in types_to_test.items():
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inp = np.random.rand(4, 1).astype(dtype_np)
|
||||
a = constant_op.constant(
|
||||
[cast(x) for x in inp.ravel(order="C")],
|
||||
@ -601,7 +601,7 @@ class TileTest(test.TestCase, parameterized.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testGradientSimpleReductionOnGPU(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
inp = np.random.rand(4, 1).astype("f")
|
||||
a = constant_op.constant(
|
||||
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
|
||||
@ -616,7 +616,7 @@ class TileTest(test.TestCase, parameterized.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testGradientStridedReductionOnGPU(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
inp = np.random.rand(4, 2).astype("f")
|
||||
a = constant_op.constant(
|
||||
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
|
||||
|
@ -190,7 +190,7 @@ class DCTOpsTest(parameterized.TestCase, test.TestCase):
|
||||
# "ortho" normalization is not implemented for type I.
|
||||
if dct_type == 1 and norm == "ortho":
|
||||
return
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
tol = 5e-4 if dtype == np.float32 else 1e-7
|
||||
signals = np.random.rand(*shape).astype(dtype)
|
||||
n = np.random.randint(1, 2 * signals.shape[-1])
|
||||
|
@ -87,7 +87,8 @@ class BaseFFTOpsTest(test.TestCase):
|
||||
if test.is_built_with_rocm():
|
||||
self.skipTest("Complex datatype not yet supported in ROCm.")
|
||||
return
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
|
||||
def f(inx, iny):
|
||||
inx.set_shape(x.shape)
|
||||
iny.set_shape(y.shape)
|
||||
@ -123,12 +124,12 @@ class FFTOpsTest(BaseFFTOpsTest, parameterized.TestCase):

def _tf_fft(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
return sess.run(self._tf_fft_for_rank(rank)(x), feed_dict=feed_dict)

def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
return sess.run(self._tf_ifft_for_rank(rank)(x), feed_dict=feed_dict)

def _np_fft(self, x, rank, fft_length=None):
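For reference, the same comparison through the public `tf.signal` API (a sketch; the `_tf_fft_for_rank` helper above is assumed to simply dispatch to `fft`/`fft2d`/`fft3d` by rank, which the sketch does not reproduce):

```python
import numpy as np
import tensorflow as tf

x = np.random.default_rng(1).standard_normal(8).astype(np.complex64)

np_out = np.fft.fft(x)
tf_out = tf.signal.fft(tf.constant(x))
np.testing.assert_allclose(np_out, tf_out.numpy(), rtol=1e-4, atol=1e-4)

# Round trip through the inverse transform.
np.testing.assert_allclose(x, tf.signal.ifft(tf_out).numpy(), rtol=1e-4, atol=1e-4)
```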
@ -299,12 +300,12 @@ class FFTOpsTest(BaseFFTOpsTest, parameterized.TestCase):
|
||||
class RFFTOpsTest(BaseFFTOpsTest, parameterized.TestCase):
|
||||
|
||||
def _tf_fft(self, x, rank, fft_length=None, feed_dict=None):
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
return sess.run(
|
||||
self._tf_fft_for_rank(rank)(x, fft_length), feed_dict=feed_dict)
|
||||
|
||||
def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None):
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
return sess.run(
|
||||
self._tf_ifft_for_rank(rank)(x, fft_length), feed_dict=feed_dict)
|
||||
|
||||
|
@ -327,7 +327,7 @@ class FrameTest(test.TestCase):
|
||||
def test_gradient_numerical(self):
|
||||
if context.executing_eagerly():
|
||||
return
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
signal_shape = (2, 128)
|
||||
signal = array_ops.ones(signal_shape)
|
||||
frame_length = 33
|
||||
|
@ -266,7 +266,7 @@ class SpectralOpsTest(test.TestCase, parameterized.TestCase):
|
||||
# TODO(rjryan): Update gradient tests for Eager.
|
||||
if context.executing_eagerly():
|
||||
return
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
signal_length = 512
|
||||
|
||||
# An all-zero signal has all zero gradients with respect to the sum of the
|
||||
|
@ -101,7 +101,7 @@ class SpaceToBatchTest(test.TestCase, PythonOpImpl):
|
||||
"""
|
||||
|
||||
def _testPad(self, inputs, paddings, block_size, outputs):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
# outputs = space_to_batch(inputs)
|
||||
x_tf = self.space_to_batch(
|
||||
math_ops.cast(inputs, dtypes.float32),
|
||||
@ -327,7 +327,7 @@ class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl):
|
||||
array_ops.space_to_depth(
|
||||
array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
|
||||
[3, 1, 2, 0])
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
self.assertAllEqual(y1, y2)
|
||||
|
||||
|
||||
@ -526,7 +526,7 @@ class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl):
|
||||
# Check the gradients.
|
||||
def _checkGrad(self, x, paddings, block_size):
|
||||
assert 4 == x.ndim
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tf_x = ops.convert_to_tensor(x)
|
||||
tf_y = self.space_to_batch(tf_x, paddings, block_size)
|
||||
epsilon = 1e-5
|
||||
|
@ -73,7 +73,7 @@ class SparseTensorDenseMatMulGradientTest(test.TestCase):
|
||||
matmul = sparse_ops.sparse_tensor_dense_matmul(
|
||||
sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
dense_t_shape = [m, k] if adjoint_b else [k, m]
|
||||
sp_t_val_shape = [nnz]
|
||||
err = gradient_checker.compute_gradient_error(
|
||||
|
@ -66,7 +66,7 @@ class SparseTensorDenseMatMulTest(test.TestCase):
|
||||
x_values = x[np.where(x)]
|
||||
x_shape = x.shape
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
sp_x_value = sparse_tensor.SparseTensorValue(
|
||||
indices=x_indices, values=x_values, dense_shape=x_shape)
|
||||
tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(
|
||||
|
@ -64,7 +64,7 @@ class SparseXentTest(test.TestCase):

def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
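The op these hunks touch, exercised through its public wrapper (sketch; `tf.nn.sparse_softmax_cross_entropy_with_logits` fronts the `gen_nn_ops` kernel used in the test):

```python
import numpy as np
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.1, 0.2, 3.0]])
labels = tf.constant([0, 2])

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)

# Per-row loss is -log(softmax(logits)[i, labels[i]]).
probs = tf.nn.softmax(logits).numpy()
expected = -np.log(probs[[0, 1], [0, 2]])
np.testing.assert_allclose(loss.numpy(), expected, rtol=1e-5)
```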
@ -73,7 +73,7 @@ class SparseXentTest(test.TestCase):
|
||||
|
||||
def testSingleClass(self):
|
||||
for label_dtype in np.int32, np.int64:
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
|
||||
np.array([[1.], [-1.], [0.]]).astype(np.float32),
|
||||
np.array([0, 0, 0]).astype(label_dtype))
|
||||
@ -145,19 +145,19 @@ class SparseXentTest(test.TestCase):
|
||||
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
|
||||
|
||||
def testShapeMismatch(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
with self.assertRaisesRegex(ValueError, ".*Rank mismatch:*"):
|
||||
nn_ops.sparse_softmax_cross_entropy_with_logits(
|
||||
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
|
||||
|
||||
def testScalar(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
with self.assertRaisesRegex(ValueError, ".*Logits cannot be scalars*"):
|
||||
nn_ops.sparse_softmax_cross_entropy_with_logits(
|
||||
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
|
||||
|
||||
def testLabelsPlaceholderScalar(self):
|
||||
with ops_lib.Graph().as_default(), self.session(use_gpu=True):
|
||||
with ops_lib.Graph().as_default(), self.session():
|
||||
labels = array_ops.placeholder(np.int32)
|
||||
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
|
||||
labels=labels, logits=[[7.]])
|
||||
@ -165,7 +165,7 @@ class SparseXentTest(test.TestCase):
|
||||
y.eval(feed_dict={labels: 0})
|
||||
|
||||
def testVector(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
|
||||
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
|
||||
self.assertAllClose(0.0, self.evaluate(loss))
|
||||
@ -193,7 +193,7 @@ class SparseXentTest(test.TestCase):
|
||||
|
||||
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
|
||||
def testGradient(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
l = constant_op.constant([3, 0, 1], name="l")
|
||||
f = constant_op.constant(
|
||||
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
|
||||
|
@ -55,13 +55,13 @@ class SplitOpTest(test.TestCase):
|
||||
model_input = array_ops.placeholder(dtypes.float32)
|
||||
inp = np.zeros((1, 10))
|
||||
# check that we still fail at runtime if the shapes were unknown
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
with self.assertRaises(errors_impl.InvalidArgumentError):
|
||||
sess.run(array_ops.split(model_input, [4]), {model_input: inp})
|
||||
|
||||
# scalar Tensors are not permitted as num_splits
|
||||
for axis in [0, -2]:
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
with self.assertRaises(ValueError):
|
||||
# pylint: disable=expression-not-assigned
|
||||
sess.run(
|
||||
@ -83,7 +83,7 @@ class SplitOpTest(test.TestCase):
|
||||
model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
|
||||
result = array_ops.split(model_input2, [2, 2], axis=0)[0]
|
||||
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
sess.run(result, feed_dict={model_input2: np.ones([4, 2])})
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
@ -92,7 +92,7 @@ class SplitOpTest(test.TestCase):
|
||||
|
||||
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
|
||||
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
with self.assertRaises(ValueError) as context:
|
||||
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
|
||||
self.assertTrue("Cannot infer num from shape" in str(context.exception))
|
||||
@ -214,7 +214,7 @@ class SplitOpTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testOutputShape(self):
|
||||
for axis in [1, -1]:
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
|
||||
size_splits = [3, 7, 2]
|
||||
outputs = array_ops.split(tensor, size_splits, axis)
|
||||
@ -315,7 +315,7 @@ class SplitOpTest(test.TestCase):
|
||||
|
||||
def _testGradientsSimple(self, dtype):
|
||||
inp = self._makeData((4, 4), dtype)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inp_tensor = ops.convert_to_tensor(inp)
|
||||
s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
|
||||
inp_grads = [self._makeData((4, 1), dtype)for _ in range(4)]
|
||||
@ -382,7 +382,7 @@ class SplitOpTest(test.TestCase):
|
||||
|
||||
splits = array_ops.placeholder(dtypes.int32, [3])
|
||||
y = array_ops.split(values, splits, axis=x)
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
|
||||
"must have exactly one element"):
|
||||
sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
|
||||
|
@ -43,7 +43,7 @@ class StageTest(test.TestCase):
|
||||
|
||||
G.finalize()
|
||||
|
||||
with self.session(use_gpu=True, graph=G) as sess:
|
||||
with self.session(graph=G) as sess:
|
||||
sess.run(stage, feed_dict={x: -1})
|
||||
for i in range(10):
|
||||
_, yval = sess.run([stage, y], feed_dict={x: i})
|
||||
@ -63,7 +63,7 @@ class StageTest(test.TestCase):
|
||||
|
||||
G.finalize()
|
||||
|
||||
with self.session(use_gpu=True, graph=G) as sess:
|
||||
with self.session(graph=G) as sess:
|
||||
sess.run(stage, feed_dict={x: -1})
|
||||
for i in range(10):
|
||||
_, yval = sess.run([stage, y], feed_dict={x: i})
|
||||
@ -89,7 +89,7 @@ class StageTest(test.TestCase):
|
||||
|
||||
G.finalize()
|
||||
|
||||
with self.session(use_gpu=True, graph=G) as sess:
|
||||
with self.session(graph=G) as sess:
|
||||
sess.run(stage, feed_dict={x: -1})
|
||||
for i in range(10):
|
||||
_, yval = sess.run([stage, y], feed_dict={x: i})
|
||||
@ -131,7 +131,7 @@ class StageTest(test.TestCase):
|
||||
|
||||
G.finalize()
|
||||
|
||||
with self.session(use_gpu=True, graph=G) as sess:
|
||||
with self.session(graph=G) as sess:
|
||||
for i in range(10):
|
||||
sess.run(stage, feed_dict={x: i})
|
||||
|
||||
@ -156,7 +156,7 @@ class StageTest(test.TestCase):
|
||||
|
||||
G.finalize()
|
||||
|
||||
with self.session(use_gpu=True, graph=G) as sess:
|
||||
with self.session(graph=G) as sess:
|
||||
sess.run(stage, feed_dict={x: -1})
|
||||
self.assertEqual(sess.run(size), 1)
|
||||
sess.run(stage, feed_dict={x: -1})
|
||||
@ -189,7 +189,7 @@ class StageTest(test.TestCase):
|
||||
queue = Queue.Queue()
|
||||
n = 8
|
||||
|
||||
with self.session(use_gpu=True, graph=G) as sess:
|
||||
with self.session(graph=G) as sess:
|
||||
# Stage data in a separate thread which will block
|
||||
# when it hits the staging area's capacity and thus
|
||||
# not fill the queue with n tokens
|
||||
@ -254,7 +254,7 @@ class StageTest(test.TestCase):
|
||||
queue = Queue.Queue()
|
||||
n = 8
|
||||
|
||||
with self.session(use_gpu=True, graph=G) as sess:
|
||||
with self.session(graph=G) as sess:
|
||||
# Stage data in a separate thread which will block
|
||||
# when it hits the staging area's capacity and thus
|
||||
# not fill the queue with n tokens
|
||||
|
@ -163,7 +163,7 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
|
||||
if use_static_shape_:
|
||||
s_tf_val, u_tf_val, v_tf_val = self.evaluate([s_tf, u_tf, v_tf])
|
||||
else:
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
s_tf_val, u_tf_val, v_tf_val = sess.run(
|
||||
[s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
|
||||
else:
|
||||
@ -172,7 +172,7 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
|
||||
if use_static_shape_:
|
||||
s_tf_val = self.evaluate(s_tf)
|
||||
else:
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
|
||||
|
||||
if compute_uv_:
|
||||
@ -284,7 +284,7 @@ def _GetSvdGradGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
|
||||
epsilon = np.finfo(dtype_).eps
|
||||
delta = 0.1 * epsilon**(1.0 / 3.0)
|
||||
tol = 1e-5
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
tf_a = constant_op.constant(a)
|
||||
if compute_uv_:
|
||||
tf_s, tf_u, tf_v = _NormalizingSvd(tf_a, full_matrices_)
|
||||
|
@ -83,7 +83,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.run_in_graph_and_eager_modes
|
||||
def testTensorArrayWriteRead(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -104,7 +104,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(-3.0, d2)

def _testTensorArrayWritePack(self, tf_dtype):
with self.cached_session(use_gpu=True):
with self.cached_session():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
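The write/pack pattern these TensorArray hunks exercise, shown with the public `tf.TensorArray` in eager mode (illustrative sketch, not part of the patch):

```python
import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=3, tensor_array_name="foo")
ta = ta.write(0, 1.0).write(1, 2.0).write(2, 3.0)

print(ta.read(1).numpy())   # 2.0
print(ta.stack().numpy())   # [1. 2. 3.]
```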
@ -133,7 +133,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self._testTensorArrayWritePackMaybeLegacy()
|
||||
|
||||
def testEmptyTensorArrayPack(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
||||
|
||||
@ -148,7 +148,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllEqual([3, 0, 1], c0.shape)
|
||||
|
||||
def testTensorArrayWriteConcatInParallel(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
|
||||
def _concat_1():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
@ -189,7 +189,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllEqual([1, 1, 1, 8, 9, 8, 9, 8, 9], c0)
|
||||
|
||||
def _testTensorArrayWriteConcat(self, tf_dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
|
||||
|
||||
@ -217,7 +217,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self._testTensorArrayWriteConcat(dtypes.string)
|
||||
|
||||
def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -251,7 +251,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.run_v1_only("Uses placeholders")
|
||||
def testSkipEagerTensorArrayReadUninitializedInferShapeFillsZeros(self):
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -261,7 +261,7 @@ class TensorArrayTest(test.TestCase):
|
||||
[[0.0, 0.0]], sess.run(ta.write(1, val).read(0), {val: [[4.0, 5.0]]}))
|
||||
|
||||
def _testTensorArrayUnpackRead(self, tf_dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
convert = _make_converter(tf_dtype)
|
||||
|
||||
ta = _make_ta(3, "foo", dtype=tf_dtype)
|
||||
@ -311,7 +311,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self._testTensorArrayUnpackReadMaybeLegacy()
|
||||
|
||||
def _testTensorArraySplitRead(self, tf_dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
convert = _make_converter(tf_dtype)
|
||||
|
||||
# Split an empty vector
|
||||
@ -365,7 +365,7 @@ class TensorArrayTest(test.TestCase):
|
||||
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
|
||||
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
|
||||
def testSkipEagerTensorGradArrayWriteRead(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -401,7 +401,7 @@ class TensorArrayTest(test.TestCase):
|
||||
def testSkipEagerTensorArrayGradGrad(self):
|
||||
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
|
||||
self.skipTest("Legacy TensorArray does not support double derivatives.")
|
||||
with self.test_session(use_gpu=True) as session:
|
||||
with self.test_session() as session:
|
||||
x = constant_op.constant(4.0)
|
||||
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
@ -420,7 +420,7 @@ class TensorArrayTest(test.TestCase):
|
||||
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
|
||||
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
|
||||
def testSkipEagerTensorGradArrayDynamicWriteRead(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -463,7 +463,7 @@ class TensorArrayTest(test.TestCase):
|
||||
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
|
||||
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
|
||||
def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
||||
g_ta_0 = ta.grad("grad")
|
||||
@ -479,7 +479,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
|
||||
|
||||
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = _make_ta(3, "foo", dtype=dtypes.float32)
|
||||
# TODO(b/129870929): Remove the last 2 checks (runtime checks) after
|
||||
# back back from preferred_dtype= to dtype= in convert_to_tensor. Also
|
||||
@ -518,7 +518,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.evaluate(ta.write(3, 3.0).flow)
|
||||
|
||||
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = _make_ta(3, "foo", dtype=dtypes.float32)
|
||||
|
||||
w0 = ta.write(0, [[4.0, 5.0]])
|
||||
@ -553,7 +553,7 @@ class TensorArrayTest(test.TestCase):
|
||||
@test_util.disable_control_flow_v2("v2 allows multiple writes.")
|
||||
@test_util.run_v1_only("v2 allows multiple writes.")
|
||||
def testSkipEagerTensorArrayWriteMultipleFails(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
||||
|
||||
@ -563,7 +563,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
|
||||
|
||||
def testTensorArrayConcatIncompatibleShapesFails(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -597,7 +597,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.evaluate(w3.concat())
|
||||
|
||||
def testTensorArraySplitIncompatibleShapesFails(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
in_eager_mode = context.executing_eagerly()
|
||||
ta = _make_ta(3, "foo")
|
||||
with self.assertRaisesOpError(
|
||||
@ -636,7 +636,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.evaluate(ta.split([1.0], [1]).flow)
|
||||
|
||||
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
|
||||
ta_grad = ta.grad("grad")
|
||||
@ -679,7 +679,7 @@ class TensorArrayTest(test.TestCase):
|
||||
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
|
||||
@test_util.run_v1_only("Low level legacy TA op test.")
|
||||
def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
size=3,
|
||||
dtype=dtypes.float32,
|
||||
@ -710,7 +710,7 @@ class TensorArrayTest(test.TestCase):
|
||||
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
|
||||
@test_util.run_v1_only("Low level legacy TA op test.")
|
||||
def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
size=3, dtype=dtypes.float32,
|
||||
element_shape=None) # Note that element_shape is unknown
|
||||
@ -733,7 +733,7 @@ class TensorArrayTest(test.TestCase):
|
||||
sess.run(read_value, feed_dict={value: fed_value}))
|
||||
|
||||
def testMultiTensorArray(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
h1 = tensor_array_ops.TensorArray(
|
||||
size=1, dtype=dtypes.float32, tensor_array_name="foo")
|
||||
w1 = h1.write(0, 4.0)
|
||||
@ -749,7 +749,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllClose(9.0, val)
|
||||
|
||||
def _testTensorArrayGradientWriteReadType(self, dtype):
|
||||
with self.cached_session(use_gpu=True) as session:
|
||||
with self.cached_session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.as_dtype(dtype),
|
||||
tensor_array_name="foo",
|
||||
@ -801,7 +801,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self._testTensorArrayGradientWriteReadType(dtype)
|
||||
|
||||
def _testTensorArrayGradientWritePackConcatAndRead(self):
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -839,7 +839,7 @@ class TensorArrayTest(test.TestCase):
|
||||
@test_util.disable_control_flow_v2("v2 does not support clear_after_read.")
|
||||
@test_util.run_v1_only("v2 does not support clear_after_read.")
|
||||
def testTensorArrayReadTwice(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
|
||||
|
||||
ta_readonce = tensor_array_ops.TensorArray(
|
||||
@ -867,7 +867,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))
|
||||
|
||||
def _testTensorArrayGradientUnpackRead(self):
|
||||
with self.cached_session(use_gpu=True) as session:
|
||||
with self.cached_session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -897,7 +897,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.deprecated_graph_mode_only
|
||||
def testSkipEagerTensorArrayGradientSplitConcat(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=2,
|
||||
infer_shape=False)
|
||||
@ -920,7 +920,7 @@ class TensorArrayTest(test.TestCase):
|
||||
grad_vals[0])
|
||||
|
||||
def _testTensorArrayGradientDynamicUnpackRead(self):
|
||||
with self.cached_session(use_gpu=True) as session:
|
||||
with self.cached_session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -946,20 +946,20 @@ class TensorArrayTest(test.TestCase):
|
||||
self._testTensorArrayGradientDynamicUnpackRead()
|
||||
|
||||
def testCloseTensorArray(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
||||
self.evaluate(ta.close())
|
||||
|
||||
def testSizeTensorArray(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
||||
s = ta.size()
|
||||
self.assertAllEqual(3, self.evaluate(s))
|
||||
|
||||
def testWriteCloseTensorArray(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -971,7 +971,8 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
|
||||
np_dtype = dtype.as_numpy_dtype
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
|
||||
def func(v0, state0, var):
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtype,
|
||||
@ -1068,7 +1069,8 @@ class TensorArrayTest(test.TestCase):
|
||||
dynamic_size=True, dtype=dtypes.float32)
|
||||
|
||||
def testGradSerialTwoLoops(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
|
||||
def loop(x):
|
||||
num_steps = 100
|
||||
acc = tensor_array_ops.TensorArray(
|
||||
@ -1117,7 +1119,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.deprecated_graph_mode_only
|
||||
def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
a = array_ops.identity(
|
||||
np.arange(
|
||||
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
|
||||
@ -1195,7 +1197,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.deprecated_graph_mode_only
|
||||
def testSkipEagerWriteShape(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
||||
c0 = constant_op.constant([4.0, 5.0])
|
||||
@ -1220,7 +1222,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.deprecated_graph_mode_only
|
||||
def testSkipEagerPartlyUnknownShape(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, tensor_array_name="foo", size=6)
|
||||
|
||||
@ -1260,7 +1262,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
|
||||
|
||||
def _testUnpackShape(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -1297,7 +1299,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.deprecated_graph_mode_only
|
||||
def testSplitShape(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -1329,7 +1331,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.deprecated_graph_mode_only
|
||||
def testSkipEagerWriteUnknownShape(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -1341,7 +1343,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
|
||||
|
||||
def _testGradientWhenNotAllComponentsRead(self):
|
||||
with self.cached_session(use_gpu=True) as session:
|
||||
with self.cached_session() as session:
|
||||
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
|
||||
x = constant_op.constant([2.0, 3.0])
|
||||
w = ta.unstack(x)
|
||||
@ -1357,7 +1359,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.deprecated_graph_mode_only
|
||||
def testSkipEagerWriteButNotAllComponentsReadGrad(self):
|
||||
with self.cached_session(use_gpu=True) as session:
|
||||
with self.cached_session() as session:
|
||||
x0 = constant_op.constant(5.0)
|
||||
x1 = constant_op.constant(10.0)
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
@ -1369,7 +1371,7 @@ class TensorArrayTest(test.TestCase):
|
||||
self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0])
|
||||
|
||||
def _testTensorArrayUnpackDynamic(self):
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, size=3, dynamic_size=True)
|
||||
x = constant_op.constant([1.0, 2.0, 3.0])
|
||||
@ -1386,7 +1388,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSkipEagerTensorArraySplitDynamic(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, size=3, dynamic_size=True)
|
||||
x = constant_op.constant([1.0, 2.0, 3.0])
|
||||
@ -1449,7 +1451,7 @@ class TensorArrayTest(test.TestCase):
|
||||
ta_gather_with_unknown_indices_shape([0])
|
||||
|
||||
def _testTensorArrayEvalEmpty(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
|
||||
v2_msg = ("Tried to stack elements of an empty list with "
|
||||
@ -1469,7 +1471,7 @@ class TensorArrayTest(test.TestCase):
|
||||
# this test is ill-defined for Eager mode --- unpacking an empty tensor
|
||||
# gives an empty list / there is not equivalent of "mark_used" in Eager
|
||||
def _testTensorArrayEvalEmptyWithDefault(self):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
|
||||
self.assertEqual(0, ta.size().eval())
|
||||
@ -1491,7 +1493,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSkipEagerTensorArrayScatterReadAndGradients(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -1518,7 +1520,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testSkipEagerTensorArrayScatterPartialReadAndGradients(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -1554,7 +1556,7 @@ class TensorArrayTest(test.TestCase):
|
||||
|
||||
@test_util.run_v1_only("b/118890905")
|
||||
def testTensorArrayWriteGatherAndGradients(self):
|
||||
with self.session(use_gpu=True) as session:
|
||||
with self.session() as session:
|
||||
ta = tensor_array_ops.TensorArray(
|
||||
dtype=dtypes.float32,
|
||||
tensor_array_name="foo",
|
||||
@ -1703,7 +1705,7 @@ class TensorArrayTest(test.TestCase):
|
||||
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
|
||||
|
||||
def testTensorArrayIdentity(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
|
||||
infer_shape=False)
|
||||
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
|
||||
@ -1769,7 +1771,7 @@ class TensorArrayTest(test.TestCase):
|
||||
# dy is outside of the gradients name scope; tf.gradients must
|
||||
# wrap it in the correct name scope.
|
||||
dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
vdx, vdy = self.evaluate([dx, dy])
|
||||
self.assertAllClose(vdx, vdy)
|
||||
|
||||
@ -1777,7 +1779,7 @@ class TensorArrayTest(test.TestCase):
|
||||
def testSkipEagerTensorArrayInt64GPU(self):
|
||||
if not test.is_gpu_available():
|
||||
return
|
||||
with self.session(use_gpu=True, force_gpu=True) as sess:
|
||||
with self.session(force_gpu=True) as sess:
|
||||
value = array_ops.placeholder(dtypes.int64)
|
||||
ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2)
|
||||
ta = ta.scatter([0, 1], value)
|
||||
|
@ -179,7 +179,7 @@ def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_):
for _ in range(num_trials):
a_np, b_np, a_dims_np, b_dims_np = _generate_random_tensors_and_dims()
np_ans = np.tensordot(a_np, b_np, axes=(a_dims_np, b_dims_np))
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
if dynamic_shape_:
a = array_ops.placeholder(dtype_)
b = array_ops.placeholder(dtype_)
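A concrete instance of the tensordot comparison driven by the generated tests above (sketch with fixed shapes and axes rather than the random ones the helper draws):

```python
import numpy as np
import tensorflow as tf

a = np.random.default_rng(2).standard_normal((3, 4, 5)).astype(np.float32)
b = np.random.default_rng(3).standard_normal((5, 4, 2)).astype(np.float32)

# Contract axes 1 and 2 of `a` with axes 1 and 0 of `b`.
np_ans = np.tensordot(a, b, axes=([1, 2], [1, 0]))
tf_ans = tf.tensordot(a, b, axes=[[1, 2], [1, 0]])

np.testing.assert_allclose(np_ans, tf_ans.numpy(), rtol=1e-4, atol=1e-4)
print(tf_ans.shape)  # (3, 2)
```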
@ -219,7 +219,7 @@ def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_):
|
||||
all_axes.append(a_np.ndim - 1)
|
||||
for axes in all_axes:
|
||||
np_ans = np.tensordot(a_np, b_np, axes=axes)
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
if dynamic_shape_:
|
||||
a = array_ops.placeholder(dtype_)
|
||||
b = array_ops.placeholder(dtype_)
|
||||
|
@ -47,7 +47,7 @@ class TopKTest(test.TestCase):
sorted=True): # pylint: disable=redefined-builtin
np_expected_values = np.array(expected_values)
np_expected_indices = np.array(expected_indices)
with self.cached_session(use_gpu=True) as sess:
with self.cached_session() as sess:
values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
values, indices = self.evaluate([values_op, indices_op])
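For reference, the op under test via its public alias `tf.math.top_k` (a minimal sketch):

```python
import tensorflow as tf

values, indices = tf.math.top_k([0.1, 0.4, 0.3, 0.2], k=2, sorted=True)
print(values.numpy())   # [0.4 0.3]
print(indices.numpy())  # [1 2]
```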
@ -196,7 +196,7 @@ class TopKTest(test.TestCase):
|
||||
@test_util.run_deprecated_v1
|
||||
def testKNegative(self):
|
||||
inputs = [[0.1, 0.2], [0.3, 0.4]]
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
k = array_ops.placeholder(dtypes.int32)
|
||||
values, _ = nn_ops.top_k(inputs, k)
|
||||
with self.assertRaisesOpError("Need k >= 0, got -7"):
|
||||
@ -211,7 +211,7 @@ class TopKTest(test.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testTopKGradients(self):
|
||||
with self.session(use_gpu=True) as sess:
|
||||
with self.session() as sess:
|
||||
inputs = array_ops.placeholder(dtypes.float32, shape=[2, 5])
|
||||
values, _ = nn_ops.top_k(inputs, 3)
|
||||
grad = sess.run(
|
||||
|
@ -31,7 +31,7 @@ class TraceTest(test.TestCase):
|
||||
|
||||
def compare(self, x):
|
||||
np_ans = np.trace(x, axis1=-2, axis2=-1)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
tf_ans = math_ops.trace(x).eval()
|
||||
self.assertAllClose(tf_ans, np_ans)
|
||||
|
||||
|
@ -79,7 +79,7 @@ class TransposeTest(test.TestCase):
np_ans = self._np_transpose(x, perm)
if conjugate:
np_ans = np.conj(np_ans)
with self.cached_session(use_gpu=True):
with self.cached_session():
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p, conjugate=conjugate)
tf_ans = self.evaluate(y)
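The transpose/conjugate check in standalone form (sketch using the public `tf.transpose`, the exported name for `array_ops.transpose`):

```python
import numpy as np
import tensorflow as tf

x = np.arange(6, dtype=np.complex64).reshape(2, 3) + 1j
perm = [1, 0]

np_ans = np.conj(np.transpose(x, perm))
tf_ans = tf.transpose(x, perm, conjugate=True)

np.testing.assert_allclose(np_ans, tf_ans.numpy())
```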
@ -170,7 +170,7 @@ class TransposeTest(test.TestCase):
|
||||
inp = np.arange(
|
||||
1, total_size + 1, dtype=datatype).reshape(input_shape)
|
||||
np_ans = self._np_transpose(inp, perm)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inx = ops.convert_to_tensor(inp)
|
||||
y = array_ops.transpose(inx, perm)
|
||||
tf_ans = self.evaluate(y)
|
||||
@ -193,7 +193,7 @@ class TransposeTest(test.TestCase):
|
||||
inp = np.arange(
|
||||
1, total_size + 1, dtype=np.float32).reshape(input_shape)
|
||||
np_ans = self._np_transpose(inp, perm)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inx = ops.convert_to_tensor(inp)
|
||||
y = array_ops.transpose(inx, perm)
|
||||
tf_ans = self.evaluate(y)
|
||||
@ -230,7 +230,7 @@ class TransposeTest(test.TestCase):
|
||||
inp = np.arange(
|
||||
1, total_size + 1, dtype=np.float32).reshape(input_shape)
|
||||
np_ans = self._np_transpose(inp, perm)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inx = ops.convert_to_tensor(inp)
|
||||
y = array_ops.transpose(inx, perm)
|
||||
tf_ans = self.evaluate(y)
|
||||
@ -255,7 +255,7 @@ class TransposeTest(test.TestCase):
|
||||
inp = np.arange(
|
||||
1, total_size + 1, dtype=datatype).reshape(input_shape)
|
||||
np_ans = self._np_transpose(inp, perm)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inx = ops.convert_to_tensor(inp)
|
||||
y = array_ops.transpose(inx, perm)
|
||||
tf_ans = self.evaluate(y)
|
||||
@ -278,7 +278,7 @@ class TransposeTest(test.TestCase):
|
||||
inp = np.arange(
|
||||
1, total_size + 1, dtype=np.float32).reshape(input_shape)
|
||||
np_ans = self._np_transpose(inp, perm)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inx = ops.convert_to_tensor(inp)
|
||||
y = array_ops.transpose(inx, perm)
|
||||
tf_ans = self.evaluate(y)
|
||||
@ -331,7 +331,7 @@ class TransposeTest(test.TestCase):
|
||||
with self.subTest(input_shape=input_shape, perm=perm):
|
||||
inp = np.random.randint(10, size=input_shape)
|
||||
np_ans = self._np_transpose(inp, perm)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inx = ops.convert_to_tensor(inp)
|
||||
y = array_ops.transpose(inx, perm)
|
||||
tf_ans = self.evaluate(y)
|
||||
@ -355,7 +355,7 @@ class TransposeTest(test.TestCase):
|
||||
x = np.arange(0, 8).reshape([2, 4]).astype(np.float32)
|
||||
p = np.array([1, 0]).astype(perm_dtype)
|
||||
np_ans = np.copy(x).transpose(p)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
inx = ops.convert_to_tensor(x)
|
||||
inp = constant_op.constant(p)
|
||||
y = array_ops.transpose(inx, inp)
|
||||
|
@ -80,7 +80,7 @@ class TridiagonalMulOpTest(test.TestCase):
diags_matrix_batch, rhs_batch, diagonals_format='matrix')
]

with self.cached_session(use_gpu=True):
with self.cached_session():
results = self.evaluate(results)
results_batch = self.evaluate(results_batch)
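The 'matrix' diagonals format used in this test, shown with the public `tf.linalg.tridiagonal_matmul` (illustrative sketch; the test's batched variant adds a leading batch dimension):

```python
import numpy as np
import tensorflow as tf

# A 3x3 tridiagonal matrix given in 'matrix' format, applied to a dense rhs.
diags_matrix = tf.constant([[2.0, 1.0, 0.0],
                            [1.0, 2.0, 1.0],
                            [0.0, 1.0, 2.0]])
rhs = tf.ones([3, 2])

out = tf.linalg.tridiagonal_matmul(diags_matrix, rhs, diagonals_format='matrix')
np.testing.assert_allclose(out.numpy(), tf.matmul(diags_matrix, rhs).numpy())
```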
@ -114,7 +114,7 @@ class TridiagonalMulOpTest(test.TestCase):
|
||||
|
||||
diags = constant_op.constant(diags, dtype=dtype)
|
||||
rhs = constant_op.constant(rhs, dtype=dtype)
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
grad_reference, _ = gradient_checker_v2.compute_gradient(
|
||||
reference_matmul, [diags, rhs])
|
||||
grad_theoretical, grad_numerical = gradient_checker_v2.compute_gradient(
|
||||
@ -155,7 +155,7 @@ class TridiagonalMulOpTest(test.TestCase):
|
||||
constant_op.constant(rhs, dtype=dtypes.complex128),
|
||||
diagonals_format='matrix')
|
||||
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
result = self.evaluate(result)
|
||||
|
||||
self.assertAllClose(result, expected_result)
|
||||
|
@ -77,7 +77,7 @@ class TridiagonalSolveOpTest(test.TestCase):
|
||||
diags_format="compact",
|
||||
transpose_rhs=False,
|
||||
conjugate_rhs=False):
|
||||
with self.cached_session(use_gpu=True):
|
||||
with self.cached_session():
|
||||
pivoting = True
|
||||
if hasattr(self, "pivoting"):
|
||||
pivoting = self.pivoting
|
||||
@ -412,7 +412,7 @@ class TridiagonalSolveOpTest(test.TestCase):
|
||||
transpose_rhs=transpose_rhs,
|
||||
conjugate_rhs=conjugate_rhs)
|
||||
res = math_ops.reduce_sum(x * y)
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
actual_grad_diags = sess.run(
|
||||
tape_diags.gradient(res, diags), feed_dict=feed_dict)
|
||||
actual_rhs_diags = sess.run(
|
||||
@ -563,7 +563,7 @@ class TridiagonalSolveOpTest(test.TestCase):
|
||||
return
|
||||
x = linalg_impl.tridiagonal_solve(
|
||||
diags, rhs, diags_format, partial_pivoting=self.pivoting)
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
result = sess.run(x, feed_dict={diags: diags_feed, rhs: rhs_feed})
|
||||
self.assertAllClose(result, expected)
|
||||
|
||||
@ -648,7 +648,7 @@ class TridiagonalSolveOpTest(test.TestCase):
|
||||
rhs,
|
||||
diagonals_format="sequence",
|
||||
partial_pivoting=self.pivoting)
|
||||
with self.cached_session(use_gpu=True) as sess:
|
||||
with self.cached_session() as sess:
|
||||
result = sess.run(
|
||||
x,
|
||||
feed_dict={
|
||||
|
@ -150,7 +150,7 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
def testResourceAssignments(self):
|
||||
with self.session(use_gpu=True):
|
||||
with self.session():
|
||||
var = resource_variable_ops.ResourceVariable(0.0)
|
||||
plus_one = var.assign_add(1.0)
|
||||
minus_one = var.assign_sub(2.0)
Some files were not shown because too many files have changed in this diff.