Remove redundant use_gpu=True params
use_gpu is True by default in test utils starting with CL 356906251. I will wait a bit before checking this in, since once this is checked in it would be harder to roll back CL 356906251.

PiperOrigin-RevId: 357322055
Change-Id: Ibbeb900d93f9fb43c2dc61285ee38e582b29dcfc
parent 8a1c8335ed
commit 5bfc37ef25
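Every hunk below makes the same mechanical substitution: drop the explicit `use_gpu=True` argument from `self.session()`, `self.cached_session()`, and `test_session()` calls. As a rough illustration of why the argument is now redundant, here is a minimal, hypothetical sketch (the `MyOpTest` class and its tests are illustrative and not part of this commit); it assumes the test-session helpers default to `use_gpu=True` after CL 356906251:

```python
# Hypothetical sketch, not from this commit: the two tests below behave the
# same once use_gpu defaults to True in the test-session helpers.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class MyOpTest(test_util.TensorFlowTestCase):

  def test_add_old_style(self):
    # Explicit flag, now redundant: use_gpu is already True by default.
    with self.cached_session(use_gpu=True):
      self.assertAllClose(3.0, self.evaluate(constant_op.constant(1.0) + 2.0))

  def test_add_new_style(self):
    # Same behavior with the redundant argument removed.
    with self.cached_session():
      self.assertAllClose(3.0, self.evaluate(constant_op.constant(1.0) + 2.0))


if __name__ == '__main__':
  test.main()
```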
@@ -462,7 +462,7 @@ class FunctionTest(test.TestCase):

 @test_util.run_deprecated_v1
 def testWhileLoopCallsFunc(self):
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:

 @function.Defun(dtypes.float32)
 def Times2(x):
@@ -2289,7 +2289,7 @@ class TensorFlowTestCase(googletest.TestCase):
 ``` python
 class MyOperatorTest(test_util.TensorFlowTestCase):
 def testMyOperator(self):
-with self.session(use_gpu=True):
+with self.session():
 valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
 result = MyOperator(valid_input).eval()
 self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
@@ -2339,7 +2339,7 @@ class TensorFlowTestCase(googletest.TestCase):
 ```python
 class MyOperatorTest(test_util.TensorFlowTestCase):
 def testMyOperator(self):
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
 valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
 result = MyOperator(valid_input).eval()
 self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
@@ -420,7 +420,7 @@ def run_all_keras_modes(test_or_class=None,
 def _v1_session_test(f, test_or_class, config, *args, **kwargs):
 with ops.get_default_graph().as_default():
 with testing_utils.run_eagerly_scope(False):
-with test_or_class.test_session(use_gpu=True, config=config):
+with test_or_class.test_session(config=config):
 f(test_or_class, *args, **kwargs)

@@ -42,7 +42,7 @@ class Conv1DTest(keras_parameterized.TestCase):
 stack_size = 3
 length = 7

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.Conv1D,
 kwargs=kwargs,
@@ -54,7 +54,7 @@ class Conv1DTest(keras_parameterized.TestCase):
 stack_size = 3
 length = 7

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 if expected_output_shape is not None:
 expected_output_shape = (None,) + expected_output_shape

@@ -112,7 +112,7 @@ class Conv1DTest(keras_parameterized.TestCase):
 'activity_regularizer': 'l2',
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv1D(**kwargs)
 layer.build((None, 5, 2))
 self.assertEqual(len(layer.losses), 2)
@@ -131,14 +131,14 @@ class Conv1DTest(keras_parameterized.TestCase):
 'bias_constraint': b_constraint,
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv1D(**kwargs)
 layer.build((None, 5, 2))
 self.assertEqual(layer.kernel.constraint, k_constraint)
 self.assertEqual(layer.bias.constraint, b_constraint)

 def test_conv1d_recreate_conv(self):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv1D(filters=1,
 kernel_size=3,
 strides=1,
@@ -151,7 +151,7 @@ class Conv1DTest(keras_parameterized.TestCase):
 self.assertEqual(outp1_shape, layer(inpt1).shape)

 def test_conv1d_recreate_conv_unknown_dims(self):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv1D(filters=1,
 kernel_size=3,
 strides=1,
@@ -184,7 +184,7 @@ class Conv2DTest(keras_parameterized.TestCase):
 input_data_shape = (num_samples, num_row or 7, num_col or 6, stack_size)
 input_data = 10 * np.random.random(input_data_shape).astype(np.float32)

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.Conv2D,
 kwargs=kwargs,
@@ -205,7 +205,7 @@ class Conv2DTest(keras_parameterized.TestCase):
 input_data_shape = batch_shape + (num_row or 7, num_col or 6, stack_size)
 input_data = 10 * np.random.random(input_data_shape).astype(np.float32)

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 if expected_output_shape is not None:
 expected_output_shape = (None,) + expected_output_shape
 testing_utils.layer_test(
@@ -272,7 +272,7 @@ class Conv2DTest(keras_parameterized.TestCase):
 'activity_regularizer': 'l2',
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv2D(**kwargs)
 layer.build((None, 5, 5, 2))
 self.assertEqual(len(layer.losses), 2)
@@ -291,7 +291,7 @@ class Conv2DTest(keras_parameterized.TestCase):
 'bias_constraint': b_constraint,
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv2D(**kwargs)
 layer.build((None, 5, 5, 2))
 self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -313,7 +313,7 @@ class Conv3DTest(keras_parameterized.TestCase):
 num_col = 6
 depth = 5

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.Conv3D,
 kwargs=kwargs,
@@ -331,7 +331,7 @@ class Conv3DTest(keras_parameterized.TestCase):
 num_col = 6
 depth = 5

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 if expected_output_shape is not None:
 expected_output_shape = (None,) + expected_output_shape

@@ -387,7 +387,7 @@ class Conv3DTest(keras_parameterized.TestCase):
 'activity_regularizer': 'l2',
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv3D(**kwargs)
 layer.build((None, 5, 5, 5, 2))
 self.assertEqual(len(layer.losses), 2)
@@ -407,7 +407,7 @@ class Conv3DTest(keras_parameterized.TestCase):
 'bias_constraint': b_constraint,
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv3D(**kwargs)
 layer.build((None, 5, 5, 5, 2))
 self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -415,7 +415,7 @@ class Conv3DTest(keras_parameterized.TestCase):

 def test_conv3d_dynamic_shape(self):
 input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # Won't raise error here.
 testing_utils.layer_test(
 keras.layers.Conv3D,
@@ -564,7 +564,7 @@ class ConvSequentialTest(keras_parameterized.TestCase):
 kwargs['filters'] = 1
 kwargs['kernel_size'] = 3
 kwargs['dilation_rate'] = 2
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = conv_layer_cls(**kwargs)
 output1 = layer(np.zeros(input_shape1))
 self.assertEqual(output1.shape, expected_output_shape1)
@@ -607,7 +607,7 @@ class ConvSequentialTest(keras_parameterized.TestCase):
 expected_output_shape1, expected_output_shape2)

 def test_dynamic_shape(self):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv3D(2, 3)
 input_shape = (5, None, None, 2)
 inputs = keras.Input(shape=input_shape)
@@ -626,7 +626,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
 shape = (num_samples, num_steps, input_dim)
 inputs = np.ones(shape)

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # basic test
 testing_utils.layer_test(
 keras.layers.ZeroPadding1D,
@@ -682,7 +682,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
 inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))

 # basic test
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.ZeroPadding2D,
 kwargs={
@@ -699,7 +699,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
 input_shape=inputs.shape)

 # correctness test
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.ZeroPadding2D(
 padding=(2, 2), data_format=data_format)
 layer.build(inputs.shape)
@@ -770,7 +770,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
 inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
 input_len_dim3, stack_size))

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # basic test
 testing_utils.layer_test(
 keras.layers.ZeroPadding3D,
@@ -787,7 +787,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
 },
 input_shape=inputs.shape)

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # correctness test
 layer = keras.layers.ZeroPadding3D(
 padding=(2, 2, 2), data_format=data_format)
@@ -856,7 +856,7 @@ class ZeroPaddingTest(keras_parameterized.TestCase):
 class UpSamplingTest(keras_parameterized.TestCase):

 def test_upsampling_1d(self):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))

@@ -875,7 +875,7 @@ class UpSamplingTest(keras_parameterized.TestCase):
 stack_size)

 # basic test
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.UpSampling2D,
 kwargs={'size': (2, 2),
@@ -960,7 +960,7 @@ class UpSamplingTest(keras_parameterized.TestCase):
 input_len_dim3, stack_size)

 # basic test
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.UpSampling3D,
 kwargs={'size': (2, 2, 2),
@@ -1010,7 +1010,7 @@ class CroppingTest(keras_parameterized.TestCase):
 input_len_dim1 = 2
 inputs = np.random.rand(num_samples, time_length, input_len_dim1)

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.Cropping1D,
 kwargs={'cropping': (2, 2)},
@@ -1036,7 +1036,7 @@ class CroppingTest(keras_parameterized.TestCase):
 else:
 inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
 stack_size)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # basic test
 testing_utils.layer_test(
 keras.layers.Cropping2D,
@@ -1069,7 +1069,7 @@ class CroppingTest(keras_parameterized.TestCase):
 inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
 stack_size)
 # another correctness test (no cropping)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 cropping = ((0, 0), (0, 0))
 layer = keras.layers.Cropping2D(
 cropping=cropping, data_format=data_format)
@@ -1105,7 +1105,7 @@ class CroppingTest(keras_parameterized.TestCase):
 inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
 input_len_dim3, stack_size)
 # basic test
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.Cropping3D,
 kwargs={'cropping': cropping,
@@ -1114,7 +1114,7 @@ class CroppingTest(keras_parameterized.TestCase):

 if len(croppings) == 3 and len(croppings[0]) == 2:
 # correctness test
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Cropping3D(
 cropping=cropping, data_format=data_format)
 layer.build(inputs.shape)
@@ -1152,7 +1152,7 @@ class DepthwiseConv2DTest(keras_parameterized.TestCase):
 num_row = 7
 num_col = 6

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.DepthwiseConv2D,
 kwargs=kwargs,
@@ -36,7 +36,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
 num_row = 7
 num_col = 6

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.Conv2DTranspose,
 kwargs=kwargs,
@@ -67,7 +67,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
 'activity_regularizer': 'l2',
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv2DTranspose(**kwargs)
 layer.build((None, 5, 5, 2))
 self.assertEqual(len(layer.losses), 2)
@@ -86,7 +86,7 @@ class Conv2DTransposeTest(keras_parameterized.TestCase):
 'bias_constraint': b_constraint,
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv2DTranspose(**kwargs)
 layer.build((None, 5, 5, 2))
 self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -127,7 +127,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
 num_col = 6
 depth = 5

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.Conv3DTranspose,
 kwargs=kwargs,
@@ -159,7 +159,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
 'activity_regularizer': 'l2',
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv3DTranspose(**kwargs)
 layer.build((None, 5, 5, 5, 2))
 self.assertEqual(len(layer.losses), 2)
@@ -178,7 +178,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):
 'bias_constraint': b_constraint,
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.Conv3DTranspose(**kwargs)
 layer.build((None, 5, 5, 5, 2))
 self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -186,7 +186,7 @@ class Conv3DTransposeTest(keras_parameterized.TestCase):

 def test_conv3d_transpose_dynamic_shape(self):
 input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # Won't raise error here.
 testing_utils.layer_test(
 keras.layers.Conv3DTranspose,
@@ -205,7 +205,7 @@ class CuDNNGraphOnlyTest(keras_parameterized.TestCase):
 units = 2
 num_samples = 32

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 model = keras.models.Sequential()
 model.add(
 keras.layers.Embedding(
@@ -104,7 +104,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
 @keras_parameterized.run_all_keras_modes
 def test_batchnorm_convnet(self):
 if test.is_gpu_available(cuda_only=True):
-with self.session(use_gpu=True):
+with self.session():
 model = keras.models.Sequential()
 norm = keras.layers.BatchNormalization(
 axis=1, input_shape=(3, 4, 4), momentum=0.8)
@@ -412,7 +412,7 @@ class RandomFlipTest(keras_parameterized.TestCase):
 mock_random = np.reshape(mock_random, [2, 1, 1, 1])
 with test.mock.patch.object(
 random_ops, 'random_uniform', return_value=mock_random):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = image_preprocessing.RandomFlip()
 actual_output = layer(input_images, training=1)
 self.assertAllClose(expected_output, actual_output)
@@ -698,7 +698,7 @@ class RandomTransformTest(keras_parameterized.TestCase):
 fill_value=0.0,
 interpolation='bilinear'):
 inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 output = image_preprocessing.transform(
 inp,
 transform_matrix,
@@ -35,7 +35,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
 stack_size = 3
 length = 7

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.SeparableConv1D,
 kwargs=kwargs,
@@ -66,7 +66,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
 'activity_regularizer': 'l2',
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.SeparableConv1D(**kwargs)
 layer.build((None, 5, 2))
 self.assertEqual(len(layer.losses), 3)
@@ -87,7 +87,7 @@ class SeparableConv1DTest(keras_parameterized.TestCase):
 'bias_constraint': b_constraint,
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.SeparableConv1D(**kwargs)
 layer.build((None, 5, 2))
 self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
@@ -104,7 +104,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
 num_row = 7
 num_col = 6

-with self.cached_session(use_gpu=True):
+with self.cached_session():
 testing_utils.layer_test(
 keras.layers.SeparableConv2D,
 kwargs=kwargs,
@@ -138,7 +138,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
 'activity_regularizer': 'l2',
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.SeparableConv2D(**kwargs)
 layer.build((None, 5, 5, 2))
 self.assertEqual(len(layer.losses), 3)
@@ -159,7 +159,7 @@ class SeparableConv2DTest(keras_parameterized.TestCase):
 'bias_constraint': b_constraint,
 'strides': 1
 }
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 layer = keras.layers.SeparableConv2D(**kwargs)
 layer.build((None, 5, 5, 2))
 self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
@@ -407,7 +407,7 @@ class BNTest(test.TestCase):
 training = array_ops.placeholder(dtype='bool')
 outputs = bn.apply(inputs, training=training)

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 # Test training with placeholder learning phase.
 self.evaluate(variables.global_variables_initializer())
 np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
@@ -898,7 +898,7 @@ class BNTest(test.TestCase):
 moving_stddev = 1.
 renorm_mean = 0.
 renorm_stddev = 1.
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for _ in range(5):
 x = np.random.random(shape)
@@ -948,7 +948,7 @@ class BNTest(test.TestCase):
 moving_stddev = 1.
 renorm_mean = 0.
 renorm_stddev = 1.
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for step in range(6):
 x = np.random.random(shape)
@@ -1002,7 +1002,7 @@ class BNTest(test.TestCase):

 moving_mean = 0.
 moving_variance = 1.
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for _ in range(5):
 x = np.random.random(shape)
@@ -1055,7 +1055,7 @@ class BNTest(test.TestCase):
 moving_stddev = 1.
 renorm_mean = 0.
 renorm_stddev = 1.
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for _ in range(5):
 x = np.random.random(shape)
@@ -1101,7 +1101,7 @@ class BNTest(test.TestCase):
 self.assertListEqual(
 out1.shape.as_list(), out2.shape.as_list())

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())

 x = np.random.random(shape)
@@ -1123,7 +1123,7 @@ class BNTest(test.TestCase):
 out = normalization_layers.batch_normalization(
 inp, virtual_batch_size=2)

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())

 x = np.random.random(np_shape)
@@ -1154,7 +1154,7 @@ class BNTest(test.TestCase):
 shape[0] // virtual_batch_size,
 shape[1]])

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for _ in range(5):
 x = np.random.random(shape)
@@ -1207,7 +1207,7 @@ class BNTest(test.TestCase):
 ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
 shape[1:])

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for _ in range(5):
 x = np.random.random(shape)
@@ -1261,7 +1261,7 @@ class BNTest(test.TestCase):
 ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
 shape[1:])

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for _ in range(5):
 x = np.random.random(shape)
@@ -1413,7 +1413,7 @@ class BNTest(test.TestCase):
 ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
 shape[1:])

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
 self.evaluate(variables.global_variables_initializer())
 for _ in range(5):
 x = np.random.random(shape)
@@ -113,7 +113,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testSparse(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -203,7 +203,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):

 def doTestBasic(self, use_callable_params=False):
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -261,7 +261,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
 @combinations.generate(combinations.combine(mode=["graph", "eager"]))
 def testBasicWithAmsgrad(self):
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -353,7 +353,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testBasicWithLearningRateDecay(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -398,7 +398,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testBasicWithLearningRateInverseTimeDecay(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -445,7 +445,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testTensorLearningRate(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -484,7 +484,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testSharing(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -565,7 +565,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testSparse(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -655,7 +655,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):

 def doTestBasic(self, use_callable_params=False):
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -715,7 +715,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
 @combinations.generate(combinations.combine(mode=["graph", "eager"]))
 def testBasicWithAmsgrad(self):
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with self.cached_session(use_gpu=True):
+with self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -809,7 +809,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testBasicWithLearningRateDecay(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -854,7 +854,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testBasicWithLearningRateInverseTimeDecay(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -901,7 +901,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testTensorLearningRate(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -940,7 +940,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
 def testSharing(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -81,7 +81,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
 def testResourceSparse(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)  # pylint: disable=cell-var-from-loop
 m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
@@ -275,7 +275,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
 def testTensorLearningRate(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -312,7 +312,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
 def testSharing(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 # Initialize variables for numpy implementation.
 m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -37,7 +37,7 @@ class FtrlOptimizerTest(test.TestCase):
 def doTestFtrlwithoutRegularization(self, use_resource=False):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 if use_resource:
 var0 = variables.Variable([0.0, 0.0], dtype=dtype)
 var1 = variables.Variable([0.0, 0.0], dtype=dtype)
@@ -77,7 +77,7 @@ class FtrlOptimizerTest(test.TestCase):
 def testFtrlwithoutRegularization2(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([1.0, 2.0], dtype=dtype)
 var1 = variables.Variable([4.0, 3.0], dtype=dtype)
 grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -107,7 +107,7 @@ class FtrlOptimizerTest(test.TestCase):
 def testMinimizeSparseResourceVariable(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([[1.0, 2.0]], dtype=dtype)
 x = constant_op.constant([[4.0], [5.0]], dtype=dtype)

@@ -129,7 +129,7 @@ class FtrlOptimizerTest(test.TestCase):
 def testFtrlWithL1(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([1.0, 2.0], dtype=dtype)
 var1 = variables.Variable([4.0, 3.0], dtype=dtype)
 grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -159,7 +159,7 @@ class FtrlOptimizerTest(test.TestCase):
 def testFtrlWithBeta(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([1.0, 2.0], dtype=dtype)
 var1 = variables.Variable([4.0, 3.0], dtype=dtype)
 grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -185,7 +185,7 @@ class FtrlOptimizerTest(test.TestCase):
 def testFtrlWithL2_Beta(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([1.0, 2.0], dtype=dtype)
 var1 = variables.Variable([4.0, 3.0], dtype=dtype)
 grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -216,7 +216,7 @@ class FtrlOptimizerTest(test.TestCase):
 def testFtrlWithL1_L2(self):
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([1.0, 2.0], dtype=dtype)
 var1 = variables.Variable([4.0, 3.0], dtype=dtype)
 grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -253,7 +253,7 @@ class FtrlOptimizerTest(test.TestCase):
 """
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([1.0, 2.0], dtype=dtype)
 var1 = variables.Variable([4.0, 3.0], dtype=dtype)
 grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
@@ -286,7 +286,7 @@ class FtrlOptimizerTest(test.TestCase):
 """Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
 # TODO(tanzheny, omalleyt): Fix test in eager mode.
 for dtype in [dtypes.half, dtypes.float32]:
-with ops.Graph().as_default(), self.cached_session(use_gpu=True):
+with ops.Graph().as_default(), self.cached_session():
 var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
 var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
 grads0 = ops.IndexedSlices(
@ -321,7 +321,7 @@ class FtrlOptimizerTest(test.TestCase):
|
|||||||
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
|
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
|
||||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||||
for dtype in [dtypes.half, dtypes.float32]:
|
for dtype in [dtypes.half, dtypes.float32]:
|
||||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True) as sess:
|
with ops.Graph().as_default(), self.cached_session() as sess:
|
||||||
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||||
var1 = variables.Variable([1.0, 2.0], dtype=dtype)
|
var1 = variables.Variable([1.0, 2.0], dtype=dtype)
|
||||||
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
|
||||||
@ -404,7 +404,7 @@ class FtrlOptimizerTest(test.TestCase):
|
|||||||
def testEquivAdagradwithoutRegularization(self):
|
def testEquivAdagradwithoutRegularization(self):
|
||||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||||
for dtype in [dtypes.half, dtypes.float32]:
|
for dtype in [dtypes.half, dtypes.float32]:
|
||||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
with ops.Graph().as_default(), self.cached_session():
|
||||||
val0, val1 = self.applyOptimizer(
|
val0, val1 = self.applyOptimizer(
|
||||||
ftrl.Ftrl(
|
ftrl.Ftrl(
|
||||||
3.0,
|
3.0,
|
||||||
@ -415,7 +415,7 @@ class FtrlOptimizerTest(test.TestCase):
|
|||||||
l2_regularization_strength=0.0),
|
l2_regularization_strength=0.0),
|
||||||
dtype)
|
dtype)
|
||||||
|
|
||||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
with ops.Graph().as_default(), self.cached_session():
|
||||||
val2, val3 = self.applyOptimizer(
|
val2, val3 = self.applyOptimizer(
|
||||||
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
|
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
|
||||||
|
|
||||||
@ -449,7 +449,7 @@ class FtrlOptimizerTest(test.TestCase):
|
|||||||
def testEquivSparseGradientDescentwithoutRegularization(self):
|
def testEquivSparseGradientDescentwithoutRegularization(self):
|
||||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||||
for dtype in [dtypes.half, dtypes.float32]:
|
for dtype in [dtypes.half, dtypes.float32]:
|
||||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
with ops.Graph().as_default(), self.cached_session():
|
||||||
val0, val1 = self.applyOptimizer(
|
val0, val1 = self.applyOptimizer(
|
||||||
ftrl.Ftrl(
|
ftrl.Ftrl(
|
||||||
3.0,
|
3.0,
|
||||||
@ -461,7 +461,7 @@ class FtrlOptimizerTest(test.TestCase):
|
|||||||
dtype,
|
dtype,
|
||||||
is_sparse=True)
|
is_sparse=True)
|
||||||
|
|
||||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
with ops.Graph().as_default(), self.cached_session():
|
||||||
val2, val3 = self.applyOptimizer(
|
val2, val3 = self.applyOptimizer(
|
||||||
gradient_descent.GradientDescentOptimizer(3.0),
|
gradient_descent.GradientDescentOptimizer(3.0),
|
||||||
dtype,
|
dtype,
|
||||||
@ -473,7 +473,7 @@ class FtrlOptimizerTest(test.TestCase):
|
|||||||
def testEquivGradientDescentwithoutRegularization(self):
|
def testEquivGradientDescentwithoutRegularization(self):
|
||||||
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
# TODO(tanzheny, omalleyt): Fix test in eager mode.
|
||||||
for dtype in [dtypes.half, dtypes.float32]:
|
for dtype in [dtypes.half, dtypes.float32]:
|
||||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
with ops.Graph().as_default(), self.cached_session():
|
||||||
val0, val1 = self.applyOptimizer(
|
val0, val1 = self.applyOptimizer(
|
||||||
ftrl.Ftrl(
|
ftrl.Ftrl(
|
||||||
3.0,
|
3.0,
|
||||||
@ -484,7 +484,7 @@ class FtrlOptimizerTest(test.TestCase):
|
|||||||
l2_regularization_strength=0.0),
|
l2_regularization_strength=0.0),
|
||||||
dtype)
|
dtype)
|
||||||
|
|
||||||
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
|
with ops.Graph().as_default(), self.cached_session():
|
||||||
val2, val3 = self.applyOptimizer(
|
val2, val3 = self.applyOptimizer(
|
||||||
gradient_descent.GradientDescentOptimizer(3.0), dtype)
|
gradient_descent.GradientDescentOptimizer(3.0), dtype)
|
||||||
|
|
||||||
|
@ -58,7 +58,7 @@ class AddNTest(test.TestCase):

def testAddN(self):
np.random.seed(12345)
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
for dtype in self._supported_types():
for count in range(1, self._MAX_N + 1):
data = [self._buildData((2, 2), dtype) for _ in range(count)]

@ -71,7 +71,7 @@ class AddNTest(test.TestCase):
@test_util.run_deprecated_v1
def testUnknownShapes(self):
np.random.seed(12345)
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
for dtype in self._supported_types():
data = self._buildData((2, 2), dtype)
for count in range(1, self._MAX_N + 1):
@ -97,7 +97,7 @@ class ArgMaxTest(test.TestCase):
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
- with self.session(use_gpu=True):
+ with self.session():
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)

@ -105,7 +105,7 @@ class ArgMaxTest(test.TestCase):
# the values don't have a range that exceeds 32-bit integers.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
- with self.session(use_gpu=True):
+ with self.session():
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
@ -46,7 +46,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
def testSimpleGather(self, indices_dtype):
data = np.array([0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13])
indices = [3, 4]
- with self.session(use_gpu=True):
+ with self.session():
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)

@ -62,7 +62,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
def test2DArray(self, indices_dtype):
data = np.array([[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]])
indices = [[3], [4]]
- with self.session(use_gpu=True):
+ with self.session():
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)

@ -77,7 +77,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
def testHigherRank(self):
data = np.array([[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]])
indices = [[[2, 0], [1, 2]], [[2, 0], [0, 1]]]
- with self.session(use_gpu=True):
+ with self.session():
for dtype in _TEST_TYPES:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)

@ -113,7 +113,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
self.evaluate(array_ops.batch_gather(params, [7]))

def testEmptySlices(self):
- with self.session(use_gpu=True):
+ with self.session():
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
@ -59,7 +59,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
return data

def testScalar1D(self):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in _TEST_TYPES:
for indices in 4, [1, 2, 2, 4, 5]:

@ -74,7 +74,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
self.assertEqual(np_val.shape, gather_t.get_shape())

def testScalar2D(self):
- with self.session(use_gpu=True):
+ with self.session():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:

@ -90,7 +90,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
self.assertEqual(expected_shape, gather_t.get_shape())

def testSimpleTwoD32(self):
- with self.session(use_gpu=True):
+ with self.session():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:

@ -304,7 +304,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
# On GPU the bad indices do not raise error but fetch 0 values
if not test.is_gpu_available():
return
- with self.session(use_gpu=True):
+ with self.session():
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
@ -211,7 +211,7 @@ class StatefulScatterNdTest(test.TestCase):
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()

- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)

@ -225,7 +225,7 @@ class StatefulScatterNdTest(test.TestCase):
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()

- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
@ -40,7 +40,7 @@ class SliceTest(test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in xrange(4):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)

@ -49,7 +49,7 @@ class SliceTest(test.TestCase):
def testInt32(self):
inp = np.random.rand(4, 4).astype("i")
for k in xrange(4):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)

@ -119,7 +119,7 @@ class SliceTest(test.TestCase):

def testSelectAll(self):
for _ in range(10):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
inp = np.random.rand(4, 4, 4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)

@ -133,7 +133,7 @@ class SliceTest(test.TestCase):

def testSingleDimension(self):
for _ in range(10):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
inp = np.random.rand(10).astype("f")
a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)

@ -229,7 +229,7 @@ class SliceTest(test.TestCase):

def testSingleElementAll(self):
for _ in range(10):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)

@ -312,7 +312,7 @@ class SliceTest(test.TestCase):
self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])

def _testGradientSlice(self, input_shape, slice_begin, slice_size):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)

@ -362,7 +362,7 @@ class SliceTest(test.TestCase):
self.assertAllClose(np_ans, result)

def _testGradientVariableSize(self):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
out = array_ops.slice(inp, [1], [-1])
grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])

@ -380,7 +380,7 @@ class SliceTest(test.TestCase):
# Regression test for bug in slice. A low-level bug in Eigen was causing
# incorrect results for negative indices in multi-dimensional tensors.
# See b/114318298.
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
loss2 = math_ops.reduce_sum(x[:-1][:, :-1])

@ -477,7 +477,7 @@ class SliceTest(test.TestCase):
self.assertEqual([None, 2], c.get_shape().as_list())

def testSliceOfSlice(self):
- with self.session(use_gpu=True):
+ with self.session():
a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
b = a[1:, :]
c = b[:-1, :]
@ -52,7 +52,7 @@ class StackOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
np.random.seed(7)
- with self.session(use_gpu=True):
+ with self.session():
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
rank = len(shape)
for axis in range(-rank, rank):

@ -90,7 +90,7 @@ class StackOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testConst(self):
np.random.seed(7)
- with self.session(use_gpu=True):
+ with self.session():
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))

@ -155,7 +155,7 @@ class StackOpTest(test.TestCase):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
with self.subTest(shape=shape):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)

@ -171,7 +171,7 @@ class StackOpTest(test.TestCase):
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.subTest(shape=shape):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs, axis=1)

@ -241,7 +241,7 @@ class StackOpTest(test.TestCase):
for axis in range(-rank, rank):
test_arrays = np_split_squeeze(expected, axis)

- with self.cached_session(use_gpu=True):
+ with self.cached_session():
with self.subTest(shape=shape, dtype=dtype, axis=axis):
actual_pack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_pack.get_shape())

@ -265,7 +265,7 @@ class StackOpTest(test.TestCase):

def testComplex(self):
np.random.seed(7)
- with self.session(use_gpu=True):
+ with self.session():
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
for dtype in [np.complex64, np.complex128]:
with self.subTest(shape=shape, dtype=dtype):

@ -279,7 +279,7 @@ class AutomaticStackingTest(test.TestCase):

@test_util.run_deprecated_v1
def testSimple(self):
- with self.session(use_gpu=True):
+ with self.session():
self.assertAllEqual(
[1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())

@ -299,7 +299,7 @@ class AutomaticStackingTest(test.TestCase):
]).eval())

def testWithNDArray(self):
- with self.session(use_gpu=True):
+ with self.session():
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(

@ -310,7 +310,7 @@ class AutomaticStackingTest(test.TestCase):

@test_util.run_deprecated_v1
def testVariable(self):
- with self.session(use_gpu=True):
+ with self.session():
v = variables.Variable(17)
result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
self.evaluate(v.initializer)

@ -364,7 +364,7 @@ class AutomaticStackingTest(test.TestCase):

@test_util.run_deprecated_v1
def testPlaceholder(self):
- with self.session(use_gpu=True):
+ with self.session():
# Test using placeholder with a defined shape.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])

@ -391,7 +391,7 @@ class AutomaticStackingTest(test.TestCase):
# Dynamic shape error.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
- with self.session(use_gpu=True):
+ with self.session():
with self.assertRaises(errors_impl.InvalidArgumentError):
result_1.eval(feed_dict={ph_1: [1]})
@ -474,7 +474,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testReverseRowsOf3Channels(self):
"""Tests optimized code for reversing rows with last dim size = 3."""
- with self.session(use_gpu=True):
+ with self.session():
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:

@ -491,7 +491,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testReverseRowsOf4Channels(self):
- with self.session(use_gpu=True):
+ with self.session():
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in (1, 2):
for middle_size in list(range(50)) + [100000]:

@ -508,7 +508,7 @@ class ReverseV2Test(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testReverseColumnsOf3Channels(self):
- with self.session(use_gpu=True):
+ with self.session():
for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
for outer_size in list(range(50)) + [100000]:
for middle_size in (1, 2):
@ -641,7 +641,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
def test_basic_slice(self):
for tensor_type in STRIDED_SLICE_TYPES:
with self.subTest(tensor_type=tensor_type):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
checker = StridedSliceChecker(
self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
_ = checker[:, :, :]

@ -696,7 +696,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testDegenerateSlices(self):
- with self.session(use_gpu=True):
+ with self.session():
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
# degenerate by offering a forward interval with a negative stride
_ = checker[0:-1:-1, :, :]

@ -717,7 +717,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testEllipsis(self):
- with self.session(use_gpu=True):
+ with self.session():
raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
checker = StridedSliceChecker(self, raw)

@ -738,7 +738,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testShrink(self):
- with self.session(use_gpu=True):
+ with self.session():
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)

@ -749,7 +749,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testBothNewAxisAndShrink(self):
- with self.session(use_gpu=True):
+ with self.session():
ones = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int16)
self.assertAllEqual(
ones[array_ops.newaxis, :,

@ -757,7 +757,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testTensorIndexing(self):
- with self.session(use_gpu=True):
+ with self.session():
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw, check_type_infer=False)

@ -769,7 +769,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
_ = checker[..., 2**64 // 2**63] # Test longs in Python 2

def testTensorIndexingTypeError(self):
- with self.session(use_gpu=True):
+ with self.session():
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
expected = re.escape(array_ops._SLICE_TYPE_ERROR)
with self.assertRaisesRegex(TypeError, expected):

@ -787,7 +787,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testExpand(self):
- with self.session(use_gpu=True):
+ with self.session():
raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
checker = StridedSliceChecker(self, raw)

@ -805,7 +805,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testExpandVariable(self):
- with self.session(use_gpu=True):
+ with self.session():
x = variables.Variable(7, dtype=dtypes.int32)
self.evaluate(x.initializer)
y = x[None].eval()

@ -814,7 +814,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testOptimizedCases(self):
- with self.session(use_gpu=True):
+ with self.session():
checker = StridedSliceChecker(self,
StridedSliceChecker.REF_TENSOR_ALIGNED)
# Identity

@ -830,7 +830,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):

@test_util.run_v1_only("currently failing on v2")
def testMasks(self):
- with self.session(use_gpu=True):
+ with self.session():
scalar = np.array(0)
# Test tensor type mask
checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)

@ -870,7 +870,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testUnknown(self):
- with self.session(use_gpu=True):
+ with self.session():
uncertain_tensor = array_ops.placeholder(dtypes.float32)
a = StridedSliceShapeChecker(uncertain_tensor)
a_slice_shape = a[...]

@ -882,7 +882,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testTensorShapeUncertain(self):
- with self.session(use_gpu=True):
+ with self.session():
uncertain_tensor = array_ops.placeholder(
dtypes.float32, shape=(5, None, 7))
a = StridedSliceShapeChecker(uncertain_tensor)

@ -906,7 +906,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testTensorValuedIndexShape(self):
- with self.session(use_gpu=True):
+ with self.session():
defined_shape_tensor = array_ops.placeholder(
dtypes.float32, shape=(5, 3, 7))
index_value = array_ops.placeholder(dtypes.int32, shape=())
@ -965,7 +965,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):

@test_util.run_v1_only("b/120545219")
def testGradient(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
var = variables.Variable(
array_ops.reshape(
math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))

@ -992,7 +992,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):

@test_util.run_v1_only("b/120545219")
def testGradientZero(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
var = variables.Variable(8.)
init = variables.global_variables_initializer()
sess.run(init)

@ -1001,7 +1001,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testInt64Indices(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
a = math_ops.range(3, dtype=dtypes.float32)
index = constant_op.constant(1, dtype=dtypes.int64)
b = 2. * a[index]

@ -1014,7 +1014,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testHostVsDevice(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
var2 = variables.Variable(
array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),

@ -1029,7 +1029,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testInt64Shape(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
original_dy = array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1))

@ -1044,7 +1044,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):

@test_util.run_deprecated_v1
def testMixedIndexTypes(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
original_dy = array_ops.reshape(
math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
shape=(4, 1, 1))

@ -1133,7 +1133,7 @@ class StridedSliceAssignChecker(object):
if self.tensor_type.is_complex:
value -= 1j * value

- with self.test.test_session(use_gpu=True) as sess:
+ with self.test.test_session() as sess:
if self._use_resource:
var = resource_variable_ops.ResourceVariable(self.x)
else:
@ -1514,7 +1514,7 @@ class InvertPermutationTest(test_util.TensorFlowTestCase):
def testInvertPermutation(self):
for dtype in [dtypes.int32, dtypes.int64]:
with self.subTest(dtype=dtype):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
y = array_ops.invert_permutation(x)
self.assertAllEqual(y.get_shape(), [5])

@ -1597,7 +1597,7 @@ class SnapshotOpTest(test_util.TensorFlowTestCase):
def testInvertPermutation(self):
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
with self.subTest(dtype=dtype):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
y = gen_array_ops.snapshot(x)
self.assertAllEqual(y, [0, 1, 2, 3])
|
@ -61,7 +61,7 @@ class AtrousConv2DTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.run_deprecated_v1
|
@test_util.run_deprecated_v1
|
||||||
def testAtrousConv2DForward(self):
|
def testAtrousConv2DForward(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
# Input: [batch, height, width, input_depth]
|
# Input: [batch, height, width, input_depth]
|
||||||
height = 9
|
height = 9
|
||||||
for width in [9, 10]: # Test both odd and even width.
|
for width in [9, 10]: # Test both odd and even width.
|
||||||
@ -108,7 +108,7 @@ class AtrousConv2DTest(test.TestCase):
|
|||||||
padding = "SAME" # The padding needs to be "SAME"
|
padding = "SAME" # The padding needs to be "SAME"
|
||||||
np.random.seed(1) # Make it reproducible.
|
np.random.seed(1) # Make it reproducible.
|
||||||
|
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
# Input: [batch, height, width, input_depth]
|
# Input: [batch, height, width, input_depth]
|
||||||
for height in range(15, 17):
|
for height in range(15, 17):
|
||||||
for width in range(15, 17):
|
for width in range(15, 17):
|
||||||
@ -138,7 +138,7 @@ class AtrousConv2DTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.run_deprecated_v1
|
@test_util.run_deprecated_v1
|
||||||
def testGradient(self):
|
def testGradient(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
# Input: [batch, height, width, input_depth]
|
# Input: [batch, height, width, input_depth]
|
||||||
x_shape = [2, 5, 6, 2]
|
x_shape = [2, 5, 6, 2]
|
||||||
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
|
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
|
||||||
@ -166,7 +166,7 @@ class AtrousConv2DTransposeTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.run_deprecated_v1
|
@test_util.run_deprecated_v1
|
||||||
def testAtrousConv2DTransposeForward(self):
|
def testAtrousConv2DTransposeForward(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
# Input: [batch, height, width, input_depth]
|
# Input: [batch, height, width, input_depth]
|
||||||
height = 9
|
height = 9
|
||||||
for width in [9, 10]: # Test both odd and even width.
|
for width in [9, 10]: # Test both odd and even width.
|
||||||
@ -206,7 +206,7 @@ class AtrousDepthwiseConv2DTest(test.TestCase):
|
|||||||
@test_util.run_deprecated_v1
|
@test_util.run_deprecated_v1
|
||||||
def testAtrousDepthwiseConv2DForward(self):
|
def testAtrousDepthwiseConv2DForward(self):
|
||||||
strides = [1, 1, 1, 1]
|
strides = [1, 1, 1, 1]
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
# Input: [batch, height, width, input_depth]
|
# Input: [batch, height, width, input_depth]
|
||||||
height = 9
|
height = 9
|
||||||
for width in [9, 10]: # Test both odd and even width.
|
for width in [9, 10]: # Test both odd and even width.
|
||||||
|
@ -86,7 +86,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
|
|||||||
a_np = np.tile(a_np, batch_dims + [1, 1])
|
a_np = np.tile(a_np, batch_dims + [1, 1])
|
||||||
b = np.tile(b, batch_dims + [1, 1])
|
b = np.tile(b, batch_dims + [1, 1])
|
||||||
|
|
||||||
with self.cached_session(use_gpu=True):
|
with self.cached_session():
|
||||||
a_tf = a
|
a_tf = a
|
||||||
b_tf = b
|
b_tf = b
|
||||||
if use_placeholder:
|
if use_placeholder:
|
||||||
@ -199,7 +199,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
|
|||||||
# right-hand sides.
|
# right-hand sides.
|
||||||
matrix = np.array([[1., 1.], [1., 1.]])
|
matrix = np.array([[1., 1.], [1., 1.]])
|
||||||
rhs = np.array([[1., 0.]])
|
rhs = np.array([[1., 0.]])
|
||||||
with self.cached_session(use_gpu=True):
|
with self.cached_session():
|
||||||
with self.assertRaises(ValueError):
|
with self.assertRaises(ValueError):
|
||||||
self._verifySolve(matrix, rhs)
|
self._verifySolve(matrix, rhs)
|
||||||
with self.assertRaises(ValueError):
|
with self.assertRaises(ValueError):
|
||||||
@ -208,7 +208,7 @@ class BandedTriangularSolveOpTest(test.TestCase):
|
|||||||
# Number of bands exceeds the dimension of the matrix.
|
# Number of bands exceeds the dimension of the matrix.
|
||||||
matrix = np.ones((6, 4))
|
matrix = np.ones((6, 4))
|
||||||
rhs = np.ones((4, 2))
|
rhs = np.ones((4, 2))
|
||||||
with self.cached_session(use_gpu=True):
|
with self.cached_session():
|
||||||
with self.assertRaises(ValueError):
|
with self.assertRaises(ValueError):
|
||||||
self._verifySolve(matrix, rhs)
|
self._verifySolve(matrix, rhs)
|
||||||
with self.assertRaises(ValueError):
|
with self.assertRaises(ValueError):
|
||||||
|
@ -40,7 +40,7 @@ from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):

def _compareGPU(self, x, y, np_func, tf_func):
- with self.cached_session(use_gpu=True) as sess:
+ with self.cached_session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)

@ -143,7 +143,7 @@ class MathBuiltinUnaryTest(test.TestCase):

np_out = np.floor_divide(x, y + 0.1)

- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny

@ -167,7 +167,7 @@ class BroadcastSimpleTest(test.TestCase):

def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)

@ -166,7 +166,7 @@ class BatchMatmulGradientTest(test.TestCase):
def Loss(x, y):
return math_ops.reduce_sum(math_ops.matmul(x, y, adjoint_a, adjoint_b))

- with self.cached_session(use_gpu=True):
+ with self.cached_session():
((x_jacob_t, y_jacob_t),
(x_jacob_n, y_jacob_n)) = gradient_checker_v2.compute_gradient(
Loss, [x, y], delta=delta)
@ -36,7 +36,7 @@ from tensorflow.python.platform import googletest
class BincountTest(test_util.TensorFlowTestCase):

def test_empty(self):
- with self.session(use_gpu=True):
+ with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([], minlength=5)),
[0, 0, 0, 0, 0])

@ -54,7 +54,7 @@ class BincountTest(test_util.TensorFlowTestCase):
np.float64)

def test_values(self):
- with self.session(use_gpu=True):
+ with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([1, 1, 1, 2, 2, 3])),
[0, 3, 2, 1])

@ -74,7 +74,7 @@ class BincountTest(test_util.TensorFlowTestCase):
np.ones(10000))

def test_maxlength(self):
- with self.session(use_gpu=True):
+ with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount([5], maxlength=3)), [0, 0, 0])
self.assertAllEqual(

@ -84,7 +84,7 @@ class BincountTest(test_util.TensorFlowTestCase):

def test_random_with_weights(self):
num_samples = 10000
- with self.session(use_gpu=True):
+ with self.session():
np.random.seed(42)
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
arr = np.random.randint(0, 1000, num_samples)

@ -98,7 +98,7 @@ class BincountTest(test_util.TensorFlowTestCase):

def test_random_without_weights(self):
num_samples = 10000
- with self.session(use_gpu=True):
+ with self.session():
np.random.seed(42)
for dtype in [np.int32, np.float32]:
arr = np.random.randint(0, 1000, num_samples)

@ -108,7 +108,7 @@ class BincountTest(test_util.TensorFlowTestCase):
np.bincount(arr, weights))

def test_zero_weights(self):
- with self.session(use_gpu=True):
+ with self.session():
self.assertAllEqual(
self.evaluate(bincount_ops.bincount(np.arange(1000), np.zeros(1000))),
np.zeros(1000))
|
@ -33,21 +33,21 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
|
|||||||
|
|
||||||
def testBroadcastToBasic(self):
|
def testBroadcastToBasic(self):
|
||||||
for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
|
for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
x = np.array([1, 2, 3], dtype=dtype)
|
x = np.array([1, 2, 3], dtype=dtype)
|
||||||
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
|
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
|
||||||
v_np = np.broadcast_to(x, [3, 3])
|
v_np = np.broadcast_to(x, [3, 3])
|
||||||
self.assertAllEqual(v_tf, v_np)
|
self.assertAllEqual(v_tf, v_np)
|
||||||
|
|
||||||
def testBroadcastToString(self):
|
def testBroadcastToString(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
x = np.array([b"1", b"2", b"3"])
|
x = np.array([b"1", b"2", b"3"])
|
||||||
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
|
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
|
||||||
v_np = np.broadcast_to(x, [3, 3])
|
v_np = np.broadcast_to(x, [3, 3])
|
||||||
self.assertAllEqual(v_tf, v_np)
|
self.assertAllEqual(v_tf, v_np)
|
||||||
|
|
||||||
def testBroadcastToBool(self):
|
def testBroadcastToBool(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
x = np.array([True, False, True], dtype=np.bool)
|
x = np.array([True, False, True], dtype=np.bool)
|
||||||
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
@@ -56,7 +56,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShape(self):
for input_dim in range(1, 6):
for output_dim in range(input_dim, 6):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
input_shape = [2] * input_dim
output_shape = [2] * output_dim
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
@@ -67,7 +67,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShapeInnerDim(self):
input_shape = [2, 1, 3]
output_shape = [2, 5, 3]
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
@@ -76,7 +76,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShapeLargerDim(self):
input_shape = [2, 1, 3, 2, 2, 2]
output_shape = [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 15, 3, 2, 2, 2]
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
@@ -85,21 +85,21 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToShapeLargerDim2(self):
input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf, v_np)

def testBroadcastToScalar(self):
- with self.session(use_gpu=True):
+ with self.session():
x = np.array(1, dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf, v_np)

def testBroadcastScalarToNonScalar(self):
- with self.session(use_gpu=True):
+ with self.session():
x = np.array(1.0, dtype=np.float)
v_tf = array_ops.broadcast_to(constant_op.constant(1.0), [2, 3, 4,
1, 1, 1])
@@ -108,7 +108,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):

def testBroadcastToShapeTypeAndInference(self):
for dtype in [dtypes.int32, dtypes.int64]:
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = np.array([1, 2, 3])
v_tf = array_ops.broadcast_to(
constant_op.constant(x),

@@ -36,14 +36,14 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
self.assertAllEqual(expected_out, self.evaluate(op))

def testEmptyFloat(self):
op = math_ops._bucketize(
array_ops.zeros([0, 3], dtype=dtypes.float32), boundaries=[])
expected_out = np.zeros([0, 3], dtype=np.float32)
- with self.session(use_gpu=True):
+ with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))

def testFloat(self):
@@ -51,7 +51,7 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
self.assertAllEqual(expected_out, self.evaluate(op))

def test2DInput(self):
@@ -59,14 +59,14 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
self.assertAllEqual(expected_out, self.evaluate(op))

@test_util.run_deprecated_v1
def testInvalidBoundariesOrder(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
self.evaluate(op)

@@ -108,7 +108,7 @@ class CastOpTest(test.TestCase):
with self.cached_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)

@@ -166,7 +166,7 @@ class CholeskyOpTest(test.TestCase):
@test_util.disable_xla("b/123337890")
def testNotInvertibleCPU(self):
# The input should be invertible.
- with self.session(use_gpu=True):
+ with self.session():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful. The"

@@ -52,7 +52,7 @@ class ClipTest(test.TestCase):

# ClipByValue test
def testClipByValue(self):
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
@@ -73,7 +73,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = 2
@@ -95,7 +95,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = constant_op.constant(
@@ -118,7 +118,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[4, 4, 4], [4, 5, 6]]
clip_value_min = 4
@@ -141,7 +141,7 @@ class ClipTest(test.TestCase):
dtypes.int64,
dtypes.uint8,
]:
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [5, 5, 6]]
clip_value_min = constant_op.constant(
@@ -154,7 +154,7 @@ class ClipTest(test.TestCase):
self.assertAllClose(np_ans, tf_ans)

def testClipByValueBadShape(self):
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
@@ -176,7 +176,7 @@ class ClipTest(test.TestCase):

def _testClipIndexedSlicesByValue(self, values, indices, shape,
clip_value_min, clip_value_max, expected):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
values = constant_op.constant(values)
indices = constant_op.constant(indices)
shape = constant_op.constant(shape)
@@ -211,7 +211,7 @@ class ClipTest(test.TestCase):
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
@@ -227,14 +227,14 @@ class ClipTest(test.TestCase):

@test_util.run_deprecated_v1
def testClipByNormGradientZeros(self):
- with self.session(use_gpu=True):
+ with self.session():
x = array_ops.zeros([3])
b = clip_ops.clip_by_norm(x, 1.)
grad, = gradients_impl.gradients(b, x)
self.assertAllEqual(grad, [1., 1., 1.])

def testClipByNormBadShape(self):
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
@@ -243,7 +243,7 @@ class ClipTest(test.TestCase):

def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
@@ -255,7 +255,7 @@ class ClipTest(test.TestCase):

def testClipByNormZero(self):
# No norm clipping when norm = 0
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
@@ -267,7 +267,7 @@ class ClipTest(test.TestCase):

def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
@@ -279,7 +279,7 @@ class ClipTest(test.TestCase):

def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
@@ -291,7 +291,7 @@ class ClipTest(test.TestCase):

def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
@@ -305,7 +305,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
- with self.session(use_gpu=True):
+ with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -327,7 +327,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormClippedTensor(self):
# Norm clipping when clip_norm < 5
- with self.session(use_gpu=True):
+ with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -349,7 +349,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
- with self.session(use_gpu=True):
+ with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -373,7 +373,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
- with self.session(use_gpu=True):
+ with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = ops.IndexedSlices(
constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
@@ -407,7 +407,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
- with self.session(use_gpu=True):
+ with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
@@ -427,7 +427,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
- with self.session(use_gpu=True):
+ with self.session():
x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([0.0, 0.0])
# Norm = 0, no changes
@@ -447,7 +447,7 @@ class ClipTest(test.TestCase):
@test_util.run_deprecated_v1
def testClipByGlobalNormInf(self):
# Expect all NaNs when global norm is inf.
- with self.session(use_gpu=True):
+ with self.session():
x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
@@ -463,7 +463,7 @@ class ClipTest(test.TestCase):

def testClipByAverageNormClipped(self):
# Norm clipping when average clip_norm < 0.83333333
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@@ -475,7 +475,7 @@ class ClipTest(test.TestCase):

def testClipByAverageNormClippedTensor(self):
# Norm clipping when average clip_norm < 0.83333333
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@@ -487,7 +487,7 @@ class ClipTest(test.TestCase):

def testClipByAverageNormNotClipped(self):
# No norm clipping when average clip_norm >= 0.83333333
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
@@ -499,7 +499,7 @@ class ClipTest(test.TestCase):

def testClipByAverageNormZero(self):
# No norm clipping when average clip_norm = 0
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
@@ -512,7 +512,7 @@ class ClipTest(test.TestCase):
def testClipByAverageNormReplacedWithClipByNorm(self):
# Check clip_by_average_norm(t) is the same as
# clip_by_norm(t, clip_norm * tf.compat.v1.to_float(tf.size(t)))
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
# expected answer [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
@@ -532,7 +532,7 @@ class ClipTest(test.TestCase):
y = clip_ops.clip_by_value(zero, 1.0, 1.0)
z = clip_ops.clip_by_value(zero, zero, 1.0)
w = clip_ops.clip_by_value(zero, 1.0, zero)
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
sess.run([x, y, z, w], feed_dict={zero: np.zeros((7, 0))})

@@ -38,7 +38,7 @@ class ConcatOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testHStack(self):
- with self.session(use_gpu=True):
+ with self.session():
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 0)
@@ -54,7 +54,7 @@ class ConcatOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testVStack(self):
- with self.session(use_gpu=True):
+ with self.session():
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 1)
@@ -70,7 +70,7 @@ class ConcatOpTest(test.TestCase):

@test_util.run_deprecated_v1
def test4DStack(self):
- with self.session(use_gpu=True):
+ with self.session():
p1 = array_ops.placeholder(dtypes.float32, shape=[2, 3, 1, 1])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 3, 4, 1])
c = array_ops.concat([p1, p2], 2)
@@ -121,7 +121,7 @@ class ConcatOpTest(test.TestCase):
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
- with self.session(use_gpu=True):
+ with self.session():
p = []
for i in np.arange(num_tensors):
input_shape = shape
@@ -315,7 +315,7 @@ class ConcatOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testGradientWithUnknownInputDim(self):
- with self.session(use_gpu=True):
+ with self.session():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = array_ops.concat([x, y], 2)
@@ -526,7 +526,7 @@ class ConcatOpTest(test.TestCase):
# shared memory is not large for all the inputs
@test_util.run_deprecated_v1
def testConcatLargeNumberOfTensors(self):
- with self.session(use_gpu=True):
+ with self.session():
for concat_dim in range(2):
params = {}
p = []

@@ -54,7 +54,7 @@ class ConstantTest(test.TestCase):

def _testGpu(self, x):
np_ans = np.array(x)
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:

@@ -557,7 +557,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):

@test_util.run_v1_only("b/120545219")
def testCondColocation(self):
- with self.session(use_gpu=True):
+ with self.session():
with ops.device("/cpu:0"):
v = variables.Variable(7.0)

@@ -1224,7 +1224,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
def testCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
- with self.cached_session(use_gpu=True, config=config) as sess:
+ with self.cached_session(config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
@@ -2621,7 +2621,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
def testWhileCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
- with self.cached_session(use_gpu=True, config=config) as sess:
+ with self.cached_session(config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x_init = constant_op.constant(1.0)

@@ -4911,7 +4911,7 @@ class AssertTest(test.TestCase):
if test_util.is_gpu_available():
self.skipTest("b/128646478 fails in opensource")

- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):

@@ -153,7 +153,7 @@ class Conv1DTransposeTest(test.TestCase):
def testConv1DTransposeSingleStrideNCW(self):
# `NCW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
- with self.session(use_gpu=True):
+ with self.session():
strides = [1, 1, 1]

# Input, output: [batch, depth, width]
@@ -184,7 +184,7 @@ class Conv1DTransposeTest(test.TestCase):
def testConv1DTransposeSameNCW(self):
# `NCW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
- with self.session(use_gpu=True):
+ with self.session():
strides = [1, 1, 2]

# Input, output: [batch, depth, width]
@@ -216,7 +216,7 @@ class Conv1DTransposeTest(test.TestCase):
def testConv1DTransposeValidNCW(self):
# `NCW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
- with self.session(use_gpu=True):
+ with self.session():
strides = [1, 1, 2]

# Input, output: [batch, depth, width]

@@ -77,7 +77,7 @@ class Conv2DBackpropFilterGradTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradientDilatedConv(self):
if test.is_gpu_available(cuda_only=True):
- with self.session(use_gpu=True):
+ with self.session():
for padding in [
"SAME",
"VALID",

@@ -186,7 +186,7 @@ class Conv2DTransposeTest(test.TestCase):
def testConv2DTransposeSingleStrideNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
- with self.session(use_gpu=True):
+ with self.session():
strides = [1, 1, 1, 1]

# Input, output: [batch, depth, height, width, depth]
@@ -221,7 +221,7 @@ class Conv2DTransposeTest(test.TestCase):
def testConv2DTransposeSameNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
- with self.session(use_gpu=True):
+ with self.session():
strides = [1, 1, 2, 2]

# Input, output: [batch, depth, height, width]
@@ -257,7 +257,7 @@ class Conv2DTransposeTest(test.TestCase):
def testConv2DTransposeValidNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
- with self.session(use_gpu=True):
+ with self.session():
strides = [1, 1, 2, 2]

# Input, output: [batch, depth, height, width]

@@ -2787,7 +2787,7 @@ class SeparableConv2DTest(test.TestCase):
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
- with self.cached_session(use_gpu=True) as sess:
+ with self.cached_session() as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
@@ -2899,7 +2899,7 @@ class SeparableConv2DTest(test.TestCase):
depthwise_filter_in_sizes = [2, 2, 2, 3]
pointwise_filter_in_sizes = [1, 1, 6, 7]
padding = [[0, 0], [1, 2], [3, 4], [0, 0]]
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
# Compute the 'expected' values by manually padding before calling
# separable_conv2d
t1 = self._InitValues(tensor_in_sizes)

@@ -37,7 +37,7 @@ class DecodeImageOpTest(test.TestCase):
def testBmp(self):
# Read a real bmp and verify shape
path = os.path.join(prefix_path, "bmp", "testdata", "lena.bmp")
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
bmp0 = io_ops.read_file(path)
image0 = image_ops.decode_image(bmp0)
image1 = image_ops.decode_bmp(bmp0)
@@ -53,7 +53,7 @@ class DecodeImageOpTest(test.TestCase):
stride = 5
shape = (12, height, width, 3)

- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
gif0 = io_ops.read_file(path)
image0 = image_ops.decode_image(gif0)
image1 = image_ops.decode_gif(gif0)
@@ -82,7 +82,7 @@ class DecodeImageOpTest(test.TestCase):
def testJpeg(self):
# Read a real jpeg and verify shape
path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_image(jpeg0)
image1 = image_ops.decode_jpeg(jpeg0)
@@ -100,7 +100,7 @@ class DecodeImageOpTest(test.TestCase):
inputs = [(1, "lena_gray.png")]
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
- with self.cached_session(use_gpu=True) as sess:
+ with self.cached_session() as sess:
path = os.path.join(prefix_path, "png", "testdata", filename)
png0 = io_ops.read_file(path)
image0 = image_ops.decode_image(png0, channels=channels)

@@ -56,7 +56,7 @@ class DepthToSpaceTest(test.TestCase):
self.evaluate(output_nhwc)

if test.is_gpu_available():
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf, outputs)
@@ -126,7 +126,7 @@ class DepthToSpaceTest(test.TestCase):
self.assertAllEqual(x_tf.shape, x_out.shape)
self.evaluate(x_tf)
if test.is_gpu_available():
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
@@ -343,7 +343,7 @@ class DepthToSpaceGradientTest(test.TestCase):
return

assert 4 == x.ndim
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)

@@ -425,7 +425,7 @@ class DepthwiseConv2DTest(test.TestCase):
# GitHub issue 22110.
if not test.is_gpu_available():
return
- with self.session(use_gpu=True):
+ with self.session():
x = array_ops.placeholder(dtypes.float32)
f = np.ones([1, 1, 1, 1], np.float32)
v = nn_impl.depthwise_conv2d(

@@ -154,7 +154,7 @@ class DeterminantOpTest(test.TestCase):

@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
det1 = linalg_ops.matrix_determinant(matrix1)

@@ -374,7 +374,7 @@ class MatrixDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testVector(self):
- with self.session(use_gpu=True):
+ with self.session():
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
v_diag = array_ops.matrix_diag(v)
@@ -397,7 +397,7 @@ class MatrixDiagTest(test.TestCase):
self.assertAllEqual(v_diags, solution[0])

def _testVectorBatch(self, dtype):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
@@ -441,7 +441,7 @@ class MatrixDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testRectangularBatch(self):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
# Stores expected num_rows and num_cols (when the other is given).
# expected[d_lower, d_upper] = (expected_num_rows, expected_num_cols)
test_list = list()
@@ -542,7 +542,7 @@ class MatrixDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testInvalidShapeAtEval(self):
- with self.session(use_gpu=True):
+ with self.session():
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("diagonal must be at least 1-dim"):
array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
@@ -550,7 +550,7 @@ class MatrixDiagTest(test.TestCase):
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3,), (7, 4))
- with self.session(use_gpu=True):
+ with self.session():
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), np.float32)
y = array_ops.matrix_diag(x)
@@ -564,7 +564,7 @@ class MatrixDiagTest(test.TestCase):
tests = dict() # tests[shape] = (d_lower, d_upper)
tests[(3,)] = (-1, -1)
tests[(7, 3, 4)] = (-1, 1)
- with self.session(use_gpu=True):
+ with self.session():
for shape, diags in tests.items():
x = constant_op.constant(np.random.rand(*shape), np.float32)
for align in alignment_list:
@@ -580,7 +580,7 @@ class MatrixSetDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testSquare(self):
- with self.session(use_gpu=True):
+ with self.session():
v = np.array([1.0, 2.0, 3.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
@@ -603,7 +603,7 @@ class MatrixSetDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testRectangular(self):
- with self.session(use_gpu=True):
+ with self.session():
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
@@ -631,7 +631,7 @@ class MatrixSetDiagTest(test.TestCase):
self.assertAllEqual(output, solution)

def _testSquareBatch(self, dtype):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],
@@ -668,7 +668,7 @@ class MatrixSetDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testRectangularBatch(self):
- with self.session(use_gpu=True):
+ with self.session():
v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])
@@ -701,7 +701,7 @@ class MatrixSetDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testInvalidShapeAtEval(self):
- with self.session(use_gpu=True):
+ with self.session():
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
@@ -717,7 +717,7 @@ class MatrixSetDiagTest(test.TestCase):
})

def _testGrad(self, input_shape, diag_shape, diags, align):
- with self.session(use_gpu=True):
+ with self.session():
x = constant_op.constant(
np.random.rand(*input_shape), dtype=dtypes_lib.float32)
x_diag = constant_op.constant(
@@ -751,7 +751,7 @@ class MatrixSetDiagTest(test.TestCase):

@test_util.run_deprecated_v1
def testGradWithNoShapeInformation(self):
- with self.session(use_gpu=True) as sess:
+ with self.session() as sess:
v = array_ops.placeholder(dtype=dtypes_lib.float32)
mat = array_ops.placeholder(dtype=dtypes_lib.float32)
grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
@@ -774,7 +774,7 @@ class MatrixDiagPartTest(test.TestCase):

@test_util.run_deprecated_v1
def testSquare(self):
- with self.session(use_gpu=True):
+ with self.session():
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
mat_diag = array_ops.matrix_diag_part(mat)
@@ -798,7 +798,7 @@ class MatrixDiagPartTest(test.TestCase):

@test_util.run_deprecated_v1
def testRectangular(self):
- with self.session(use_gpu=True):
+ with self.session():
mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag, np.array([1.0, 5.0]))
@@ -817,7 +817,7 @@ class MatrixDiagPartTest(test.TestCase):
self.assertAllEqual(mat_diag, solution[0])

def _testSquareBatch(self, dtype):
- with self.cached_session(use_gpu=True):
+ with self.cached_session():
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
@@ -853,7 +853,7 @@ class MatrixDiagPartTest(test.TestCase):

@test_util.run_deprecated_v1
def testRectangularBatch(self):
- with self.session(use_gpu=True):
+ with self.session():
v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
@@ -880,7 +880,7 @@ class MatrixDiagPartTest(test.TestCase):
matrix = array_ops.placeholder(dtypes_lib.int32, shape=[None, None])
result = array_ops.matrix_diag_part(matrix, k=-1)
input_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
- with self.session(use_gpu=True):
+ with self.session():
result_eval = result.eval(feed_dict={matrix: input_matrix})
self.assertAllEqual([4, 8], result_eval)

@@ -891,7 +891,7 @@ class MatrixDiagPartTest(test.TestCase):

@test_util.run_deprecated_v1
def testInvalidShapeAtEval(self):
- with self.session(use_gpu=True):
+ with self.session():
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
@@ -899,7 +899,7 @@ class MatrixDiagPartTest(test.TestCase):
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
- with self.session(use_gpu=True):
+ with self.session():
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
y = array_ops.matrix_diag_part(x)
@@ -913,7 +913,7 @@ class MatrixDiagPartTest(test.TestCase):
tests = dict() # tests[shape] = (d_lower, d_upper)
tests[(3, 3)] = (-1, -1)
tests[(7, 3, 4)] = (-1, 1)
- with self.session(use_gpu=True):
+ with self.session():
for align in alignment_list:
for shape, diags in tests.items():
x = constant_op.constant(np.random.rand(*shape), np.float32)
|
@ -39,7 +39,7 @@ class DynamicPartitionTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testSimpleOneDimensional(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
      indices = constant_op.constant([0, 0, 2, 3, 2, 1])
      partitions = data_flow_ops.dynamic_partition(

@ -60,7 +60,7 @@ class DynamicPartitionTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testSimpleTwoDimensional(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
                                   [12, 13, 14], [15, 16, 17]],
                                  dtype=dtypes.float32)

@ -87,7 +87,7 @@ class DynamicPartitionTest(test.TestCase):
    indices_list = [x % 2 for x in range(num)]
    part1 = [x for x in range(num) if x % 2 == 0]
    part2 = [x for x in range(num) if x % 2 == 1]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -109,7 +109,7 @@ class DynamicPartitionTest(test.TestCase):
    parts = [[] for _ in range(num_partitions)]
    for i in range(rows):
      parts[(i ** 2) % num_partitions].append(data_list[i])
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -125,7 +125,7 @@ class DynamicPartitionTest(test.TestCase):
  def testSimpleComplex(self):
    data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
    indices_list = [1, 0, 1, 0]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.complex64)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -138,7 +138,7 @@ class DynamicPartitionTest(test.TestCase):
  def testScalarPartitions(self):
    data_list = [10, 13, 12, 11]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float64)
      indices = 3
      partitions = data_flow_ops.dynamic_partition(

@ -159,7 +159,7 @@ class DynamicPartitionTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testHigherRank(self):
    np.random.seed(7)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      for n in 2, 3:
        for shape in (4,), (4, 5), (4, 5, 2):
          partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)

@ -184,7 +184,7 @@ class DynamicPartitionTest(test.TestCase):
  def testEmptyParts(self):
    data_list = [1, 2, 3, 4]
    indices_list = [1, 3, 1, 3]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -200,7 +200,7 @@ class DynamicPartitionTest(test.TestCase):
  def testEmptyDataTwoDimensional(self):
    data_list = [[], []]
    indices_list = [0, 1]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -216,7 +216,7 @@ class DynamicPartitionTest(test.TestCase):
  def testEmptyPartitions(self):
    data_list = []
    indices_list = []
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -237,7 +237,7 @@ class DynamicPartitionTest(test.TestCase):
    data_list = [1, 2, 3, 4, 5, 6]
    indices_list = [6, 5, 4, 3, 1, 0]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -258,7 +258,7 @@ class DynamicPartitionTest(test.TestCase):
    data_list = [1, 2, 3, 4, 5, 6]
    indices_list = [10, 11, 2, 12, 0, 1000]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(

@ -282,7 +282,7 @@ class DynamicPartitionTest(test.TestCase):
    data_list = [1.1, 2.1, 3.1, 4.1, 5.1, 6.1]
    indices_list = [90, 70, 60, 100, 110, 40]
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      data = constant_op.constant(data_list, dtype=dtypes.float32)
      indices = constant_op.constant(indices_list, dtype=dtypes.int32)
      partitions = data_flow_ops.dynamic_partition(
@ -55,7 +55,7 @@ class EigTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testConcurrentExecutesWithoutError(self):
    all_ops = []
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      for compute_v_ in True, False:
        matrix1 = random_ops.random_normal([5, 5], seed=42)
        matrix2 = random_ops.random_normal([5, 5], seed=42)

@ -84,7 +84,7 @@ class EigTest(test.TestCase):
            "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
    self.assertEqual(matrix.shape, (32, 32))
    matrix_tensor = constant_op.constant(matrix)
-    with self.session(use_gpu=True) as _:
+    with self.session() as _:
      (e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
      self.assertEqual(e.size, 32)
      self.assertAllClose(

@ -166,7 +166,7 @@ def _GetEigTest(dtype_, shape_, compute_v_):
    a = RandomInput()
    np_e, np_v = np.linalg.eig(a)
-    with self.session(use_gpu=True):
+    with self.session():
      if compute_v_:
        tf_e, tf_v = linalg_ops.eig(constant_op.constant(a))

@ -222,7 +222,7 @@ def _GetEigGradTest(dtype_, shape_, compute_v_):
      tol = 1e-2
    else:
      tol = 1e-7
-    with self.session(use_gpu=True):
+    with self.session():

      def Compute(x):
        e, v = linalg_ops.eig(x)
@ -1048,7 +1048,7 @@ class DynamicStitchOpTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testCint32Gpu(self):
-    with self.session(use_gpu=True):
+    with self.session():
      indices = [
          ops.convert_to_tensor([0, 1, 2]),
          ops.convert_to_tensor([2, 3])

@ -1076,7 +1076,7 @@ class DynamicStitchOpTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testInt32Gpu(self):
-    with self.session(use_gpu=True):
+    with self.session():
      indices = [
          ops.convert_to_tensor([0, 1, 2]),
          ops.convert_to_tensor([2, 3])
@ -340,7 +340,7 @@ class FunctionalOpsTest(test.TestCase):
        lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
    loss = l0 + array_ops.stop_gradient(l1)
    grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
-    with self.test_session(use_gpu=True) as sess:
+    with self.test_session() as sess:
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(grad)

@ -933,7 +933,7 @@ class FunctionalOpsTest(test.TestCase):
    def ReturnsTooManyArgs(unused_i, v):
      return v, v

-    with self.test_session(use_gpu=True):
+    with self.test_session():
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  "must be a scalar"):
        functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
@ -39,7 +39,7 @@ from tensorflow.python.platform import test
class GatherNdTest(test.TestCase):

  def _testSimpleDtype(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
      indices = constant_op.constant([[4], [4], [0]])
      gather_nd_t = array_ops.gather_nd(params, indices)

@ -60,7 +60,7 @@ class GatherNdTest(test.TestCase):
  @test_util.run_deprecated_v1
  @test_util.disable_xla("b/123337890")  # Error messages differ
  def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
-    with self.session(use_gpu=True):
+    with self.session():
      params = np.ones((3, 3), dtype=np.float32)

      indices_empty = np.empty((0, 2), dtype=np.int32)

@ -91,7 +91,7 @@ class GatherNdTest(test.TestCase):
      self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)

  def testIndexScalar(self):
-    with self.session(use_gpu=True):
+    with self.session():
      params = np.array(
          [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
      indices = constant_op.constant([4, 1])

@ -101,7 +101,7 @@ class GatherNdTest(test.TestCase):
      self.assertAllEqual(np.array(7), gather_nd_val)

  def testParamsRankLargerThanIndexIndexScalarSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
      params = np.array(
          [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
      indices = constant_op.constant([4])

@ -111,7 +111,7 @@ class GatherNdTest(test.TestCase):
      self.assertAllEqual(np.array([-7, 7]), gather_nd_val)

  def testParamsRankLargerThanIndexSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
      params = np.array(
          [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
      indices = constant_op.constant([[4], [4], [0]])

@ -122,7 +122,7 @@ class GatherNdTest(test.TestCase):
      self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)

  def testHigherRankParamsLargerThanIndexSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
      params = np.array(
          [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
           [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],

@ -136,7 +136,7 @@ class GatherNdTest(test.TestCase):
      self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)

  def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
-    with self.session(use_gpu=True):
+    with self.session():
      params = np.array(
          [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
           [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],

@ -153,7 +153,7 @@ class GatherNdTest(test.TestCase):
          gather_nd_val)

  def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
-    with self.session(use_gpu=True):
+    with self.session():
      params = np.array(
          [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
           [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],

@ -168,7 +168,7 @@ class GatherNdTest(test.TestCase):
          gather_nd_val)

  def testHigherRankParams(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = (10, 20, 5, 1, 17)
      params = np.random.rand(*shape)
      indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T

@ -180,7 +180,7 @@ class GatherNdTest(test.TestCase):
      self.assertEqual([2000], gather_nd_t.get_shape())

  def testHigherRankParamsAndIndices(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = (10, 20, 5, 1, 17)
      params = np.random.rand(*shape)
      indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T

@ -220,7 +220,7 @@ class GatherNdTest(test.TestCase):
    # On GPU the bad indices do not raise error but fetch 0 values
    if not test.is_gpu_available():
      return
-    with self.session(use_gpu=True):
+    with self.session():
      params = [0, 1, 2]
      indices = [[[0], [7]]]  # Make this one higher rank
      gather_nd = array_ops.gather_nd(params, indices)

@ -244,7 +244,7 @@ class GatherNdTest(test.TestCase):
    # On GPU the bad indices do not raise error but fetch 0 values
    if not test.is_gpu_available():
      return
-    with self.session(use_gpu=True):
+    with self.session():
      params = [[0, 1, 2]]
      indices = [[[0], [0], [1]]]  # Make this one higher rank
      gather_nd = array_ops.gather_nd(params, indices)

@ -261,7 +261,7 @@ class GatherNdTest(test.TestCase):
    grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
    grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
      assert np.array_equal(expected_grads, self.evaluate(grads))

  @test_util.run_deprecated_v1

@ -273,7 +273,7 @@ class GatherNdTest(test.TestCase):
    grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
    grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
      self.assertIndexedSlices(grads)
      self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))

@ -290,7 +290,7 @@ class GatherNdTest(test.TestCase):
    grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    expected_grads = np.array(
        [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
      self.assertAllEqual(expected_grads, self.evaluate(grads))

  @test_util.run_deprecated_v1

@ -320,7 +320,7 @@ class GatherNdTest(test.TestCase):
        [[[[5, 6], [1, 2]]]],
        [[[[3, 4], [7, 8]]]]
    ]]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
      self.assertAllEqual(expected_grads, self.evaluate(grads))

  @test_util.run_deprecated_v1

@ -336,7 +336,7 @@ class GatherNdTest(test.TestCase):
    grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
    expected_grads = np.array(
        [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
      self.assertAllEqual(expected_grads, self.evaluate(grads))

  @test_util.run_deprecated_v1

@ -358,7 +358,7 @@ class GatherNdTest(test.TestCase):
        [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
        dtype=np.float64)
-    with self.session(use_gpu=True):
+    with self.session():
      self.assertIndexedSlices(grads)
      self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
@ -29,7 +29,7 @@ class InTopKTest(test.TestCase):
  def _validateInTopK(self, predictions, target, k, expected):
    np_ans = np.array(expected, np.bool)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      precision = nn_ops.in_top_k(predictions, target, k)
      out = self.evaluate(precision)
      self.assertAllClose(np_ans, out)
@ -102,7 +102,7 @@ def _init_sampler(tc, init, num):
  """

  def func():
-    with tc.test_session(use_gpu=True):
+    with tc.test_session():
      return init([num]).eval()

  return func

@ -112,7 +112,7 @@ class ConstantInitializersTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testZerosInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.zeros_initializer())

@ -121,7 +121,7 @@ class ConstantInitializersTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testOnesInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.ones_initializer())

@ -130,7 +130,7 @@ class ConstantInitializersTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testConstantZeroInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(0.0))

@ -139,7 +139,7 @@ class ConstantInitializersTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testConstantOneInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(1.0))

@ -148,7 +148,7 @@ class ConstantInitializersTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testConstantIntInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x",

@ -161,7 +161,7 @@ class ConstantInitializersTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testConstantTupleInitializer(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = [3]
      x = variable_scope.get_variable(
          "x",

@ -173,7 +173,7 @@ class ConstantInitializersTest(test.TestCase):
    self.assertAllEqual(x, [10, 20, 30])

  def _testNDimConstantInitializer(self, name, value, shape, expected):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      self.evaluate(x.initializer)

@ -198,7 +198,7 @@ class ConstantInitializersTest(test.TestCase):
  def _testNDimConstantInitializerLessValues(self, name, value, shape,
                                             expected):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      self.evaluate(x.initializer)

@ -225,7 +225,7 @@ class ConstantInitializersTest(test.TestCase):
  def _testNDimConstantInitializerMoreValues(self, value, shape):
    ops.reset_default_graph()
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      self.assertRaises(
          ValueError,
@ -398,7 +398,7 @@ class VarianceScalingInitializationTest(test.TestCase):
    init = init_ops.variance_scaling_initializer(
        distribution="truncated_normal")

-    with self.session(use_gpu=True), \
+    with self.session(), \
      test.mock.patch.object(
          random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
          as mock_truncated_normal:

@ -415,7 +415,7 @@ class VarianceScalingInitializationTest(test.TestCase):
    expect_var = 1. / shape[0]
    init = init_ops.variance_scaling_initializer(distribution="normal")

-    with self.session(use_gpu=True), \
+    with self.session(), \
      test.mock.patch.object(
          random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
          as mock_truncated_normal:

@ -433,7 +433,7 @@ class VarianceScalingInitializationTest(test.TestCase):
    init = init_ops.variance_scaling_initializer(
        distribution="untruncated_normal")

-    with self.session(use_gpu=True), \
+    with self.session(), \
      test.mock.patch.object(
          random_ops, "random_normal", wraps=random_ops.random_normal) \
          as mock_random_normal:

@ -450,7 +450,7 @@ class VarianceScalingInitializationTest(test.TestCase):
    expect_var = 1. / shape[0]
    init = init_ops.variance_scaling_initializer(distribution="uniform")

-    with self.session(use_gpu=True):
+    with self.session():
      x = init(shape).eval()

    self.assertNear(np.mean(x), expect_mean, err=1e-2)

@ -461,7 +461,7 @@ class RangeTest(test.TestCase):
class RangeTest(test.TestCase):

  def _Range(self, start, limit, delta):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      tf_ans = math_ops.range(start, limit, delta, name="range")
      self.assertEqual([len(np.arange(start, limit, delta))],
                       tf_ans.get_shape())

@ -481,7 +481,7 @@ class RangeTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testLimitOnly(self):
-    with self.session(use_gpu=True):
+    with self.session():
      self.assertAllEqual(np.arange(5), math_ops.range(5))

  def testEmpty(self):
@ -910,7 +910,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
    outputs_2norm = linalg_ops.norm(outputs)
    ratio = outputs_2norm / inputs_2norm
    my_ops = variables.global_variables_initializer()
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      self.evaluate(my_ops)
      # Check the shape of the outputs
      t = self.evaluate(outputs)

@ -925,7 +925,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
    shape = [3, 3, 10, 10]
    count = 70
    tol = 1e-5
-    with self.session(use_gpu=True):
+    with self.session():
      for i in range(count):
        x = variable_scope.get_variable(
            "{}".format(i),

@ -996,7 +996,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
    shape = [3, 10, 10]
    count = 70
    tol = 1e-5
-    with self.session(use_gpu=True):
+    with self.session():
      for i in range(count):
        x = variable_scope.get_variable(
            "{}".format(i),

@ -1063,7 +1063,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
    outputs_2norm = linalg_ops.norm(outputs)
    ratio = outputs_2norm / inputs_2norm
    my_ops = variables.global_variables_initializer()
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      self.evaluate(my_ops)
      # Check the shape of the outputs
      t = self.evaluate(outputs)

@ -1167,7 +1167,7 @@ class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
    outputs_2norm = linalg_ops.norm(outputs)
    ratio = outputs_2norm / inputs_2norm
    my_ops = variables.global_variables_initializer()
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      self.evaluate(my_ops)
      # Check the shape of the outputs
      t = self.evaluate(outputs)

@ -1227,7 +1227,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
    shape = [3, 3, 3, 5, 5]
    count = 20
    tol = 1e-5
-    with self.session(use_gpu=True):
+    with self.session():
      for i in range(count):
        x = variable_scope.get_variable(
            "{}".format(i),

@ -1302,7 +1302,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
    outputs_2norm = linalg_ops.norm(outputs)
    ratio = outputs_2norm / inputs_2norm
    my_ops = variables.global_variables_initializer()
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      self.evaluate(my_ops)
      # Check the shape of the outputs
      t = self.evaluate(outputs)
@ -78,7 +78,7 @@ class CSRSparseMatrixDenseMatMulGradTest(test.TestCase):
      b_mats_val = np.transpose(b_mats_val, (0, 2, 1))
    if adjoint_b:
      b_mats_val = np.conj(b_mats_val)
-    with self.test_session(use_gpu=True):
+    with self.test_session():
      a_mats = ops.convert_to_tensor(a_mats_val, dtype=datatype)
      b_mats = ops.convert_to_tensor(b_mats_val, dtype=datatype)
      a_sm = dense_to_csr_sparse_matrix(a_mats)
@ -64,7 +64,7 @@ class CSRSparseMatrixGradTest(test.TestCase):
    sparsify = lambda m: m * (m > 0)
    for dense_shape in ([53, 65, 127], [127, 65]):
      mats_val = sparsify(np.random.randn(*dense_shape))
-      with self.test_session(use_gpu=True) as sess:
+      with self.test_session() as sess:
        mats = math_ops.cast(mats_val, dtype=dtypes.float32)
        sparse_mats = dense_to_csr_sparse_matrix(mats)
        dense_mats = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(

@ -96,7 +96,7 @@ class CSRSparseMatrixGradTest(test.TestCase):
      grad_vals = np.random.randn(*dense_shape).astype(np.float32)
      expected_a_grad = alpha * grad_vals
      expected_b_grad = beta * grad_vals
-      with self.test_session(use_gpu=True) as sess:
+      with self.test_session() as sess:
        a_mats = math_ops.cast(a_mats_val, dtype=dtypes.float32)
        b_mats = math_ops.cast(b_mats_val, dtype=dtypes.float32)
        a_sm = dense_to_csr_sparse_matrix(a_mats)
@ -79,7 +79,7 @@ class CSRSparseMatrixGradTest(test.TestCase):
      b_mats_val = np.transpose(b_mats_val, (0, 2, 1))
    if adjoint_b:
      b_mats_val = np.conj(b_mats_val)
-    with self.test_session(use_gpu=True):
+    with self.test_session():
      a_mats = ops.convert_to_tensor(a_mats_val, dtype=datatype)
      b_mats = ops.convert_to_tensor(b_mats_val, dtype=datatype)
      a_sm = dense_to_csr_sparse_matrix(a_mats)
@ -59,7 +59,7 @@ class CholeskySolveTest(test.TestCase):
  def test_works_with_five_different_random_pos_def_matrices(self):
    for n in range(1, 6):
      for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
-        with self.session(use_gpu=True):
+        with self.session():
          # Create 2 x n x n matrix
          array = np.array(
              [_RandomPDMatrix(n, self.rng),

@ -85,7 +85,7 @@ class LogdetTest(test.TestCase):
      with self.subTest(n=n, np_dtype=np_dtype, atol=atol):
        matrix = _RandomPDMatrix(n, self.rng, np_dtype)
        _, logdet_np = np.linalg.slogdet(matrix)
-        with self.session(use_gpu=True):
+        with self.session():
          # Create 2 x n x n matrix
          # matrix = np.array(
          #   [_RandomPDMatrix(n, self.rng, np_dtype),

@ -99,7 +99,7 @@ class LogdetTest(test.TestCase):
      with self.subTest(np_dtype=np_dtype, atol=atol):
        matrix = (np.eye(20) * 1e-6).astype(np_dtype)
        _, logdet_np = np.linalg.slogdet(matrix)
-        with self.session(use_gpu=True):
+        with self.session():
          logdet_tf = linalg.logdet(matrix)
          self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)

@ -117,7 +117,7 @@ class SlogdetTest(test.TestCase):
      with self.subTest(n=n, np_dtype=np_dtype, atol=atol):
        matrix = _RandomPDMatrix(n, self.rng, np_dtype)
        sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
-        with self.session(use_gpu=True):
+        with self.session():
          sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
          self.assertAllClose(
              log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)

@ -129,7 +129,7 @@ class SlogdetTest(test.TestCase):
      with self.subTest(np_dtype=np_dtype, atol=atol):
        matrix = (np.eye(20) * 1e-6).astype(np_dtype)
        sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
-        with self.session(use_gpu=True):
+        with self.session():
          sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
          self.assertAllClose(
              log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)

@ -259,7 +259,7 @@ class EyeTest(parameterized.TestCase, test.TestCase):
        num_columns=num_columns_placeholder,
        batch_shape=batch_shape_placeholder,
        dtype=dtype)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      eye_tf = sess.run(
          eye,
          feed_dict={
@ -55,7 +55,7 @@ class LRNOpTest(test.TestCase):
    return output

  def _RunAndVerify(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 16, size=4)
      # Make depth at least 2 to make it meaningful

@ -103,7 +103,7 @@ class LRNOpTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testGradientsZeroInput(self):
-    with self.session(use_gpu=True):
+    with self.session():
      shape = [4, 4, 4, 4]
      p = array_ops.placeholder(dtypes.float32, shape=shape)
      inp_array = np.zeros(shape).astype("f")

@ -116,7 +116,7 @@ class LRNOpTest(test.TestCase):
    self.assertShapeEqual(expected, grad)

  def _RunAndVerifyGradients(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 5, size=4)
      # Make depth at least 2 to make it meaningful
@ -42,12 +42,12 @@ class RollTest(test_util.TensorFlowTestCase):
  def _testRoll(self, np_input, shift, axis):
    expected_roll = np.roll(np_input, shift, axis)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      roll = manip_ops.roll(np_input, shift, axis)
      self.assertAllEqual(roll, expected_roll)

  def _testGradient(self, np_input, shift, axis):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      inx = constant_op.constant(np_input.tolist())
      xs = list(np_input.shape)
      y = manip_ops.roll(inx, shift, axis)

@ -98,7 +98,7 @@ class RollTest(test_util.TensorFlowTestCase):
    self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
    self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
    # Make sure negative axis should be 0 <= axis + dims < dims
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "is out of range"):
        manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),

@ -122,7 +122,7 @@ class RollTest(test_util.TensorFlowTestCase):
    tensor = array_ops.placeholder(dtype=dtypes.int32)
    shift = 1
    axis = 0
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "input must be 1-D or higher"):
        manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})

@ -140,7 +140,7 @@ class RollTest(test_util.TensorFlowTestCase):
    tensor = [[1, 2], [3, 4]]
    shift = 1
    axis = array_ops.placeholder(dtype=dtypes.int32)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "axis must be a scalar or a 1-D vector"):
        manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})

@ -158,7 +158,7 @@ class RollTest(test_util.TensorFlowTestCase):
    tensor = [[1, 2], [3, 4]]
    shift = array_ops.placeholder(dtype=dtypes.int32)
    axis = 1
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "shift must be a scalar or a 1-D vector"):
        manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})

@ -175,7 +175,7 @@ class RollTest(test_util.TensorFlowTestCase):
    tensor = [[1, 2], [3, 4]]
    shift = array_ops.placeholder(dtype=dtypes.int32)
    axis = [0, 1]
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "shift and axis must have the same size"):
        manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})

@ -184,7 +184,7 @@ class RollTest(test_util.TensorFlowTestCase):
    tensor = [1, 2]
    shift = 1
    axis = 1
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "is out of range"):
        manip_ops.roll(tensor, shift, axis).eval()
@ -46,7 +46,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1, pi: 0})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})

@ -68,7 +68,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1, pi: 0})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})

@ -96,7 +96,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1, pi: 0})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})

@ -146,7 +146,7 @@ class MapStageTest(test.TestCase):
    n = 10

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      for i in range(n):
        sess.run(stage, feed_dict={x: i, pi: i})

@ -174,7 +174,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1, pi: 3})
      self.assertEqual(sess.run(size), 1)
      sess.run(stage, feed_dict={x: -1, pi: 1})

@ -209,7 +209,7 @@ class MapStageTest(test.TestCase):
    queue = Queue.Queue()
    n = 8

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      # Stage data in a separate thread which will block
      # when it hits the staging area's capacity and thus
      # not fill the queue with n tokens

@ -273,7 +273,7 @@ class MapStageTest(test.TestCase):
    queue = Queue.Queue()
    n = 8

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      # Stage data in a separate thread which will block
      # when it hits the staging area's capacity and thus
      # not fill the queue with n tokens

@ -334,7 +334,7 @@ class MapStageTest(test.TestCase):
    n = 10

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      # Keys n-1..0
      keys = list(reversed(six.moves.range(n)))

@ -372,7 +372,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      # 0 complete and incomplete entries
      self.assertTrue(sess.run([size, isize]) == [0, 0])
      # Stage key 0, x and f tuple entries

@ -430,7 +430,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      # 0 complete and incomplete entries
      self.assertTrue(sess.run([size, isize]) == [0, 0])
      # Stage key 0, x and f tuple entries

@ -482,7 +482,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      # 0 complete and incomplete entries
      self.assertTrue(sess.run([size, isize]) == [0, 0])
      # Stage key 0, x and f tuple entries

@ -574,7 +574,7 @@ class MapStageTest(test.TestCase):
    G.finalize()

-    with self.session(use_gpu=True, graph=G) as sess:
+    with self.session(graph=G) as sess:
      # Stage complete tuple
      sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3})
@@ -149,7 +149,7 @@ class ExponentialOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testDynamic(self):
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
inp = array_ops.placeholder(ops.dtypes.float32)
expm = linalg_impl.matrix_exponential(inp)
matrix = np.array([[1., 2.], [3., 4.]])
@@ -157,7 +157,7 @@ class ExponentialOpTest(test.TestCase):

@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
expm1 = linalg_impl.matrix_exponential(matrix1)
@@ -37,7 +37,7 @@ class InverseOpTest(test.TestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
# Verify that x^{-1} * x == Identity matrix.
inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
tf_ans = test_util.matmul_without_tf32(inv, y, adjoint_b=adjoint)
@@ -139,7 +139,7 @@ class InverseOpTest(test.TestCase):

@test_util.deprecated_graph_mode_only
def testConcurrentExecutesWithoutError(self):
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
all_ops = []
for adjoint_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
@@ -124,7 +124,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
feed_dict = None
self.assertEqual(np_ans.shape, tf_ans.get_shape())
if feed_dict:
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict)
else:
tf_ans_val = self.evaluate(tf_ans)
@@ -137,7 +137,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
tf_r = math_ops.matmul(a, tf_r, adjoint_a=True)
tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1])
if feed_dict:
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
tf_ans_val, tf_r_norm_val = sess.run([tf_ans, tf_r_norm],
feed_dict=feed_dict)
else:
@@ -147,7 +147,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
-with self.session(use_gpu=True):
+with self.session():
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
@@ -63,7 +63,7 @@ class MatrixSolveOpTest(test.TestCase):
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
@@ -195,7 +195,7 @@ class MatrixTriangularSolveOpTest(test.TestCase):
def testNonSquareMatrix(self):
# A non-square matrix should cause an error.
matrix = np.array([[1., 2., 3.], [3., 4., 5.]])
-with self.cached_session(use_gpu=True):
+with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, matrix)
with self.assertRaises(ValueError):
@@ -207,7 +207,7 @@ class MatrixTriangularSolveOpTest(test.TestCase):
# right-hand sides.
matrix = np.array([[1., 0.], [0., 1.]])
rhs = np.array([[1., 0.]])
-with self.cached_session(use_gpu=True):
+with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
@@ -68,7 +68,7 @@ def _GetNormOpTest(dtype_, shape_, ord_, axis_, keep_dims_, use_static_shape_):

def _CompareNorm(self, matrix):
np_norm = np.linalg.norm(matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
if use_static_shape_:
tf_matrix = constant_op.constant(matrix)
tf_norm = linalg_ops.norm(
@@ -372,7 +372,7 @@ class PadOpTest(test.TestCase):
for dtype in [dtypes.int32, dtypes.int64]:
paddings = np.zeros((0, 2))
inp = np.asarray(7)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
tf_val = array_ops.pad(inp, constant_op.constant(paddings, dtype=dtype))
out = self.evaluate(tf_val)
self.assertAllEqual(inp, out)
@@ -397,7 +397,7 @@ class PadOpTest(test.TestCase):
padded,
[paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)],
[-1, -1, -1, -1])
-with self.cached_session(use_gpu=True):
+with self.cached_session():
self.assertAllEqual(inp, self.evaluate(middle))
self.assertAllEqual(
np.zeros([row[0] for row in paddings_value]), self.evaluate(left))
@@ -248,7 +248,7 @@ class PoolingTest(test.TestCase):
def testPoolNC(self):
if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
-with self.session(use_gpu=True):
+with self.session():
for padding in ["SAME", "VALID"]:
self._test(
input_shape=[2, 2, 9],
@@ -906,7 +906,7 @@ class PoolingTest(test.TestCase):
self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if test.is_gpu_available():
-with self.session(use_gpu=True):
+with self.session():
t = variables.Variable(np.ones([1, 2, 2, 4]))
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesOpError("for CPU devices"):
@@ -922,7 +922,7 @@ class PoolingTest(test.TestCase):
for dtype in [np.float32, np.float16] \
+ [np.float64] if not test.is_built_with_rocm() else []:
tensor_input = np.random.rand(*input_shape).astype(dtype)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
t = constant_op.constant(tensor_input, shape=input_shape)
out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = self.evaluate(out_op)
@@ -942,7 +942,7 @@ class PoolingTest(test.TestCase):
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
tensor_output = np.random.rand(*output_shape).astype(dtype)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = self.evaluate(argmax_op)
@@ -755,7 +755,7 @@ class EagerPyFuncTest(PyFuncTestBase):
y = script_ops.eager_py_func(func=f, inp=[x], Tout=dtypes.float32)
z = script_ops.eager_py_func(func=g, inp=[y], Tout=dtypes.float32)

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
output = sess.run(z, feed_dict={x: 3.0})
self.assertEqual(output, 18.0)
@@ -145,7 +145,7 @@ def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):
if use_static_shape_:
q_tf_val, r_tf_val = self.evaluate([q_tf, r_tf])
else:
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})

q_dims = q_tf_val.shape
@@ -34,7 +34,7 @@ class MultinomialTest(test.TestCase):
def testLargeDynamicRange(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
-with self.test_session(use_gpu=True) as sess:
+with self.test_session() as sess:
samples = random_ops.multinomial(
constant_op.constant([[-30, 0]], dtype=dtypes.float32),
num_samples=1000000,
@@ -52,7 +52,7 @@ class MultinomialTest(test.TestCase):
def testLargeDynamicRange2(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
-with self.test_session(use_gpu=True) as sess:
+with self.test_session() as sess:
samples = random_ops.multinomial(
constant_op.constant([[0, -30]], dtype=dtypes.float32),
num_samples=1000000,
@@ -72,7 +72,7 @@ class MultinomialTest(test.TestCase):
random_seed.set_random_seed(10)
counts_by_indices = {}
# here the cpu undersamples and won't pass this test either
-with self.test_session(use_gpu=True) as sess:
+with self.test_session() as sess:
samples = random_ops.multinomial(
constant_op.constant([[0, -17]], dtype=dtypes.float32),
num_samples=1000000,
@@ -129,7 +129,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
# TruncatedNormalMoments requires scipy.stats.
# Give up early if we are unable to import it.
random_seed.set_random_seed(seed)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
if use_stateless:
# Generate a seed that stateless ops can use.
new_seed = random_ops.random_uniform([2],
@@ -163,7 +163,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
try:
import scipy.stats  # pylint: disable=g-import-not-at-top
random_seed.set_random_seed(seed)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
if use_stateless:
new_seed = random_ops.random_uniform([2],
seed=seed,
@@ -298,7 +298,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
minvals=-1.,
maxvals=1.)

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
samples, samples_stateless = sess.run([sample_op, sample_op_stateless])
# 0. is more than 16 standard deviations from the mean, and
# should have a likelihood < 1e-57.
@@ -313,7 +313,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase):
minval = variables.Variable(-1.)
maxval = variables.Variable(1.)

-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
with backprop.GradientTape(persistent=True) as tape:
samples = stateless.stateless_parameterized_truncated_normal(
[1], [1, 2], mean, stddev, minval, maxval)
@@ -230,7 +230,7 @@ class TruncatedNormalTest(test.TestCase):

@test_util.run_deprecated_v1
def testLargeShape(self):
-with self.session(use_gpu=True):
+with self.session():
v = variables.Variable(
array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1]))
n = random_ops.truncated_normal(v.shape)
@@ -238,7 +238,7 @@ class TruncatedNormalTest(test.TestCase):

@test_util.run_deprecated_v1
def testNoCSE(self):
-with self.session(use_gpu=True):
+with self.session():
shape = [2, 3, 4]
rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
@@ -371,7 +371,7 @@ class RandomUniformTest(RandomOpTestCommon):
def testNoCSE(self):
shape = [2, 3, 4]
for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
-with self.session(use_gpu=True):
+with self.session():
rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
diff = (rnd2 - rnd1).eval()
@@ -104,7 +104,7 @@ class RandomPoissonTest(test.TestCase):
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
-with self.cached_session(use_gpu=True):
+with self.cached_session():
rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype)
rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype)
diff = rnd2 - rnd1
@@ -240,7 +240,7 @@ class StatelessOpsTest(test.TestCase, parameterized.TestCase):
def _test_determinism(self, case, seed_type):
# Stateless values should be equal iff the seeds are equal (roughly)
seeds = [(x, y) for x in range(5) for y in range(5)] * 3  # pylint: disable=g-complex-comprehension
-with self.test_session(use_gpu=True), ops.device(get_device().name):
+with self.test_session(), ops.device(get_device().name):
_, stateless_op, _ = case
if context.executing_eagerly():
values = [
@@ -156,7 +156,7 @@ class BaseReductionTest(test.TestCase):

def _compare(self, x, reduction_axes, keepdims, feed_dict=None):
np_ans = self._np_reduce(x, reduction_axes, keepdims)
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
@@ -178,7 +178,7 @@ class BaseReductionTest(test.TestCase):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
t = ops.convert_to_tensor(x)
su = self._tf_reduce(t, reduction_axes, False)
jacob_t, jacob_n = gradient_checker.compute_gradient(
@@ -208,7 +208,7 @@ class SumReductionTest(BaseReductionTest):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@@ -403,7 +403,7 @@ class SumReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testEmptyGradients(self):
-with self.session(use_gpu=True):
+with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
@@ -411,7 +411,7 @@ class SumReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testDegenerate(self):
-with self.session(use_gpu=True):
+with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
@@ -446,7 +446,7 @@ class MeanReductionTest(BaseReductionTest):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@@ -525,7 +525,7 @@ class MeanReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testEmptyGradients(self):
-with self.session(use_gpu=True):
+with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
@@ -533,7 +533,7 @@ class MeanReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testDegenerate(self):
-with self.session(use_gpu=True):
+with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
@@ -560,7 +560,7 @@ class EuclideanNormReductionTest(BaseReductionTest):
@test_util.run_deprecated_v1
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True):
+with self.cached_session():
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@@ -609,7 +609,7 @@ class EuclideanNormReductionTest(BaseReductionTest):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)

-with self.session(use_gpu=True):
+with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
@@ -640,7 +640,7 @@ class ProdReductionTest(BaseReductionTest):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@@ -711,7 +711,7 @@ class ProdReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testEmptyGradients(self):
-with self.session(use_gpu=True):
+with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
@@ -719,7 +719,7 @@ class ProdReductionTest(BaseReductionTest):

@test_util.run_deprecated_v1
def testDegenerate(self):
-with self.session(use_gpu=True):
+with self.session():
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
@@ -750,7 +750,7 @@ class MinReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@@ -866,7 +866,7 @@ class MaxReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@@ -998,7 +998,7 @@ class AllReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
@@ -1047,7 +1047,7 @@ class AnyReductionTest(test.TestCase):

def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
@@ -223,7 +223,7 @@ class RNNTest(test.TestCase):
self.assertEqual(out.get_shape(), inp.get_shape())
self.assertEqual(out.dtype, inp.dtype)

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})

@@ -260,7 +260,7 @@ class RNNTest(test.TestCase):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(out.dtype, inp.dtype)

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
full_dropout_values = sess.run(
@@ -288,7 +288,7 @@ class RNNTest(test.TestCase):
cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
self.assertEqual(len(dynamic_outputs), len(inputs))

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(
dynamic_outputs,
@@ -324,7 +324,7 @@ class RNNTest(test.TestCase):
1.0 * (2 + 1) * np.ones((input_size)))))

def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
-with self.session(use_gpu=True, graph=ops.Graph()):
+with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
@@ -388,7 +388,7 @@ class LSTMTest(test.TestCase):
input_size = 5
batch_size = 2
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
@@ -411,7 +411,7 @@ class LSTMTest(test.TestCase):
input_size = 5
batch_size = 2
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
cell = rnn_cell.LSTMCell(
@@ -442,7 +442,7 @@ class LSTMTest(test.TestCase):
input_size = 5
batch_size = 2
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
state_saver = TestStateSaver(batch_size, 2 * num_units)
@@ -583,7 +583,7 @@ class LSTMTest(test.TestCase):
batch_size = 2
num_proj = 4
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
inputs = max_length * [
@@ -681,7 +681,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)

@@ -715,7 +715,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
inputs = max_length * [
array_ops.placeholder(dtypes.float64, shape=(None, input_size))
@@ -752,7 +752,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
inputs = max_length * [
array_ops.placeholder(dtypes.float32, shape=(None, input_size))
]
@@ -809,7 +809,7 @@ class LSTMTest(test.TestCase):
num_proj_shards = 3
num_unit_shards = 2
max_length = 8
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
sequence_length = array_ops.placeholder(dtypes.int64)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
@@ -1151,7 +1151,7 @@ class LSTMTest(test.TestCase):
state_is_tuple=False)

########### Step 1: Run static graph and generate readouts
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
@@ -1211,7 +1211,7 @@ class LSTMTest(test.TestCase):
static_individual_variable_gradients, feed_dict=feeds)

########## Step 2: Run dynamic graph and generate readouts
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
if in_graph_mode:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
@@ -1372,7 +1372,7 @@ class BidirectionalRNNTest(test.TestCase):
return input_value, inputs, outputs, state_fw, state_bw, sequence_length

def _testBidirectionalRNN(self, use_shape):
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalRNN(use_shape, True))
variables_lib.global_variables_initializer().run()
@@ -1419,7 +1419,7 @@ class BidirectionalRNNTest(test.TestCase):
self.assertAllClose(s_fw, s_bw)

def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, _ = (
self._createBidirectionalRNN(use_shape, False))
variables_lib.global_variables_initializer().run()
@@ -1504,7 +1504,7 @@ class BidirectionalRNNTest(test.TestCase):

def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
use_time_major, use_sequence_length):
-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createBidirectionalDynamicRNN(
use_shape, use_state_tuple, use_time_major, use_sequence_length))
@@ -1582,7 +1582,7 @@ class BidirectionalRNNTest(test.TestCase):
# REMARKS: factory(scope) is a function accepting a scope
# as an argument, such scope can be None, a string
# or a VariableScope instance.
-with self.session(use_gpu=True, graph=ops.Graph()):
+with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
@@ -1905,7 +1905,7 @@ class StateSaverRNNTest(test.TestCase):
batch_size = 2
state_saver = TestStateSaver(batch_size, 2 * num_units)

-with self.session(use_gpu=True, graph=ops.Graph()):
+with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
self._factory(scope=scope, state_saver=state_saver)
@@ -1984,7 +1984,7 @@ class GRUTest(test.TestCase):

sequence_length = np.random.randint(0, time_steps, size=batch_size)

-with self.session(use_gpu=True, graph=ops.Graph()) as sess:
+with self.session(graph=ops.Graph()) as sess:
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))

@@ -2006,7 +2006,7 @@ class GRUTest(test.TestCase):
sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)

def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
-with self.session(use_gpu=True, graph=ops.Graph()):
+with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
@@ -2298,7 +2298,7 @@ class RawRNNTest(test.TestCase):
np.ones((max_time, batch_size, 1), np.int64), output_vals[1])

def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
-with self.session(use_gpu=True, graph=ops.Graph()):
+with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
@@ -2416,7 +2416,7 @@ class TensorArrayOnCorrectDeviceTest(test.TestCase):
sequence_length=sequence_length,
dtype=dtypes.float32)

-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
variables_lib.global_variables_initializer().run()
@@ -2903,7 +2903,7 @@ class RNNCellTest(test.TestCase, parameterized.TestCase):
return

gpu_dev = test.gpu_device_name()
-with self.session(use_gpu=True) as sess:
+with self.session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
@@ -212,7 +212,7 @@ class RNNTest(test.TestCase):
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))

-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
@@ -232,7 +232,7 @@ class RNNTest(test.TestCase):
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))

-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
@@ -262,7 +262,7 @@ class RNNTest(test.TestCase):
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))

-with self.cached_session(use_gpu=True) as sess:
+with self.cached_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
state = (state[0], state[1].stack())
@@ -79,7 +79,7 @@ class CumsumTest(test.TestCase):

def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()

self.assertAllClose(np_out, tf_out)
@@ -101,7 +101,7 @@ class CumsumTest(test.TestCase):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True):
+with self.cached_session():
axis = constant_op.constant(0, axis_dtype)
tf_out = math_ops.cumsum(x, axis).eval()

@@ -152,7 +152,7 @@ class CumsumTest(test.TestCase):
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
-with self.session(use_gpu=True):
+with self.session():
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
@@ -168,7 +168,7 @@ class CumsumTest(test.TestCase):

def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
t = ops.convert_to_tensor(x)
result = math_ops.cumsum(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
@@ -212,7 +212,7 @@ class CumprodTest(test.TestCase):

def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()

self.assertAllClose(np_out, tf_out)
@@ -234,7 +234,7 @@ class CumprodTest(test.TestCase):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session(use_gpu=True):
+with self.cached_session():
axis = constant_op.constant(0, axis_dtype)
tf_out = math_ops.cumprod(x, axis).eval()

@@ -278,7 +278,7 @@ class CumprodTest(test.TestCase):
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
-with self.session(use_gpu=True):
+with self.session():
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
@@ -294,7 +294,7 @@ class CumprodTest(test.TestCase):

def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(1, 9).reshape(shape).astype(np.float64)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
t = ops.convert_to_tensor(x)
result = math_ops.cumprod(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
@@ -134,7 +134,7 @@ class ScatterTest(test.TestCase):
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
-with self.cached_session(use_gpu=True):
+with self.cached_session():
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
@@ -307,7 +307,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
tf_x, np_x = self._input(shape, dtype=dtype)
for use_gpu in [True, False]:
-with self.cached_session(use_gpu=True):
+with self.cached_session():
for np_op1, np_op2, tf_op, init_op in ops_list:
# sqrt_n doesn't support integers
if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
@@ -333,7 +333,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
-with self.cached_session(use_gpu=True):
+with self.cached_session():
tf_x, np_x = self._input(shape)
num_segments_constant = constant_op.constant(
num_segments, dtype=dtype)
@@ -433,7 +433,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
shape = [n, num_cols]
num_segments = max(indices) + 1
for dtype in self.differentiable_dtypes:
-with self.cached_session(use_gpu=True):
+with self.cached_session():
tf_x, np_x = self._input(shape, dtype=dtype)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
@@ -470,7 +470,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
def testEmptySecondDimension(self):
dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
np.complex64, np.complex128]
-with self.session(use_gpu=True):
+with self.session():
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
@@ -486,7 +486,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
-with self.session(use_gpu=True):
+with self.session():
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
@@ -55,7 +55,7 @@ class SelfAdjointEigTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testConcurrentExecutesWithoutError(self):
    all_ops = []
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      for compute_v_ in True, False:
        matrix1 = random_ops.random_normal([5, 5], seed=42)
        matrix2 = random_ops.random_normal([5, 5], seed=42)
@@ -84,7 +84,7 @@ class SelfAdjointEigTest(test.TestCase):
            "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
    self.assertEqual(matrix.shape, (32, 32))
    matrix_tensor = constant_op.constant(matrix)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      (e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
      self.assertEqual(e.size, 32)
      self.assertAllClose(
@@ -156,7 +156,7 @@ def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_):
    else:
      atol = 1e-12
    np_e, np_v = np.linalg.eigh(a)
-    with self.session(use_gpu=True):
+    with self.session():
      if compute_v_:
        tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))

@@ -211,7 +211,8 @@ def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):
      tol = 1e-2
    else:
      tol = 1e-7
-    with self.session(use_gpu=True):
+    with self.session():
+
      def Compute(x):
        e, v = linalg_ops.self_adjoint_eig(x)
        # (complex) Eigenvectors are only unique up to an arbitrary phase
@@ -267,7 +267,7 @@ class ShapeOpsTest(test.TestCase):
    for dtype in [dtypes.int32, dtypes.int64]:
      x = np.zeros([2])
      np_ans = np.expand_dims(x, axis=0)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
        tf_ans = self.evaluate(tensor)
        self.assertShapeEqual(np_ans, tensor)
@@ -433,7 +433,7 @@ class TileTest(test.TestCase, parameterized.TestCase):
  def testSimple(self):
    # multiples could be int32 or int64
    for dtype in [dtypes.int32, dtypes.int64]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inp = np.random.rand(4, 1).astype(np.float32)
        a = constant_op.constant(inp)
        tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
@@ -505,7 +505,7 @@ class TileTest(test.TestCase, parameterized.TestCase):
        bytes: (dtypes.string, bytes)
    }
    for dtype_np, (dtype_tf, cast) in types_to_test.items():
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inp = np.random.rand(4, 1).astype(dtype_np)
        a = constant_op.constant(
            [cast(x) for x in inp.ravel(order="C")],
@@ -601,7 +601,7 @@ class TileTest(test.TestCase, parameterized.TestCase):

  @test_util.run_deprecated_v1
  def testGradientSimpleReductionOnGPU(self):
-    with self.session(use_gpu=True):
+    with self.session():
      inp = np.random.rand(4, 1).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
@@ -616,7 +616,7 @@ class TileTest(test.TestCase, parameterized.TestCase):

  @test_util.run_deprecated_v1
  def testGradientStridedReductionOnGPU(self):
-    with self.session(use_gpu=True):
+    with self.session():
      inp = np.random.rand(4, 2).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
@@ -190,7 +190,7 @@ class DCTOpsTest(parameterized.TestCase, test.TestCase):
    # "ortho" normalization is not implemented for type I.
    if dct_type == 1 and norm == "ortho":
      return
-    with self.session(use_gpu=True):
+    with self.session():
      tol = 5e-4 if dtype == np.float32 else 1e-7
      signals = np.random.rand(*shape).astype(dtype)
      n = np.random.randint(1, 2 * signals.shape[-1])
@@ -87,7 +87,8 @@ class BaseFFTOpsTest(test.TestCase):
    if test.is_built_with_rocm():
      self.skipTest("Complex datatype not yet supported in ROCm.")
      return
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
+
      def f(inx, iny):
        inx.set_shape(x.shape)
        iny.set_shape(y.shape)
@@ -123,12 +124,12 @@ class FFTOpsTest(BaseFFTOpsTest, parameterized.TestCase):

  def _tf_fft(self, x, rank, fft_length=None, feed_dict=None):
    # fft_length unused for complex FFTs.
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      return sess.run(self._tf_fft_for_rank(rank)(x), feed_dict=feed_dict)

  def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None):
    # fft_length unused for complex FFTs.
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      return sess.run(self._tf_ifft_for_rank(rank)(x), feed_dict=feed_dict)

  def _np_fft(self, x, rank, fft_length=None):
@@ -299,12 +300,12 @@ class FFTOpsTest(BaseFFTOpsTest, parameterized.TestCase):
class RFFTOpsTest(BaseFFTOpsTest, parameterized.TestCase):

  def _tf_fft(self, x, rank, fft_length=None, feed_dict=None):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      return sess.run(
          self._tf_fft_for_rank(rank)(x, fft_length), feed_dict=feed_dict)

  def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      return sess.run(
          self._tf_ifft_for_rank(rank)(x, fft_length), feed_dict=feed_dict)

@@ -327,7 +327,7 @@ class FrameTest(test.TestCase):
  def test_gradient_numerical(self):
    if context.executing_eagerly():
      return
-    with self.session(use_gpu=True):
+    with self.session():
      signal_shape = (2, 128)
      signal = array_ops.ones(signal_shape)
      frame_length = 33
@@ -266,7 +266,7 @@ class SpectralOpsTest(test.TestCase, parameterized.TestCase):
    # TODO(rjryan): Update gradient tests for Eager.
    if context.executing_eagerly():
      return
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      signal_length = 512

      # An all-zero signal has all zero gradients with respect to the sum of the
@@ -101,7 +101,7 @@ class SpaceToBatchTest(test.TestCase, PythonOpImpl):
    """

  def _testPad(self, inputs, paddings, block_size, outputs):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      # outputs = space_to_batch(inputs)
      x_tf = self.space_to_batch(
          math_ops.cast(inputs, dtypes.float32),
@@ -327,7 +327,7 @@ class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl):
        array_ops.space_to_depth(
            array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
        [3, 1, 2, 0])
-    with self.session(use_gpu=True):
+    with self.session():
      self.assertAllEqual(y1, y2)


@@ -526,7 +526,7 @@ class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl):
  # Check the gradients.
  def _checkGrad(self, x, paddings, block_size):
    assert 4 == x.ndim
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      tf_x = ops.convert_to_tensor(x)
      tf_y = self.space_to_batch(tf_x, paddings, block_size)
      epsilon = 1e-5
@@ -73,7 +73,7 @@ class SparseTensorDenseMatMulGradientTest(test.TestCase):
    matmul = sparse_ops.sparse_tensor_dense_matmul(
        sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      dense_t_shape = [m, k] if adjoint_b else [k, m]
      sp_t_val_shape = [nnz]
      err = gradient_checker.compute_gradient_error(
@@ -66,7 +66,7 @@ class SparseTensorDenseMatMulTest(test.TestCase):
    x_values = x[np.where(x)]
    x_shape = x.shape

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      sp_x_value = sparse_tensor.SparseTensorValue(
          indices=x_indices, values=x_values, dense_shape=x_shape)
      tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(
@@ -64,7 +64,7 @@ class SparseXentTest(test.TestCase):

  def _testXent(self, np_features, np_labels):
    np_loss, np_backprop = self._npXent(np_features, np_labels)
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          np_features, np_labels)
      tf_loss, tf_backprop = self.evaluate([loss, backprop])
@@ -73,7 +73,7 @@ class SparseXentTest(test.TestCase):

  def testSingleClass(self):
    for label_dtype in np.int32, np.int64:
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
        loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
            np.array([[1.], [-1.], [0.]]).astype(np.float32),
            np.array([0, 0, 0]).astype(label_dtype))
@@ -145,19 +145,19 @@ class SparseXentTest(test.TestCase):
        np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)

  def testShapeMismatch(self):
-    with self.session(use_gpu=True):
+    with self.session():
      with self.assertRaisesRegex(ValueError, ".*Rank mismatch:*"):
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])

  def testScalar(self):
-    with self.session(use_gpu=True):
+    with self.session():
      with self.assertRaisesRegex(ValueError, ".*Logits cannot be scalars*"):
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=constant_op.constant(0), logits=constant_op.constant(1.0))

  def testLabelsPlaceholderScalar(self):
-    with ops_lib.Graph().as_default(), self.session(use_gpu=True):
+    with ops_lib.Graph().as_default(), self.session():
      labels = array_ops.placeholder(np.int32)
      y = nn_ops.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=[[7.]])
@@ -165,7 +165,7 @@ class SparseXentTest(test.TestCase):
        y.eval(feed_dict={labels: 0})

  def testVector(self):
-    with self.session(use_gpu=True):
+    with self.session():
      loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
          labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
      self.assertAllClose(0.0, self.evaluate(loss))
@@ -193,7 +193,7 @@ class SparseXentTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes(use_gpu=True)
  def testGradient(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      l = constant_op.constant([3, 0, 1], name="l")
      f = constant_op.constant(
          [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
@@ -55,13 +55,13 @@ class SplitOpTest(test.TestCase):
    model_input = array_ops.placeholder(dtypes.float32)
    inp = np.zeros((1, 10))
    # check that we still fail at runtime if the shapes were unknown
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      with self.assertRaises(errors_impl.InvalidArgumentError):
        sess.run(array_ops.split(model_input, [4]), {model_input: inp})

    # scalar Tensors are not permitted as num_splits
    for axis in [0, -2]:
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
        with self.assertRaises(ValueError):
          # pylint: disable=expression-not-assigned
          sess.run(
@@ -83,7 +83,7 @@ class SplitOpTest(test.TestCase):
    model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
    result = array_ops.split(model_input2, [2, 2], axis=0)[0]

-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      sess.run(result, feed_dict={model_input2: np.ones([4, 2])})

  @test_util.run_deprecated_v1
@@ -92,7 +92,7 @@ class SplitOpTest(test.TestCase):

    value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      with self.assertRaises(ValueError) as context:
        sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
      self.assertTrue("Cannot infer num from shape" in str(context.exception))
@@ -214,7 +214,7 @@ class SplitOpTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testOutputShape(self):
    for axis in [1, -1]:
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
        size_splits = [3, 7, 2]
        outputs = array_ops.split(tensor, size_splits, axis)
@@ -315,7 +315,7 @@ class SplitOpTest(test.TestCase):

  def _testGradientsSimple(self, dtype):
    inp = self._makeData((4, 4), dtype)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      inp_tensor = ops.convert_to_tensor(inp)
      s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
      inp_grads = [self._makeData((4, 1), dtype)for _ in range(4)]
@@ -382,7 +382,7 @@ class SplitOpTest(test.TestCase):

    splits = array_ops.placeholder(dtypes.int32, [3])
    y = array_ops.split(values, splits, axis=x)
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "must have exactly one element"):
        sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
|
@ -43,7 +43,7 @@ class StageTest(test.TestCase):
|
|||||||
|
|
||||||
G.finalize()
|
G.finalize()
|
||||||
|
|
||||||
with self.session(use_gpu=True, graph=G) as sess:
|
with self.session(graph=G) as sess:
|
||||||
sess.run(stage, feed_dict={x: -1})
|
sess.run(stage, feed_dict={x: -1})
|
||||||
for i in range(10):
|
for i in range(10):
|
||||||
_, yval = sess.run([stage, y], feed_dict={x: i})
|
_, yval = sess.run([stage, y], feed_dict={x: i})
|
||||||
@ -63,7 +63,7 @@ class StageTest(test.TestCase):
|
|||||||
|
|
||||||
G.finalize()
|
G.finalize()
|
||||||
|
|
||||||
with self.session(use_gpu=True, graph=G) as sess:
|
with self.session(graph=G) as sess:
|
||||||
sess.run(stage, feed_dict={x: -1})
|
sess.run(stage, feed_dict={x: -1})
|
||||||
for i in range(10):
|
for i in range(10):
|
||||||
_, yval = sess.run([stage, y], feed_dict={x: i})
|
_, yval = sess.run([stage, y], feed_dict={x: i})
|
||||||
@ -89,7 +89,7 @@ class StageTest(test.TestCase):
|
|||||||
|
|
||||||
G.finalize()
|
G.finalize()
|
||||||
|
|
||||||
with self.session(use_gpu=True, graph=G) as sess:
|
with self.session(graph=G) as sess:
|
||||||
sess.run(stage, feed_dict={x: -1})
|
sess.run(stage, feed_dict={x: -1})
|
||||||
for i in range(10):
|
for i in range(10):
|
||||||
_, yval = sess.run([stage, y], feed_dict={x: i})
|
_, yval = sess.run([stage, y], feed_dict={x: i})
|
||||||
@ -131,7 +131,7 @@ class StageTest(test.TestCase):
|
|||||||
|
|
||||||
G.finalize()
|
G.finalize()
|
||||||
|
|
||||||
with self.session(use_gpu=True, graph=G) as sess:
|
with self.session(graph=G) as sess:
|
||||||
for i in range(10):
|
for i in range(10):
|
||||||
sess.run(stage, feed_dict={x: i})
|
sess.run(stage, feed_dict={x: i})
|
||||||
|
|
||||||
@ -156,7 +156,7 @@ class StageTest(test.TestCase):
|
|||||||
|
|
||||||
G.finalize()
|
G.finalize()
|
||||||
|
|
||||||
with self.session(use_gpu=True, graph=G) as sess:
|
with self.session(graph=G) as sess:
|
||||||
sess.run(stage, feed_dict={x: -1})
|
sess.run(stage, feed_dict={x: -1})
|
||||||
self.assertEqual(sess.run(size), 1)
|
self.assertEqual(sess.run(size), 1)
|
||||||
sess.run(stage, feed_dict={x: -1})
|
sess.run(stage, feed_dict={x: -1})
|
||||||
@ -189,7 +189,7 @@ class StageTest(test.TestCase):
|
|||||||
queue = Queue.Queue()
|
queue = Queue.Queue()
|
||||||
n = 8
|
n = 8
|
||||||
|
|
||||||
with self.session(use_gpu=True, graph=G) as sess:
|
with self.session(graph=G) as sess:
|
||||||
# Stage data in a separate thread which will block
|
# Stage data in a separate thread which will block
|
||||||
# when it hits the staging area's capacity and thus
|
# when it hits the staging area's capacity and thus
|
||||||
# not fill the queue with n tokens
|
# not fill the queue with n tokens
|
||||||
@ -254,7 +254,7 @@ class StageTest(test.TestCase):
|
|||||||
queue = Queue.Queue()
|
queue = Queue.Queue()
|
||||||
n = 8
|
n = 8
|
||||||
|
|
||||||
with self.session(use_gpu=True, graph=G) as sess:
|
with self.session(graph=G) as sess:
|
||||||
# Stage data in a separate thread which will block
|
# Stage data in a separate thread which will block
|
||||||
# when it hits the staging area's capacity and thus
|
# when it hits the staging area's capacity and thus
|
||||||
# not fill the queue with n tokens
|
# not fill the queue with n tokens
|
||||||
|
@@ -163,7 +163,7 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
      if use_static_shape_:
        s_tf_val, u_tf_val, v_tf_val = self.evaluate([s_tf, u_tf, v_tf])
      else:
-        with self.session(use_gpu=True) as sess:
+        with self.session() as sess:
          s_tf_val, u_tf_val, v_tf_val = sess.run(
              [s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
    else:
@@ -172,7 +172,7 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
      if use_static_shape_:
        s_tf_val = self.evaluate(s_tf)
      else:
-        with self.session(use_gpu=True) as sess:
+        with self.session() as sess:
          s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})

    if compute_uv_:
@@ -284,7 +284,7 @@ def _GetSvdGradGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
    epsilon = np.finfo(dtype_).eps
    delta = 0.1 * epsilon**(1.0 / 3.0)
    tol = 1e-5
-    with self.session(use_gpu=True):
+    with self.session():
      tf_a = constant_op.constant(a)
      if compute_uv_:
        tf_s, tf_u, tf_v = _NormalizingSvd(tf_a, full_matrices_)
@@ -83,7 +83,7 @@ class TensorArrayTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def testTensorArrayWriteRead(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -104,7 +104,7 @@ class TensorArrayTest(test.TestCase):
    self.assertAllEqual(-3.0, d2)

  def _testTensorArrayWritePack(self, tf_dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)

@@ -133,7 +133,7 @@ class TensorArrayTest(test.TestCase):
    self._testTensorArrayWritePackMaybeLegacy()

  def testEmptyTensorArrayPack(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)

@@ -148,7 +148,7 @@ class TensorArrayTest(test.TestCase):
    self.assertAllEqual([3, 0, 1], c0.shape)

  def testTensorArrayWriteConcatInParallel(self):
-    with self.session(use_gpu=True):
+    with self.session():

      def _concat_1():
        ta = tensor_array_ops.TensorArray(
@@ -189,7 +189,7 @@ class TensorArrayTest(test.TestCase):
    self.assertAllEqual([1, 1, 1, 8, 9, 8, 9, 8, 9], c0)

  def _testTensorArrayWriteConcat(self, tf_dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)

@@ -217,7 +217,7 @@ class TensorArrayTest(test.TestCase):
    self._testTensorArrayWriteConcat(dtypes.string)

  def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -251,7 +251,7 @@ class TensorArrayTest(test.TestCase):

  @test_util.run_v1_only("Uses placeholders")
  def testSkipEagerTensorArrayReadUninitializedInferShapeFillsZeros(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -261,7 +261,7 @@ class TensorArrayTest(test.TestCase):
        [[0.0, 0.0]], sess.run(ta.write(1, val).read(0), {val: [[4.0, 5.0]]}))

  def _testTensorArrayUnpackRead(self, tf_dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      convert = _make_converter(tf_dtype)

      ta = _make_ta(3, "foo", dtype=tf_dtype)
@@ -311,7 +311,7 @@ class TensorArrayTest(test.TestCase):
    self._testTensorArrayUnpackReadMaybeLegacy()

  def _testTensorArraySplitRead(self, tf_dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      convert = _make_converter(tf_dtype)

      # Split an empty vector
@@ -365,7 +365,7 @@ class TensorArrayTest(test.TestCase):
  @test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
  @test_util.run_v1_only("v2 does not support TensorArray.grad.")
  def testSkipEagerTensorGradArrayWriteRead(self):
-    with self.session(use_gpu=True) as session:
+    with self.session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -401,7 +401,7 @@ class TensorArrayTest(test.TestCase):
  def testSkipEagerTensorArrayGradGrad(self):
    if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
      self.skipTest("Legacy TensorArray does not support double derivatives.")
-    with self.test_session(use_gpu=True) as session:
+    with self.test_session() as session:
      x = constant_op.constant(4.0)

      ta = tensor_array_ops.TensorArray(
@@ -420,7 +420,7 @@ class TensorArrayTest(test.TestCase):
  @test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
  @test_util.run_v1_only("v2 does not support TensorArray.grad.")
  def testSkipEagerTensorGradArrayDynamicWriteRead(self):
-    with self.session(use_gpu=True) as session:
+    with self.session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -463,7 +463,7 @@ class TensorArrayTest(test.TestCase):
  @test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
  @test_util.run_v1_only("v2 does not support TensorArray.grad.")
  def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):
-    with self.session(use_gpu=True) as session:
+    with self.session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      g_ta_0 = ta.grad("grad")
@@ -479,7 +479,7 @@ class TensorArrayTest(test.TestCase):
      self.assertAllEqual([[4.0, 5.0]], d_r1_0)

  def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = _make_ta(3, "foo", dtype=dtypes.float32)
      # TODO(b/129870929): Remove the last 2 checks (runtime checks) after
      # back back from preferred_dtype= to dtype= in convert_to_tensor. Also
@@ -518,7 +518,7 @@ class TensorArrayTest(test.TestCase):
        self.evaluate(ta.write(3, 3.0).flow)

  def testTensorArrayReadWrongIndexOrDataTypeFails(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = _make_ta(3, "foo", dtype=dtypes.float32)

      w0 = ta.write(0, [[4.0, 5.0]])
@@ -553,7 +553,7 @@ class TensorArrayTest(test.TestCase):
  @test_util.disable_control_flow_v2("v2 allows multiple writes.")
  @test_util.run_v1_only("v2 allows multiple writes.")
  def testSkipEagerTensorArrayWriteMultipleFails(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)

@@ -563,7 +563,7 @@ class TensorArrayTest(test.TestCase):
        self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)

  def testTensorArrayConcatIncompatibleShapesFails(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -597,7 +597,7 @@ class TensorArrayTest(test.TestCase):
        self.evaluate(w3.concat())

  def testTensorArraySplitIncompatibleShapesFails(self):
-    with self.session(use_gpu=True):
+    with self.session():
      in_eager_mode = context.executing_eagerly()
      ta = _make_ta(3, "foo")
      with self.assertRaisesOpError(
@@ -636,7 +636,7 @@ class TensorArrayTest(test.TestCase):
        self.evaluate(ta.split([1.0], [1]).flow)

  def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
      ta_grad = ta.grad("grad")
@@ -679,7 +679,7 @@ class TensorArrayTest(test.TestCase):
  @test_util.disable_control_flow_v2("Low level legacy TA op test.")
  @test_util.run_v1_only("Low level legacy TA op test.")
  def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      ta = tensor_array_ops.TensorArray(
          size=3,
          dtype=dtypes.float32,
@@ -710,7 +710,7 @@ class TensorArrayTest(test.TestCase):
  @test_util.disable_control_flow_v2("Low level legacy TA op test.")
  @test_util.run_v1_only("Low level legacy TA op test.")
  def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      ta = tensor_array_ops.TensorArray(
          size=3, dtype=dtypes.float32,
          element_shape=None)  # Note that element_shape is unknown
@@ -733,7 +733,7 @@ class TensorArrayTest(test.TestCase):
          sess.run(read_value, feed_dict={value: fed_value}))

  def testMultiTensorArray(self):
-    with self.session(use_gpu=True):
+    with self.session():
      h1 = tensor_array_ops.TensorArray(
          size=1, dtype=dtypes.float32, tensor_array_name="foo")
      w1 = h1.write(0, 4.0)
@@ -749,7 +749,7 @@ class TensorArrayTest(test.TestCase):
      self.assertAllClose(9.0, val)

  def _testTensorArrayGradientWriteReadType(self, dtype):
-    with self.cached_session(use_gpu=True) as session:
+    with self.cached_session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.as_dtype(dtype),
          tensor_array_name="foo",
@@ -801,7 +801,7 @@ class TensorArrayTest(test.TestCase):
      self._testTensorArrayGradientWriteReadType(dtype)

  def _testTensorArrayGradientWritePackConcatAndRead(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -839,7 +839,7 @@ class TensorArrayTest(test.TestCase):
  @test_util.disable_control_flow_v2("v2 does not support clear_after_read.")
  @test_util.run_v1_only("v2 does not support clear_after_read.")
  def testTensorArrayReadTwice(self):
-    with self.session(use_gpu=True):
+    with self.session():
      value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])

      ta_readonce = tensor_array_ops.TensorArray(
@@ -867,7 +867,7 @@ class TensorArrayTest(test.TestCase):
      self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))

  def _testTensorArrayGradientUnpackRead(self):
-    with self.cached_session(use_gpu=True) as session:
+    with self.cached_session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -897,7 +897,7 @@ class TensorArrayTest(test.TestCase):

  @test_util.deprecated_graph_mode_only
  def testSkipEagerTensorArrayGradientSplitConcat(self):
-    with self.session(use_gpu=True) as session:
+    with self.session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=2,
          infer_shape=False)
@@ -920,7 +920,7 @@ class TensorArrayTest(test.TestCase):
                        grad_vals[0])

  def _testTensorArrayGradientDynamicUnpackRead(self):
-    with self.cached_session(use_gpu=True) as session:
+    with self.cached_session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -946,20 +946,20 @@ class TensorArrayTest(test.TestCase):
    self._testTensorArrayGradientDynamicUnpackRead()

  def testCloseTensorArray(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      self.evaluate(ta.close())

  def testSizeTensorArray(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      s = ta.size()
      self.assertAllEqual(3, self.evaluate(s))

  def testWriteCloseTensorArray(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@@ -971,7 +971,8 @@ class TensorArrayTest(test.TestCase):

  def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
    np_dtype = dtype.as_numpy_dtype
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
+
      def func(v0, state0, var):
        ta = tensor_array_ops.TensorArray(
            dtype=dtype,
@@ -1068,7 +1069,8 @@ class TensorArrayTest(test.TestCase):
        dynamic_size=True, dtype=dtypes.float32)

  def testGradSerialTwoLoops(self):
-    with self.session(use_gpu=True):
+    with self.session():
+
      def loop(x):
        num_steps = 100
        acc = tensor_array_ops.TensorArray(
@ -1117,7 +1119,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.deprecated_graph_mode_only
|
@test_util.deprecated_graph_mode_only
|
||||||
def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):
|
def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):
|
||||||
with self.session(use_gpu=True) as session:
|
with self.session() as session:
|
||||||
a = array_ops.identity(
|
a = array_ops.identity(
|
||||||
np.arange(
|
np.arange(
|
||||||
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
|
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
|
||||||
@ -1195,7 +1197,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.deprecated_graph_mode_only
|
@test_util.deprecated_graph_mode_only
|
||||||
def testSkipEagerWriteShape(self):
|
def testSkipEagerWriteShape(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
ta = tensor_array_ops.TensorArray(
|
ta = tensor_array_ops.TensorArray(
|
||||||
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
dtype=dtypes.float32, tensor_array_name="foo", size=3)
|
||||||
c0 = constant_op.constant([4.0, 5.0])
|
c0 = constant_op.constant([4.0, 5.0])
|
||||||
@ -1220,7 +1222,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.deprecated_graph_mode_only
|
@test_util.deprecated_graph_mode_only
|
||||||
def testSkipEagerPartlyUnknownShape(self):
|
def testSkipEagerPartlyUnknownShape(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
ta = tensor_array_ops.TensorArray(
|
ta = tensor_array_ops.TensorArray(
|
||||||
dtype=dtypes.float32, tensor_array_name="foo", size=6)
|
dtype=dtypes.float32, tensor_array_name="foo", size=6)
|
||||||
|
|
||||||
@ -1260,7 +1262,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
|
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
|
||||||
|
|
||||||
def _testUnpackShape(self):
|
def _testUnpackShape(self):
|
||||||
with self.cached_session(use_gpu=True):
|
with self.cached_session():
|
||||||
ta = tensor_array_ops.TensorArray(
|
ta = tensor_array_ops.TensorArray(
|
||||||
dtype=dtypes.float32,
|
dtype=dtypes.float32,
|
||||||
tensor_array_name="foo",
|
tensor_array_name="foo",
|
||||||
@ -1297,7 +1299,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.deprecated_graph_mode_only
|
@test_util.deprecated_graph_mode_only
|
||||||
def testSplitShape(self):
|
def testSplitShape(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
ta = tensor_array_ops.TensorArray(
|
ta = tensor_array_ops.TensorArray(
|
||||||
dtype=dtypes.float32,
|
dtype=dtypes.float32,
|
||||||
tensor_array_name="foo",
|
tensor_array_name="foo",
|
||||||
@ -1329,7 +1331,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.deprecated_graph_mode_only
|
@test_util.deprecated_graph_mode_only
|
||||||
def testSkipEagerWriteUnknownShape(self):
|
def testSkipEagerWriteUnknownShape(self):
|
||||||
with self.session(use_gpu=True):
|
with self.session():
|
||||||
ta = tensor_array_ops.TensorArray(
|
ta = tensor_array_ops.TensorArray(
|
||||||
dtype=dtypes.float32,
|
dtype=dtypes.float32,
|
||||||
tensor_array_name="foo",
|
tensor_array_name="foo",
|
||||||
@ -1341,7 +1343,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
|
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
|
||||||
|
|
||||||
def _testGradientWhenNotAllComponentsRead(self):
|
def _testGradientWhenNotAllComponentsRead(self):
|
||||||
with self.cached_session(use_gpu=True) as session:
|
with self.cached_session() as session:
|
||||||
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
|
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
|
||||||
x = constant_op.constant([2.0, 3.0])
|
x = constant_op.constant([2.0, 3.0])
|
||||||
w = ta.unstack(x)
|
w = ta.unstack(x)
|
||||||
@ -1357,7 +1359,7 @@ class TensorArrayTest(test.TestCase):
|
|||||||
|
|
||||||
@test_util.deprecated_graph_mode_only
|
@test_util.deprecated_graph_mode_only
|
||||||
def testSkipEagerWriteButNotAllComponentsReadGrad(self):
|
def testSkipEagerWriteButNotAllComponentsReadGrad(self):
|
||||||
with self.cached_session(use_gpu=True) as session:
|
    with self.cached_session() as session:
      x0 = constant_op.constant(5.0)
      x1 = constant_op.constant(10.0)
      ta = tensor_array_ops.TensorArray(
@ -1369,7 +1371,7 @@ class TensorArrayTest(test.TestCase):
      self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0])

  def _testTensorArrayUnpackDynamic(self):
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=3, dynamic_size=True)
      x = constant_op.constant([1.0, 2.0, 3.0])
@ -1386,7 +1388,7 @@ class TensorArrayTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testSkipEagerTensorArraySplitDynamic(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=3, dynamic_size=True)
      x = constant_op.constant([1.0, 2.0, 3.0])
@ -1449,7 +1451,7 @@ class TensorArrayTest(test.TestCase):
    ta_gather_with_unknown_indices_shape([0])

  def _testTensorArrayEvalEmpty(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
      v2_msg = ("Tried to stack elements of an empty list with "
@ -1469,7 +1471,7 @@ class TensorArrayTest(test.TestCase):
  # this test is ill-defined for Eager mode --- unpacking an empty tensor
  # gives an empty list / there is not equivalent of "mark_used" in Eager
  def _testTensorArrayEvalEmptyWithDefault(self):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
      self.assertEqual(0, ta.size().eval())
@ -1491,7 +1493,7 @@ class TensorArrayTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testSkipEagerTensorArrayScatterReadAndGradients(self):
-    with self.session(use_gpu=True) as session:
+    with self.session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@ -1518,7 +1520,7 @@ class TensorArrayTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testSkipEagerTensorArrayScatterPartialReadAndGradients(self):
-    with self.session(use_gpu=True) as session:
+    with self.session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@ -1554,7 +1556,7 @@ class TensorArrayTest(test.TestCase):

  @test_util.run_v1_only("b/118890905")
  def testTensorArrayWriteGatherAndGradients(self):
-    with self.session(use_gpu=True) as session:
+    with self.session() as session:
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
@ -1703,7 +1705,7 @@ class TensorArrayTest(test.TestCase):
          [s for s in dev_stats[d] if "TensorArray" == s.node_name])

  def testTensorArrayIdentity(self):
-    with self.session(use_gpu=True):
+    with self.session():
      ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
                                         infer_shape=False)
      ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
@ -1769,7 +1771,7 @@ class TensorArrayTest(test.TestCase):
    # dy is outside of the gradients name scope; tf.gradients must
    # wrap it in the correct name scope.
    dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      vdx, vdy = self.evaluate([dx, dy])
    self.assertAllClose(vdx, vdy)

@ -1777,7 +1779,7 @@ class TensorArrayTest(test.TestCase):
  def testSkipEagerTensorArrayInt64GPU(self):
    if not test.is_gpu_available():
      return
-    with self.session(use_gpu=True, force_gpu=True) as sess:
+    with self.session(force_gpu=True) as sess:
      value = array_ops.placeholder(dtypes.int64)
      ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2)
      ta = ta.scatter([0, 1], value)
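Every hunk above makes the same mechanical substitution. As a minimal illustrative sketch (not part of this diff), assuming the test-util defaults described in the commit message, a test written against the new defaults simply drops the argument; `SessionDefaultsTest` and its matmul check below are hypothetical names, not code from this change:

```python
import tensorflow as tf


class SessionDefaultsTest(tf.test.TestCase):

  def testMatmulUnderDefaultSession(self):
    # Previously: with self.cached_session(use_gpu=True):
    # use_gpu now defaults to True, so the bare helper already allows GPU
    # placement when a GPU is present.
    with self.cached_session():
      x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
      y = tf.matmul(x, x)
      self.assertAllClose(self.evaluate(y), [[7.0, 10.0], [15.0, 22.0]])


if __name__ == "__main__":
  tf.test.main()
```

Note that `force_gpu=True`, as kept in the Int64GPU hunk above, is still meaningful: the default only allows GPU placement, whereas `force_gpu` requires it.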
@ -179,7 +179,7 @@ def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_):
    for _ in range(num_trials):
      a_np, b_np, a_dims_np, b_dims_np = _generate_random_tensors_and_dims()
      np_ans = np.tensordot(a_np, b_np, axes=(a_dims_np, b_dims_np))
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
        if dynamic_shape_:
          a = array_ops.placeholder(dtype_)
          b = array_ops.placeholder(dtype_)
@ -219,7 +219,7 @@ def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_):
      all_axes.append(a_np.ndim - 1)
    for axes in all_axes:
      np_ans = np.tensordot(a_np, b_np, axes=axes)
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
        if dynamic_shape_:
          a = array_ops.placeholder(dtype_)
          b = array_ops.placeholder(dtype_)
@ -47,7 +47,7 @@ class TopKTest(test.TestCase):
               sorted=True):  # pylint: disable=redefined-builtin
    np_expected_values = np.array(expected_values)
    np_expected_indices = np.array(expected_indices)
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
      values, indices = self.evaluate([values_op, indices_op])

@ -196,7 +196,7 @@ class TopKTest(test.TestCase):
  @test_util.run_deprecated_v1
  def testKNegative(self):
    inputs = [[0.1, 0.2], [0.3, 0.4]]
-    with self.session(use_gpu=True):
+    with self.session():
      k = array_ops.placeholder(dtypes.int32)
      values, _ = nn_ops.top_k(inputs, k)
      with self.assertRaisesOpError("Need k >= 0, got -7"):
@ -211,7 +211,7 @@ class TopKTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testTopKGradients(self):
-    with self.session(use_gpu=True) as sess:
+    with self.session() as sess:
      inputs = array_ops.placeholder(dtypes.float32, shape=[2, 5])
      values, _ = nn_ops.top_k(inputs, 3)
      grad = sess.run(
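For context on the op the TopK hunks exercise (an illustrative snippet, not from this diff): `tf.math.top_k` returns the `k` largest entries and their indices along the last axis, and the surrounding session helper no longer needs `use_gpu=True`.

```python
import tensorflow as tf

# Largest two entries of a 1-D tensor, with their positions.
values, indices = tf.math.top_k(tf.constant([1.0, 4.0, 2.0, 3.0]), k=2)
print(values.numpy())   # [4. 3.]
print(indices.numpy())  # [1 3]
```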
@ -31,7 +31,7 @@ class TraceTest(test.TestCase):

  def compare(self, x):
    np_ans = np.trace(x, axis1=-2, axis2=-1)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      tf_ans = math_ops.trace(x).eval()
    self.assertAllClose(tf_ans, np_ans)

@ -79,7 +79,7 @@ class TransposeTest(test.TestCase):
      np_ans = self._np_transpose(x, perm)
      if conjugate:
        np_ans = np.conj(np_ans)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inx = ops.convert_to_tensor(x)
        y = array_ops.transpose(inx, p, conjugate=conjugate)
        tf_ans = self.evaluate(y)
@ -170,7 +170,7 @@ class TransposeTest(test.TestCase):
      inp = np.arange(
          1, total_size + 1, dtype=datatype).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = self.evaluate(y)
@ -193,7 +193,7 @@ class TransposeTest(test.TestCase):
      inp = np.arange(
          1, total_size + 1, dtype=np.float32).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = self.evaluate(y)
@ -230,7 +230,7 @@ class TransposeTest(test.TestCase):
      inp = np.arange(
          1, total_size + 1, dtype=np.float32).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = self.evaluate(y)
@ -255,7 +255,7 @@ class TransposeTest(test.TestCase):
      inp = np.arange(
          1, total_size + 1, dtype=datatype).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = self.evaluate(y)
@ -278,7 +278,7 @@ class TransposeTest(test.TestCase):
      inp = np.arange(
          1, total_size + 1, dtype=np.float32).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = self.evaluate(y)
@ -331,7 +331,7 @@ class TransposeTest(test.TestCase):
      with self.subTest(input_shape=input_shape, perm=perm):
        inp = np.random.randint(10, size=input_shape)
        np_ans = self._np_transpose(inp, perm)
-        with self.cached_session(use_gpu=True):
+        with self.cached_session():
          inx = ops.convert_to_tensor(inp)
          y = array_ops.transpose(inx, perm)
          tf_ans = self.evaluate(y)
@ -355,7 +355,7 @@ class TransposeTest(test.TestCase):
      x = np.arange(0, 8).reshape([2, 4]).astype(np.float32)
      p = np.array([1, 0]).astype(perm_dtype)
      np_ans = np.copy(x).transpose(p)
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
        inx = ops.convert_to_tensor(x)
        inp = constant_op.constant(p)
        y = array_ops.transpose(inx, inp)
@ -80,7 +80,7 @@ class TridiagonalMulOpTest(test.TestCase):
            diags_matrix_batch, rhs_batch, diagonals_format='matrix')
    ]

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      results = self.evaluate(results)
      results_batch = self.evaluate(results_batch)

@ -114,7 +114,7 @@ class TridiagonalMulOpTest(test.TestCase):

    diags = constant_op.constant(diags, dtype=dtype)
    rhs = constant_op.constant(rhs, dtype=dtype)
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      grad_reference, _ = gradient_checker_v2.compute_gradient(
          reference_matmul, [diags, rhs])
      grad_theoretical, grad_numerical = gradient_checker_v2.compute_gradient(
@ -155,7 +155,7 @@ class TridiagonalMulOpTest(test.TestCase):
        constant_op.constant(rhs, dtype=dtypes.complex128),
        diagonals_format='matrix')

-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      result = self.evaluate(result)

    self.assertAllClose(result, expected_result)
@ -77,7 +77,7 @@ class TridiagonalSolveOpTest(test.TestCase):
                 diags_format="compact",
                 transpose_rhs=False,
                 conjugate_rhs=False):
-    with self.cached_session(use_gpu=True):
+    with self.cached_session():
      pivoting = True
      if hasattr(self, "pivoting"):
        pivoting = self.pivoting
@ -412,7 +412,7 @@ class TridiagonalSolveOpTest(test.TestCase):
          transpose_rhs=transpose_rhs,
          conjugate_rhs=conjugate_rhs)
      res = math_ops.reduce_sum(x * y)
-      with self.cached_session(use_gpu=True) as sess:
+      with self.cached_session() as sess:
        actual_grad_diags = sess.run(
            tape_diags.gradient(res, diags), feed_dict=feed_dict)
        actual_rhs_diags = sess.run(
@ -563,7 +563,7 @@ class TridiagonalSolveOpTest(test.TestCase):
      return
    x = linalg_impl.tridiagonal_solve(
        diags, rhs, diags_format, partial_pivoting=self.pivoting)
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      result = sess.run(x, feed_dict={diags: diags_feed, rhs: rhs_feed})
      self.assertAllClose(result, expected)

@ -648,7 +648,7 @@ class TridiagonalSolveOpTest(test.TestCase):
        rhs,
        diagonals_format="sequence",
        partial_pivoting=self.pivoting)
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session() as sess:
      result = sess.run(
          x,
          feed_dict={
@ -150,7 +150,7 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):

  @test_util.run_deprecated_v1
  def testResourceAssignments(self):
-    with self.session(use_gpu=True):
+    with self.session():
      var = resource_variable_ops.ResourceVariable(0.0)
      plus_one = var.assign_add(1.0)
      minus_one = var.assign_sub(2.0)
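One consequence of the flipped default, sketched below with a hypothetical `CpuOnlyTest` (not part of this diff): a test that genuinely needs CPU-only execution now has to opt out explicitly, so passing `use_gpu=False` remains the one non-redundant use of the parameter.

```python
import tensorflow as tf


class CpuOnlyTest(tf.test.TestCase):

  def testReduceSumOnCpu(self):
    # Explicit opt-out: keep this test on CPU even when a GPU is available.
    with self.cached_session(use_gpu=False):
      x = tf.range(4, dtype=tf.float32)
      self.assertAllClose(self.evaluate(tf.reduce_sum(x)), 6.0)


if __name__ == "__main__":
  tf.test.main()
```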