Remove calls to test_session() or use test_util helpers

PiperOrigin-RevId: 223105516
Author: Gaurav Jain (2018-11-27 20:49:05 -08:00), committed by TensorFlower Gardener
Parent: c68b4003f8
Commit: 5f111d5a6c
11 changed files with 635 additions and 650 deletions
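
The change applied across these files follows one pattern: the deprecated self.test_session() / self.session(use_gpu=...) context managers are replaced with the test_util device helpers (test_util.use_gpu(), test_util.force_cpu(), test_util.device(use_gpu=...)) or with self.cached_session(), and explicit sess.run(...) / .eval() calls become self.evaluate(...), so the same test body works in both graph and eager modes. Below is a minimal before/after sketch of that pattern; the SquareOpTest class is hypothetical and not part of this commit.

import numpy as np

from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class SquareOpTest(test.TestCase):
  """Hypothetical test illustrating the migration; not part of this commit."""

  def testSquareGpu(self):
    # Before: with self.test_session(use_gpu=True) as sess: ... sess.run(y)
    # After: a device helper plus self.evaluate, valid in graph and eager modes.
    x = np.array([1., 2., 3.], dtype=np.float32)
    with test_util.use_gpu():  # runs on a GPU when one is available
      y = math_ops.square(ops.convert_to_tensor(x))
      self.assertAllClose(x * x, self.evaluate(y))

  def testSquareCpu(self):
    # Before: with self.test_session(use_gpu=False): ... y.eval()
    x = np.array([1., 2., 3.], dtype=np.float32)
    with test_util.force_cpu():  # pins the op to the CPU
      y = math_ops.square(ops.convert_to_tensor(x))
      self.assertAllClose(x * x, self.evaluate(y))


if __name__ == '__main__':
  test.main()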


@ -31,64 +31,76 @@ from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
@test_util.run_all_in_graph_and_eager_modes
class CuDNNTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_cudnn_rnn_basics(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
for return_sequences in [True, False]:
with keras.utils.CustomObjectScope(
{'keras.layers.CuDNNGRU': keras.layers.CuDNNGRU,
'keras.layers.CuDNNLSTM': keras.layers.CuDNNLSTM}):
testing_utils.layer_test(
layer_class,
kwargs={'units': units,
'return_sequences': return_sequences},
input_shape=(num_samples, timesteps, input_size))
for go_backwards in [True, False]:
with keras.utils.CustomObjectScope(
{'keras.layers.CuDNNGRU': keras.layers.CuDNNGRU,
'keras.layers.CuDNNLSTM': keras.layers.CuDNNLSTM}):
testing_utils.layer_test(
layer_class,
kwargs={'units': units,
'go_backwards': go_backwards},
input_shape=(num_samples, timesteps, input_size))
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
with test_util.use_gpu():
input_size = 10
timesteps = 6
units = 2
num_samples = 32
for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
for return_sequences in [True, False]:
with keras.utils.CustomObjectScope({
'keras.layers.CuDNNGRU': keras.layers.CuDNNGRU,
'keras.layers.CuDNNLSTM': keras.layers.CuDNNLSTM
}):
testing_utils.layer_test(
layer_class,
kwargs={
'units': units,
'return_sequences': return_sequences
},
input_shape=(num_samples, timesteps, input_size))
for go_backwards in [True, False]:
with keras.utils.CustomObjectScope({
'keras.layers.CuDNNGRU': keras.layers.CuDNNGRU,
'keras.layers.CuDNNLSTM': keras.layers.CuDNNLSTM
}):
testing_utils.layer_test(
layer_class,
kwargs={
'units': units,
'go_backwards': go_backwards
},
input_shape=(num_samples, timesteps, input_size))
@test_util.run_in_graph_and_eager_modes
def test_trainability(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_size = 10
units = 2
for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
layer = layer_class(units)
layer.build((None, None, input_size))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
with test_util.use_gpu():
input_size = 10
units = 2
for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
layer = layer_class(units)
layer.build((None, None, input_size))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
def test_regularizer(self, layer_class):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
with test_util.use_gpu():
input_size = 10
timesteps = 6
units = 2
@ -119,132 +131,140 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
('cudnnlstm', keras.layers.CuDNNLSTM),
)
def test_return_state(self, layer_class):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
_, state = outputs[0], outputs[1:]
self.assertEqual(len(state), num_states)
model = keras.models.Model(inputs, state[0])
with test_util.use_gpu():
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
inputs = np.random.random((num_samples, timesteps, input_size))
state = model.predict(inputs)
np.testing.assert_allclose(
keras.backend.eval(layer.states[0]), state, atol=1e-4)
inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
_, state = outputs[0], outputs[1:]
self.assertEqual(len(state), num_states)
model = keras.models.Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, input_size))
state = model.predict(inputs)
np.testing.assert_allclose(
keras.backend.eval(layer.states[0]), state, atol=1e-4)
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
def test_time_major_input(self, layer_class):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
model = keras.models.Sequential()
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
layer = layer_class(units, time_major=True, return_sequences=True)
model.add(layer)
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(
np.ones((num_samples, timesteps, input_size)),
np.ones((num_samples, timesteps, units)))
out = model.predict(np.ones((num_samples, timesteps, input_size)))
self.assertEqual(out.shape, (num_samples, timesteps, units))
with test_util.use_gpu():
input_size = 10
timesteps = 6
units = 2
num_samples = 32
model = keras.models.Sequential()
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
layer = layer_class(units, time_major=True, return_sequences=True)
model.add(layer)
model.add(
keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2])))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(
np.ones((num_samples, timesteps, input_size)),
np.ones((num_samples, timesteps, units)))
out = model.predict(np.ones((num_samples, timesteps, input_size)))
self.assertEqual(out.shape, (num_samples, timesteps, units))
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
def test_specify_initial_state_keras_tensor(self, layer_class):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
inputs = keras.Input((timesteps, input_size))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertIn(initial_state[0], layer._inbound_nodes[0].input_tensors)
with test_util.use_gpu():
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = keras.Input((timesteps, input_size))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertIn(initial_state[0], layer._inbound_nodes[0].input_tensors)
inputs = np.random.random((num_samples, timesteps, input_size))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, input_size))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
@parameterized.named_parameters(
('cudnngru', keras.layers.CuDNNGRU),
('cudnnlstm', keras.layers.CuDNNLSTM),
)
def test_statefulness(self, layer_class):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
10,
input_size,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
with test_util.use_gpu():
input_size = 10
timesteps = 6
units = 2
num_samples = 32
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
10,
input_size,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
@ -254,49 +274,51 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
def test_load_weights_between_noncudnn_rnn(self, rnn_type, to_cudnn,
bidirectional, implementation,
model_nest_level, model_type):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_size = 10
timesteps = 6
input_shape = (timesteps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, input_size))
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
'implementation': implementation
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
with test_util.use_gpu():
input_size = 10
timesteps = 6
input_shape = (timesteps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, input_size))
layer = rnn_layer_class(units, **rnn_layer_kwargs)
if bidirectional:
layer = keras.layers.Bidirectional(layer)
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
'implementation': implementation
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
cudnn_layer = cudnn_rnn_layer_class(units)
if bidirectional:
cudnn_layer = keras.layers.Bidirectional(cudnn_layer)
layer = rnn_layer_class(units, **rnn_layer_kwargs)
if bidirectional:
layer = keras.layers.Bidirectional(layer)
model = self._make_nested_model(input_shape, layer, model_nest_level,
model_type)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer,
model_nest_level, model_type)
cudnn_layer = cudnn_rnn_layer_class(units)
if bidirectional:
cudnn_layer = keras.layers.Bidirectional(cudnn_layer)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
model = self._make_nested_model(input_shape, layer, model_nest_level,
model_type)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer,
model_nest_level, model_type)
self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
atol=1e-4)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(
model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4)
def _make_nested_model(self, input_shape, layer, level=1, model_type='func'):
# example: make_nested_seq_model((1,), Dense(10), level=2).summary()
@ -334,149 +356,145 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
to_cudnn):
# Similar test as test_load_weights_between_noncudnn_rnn() but has different
# rank of input due to usage of TimeDistributed. Issue: #10356.
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_size = 10
steps = 6
timesteps = 6
input_shape = (timesteps, steps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, steps, input_size))
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
with test_util.use_gpu():
input_size = 10
steps = 6
timesteps = 6
input_shape = (timesteps, steps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, steps, input_size))
layer = rnn_layer_class(units, **rnn_layer_kwargs)
layer = keras.layers.TimeDistributed(layer)
rnn_layer_kwargs = {
'recurrent_activation': 'sigmoid',
# ensure biases are non-zero and properly converted
'bias_initializer': 'random_uniform',
}
if rnn_type == 'LSTM':
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs['reset_after'] = True
cudnn_layer = cudnn_rnn_layer_class(units)
cudnn_layer = keras.layers.TimeDistributed(cudnn_layer)
layer = rnn_layer_class(units, **rnn_layer_kwargs)
layer = keras.layers.TimeDistributed(layer)
model = self._make_nested_model(input_shape, layer)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer)
cudnn_layer = cudnn_rnn_layer_class(units)
cudnn_layer = keras.layers.TimeDistributed(cudnn_layer)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
model = self._make_nested_model(input_shape, layer)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer)
self.assertAllClose(model.predict(inputs), cudnn_model.predict(inputs),
atol=1e-4)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(
model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def test_cudnnrnn_bidirectional(self):
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
rnn = keras.layers.CuDNNGRU
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'concat'
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
with test_util.use_gpu():
rnn = keras.layers.CuDNNGRU
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'concat'
# test with Sequential model
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(None, dim)))
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
# test with Sequential model
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(None, dim)))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
# test stacked bidirectional layers
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(None, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
# test with functional API
inputs = keras.Input((timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
# test stacked bidirectional layers
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(None, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
# Bidirectional and stateful
inputs = keras.Input(batch_shape=(1, timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
model.compile(
loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.Input((timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
# Bidirectional and stateful
inputs = keras.Input(batch_shape=(1, timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)(
inputs)
model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit(x, y, epochs=1, batch_size=1)
def test_preprocess_weights_for_loading_gru_incompatible(self):
"""Test loading weights between incompatible layers.
Should fail fast with an exception.
"""
if test.is_gpu_available(cuda_only=True):
with self.session(use_gpu=True):
input_shape = (3, 5)
if not test.is_gpu_available(cuda_only=True):
self.skipTest('No CUDA GPU available')
def gru(cudnn=False, **kwargs):
layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU
return layer_class(2, input_shape=input_shape, **kwargs)
with test_util.use_gpu():
input_shape = (3, 5)
def get_layer_weights(layer):
layer.build(input_shape=input_shape)
return layer.get_weights()
def gru(cudnn=False, **kwargs):
layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU
return layer_class(2, input_shape=input_shape, **kwargs)
def assert_not_compatible(src, dest, message):
with self.assertRaises(ValueError) as ex:
keras.engine.saving.preprocess_weights_for_loading(
dest,
get_layer_weights(src))
self.assertIn(message, str(ex.exception))
def get_layer_weights(layer):
layer.build(input_shape=input_shape)
return layer.get_weights()
assert_not_compatible(
gru(),
gru(cudnn=True),
'GRU(reset_after=False) is not compatible with CuDNNGRU')
assert_not_compatible(
gru(cudnn=True),
gru(),
'CuDNNGRU is not compatible with GRU(reset_after=False)')
assert_not_compatible(
gru(),
gru(reset_after=True),
'GRU(reset_after=False) is not compatible with '
'GRU(reset_after=True)')
assert_not_compatible(
gru(reset_after=True),
gru(),
'GRU(reset_after=True) is not compatible with '
'GRU(reset_after=False)')
def assert_not_compatible(src, dest, message):
with self.assertRaises(ValueError) as ex:
keras.engine.saving.preprocess_weights_for_loading(
dest, get_layer_weights(src))
self.assertIn(message, str(ex.exception))
assert_not_compatible(
gru(), gru(cudnn=True),
'GRU(reset_after=False) is not compatible with CuDNNGRU')
assert_not_compatible(
gru(cudnn=True), gru(),
'CuDNNGRU is not compatible with GRU(reset_after=False)')
assert_not_compatible(
gru(), gru(reset_after=True),
'GRU(reset_after=False) is not compatible with '
'GRU(reset_after=True)')
assert_not_compatible(
gru(reset_after=True), gru(),
'GRU(reset_after=True) is not compatible with '
'GRU(reset_after=False)')
if __name__ == '__main__':
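
One behavioral detail in the Keras CuDNN test changes above: the old code wrapped each test body in "if test.is_gpu_available(cuda_only=True):", so on a CPU-only machine the tests silently passed without running anything; the new code calls self.skipTest('No CUDA GPU available') first, which makes the missing coverage visible as a skip. A short sketch of the updated structure follows; the test name and layer arguments are illustrative only (loosely modeled on test_trainability above).

from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class CuDNNSkipPatternTest(test.TestCase):
  """Illustrative only; mirrors the structure used in the tests above."""

  def test_cudnn_lstm_builds(self):
    # Report an explicit skip instead of silently passing without a GPU.
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest('No CUDA GPU available')
    with test_util.use_gpu():
      layer = keras.layers.CuDNNLSTM(2)
      layer.build((None, None, 10))
      self.assertEqual(len(layer.weights), 3)


if __name__ == '__main__':
  test.main()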


@ -77,7 +77,7 @@ class BinaryOpTest(test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
with test_util.force_cpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
@ -174,7 +174,7 @@ class BinaryOpTest(test.TestCase):
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
@ -252,10 +252,12 @@ class BinaryOpTest(test.TestCase):
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = variables.Variable(x)
var_y = variables.Variable(y)
with self.cached_session() as sess:
self.evaluate([var_x.initializer, var_y.initializer])
left_result = (var_x * y).eval()
right_result = (x * var_y).eval()
left_result = self.evaluate(var_x * y)
right_result = self.evaluate(x * var_y)
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
@ -382,7 +384,7 @@ class BinaryOpTest(test.TestCase):
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with self.test_session(use_gpu=False) as sess:
with test_util.force_cpu():
cmp_eq = math_ops.equal(x, y)
cmp_not_eq = math_ops.not_equal(x, y)
values = self.evaluate([cmp_eq, cmp_not_eq])
@ -716,35 +718,35 @@ class BinaryOpTest(test.TestCase):
def testPowNegativeExponent(self):
for dtype in [np.int32, np.int64]:
with self.test_session(use_gpu=False) as sess:
with test_util.force_cpu():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([-2, 3]).astype(dtype)
sess.run(math_ops.pow(x, y))
self.evaluate(math_ops.pow(x, y))
with self.test_session(use_gpu=False) as sess:
with test_util.force_cpu():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([2, -3]).astype(dtype)
sess.run(math_ops.pow(x, y))
self.evaluate(math_ops.pow(x, y))
with self.test_session(use_gpu=False) as sess:
with test_util.force_cpu():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = -3
sess.run(math_ops.pow(x, y))
self.evaluate(math_ops.pow(x, y))
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
@ -777,7 +779,7 @@ class ComparisonOpTest(test.TestCase):
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)


@ -84,7 +84,7 @@ def _default_tolerance(dtype):
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
@ -117,7 +117,7 @@ class ComparisonOpTest(test.TestCase):
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
@ -218,8 +218,7 @@ class LogicalOpTest(test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
@ -230,8 +229,7 @@ class LogicalOpTest(test.TestCase):
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
with test_util.device(use_gpu=use_gpu):
out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
@ -316,8 +314,7 @@ class SelectOpTest(test.TestCase):
def _compare(self, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
with test_util.device(use_gpu=use_gpu):
out = array_ops.where(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
@ -460,8 +457,7 @@ class BatchSelectOpTest(test.TestCase):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
with test_util.device(use_gpu=use_gpu):
out = array_ops.where(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
@ -566,9 +562,7 @@ class MinMaxOpTest(test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with self.test_session(
use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()) as sess:
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
@ -641,13 +635,13 @@ class MinMaxOpTest(test.TestCase):
class MathOpsOverloadTest(test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
with test_util.force_cpu():
inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return self.evaluate(z)
def _computeLiteralAndTensor(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
with test_util.force_cpu():
iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return self.evaluate(z)
@ -661,7 +655,7 @@ class MathOpsOverloadTest(test.TestCase):
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=False):
with test_util.force_cpu():
self.assertAllClose(
np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))
@ -730,9 +724,7 @@ class IsFiniteInfNanTest(test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with self.test_session(
use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()) as sess:
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(
inx), math_ops.is_nan(inx)
@ -773,7 +765,7 @@ class IsFiniteInfNanTest(test.TestCase):
x = np.full((size,), value, dtype=dtype)
np_y = np.sqrt(x)
np_nan = np.isnan(np_y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
tf_y = math_ops.sqrt(x)
tf_nan = math_ops.is_nan(tf_y)
if value < 0:
@ -786,18 +778,20 @@ class RoundingTest(test.TestCase):
def _compare_values(self, x, y=None):
y = np.rint(x) if y is None else np.asarray(y)
with self.cached_session() as sess:
tf_rint = math_ops.rint(x)
np_rint = self.evaluate(tf_rint)
tf_rint = math_ops.rint(x)
np_rint = self.evaluate(tf_rint)
self.assertAllEqual(y, np_rint)
self.assertShapeEqual(y, tf_rint)
def _compare(self, x):
np_floor, np_ceil = np.floor(x), np.ceil(x)
with self.cached_session() as sess:
inx = ops.convert_to_tensor(x)
ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
tf_floor, tf_ceil = self.evaluate([ofloor, oceil])
inx = ops.convert_to_tensor(x)
ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
tf_floor, tf_ceil = self.evaluate([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
self.assertShapeEqual(np_floor, ofloor)
@ -828,12 +822,13 @@ class ComplexMakeRealImagTest(test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
with test_util.device(use_gpu=use_gpu):
real = ops.convert_to_tensor(real)
imag = ops.convert_to_tensor(imag)
tf_ans = math_ops.complex(real, imag)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@ -848,8 +843,8 @@ class ComplexMakeRealImagTest(test.TestCase):
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
np_zeros = np_real * 0
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_real = math_ops.real(inx)
tf_imag = math_ops.imag(inx)
@ -876,12 +871,12 @@ class ComplexMakeRealImagTest(test.TestCase):
def _compareAngle(self, cplx, use_gpu):
np_angle = np.angle(cplx)
with self.test_session(
use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()) as sess:
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_angle = math_ops.angle(inx)
tf_angle_val = self.evaluate(tf_angle)
self.assertAllEqual(np_angle, tf_angle_val)
self.assertShapeEqual(np_angle, tf_angle)
@ -912,8 +907,7 @@ class ComplexMakeRealImagTest(test.TestCase):
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_conj = math_ops.conj(inx)
tf_ans = self.evaluate(tf_conj)
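
For the cwise-op helpers above that take a use_gpu flag, the verbose self.test_session(use_gpu=use_gpu, force_gpu=use_gpu and test_util.is_gpu_available()) idiom collapses to test_util.device(use_gpu=use_gpu). A compact sketch of such a helper follows; the test class name and op choice are illustrative only.

import numpy as np

from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class DeviceFlagPatternTest(test.TestCase):
  """Illustrative only; mirrors the _compare helpers above."""

  def _compare(self, x, y, np_func, tf_func, use_gpu):
    # Place the op on GPU or CPU according to the flag, then evaluate it the
    # same way in graph and eager modes.
    np_ans = np_func(x, y)
    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      tf_ans = self.evaluate(tf_func(inx, iny))
    self.assertAllClose(np_ans, tf_ans)

  def testMinimum(self):
    x = np.array([1., 4.], dtype=np.float32)
    y = np.array([3., 2.], dtype=np.float32)
    self._compare(x, y, np.minimum, math_ops.minimum, use_gpu=False)
    self._compare(x, y, np.minimum, math_ops.minimum,
                  use_gpu=test.is_gpu_available())


if __name__ == '__main__':
  test.main()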


@ -76,7 +76,7 @@ class UnaryOpTest(test.TestCase):
if grad_atol is None:
grad_atol = _default_tolerance(x.dtype)
np_ans = np_func(x)
with self.test_session(use_gpu=False):
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
if x.dtype in (np.float32, np.float64,
dtypes_lib.bfloat16.as_numpy_dtype):
@ -121,24 +121,22 @@ class UnaryOpTest(test.TestCase):
def _check(self, result_tensor, result_np, input_sp_t, tol):
self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.dense_shape.eval(),
result_tensor.dense_shape.eval())
self.assertAllEqual(input_sp_t.indices, result_tensor.indices)
self.assertAllEqual(input_sp_t.dense_shape, result_tensor.dense_shape)
if tol is None:
self.assertAllClose(result_np, result_tensor.values.eval())
self.assertAllClose(result_np, result_tensor.values)
else:
self.assertAllClose(
result_np, result_tensor.values.eval(), rtol=tol, atol=tol)
self.assertAllClose(result_np, result_tensor.values, rtol=tol, atol=tol)
def _compareSparseCpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=False):
with test_util.force_cpu():
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
result = tf_func(ops.convert_to_tensor(x))
tf_gpu = self.evaluate(result)
if x.dtype == np.float16:
@ -150,7 +148,7 @@ class UnaryOpTest(test.TestCase):
def _compareSparseGpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(force_gpu=test_util.is_gpu_available()):
with test_util.use_gpu():
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareBoth(self, x, np_func, tf_func):


@ -67,11 +67,11 @@ class MultinomialTest(test.TestCase):
self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
def testOneOpMultipleStepsIndependent(self):
with self.test_session(use_gpu=True) as sess:
with test_util.use_gpu():
sample_op1, _ = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
sample1a = sess.run(sample_op1)
sample1b = sess.run(sample_op1)
sample1a = self.evaluate(sample_op1)
sample1b = self.evaluate(sample_op1)
self.assertFalse(np.equal(sample1a, sample1b).all())
def testEagerOneOpMultipleStepsIndependent(self):
@ -81,26 +81,26 @@ class MultinomialTest(test.TestCase):
self.assertFalse(np.equal(sample1.numpy(), sample2.numpy()).all())
def testTwoOpsIndependent(self):
with self.test_session(use_gpu=True) as sess:
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(32)
sample1, sample2 = sess.run([sample_op1, sample_op2])
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
# We expect sample1 and sample2 to be independent.
# 1 in 2^32 chance of this assertion failing.
self.assertFalse(np.equal(sample1, sample2).all())
def testTwoOpsSameSeedDrawSameSequences(self):
with self.test_session(use_gpu=True) as sess:
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(1000, seed=1)
sample1, sample2 = sess.run([sample_op1, sample_op2])
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
self.assertAllEqual(sample1, sample2)
def testLargeLogits(self):
for neg in [True, False]:
with self.test_session(use_gpu=True):
with test_util.use_gpu():
logits = np.array([[1000.] * 5])
if neg:
logits *= -1
samples = random_ops.multinomial(logits, 10).eval()
samples = self.evaluate(random_ops.multinomial(logits, 10))
# Sampled classes should be in-range.
self.assertTrue((samples >= 0).all())
self.assertTrue((samples < 5).all())
@ -157,10 +157,10 @@ class MultinomialTest(test.TestCase):
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with self.test_session(use_gpu=True) as sess:
with test_util.use_gpu():
random_seed.set_random_seed(1618)
op = sampler(constant_op.constant(logits), num_samples)
d = sess.run(op)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
@ -186,25 +186,26 @@ class MultinomialTest(test.TestCase):
def testEmpty(self):
classes = 5
with self.test_session(use_gpu=True):
with test_util.use_gpu():
for batch in 0, 3:
for samples in 0, 7:
x = random_ops.multinomial(
array_ops.zeros([batch, classes]), samples).eval()
x = self.evaluate(
random_ops.multinomial(
array_ops.zeros([batch, classes]), samples))
self.assertEqual(x.shape, (batch, samples))
def testEmptyClasses(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
with self.assertRaisesOpError("num_classes should be positive"):
self.evaluate(x)
def testNegativeMinLogits(self):
random_seed.set_random_seed(78844)
with self.test_session(use_gpu=True):
with test_util.use_gpu():
logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
samples = random_ops.multinomial(logits, num_samples).eval()
samples = self.evaluate(random_ops.multinomial(logits, num_samples))
self.assertAllEqual([[1023] * num_samples], samples)

View File

@ -24,6 +24,7 @@ import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops as stateless
@ -58,7 +59,7 @@ class StatelessOpsTest(test.TestCase):
preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
preseed = preseed[::2] | preseed[1::2] << 32
random_seed.set_random_seed(seed[0])
with self.test_session(use_gpu=True):
with test_util.use_gpu():
for stateless_op, stateful_op in cases:
stateful = stateful_op(seed=seed[1])
pure = stateless_op(seed=preseed)


@ -456,7 +456,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
abc = variable_scope.get_variable(
"abc",
shape=[1],


@ -24,6 +24,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
@ -164,7 +165,7 @@ class VariableOpTest(test.TestCase):
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
def testTemporaryVariable(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
@ -173,14 +174,14 @@ class VariableOpTest(test.TestCase):
self.assertAllClose([[10.0, 12.0]], self.evaluate(final))
def testDestroyNonexistentTemporaryVariable(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops.destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
self.evaluate(final)
def testDuplicateTemporaryVariable(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
@ -192,7 +193,7 @@ class VariableOpTest(test.TestCase):
self.evaluate(final)
def testDestroyTemporaryVariableTwice(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops.destroy_temporary_variable(var, var_name="dup")
@ -201,14 +202,14 @@ class VariableOpTest(test.TestCase):
self.evaluate(final)
def testTemporaryVariableNoLeak(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
var = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
self.evaluate(final)
def testTwoTemporaryVariablesNoLeaks(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
var1 = gen_state_ops.temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops.temporary_variable(
@ -217,13 +218,13 @@ class VariableOpTest(test.TestCase):
self.evaluate(final)
def testAssignDependencyAcrossDevices(self):
with self.test_session(use_gpu=True):
with test_util.use_gpu():
# The variable and an op to increment it are on the GPU.
var = state_ops.variable_op([1], dtypes.float32)
self.evaluate(state_ops.assign(var, [1.0]))
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with ops.device("/cpu:0"):
with test_util.force_cpu():
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place


@ -118,8 +118,7 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
combiner)
segmented = segment_op(rt, segment_ids, num_segments)
with self.test_session():
self.assertListEqual(self.evaluate(segmented).tolist(), expected)
self.assertListEqual(self.evaluate(segmented).tolist(), expected)
@parameterized.parameters(
(ragged.segment_sum, sum, [0, 0, 1, 1, 2, 2]),
@ -155,9 +154,8 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
combiner)
segmented = segment_op(rt, segment_ids, num_segments)
with self.test_session():
self.assertNestedListAmostEqual(
self.evaluate(segmented).tolist(), expected, places=5)
self.assertNestedListAmostEqual(
self.evaluate(segmented).tolist(), expected, places=5)
def testRaggedRankTwo(self):
rt = ragged.constant([
@ -172,16 +170,14 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
[], # row 1
[[411, 412], [321, 322], [331]] # row 2
] # pyformat: disable
with self.test_session():
self.assertEqual(self.evaluate(segmented1).tolist(), expected1)
self.assertEqual(self.evaluate(segmented1).tolist(), expected1)
segment_ids2 = [1, 2, 1, 1]
segmented2 = ragged.segment_sum(rt, segment_ids2, 3)
expected2 = [[],
[[111+411, 112+412, 113, 114], [121+321, 322], [331]],
[]] # pyformat: disable
with self.test_session():
self.assertEqual(self.evaluate(segmented2).tolist(), expected2)
self.assertEqual(self.evaluate(segmented2).tolist(), expected2)
def testRaggedSegmentIds(self):
rt = ragged.constant([
@ -195,8 +191,7 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
expected = [[],
[111+321, 112+322, 113, 114],
[121+331+411, 412]] # pyformat: disable
with self.test_session():
self.assertEqual(self.evaluate(segmented).tolist(), expected)
self.assertEqual(self.evaluate(segmented).tolist(), expected)
def testShapeMismatchError1(self):
dt = constant_op.constant([1, 2, 3, 4, 5, 6])
@ -226,7 +221,7 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
array_ops.placeholder_with_default(segment_ids.values, None),
array_ops.placeholder_with_default(segment_ids.row_splits, None))
segmented2 = ragged.segment_sum(rt, segment_ids2, 3)
with self.test_session():
with self.cached_session():
self.assertRaisesRegexp(
errors.InvalidArgumentError,
'segment_ids.shape must be a prefix of data.shape.*', segmented2.eval)


@ -28,49 +28,39 @@ class RaggedTensorBoundingShapeOp(test_util.TensorFlowTestCase):
def testDocStringExample(self):
# This is the example from ragged.bounding_shape.__doc__.
rt = ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
with self.test_session():
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt)).tolist(), [5, 4])
self.assertEqual(self.evaluate(ragged.bounding_shape(rt)).tolist(), [5, 4])
def test2DRaggedTensorWithOneRaggedDimension(self):
values = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
rt1 = ragged.from_row_splits(values, [0, 2, 5, 6, 6, 7])
rt2 = ragged.from_row_splits(values, [0, 7])
rt3 = ragged.from_row_splits(values, [0, 0, 7, 7])
with self.test_session():
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt1)).tolist(), [5, 3])
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt2)).tolist(), [1, 7])
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt3)).tolist(), [3, 7])
self.assertEqual(self.evaluate(ragged.bounding_shape(rt1)).tolist(), [5, 3])
self.assertEqual(self.evaluate(ragged.bounding_shape(rt2)).tolist(), [1, 7])
self.assertEqual(self.evaluate(ragged.bounding_shape(rt3)).tolist(), [3, 7])
def test3DRaggedTensorWithOneRaggedDimension(self):
values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]
rt1 = ragged.from_row_splits(values, [0, 2, 5, 6, 6, 7])
rt2 = ragged.from_row_splits(values, [0, 7])
rt3 = ragged.from_row_splits(values, [0, 0, 7, 7])
with self.test_session():
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt1)).tolist(), [5, 3, 2])
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt2)).tolist(), [1, 7, 2])
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt3)).tolist(), [3, 7, 2])
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt1)).tolist(), [5, 3, 2])
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt2)).tolist(), [1, 7, 2])
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt3)).tolist(), [3, 7, 2])
def testNonRaggedTensor(self):
dt = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]
with self.test_session():
self.assertEqual(
self.evaluate(ragged.bounding_shape(dt)).tolist(), [4, 3])
self.assertEqual(self.evaluate(ragged.bounding_shape(dt)).tolist(), [4, 3])
def testExplicitAxisOptimizations(self):
rt = ragged.from_row_splits(b'a b c d e f g'.split(), [0, 2, 5, 6, 6, 7])
with self.test_session():
self.assertEqual(self.evaluate(ragged.bounding_shape(rt, 0)).tolist(), 5)
self.assertEqual(self.evaluate(ragged.bounding_shape(rt, 1)).tolist(), 3)
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt, [1, 0])).tolist(), [3, 5])
self.assertEqual(self.evaluate(ragged.bounding_shape(rt, 0)).tolist(), 5)
self.assertEqual(self.evaluate(ragged.bounding_shape(rt, 1)).tolist(), 3)
self.assertEqual(
self.evaluate(ragged.bounding_shape(rt, [1, 0])).tolist(), [3, 5])
if __name__ == '__main__':


@ -118,9 +118,8 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
# From section: "Component Tensors"
rt = ragged.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
with self.test_session():
self.assertEqual(rt.tolist(),
[[3, 1, 4, 1], [], [5, 9, 2], [6], []])
self.assertEqual(
self.evaluate(rt).tolist(), [[3, 1, 4, 1], [], [5, 9, 2], [6], []])
del rt
# From section: "Alternative Row-Partitioning Schemes"
@ -132,9 +131,8 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt4 = ragged.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
rt5 = ragged.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
for rt in (rt1, rt2, rt3, rt4, rt5):
with self.test_session():
self.assertEqual(rt.tolist(),
[[3, 1, 4, 1], [], [5, 9, 2], [6], []])
self.assertEqual(
self.evaluate(rt).tolist(), [[3, 1, 4, 1], [], [5, 9, 2], [6], []])
del rt1, rt2, rt3, rt4, rt5
# From section: "Multiple Ragged Dimensions"
@ -142,28 +140,27 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
outer_rt = ragged.from_row_splits(values=inner_rt, row_splits=[0, 3, 3, 5])
self.assertEqual(outer_rt.ragged_rank, 2)
with self.test_session():
self.assertEqual(outer_rt.tolist(),
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
self.assertEqual(
self.evaluate(outer_rt).tolist(),
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
del inner_rt, outer_rt
# From section: "Multiple Ragged Dimensions"
rt = ragged.from_nested_row_splits(
inner_values=[3, 1, 4, 1, 5, 9, 2, 6],
nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8]))
with self.test_session():
self.assertEqual(rt.tolist(),
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
del rt
# From section: "Uniform Inner Dimensions"
rt = ragged.from_row_splits(
values=array_ops.ones([5, 3]), row_splits=[0, 2, 5])
with self.test_session():
self.assertEqual(
rt.tolist(),
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]])
self.assertEqual(rt.shape.as_list(), [2, None, 3])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]])
self.assertEqual(rt.shape.as_list(), [2, None, 3])
del rt
#=============================================================================
@ -208,9 +205,9 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt = ragged.RaggedTensor(
values=values, row_splits=row_splits, internal=True)
with self.test_session():
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testRaggedTensorConstructionErrors(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -262,11 +259,11 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
with self.test_session():
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithDerivedNRowsDynamic(self):
# nrows is not known at graph creation time.
@ -285,11 +282,11 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
with self.test_session():
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithExplicitNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -308,10 +305,9 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertIs(rt_nrows, nrows) # cached_nrows
with self.test_session():
self.assertEqual(
rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g'], [], []])
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g'], [], []])
def testFromValueRowIdsWithExplicitNRowsEqualToDefault(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -330,11 +326,11 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertIs(rt_nrows, nrows) # cached_nrows
with self.test_session():
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertAllEqual(rt_nrows, nrows)
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertAllEqual(rt_nrows, nrows)
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithEmptyValues(self):
rt = ragged.from_value_rowids([], [])
@ -344,9 +340,8 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertEqual(rt.ragged_rank, 1)
self.assertEqual(rt.values.shape.as_list(), [0])
self.assertEqual(ragged.value_rowids(rt).shape.as_list(), [0])
with self.test_session():
self.assertEqual(self.evaluate(rt_nrows).tolist(), 0)
self.assertEqual(rt.tolist(), [])
self.assertEqual(self.evaluate(rt_nrows).tolist(), 0)
self.assertEqual(self.evaluate(rt).tolist(), [])
def testFromRowSplits(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -363,10 +358,10 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertIs(rt_values, values)
self.assertIs(rt_row_splits, row_splits)
with self.test_session():
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowSplitsWithEmptySplits(self):
err_msg = 'row_splits tensor may not be empty'
@ -387,11 +382,11 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt_nrows = ragged.nrows(rt)
self.assertIs(rt_values, values)
with self.test_session():
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertAllEqual(rt_row_starts, row_starts)
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertAllEqual(rt_row_starts, row_starts)
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowLimits(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -407,11 +402,11 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt_nrows = ragged.nrows(rt)
self.assertIs(rt_values, values)
with self.test_session():
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertAllEqual(rt_row_limits, row_limits)
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertAllEqual(rt_row_limits, row_limits)
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowLengths(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -428,11 +423,11 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertIs(rt_values, values)
self.assertIs(rt_row_lengths, row_lengths) # cached_nrows
with self.test_session():
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertAllEqual(rt_row_lengths, row_lengths)
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(self.evaluate(rt_nrows), 5)
self.assertAllEqual(rt_row_lengths, row_lengths)
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromNestedValueRowIdsWithDerivedNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -452,12 +447,11 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt_values_value_rowids = ragged.value_rowids(rt_values)
self.assertIs(rt_values_values, values)
with self.test_session():
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertEqual(
rt.tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testFromNestedValueRowIdsWithExplicitNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -483,14 +477,14 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt_values_nrows = ragged.nrows(rt_values)
self.assertIs(rt_values_values, values)
with self.test_session():
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertAllEqual(rt_nrows, nrows[0])
self.assertAllEqual(rt_values_nrows, nrows[1])
self.assertEqual(rt.tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [],
[[b'f'], [b'g'], []], [], []])
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertAllEqual(rt_nrows, nrows[0])
self.assertAllEqual(rt_values_nrows, nrows[1])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g'], []], [],
[]])
def testFromNestedValueRowIdsWithExplicitNRowsMismatch(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -535,10 +529,9 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
self.assertIs(rt_values_values, inner_values)
self.assertIs(rt_row_splits, nested_row_splits[0])
self.assertIs(rt_values_row_splits, nested_row_splits[1])
with self.test_session():
self.assertEqual(
rt.tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testFromNestedRowSplitsWithNonListInput(self):
with self.assertRaisesRegexp(TypeError,
@ -603,31 +596,31 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt2 = ragged.from_value_rowids(values, value_rowids)
for rt in [rt1, rt2]:
with self.test_session():
self.assertEqual(rt.tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(
self.evaluate(rt.values).tolist(),
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual(rt.values.shape.dims[0].value, 7)
self.assertEqual(
self.evaluate(ragged.value_rowids(rt)).tolist(),
[0, 0, 2, 2, 2, 3, 4])
self.assertEqual(self.evaluate(ragged.nrows(rt)).tolist(), 5)
self.assertEqual(
self.evaluate(rt.row_splits).tolist(), [0, 2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_starts(rt)).tolist(), [0, 2, 2, 5, 6])
self.assertEqual(
self.evaluate(ragged.row_limits(rt)).tolist(), [2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_lengths(rt)).tolist(), [2, 0, 3, 1, 1])
self.assertEqual(
self.evaluate(rt.inner_values).tolist(),
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual(
[self.evaluate(s).tolist() for s in rt.nested_row_splits],
[[0, 2, 2, 5, 6, 7]])
self.assertEqual(
self.evaluate(rt).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(
self.evaluate(rt.values).tolist(),
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual(rt.values.shape.dims[0].value, 7)
self.assertEqual(
self.evaluate(ragged.value_rowids(rt)).tolist(),
[0, 0, 2, 2, 2, 3, 4])
self.assertEqual(self.evaluate(ragged.nrows(rt)).tolist(), 5)
self.assertEqual(
self.evaluate(rt.row_splits).tolist(), [0, 2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_starts(rt)).tolist(), [0, 2, 2, 5, 6])
self.assertEqual(
self.evaluate(ragged.row_limits(rt)).tolist(), [2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_lengths(rt)).tolist(), [2, 0, 3, 1, 1])
self.assertEqual(
self.evaluate(rt.inner_values).tolist(),
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual(
[self.evaluate(s).tolist() for s in rt.nested_row_splits],
[[0, 2, 2, 5, 6, 7]])
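The accessor expectations in the hunk above are all derived from a single row_splits vector. A pure-Python sketch of those relationships, using the same [0, 2, 2, 5, 6, 7] splits that the assertions check:

# How the expected accessor values follow from row_splits.
row_splits = [0, 2, 2, 5, 6, 7]        # 5 rows over 7 flat values
row_starts = row_splits[:-1]           # [0, 2, 2, 5, 6]
row_limits = row_splits[1:]            # [2, 2, 5, 6, 7]
row_lengths = [limit - start
               for start, limit in zip(row_starts, row_limits)]  # [2, 0, 3, 1, 1]
nrows = len(row_splits) - 1            # 5
value_rowids = [row for row, length in enumerate(row_lengths)
                for _ in range(length)]  # [0, 0, 2, 2, 2, 3, 4]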
def testRaggedTensorAccessors_3d_with_ragged_rank_1(self):
values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]
@ -637,32 +630,32 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt2 = ragged.from_value_rowids(values, value_rowids)
for rt in [rt1, rt2]:
with self.test_session():
self.assertEqual(rt.tolist(),
[[[0, 1], [2, 3]], [], [[4, 5], [6, 7], [8, 9]],
[[10, 11]], [[12, 13]]])
self.assertEqual(
self.evaluate(rt.values).tolist(),
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
self.assertEqual(rt.values.shape.dims[0].value, 7)
self.assertEqual(
self.evaluate(ragged.value_rowids(rt)).tolist(),
[0, 0, 2, 2, 2, 3, 4])
self.assertEqual(self.evaluate(ragged.nrows(rt)).tolist(), 5)
self.assertEqual(
self.evaluate(rt.row_splits).tolist(), [0, 2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_starts(rt)).tolist(), [0, 2, 2, 5, 6])
self.assertEqual(
self.evaluate(ragged.row_limits(rt)).tolist(), [2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_lengths(rt)).tolist(), [2, 0, 3, 1, 1])
self.assertEqual(
self.evaluate(rt.inner_values).tolist(),
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
self.assertEqual(
[self.evaluate(s).tolist() for s in rt.nested_row_splits],
[[0, 2, 2, 5, 6, 7]])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[0, 1], [2, 3]], [], [[4, 5], [6, 7], [8, 9]], [[10, 11]],
[[12, 13]]])
self.assertEqual(
self.evaluate(rt.values).tolist(),
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
self.assertEqual(rt.values.shape.dims[0].value, 7)
self.assertEqual(
self.evaluate(ragged.value_rowids(rt)).tolist(),
[0, 0, 2, 2, 2, 3, 4])
self.assertEqual(self.evaluate(ragged.nrows(rt)).tolist(), 5)
self.assertEqual(
self.evaluate(rt.row_splits).tolist(), [0, 2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_starts(rt)).tolist(), [0, 2, 2, 5, 6])
self.assertEqual(
self.evaluate(ragged.row_limits(rt)).tolist(), [2, 2, 5, 6, 7])
self.assertEqual(
self.evaluate(ragged.row_lengths(rt)).tolist(), [2, 0, 3, 1, 1])
self.assertEqual(
self.evaluate(rt.inner_values).tolist(),
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
self.assertEqual(
[self.evaluate(s).tolist() for s in rt.nested_row_splits],
[[0, 2, 2, 5, 6, 7]])
def testRaggedTensorAccessors_3d_with_ragged_rank_2(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
@ -678,42 +671,39 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt2 = ragged.from_nested_value_rowids(values, nested_value_rowids)
for rt in [rt1, rt2]:
with self.test_session():
self.assertEqual(
rt.tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
self.assertEqual(
self.evaluate(rt.values).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(rt.values.shape.dims[0].value, 5)
self.assertEqual(
self.evaluate(ragged.value_rowids(rt)).tolist(), [0, 0, 1, 3, 3])
self.assertEqual(self.evaluate(ragged.nrows(rt)).tolist(), 4)
self.assertEqual(self.evaluate(rt.row_splits).tolist(), [0, 2, 3, 3, 5])
self.assertEqual(
self.evaluate(ragged.row_starts(rt)).tolist(), [0, 2, 3, 3])
self.assertEqual(
self.evaluate(ragged.row_limits(rt)).tolist(), [2, 3, 3, 5])
self.assertEqual(
self.evaluate(ragged.row_lengths(rt)).tolist(), [2, 1, 0, 2])
self.assertEqual(
self.evaluate(rt.inner_values).tolist(),
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual(
[self.evaluate(s).tolist() for s in rt.nested_row_splits],
[[0, 2, 3, 3, 5], [0, 2, 2, 5, 6, 7]])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
self.assertEqual(
self.evaluate(rt.values).tolist(),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(rt.values.shape.dims[0].value, 5)
self.assertEqual(
self.evaluate(ragged.value_rowids(rt)).tolist(), [0, 0, 1, 3, 3])
self.assertEqual(self.evaluate(ragged.nrows(rt)).tolist(), 4)
self.assertEqual(self.evaluate(rt.row_splits).tolist(), [0, 2, 3, 3, 5])
self.assertEqual(
self.evaluate(ragged.row_starts(rt)).tolist(), [0, 2, 3, 3])
self.assertEqual(
self.evaluate(ragged.row_limits(rt)).tolist(), [2, 3, 3, 5])
self.assertEqual(
self.evaluate(ragged.row_lengths(rt)).tolist(), [2, 1, 0, 2])
self.assertEqual(
self.evaluate(rt.inner_values).tolist(),
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual(
[self.evaluate(s).tolist() for s in rt.nested_row_splits],
[[0, 2, 3, 3, 5], [0, 2, 2, 5, 6, 7]])
def testNRowsWithTensorInput(self):
dt = constant_op.constant([[1, 2, 3], [4, 5, 6]])
nrows = ragged.nrows(dt)
with self.test_session():
self.assertEqual(self.evaluate(nrows), 2)
self.assertEqual(self.evaluate(nrows), 2)
def testRowLengthsWithTensorInput(self):
dt = constant_op.constant([[1, 2, 3], [4, 5, 6]])
row_lengths = ragged.row_lengths(dt)
with self.test_session():
self.assertEqual(self.evaluate(row_lengths).tolist(), [3, 3])
self.assertEqual(self.evaluate(row_lengths).tolist(), [3, 3])
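For a dense (uniform) input, the same accessors reduce to the static shape, which is all the two tests above assert. A pure-Python sketch of that correspondence:

# ragged.nrows / ragged.row_lengths applied to the dense [[1, 2, 3], [4, 5, 6]] input.
dense = [[1, 2, 3], [4, 5, 6]]
nrows = len(dense)                         # 2, matching ragged.nrows(dt)
row_lengths = [len(row) for row in dense]  # [3, 3], matching ragged.row_lengths(dt)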
#=============================================================================
# RaggedTensor.shape
@ -766,29 +756,27 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
expected: The expected value of rt.__getitem__(slice_spec), as a python
list; or an exception class.
"""
with self.test_session():
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
value1 = self.evaluate(rt.__getitem__(slice_spec))
value2 = self.evaluate(rt.__getitem__(tensor_slice_spec1))
value3 = self.evaluate(rt.__getitem__(tensor_slice_spec2))
if hasattr(value1, 'tolist'):
value1 = value1.tolist()
if hasattr(value2, 'tolist'):
value2 = value2.tolist()
if hasattr(value3, 'tolist'):
value3 = value3.tolist()
self.assertEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
self.assertEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
self.assertEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
value1 = self.evaluate(rt.__getitem__(slice_spec))
value2 = self.evaluate(rt.__getitem__(tensor_slice_spec1))
value3 = self.evaluate(rt.__getitem__(tensor_slice_spec2))
if hasattr(value1, 'tolist'):
value1 = value1.tolist()
if hasattr(value2, 'tolist'):
value2 = value2.tolist()
if hasattr(value3, 'tolist'):
value3 = value3.tolist()
self.assertEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
self.assertEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
self.assertEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
def _TestGetItemException(self, rt, slice_spec, expected, message):
"""Helper function for testing RaggedTensor.__getitem__ exceptions."""
with self.test_session():
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
self.assertRaisesRegexp(expected, message, rt.__getitem__, slice_spec)
self.assertRaisesRegexp(expected, message, rt.__getitem__,
tensor_slice_spec1)
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
self.assertRaisesRegexp(expected, message, rt.__getitem__, slice_spec)
self.assertRaisesRegexp(expected, message, rt.__getitem__,
tensor_slice_spec1)
@parameterized.parameters(
# Tests for rt[i]
@ -860,8 +848,7 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt = ragged.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
with self.test_session():
self.assertEqual(rt.tolist(), EXAMPLE_RAGGED_TENSOR_2D)
self.assertEqual(self.evaluate(rt).tolist(), EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
# pylint: disable=invalid-slice-index
@ -905,8 +892,7 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
# if sys.version_info[0] == 3:
# message = 'must be str, not int'
with self.test_session():
self.assertEqual(rt.tolist(), EXAMPLE_RAGGED_TENSOR_2D)
self.assertEqual(self.evaluate(rt).tolist(), EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
@ -980,8 +966,7 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt = ragged.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
with self.test_session():
self.assertEqual(rt.tolist(), EXAMPLE_RAGGED_TENSOR_4D)
self.assertEqual(self.evaluate(rt).tolist(), EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
@ -1003,8 +988,7 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt = ragged.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
with self.test_session():
self.assertEqual(rt.tolist(), EXAMPLE_RAGGED_TENSOR_4D)
self.assertEqual(self.evaluate(rt).tolist(), EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
@ -1044,8 +1028,7 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)
splits = array_ops.placeholder_with_default(splits, None)
rt = ragged.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)
with self.test_session():
self.assertEqual(rt.tolist(), EXAMPLE_RAGGED_TENSOR_2D)
self.assertEqual(self.evaluate(rt).tolist(), EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
@ -1065,43 +1048,43 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
splits2 = [0, 2, 2, 3]
values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])
rt = ragged.from_nested_row_splits(values, [splits1, splits2])
with self.test_session():
rt_newaxis0 = rt[array_ops.newaxis]
rt_newaxis1 = rt[:, array_ops.newaxis]
rt_newaxis2 = rt[:, :, array_ops.newaxis]
rt_newaxis3 = rt[:, :, :, array_ops.newaxis]
rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]
rt_newaxis0 = rt[array_ops.newaxis]
rt_newaxis1 = rt[:, array_ops.newaxis]
rt_newaxis2 = rt[:, :, array_ops.newaxis]
rt_newaxis3 = rt[:, :, :, array_ops.newaxis]
rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]
self.assertEqual(rt.tolist(),
[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])
self.assertEqual(
rt_newaxis0.tolist(),
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])
self.assertEqual(
rt_newaxis1.tolist(),
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])
self.assertEqual(
rt_newaxis2.tolist(),
[[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])
self.assertEqual(
rt_newaxis3.tolist(),
[[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])
self.assertEqual(
rt_newaxis4.tolist(),
[[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])
self.assertEqual(
self.evaluate(rt).tolist(),
[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])
self.assertEqual(
self.evaluate(rt_newaxis0).tolist(),
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])
self.assertEqual(
self.evaluate(rt_newaxis1).tolist(),
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])
self.assertEqual(
self.evaluate(rt_newaxis2).tolist(),
[[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])
self.assertEqual(
self.evaluate(rt_newaxis3).tolist(),
[[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])
self.assertEqual(
self.evaluate(rt_newaxis4).tolist(),
[[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])
self.assertEqual(rt.ragged_rank, 2)
self.assertEqual(rt_newaxis0.ragged_rank, 3)
self.assertEqual(rt_newaxis1.ragged_rank, 3)
self.assertEqual(rt_newaxis2.ragged_rank, 3)
self.assertEqual(rt_newaxis3.ragged_rank, 2)
self.assertEqual(rt_newaxis4.ragged_rank, 2)
self.assertEqual(rt.ragged_rank, 2)
self.assertEqual(rt_newaxis0.ragged_rank, 3)
self.assertEqual(rt_newaxis1.ragged_rank, 3)
self.assertEqual(rt_newaxis2.ragged_rank, 3)
self.assertEqual(rt_newaxis3.ragged_rank, 2)
self.assertEqual(rt_newaxis4.ragged_rank, 2)
self.assertEqual(rt_newaxis0.shape.as_list(), [1, None, None, None, 2])
self.assertEqual(rt_newaxis1.shape.as_list(), [2, None, None, None, 2])
self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, None, None, 2])
self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])
self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])
self.assertEqual(rt_newaxis0.shape.as_list(), [1, None, None, None, 2])
self.assertEqual(rt_newaxis1.shape.as_list(), [2, None, None, None, 2])
self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, None, None, 2])
self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])
self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])
#=============================================================================
# RaggedTensor.__str__
@ -1151,13 +1134,15 @@ class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
rt2_times_10 = rt2.with_inner_values(rt2.inner_values * 10)
rt1_expanded = rt1.with_values(array_ops.expand_dims(rt1.values, axis=1))
with self.test_session():
self.assertEqual(rt1_plus_10.tolist(),
[[11, 12], [13, 14, 15], [16], [], [17]])
self.assertEqual(rt2_times_10.tolist(),
[[[10, 20], [30, 40, 50]], [[60]], [], [[], [70]]])
self.assertEqual(rt1_expanded.tolist(),
[[[1], [2]], [[3], [4], [5]], [[6]], [], [[7]]])
self.assertEqual(
self.evaluate(rt1_plus_10).tolist(),
[[11, 12], [13, 14, 15], [16], [], [17]])
self.assertEqual(
self.evaluate(rt2_times_10).tolist(),
[[[10, 20], [30, 40, 50]], [[60]], [], [[], [70]]])
self.assertEqual(
self.evaluate(rt1_expanded).tolist(),
[[[1], [2]], [[3], [4], [5]], [[6]], [], [[7]]])
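The with_values / with_inner_values assertions above rely on the row partition staying unchanged when an elementwise op is applied to the flat values. A short sketch reconstructing rt1 from the expected output; the concrete values and splits are inferred from that output and are assumptions, while from_row_splits, with_values and .values are the calls used in the hunk.

# Sketch, not part of the diff.
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import ragged

# rt1 as implied by the expected output [[11, 12], [13, 14, 15], [16], [], [17]].
values = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
rt1 = ragged.from_row_splits(values, [0, 2, 5, 6, 6, 7])
# The elementwise op touches only the flat values; with_values() rewraps the
# result with the original row_splits, so the nesting structure is preserved.
rt1_plus_10 = rt1.with_values(rt1.values + 10)
# self.evaluate(rt1_plus_10).tolist() == [[11, 12], [13, 14, 15], [16], [], [17]]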
#=============================================================================
# Session.run