Update tests under keras.layers to use combinations.

Change all test_util.run_all_in_graph_and_eager_modes decorators to the equivalent combinations.generate(combinations.combine(...)) decorators.

PiperOrigin-RevId: 301238676
Change-Id: I1b09860322c133d36a8902e312dff3600de91f18
Authored by Scott Zhu on 2020-03-16 14:30:36 -07:00; committed by TensorFlower Gardener.
parent 7f0f953f6b
commit fbe9eebb20
14 changed files with 94 additions and 108 deletions
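The conversion follows one pattern across the 14 files below: the TF-internal test_util graph/eager decorators are replaced by Keras test combinations, each test target gains a //tensorflow/python/keras:combinations dependency in BUILD, and classes that extended only test.TestCase also mix in parameterized.TestCase so the generated variants can run. A minimal before/after sketch, using a hypothetical MyLayerTest that is not part of this change:

    # Before: the TF-internal decorator reruns every test in graph and eager modes.
    from tensorflow.python.framework import test_util
    from tensorflow.python.platform import test

    @test_util.run_all_in_graph_and_eager_modes
    class MyLayerTest(test.TestCase):

      def test_output_shape(self):
        ...

    # After: the Keras combinations decorator generates one parameterized test per
    # mode, so the class also needs parameterized.TestCase as a base.
    from absl.testing import parameterized
    from tensorflow.python.keras import combinations

    @combinations.generate(combinations.combine(mode=['graph', 'eager']))
    class MyLayerTest(test.TestCase, parameterized.TestCase):

      def test_output_shape(self):
        ...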


@ -496,6 +496,7 @@ cuda_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
@ -514,6 +515,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"@absl_py//absl/testing:parameterized",
],
)
@ -554,6 +556,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
@ -567,6 +570,7 @@ cuda_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"@absl_py//absl/testing:parameterized",
],
)
@ -581,6 +585,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
@ -594,6 +599,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
@ -623,6 +629,7 @@ cuda_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
@ -654,6 +661,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
@ -742,6 +750,7 @@ cuda_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
@ -755,6 +764,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"@absl_py//absl/testing:parameterized",
],
)
@ -780,6 +790,7 @@ tf_py_test(
"//tensorflow/python/eager:context",
"//tensorflow/python/keras",
"//tensorflow/python/keras:backend",
"//tensorflow/python/keras:combinations",
"//tensorflow/python/keras:initializers",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
@ -799,6 +810,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//tensorflow/python/ops/ragged:ragged_concat_ops",
"//tensorflow/python/ops/ragged:ragged_factory_ops",
"//third_party/py/numpy",
@ -818,6 +830,7 @@ tf_py_test(
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
"//tensorflow/python/keras:combinations",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],


@ -27,6 +27,7 @@ import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop
@ -244,7 +245,7 @@ class CuDNNGraphOnlyTest(keras_parameterized.TestCase):
self.assertNotEqual(out4.max(), out5.max())
@test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CuDNNV1OnlyTest(keras_parameterized.TestCase):
@test_util.run_gpu_only


@ -18,19 +18,21 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import dense_attention
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BaseDenseAttentionTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BaseDenseAttentionTest(test.TestCase, parameterized.TestCase):
def test_one_dim_with_mask(self):
# Scores tensor of shape [1, 1, 1]
@ -150,8 +152,8 @@ class BaseDenseAttentionTest(test.TestCase):
self.assertEqual(new_layer.causal, True)
@test_util.run_all_in_graph_and_eager_modes
class AttentionTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AttentionTest(test.TestCase, parameterized.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
@ -470,8 +472,8 @@ class AttentionTest(test.TestCase):
self.assertEqual(new_layer.use_scale, True)
@test_util.run_all_in_graph_and_eager_modes
class AdditiveAttentionTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AdditiveAttentionTest(test.TestCase, parameterized.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
@ -716,8 +718,8 @@ class AdditiveAttentionTest(test.TestCase):
self.assertEqual(new_layer.use_scale, True)
@test_util.run_all_in_graph_and_eager_modes
class LowerTriangularMaskTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LowerTriangularMaskTest(test.TestCase, parameterized.TestCase):
def test_square_shape(self):
actual = dense_attention._lower_triangular_mask([3, 3])


@ -24,6 +24,7 @@ from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops.ragged import ragged_factory_ops
@ -85,7 +86,7 @@ class EmbeddingTest(keras_parameterized.TestCase):
outputs = model.predict(np.array([[0, 1, 0]], dtype='int32'))
self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]])
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_eager_gpu_cpu(self):
l = keras.layers.Embedding(output_dim=2, input_dim=2)
l.build((None, 2))


@ -25,6 +25,7 @@ from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.utils import np_utils
@ -224,7 +225,7 @@ class GRULayerTest(keras_parameterized.TestCase):
self.assertEqual(state[0].shape, initial_state.shape)
@tf_test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class GRULayerGenericTest(test.TestCase):
def test_constraints_GRU(self):


@ -35,6 +35,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
@ -612,32 +613,31 @@ class GRUV2Test(keras_parameterized.TestCase):
model.fit(dataset)
class GRULayerGradientTapeTest(test.TestCase):
class GRULayerGradientTapeTest(keras_parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes(config=_config)
@combinations.generate(combinations.combine(mode=['eager']))
def test_in_tape(self):
if not context.executing_eagerly():
self.skipTest('bloo')
time_steps = 10
embedding_size = 11
gru_unit_size = 12
with self.test_session(config=_config):
time_steps = 10
embedding_size = 11
gru_unit_size = 12
gru = rnn.GRU(gru_unit_size,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
gru = rnn.GRU(gru_unit_size,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
x = random_ops.random_uniform([1, time_steps, embedding_size])
y = random_ops.random_uniform([1, gru_unit_size])
x = random_ops.random_uniform([1, time_steps, embedding_size])
y = random_ops.random_uniform([1, gru_unit_size])
with backprop.GradientTape() as tape:
hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32)
_, state = gru(x, initial_state=hidden_state)
with backprop.GradientTape() as tape:
hidden_state = array_ops.zeros([1, gru_unit_size], dtype=dtypes.float32)
_, state = gru(x, initial_state=hidden_state)
loss = math_ops.reduce_mean(math_ops.square(state - y))
loss = math_ops.reduce_mean(math_ops.square(state - y))
tape.gradient(loss, gru.variables)
tape.gradient(loss, gru.variables)
@keras_parameterized.run_all_keras_modes(config=_config)

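A reading aid for the GRU gradient-tape hunk above (not part of the diff): run_in_graph_and_eager_modes accepted a session config, while combinations.generate does not, so the config moves into an explicit test_session() scope, and limiting the combination to mode=['eager'] replaces the old executing_eagerly() skip. A condensed sketch of the resulting shape, with a hypothetical class name (the real test uses _config and rnn.GRU from the surrounding file):

    from tensorflow.python.keras import combinations
    from tensorflow.python.keras import keras_parameterized

    class GradientTapeStyleTest(keras_parameterized.TestCase):

      # mode=['eager'] stands in for the former "skip unless executing eagerly"
      # guard; the session config once passed to the decorator now scopes the body.
      @combinations.generate(combinations.combine(mode=['eager']))
      def test_in_tape(self):
        with self.test_session():  # the real test passes config=_config here
          ...  # build the layer and take gradients under a GradientTape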

@ -32,6 +32,7 @@ from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend as keras_backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers import kernelized as kernel_layers
@ -53,6 +54,7 @@ def _exact_laplacian(stddev):
kernelized_utils.exact_laplacian_kernel, stddev=stddev)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
def _assert_all_close(self, expected, actual, atol=0.001):
@ -63,27 +65,23 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
else:
self.assertAllClose(expected, actual, atol=atol)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_output_dim(self):
with self.assertRaisesRegexp(
ValueError, r'`output_dim` should be a positive integer. Given: -3.'):
_ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0)
@test_util.run_in_graph_and_eager_modes()
def test_unsupported_kernel_type(self):
with self.assertRaisesRegexp(
ValueError, r'Unsupported kernel type: \'unsupported_kernel\'.'):
_ = kernel_layers.RandomFourierFeatures(
3, 'unsupported_kernel', stddev=2.0)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_scale(self):
with self.assertRaisesRegexp(
ValueError,
r'When provided, `scale` should be a positive float. Given: 0.0.'):
_ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_input_shape(self):
inputs = random_ops.random_uniform((3, 2, 4), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
@ -95,7 +93,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('gaussian', 'gaussian', 10.0, False),
('random', init_ops.random_uniform_initializer, 1.0, True))
@test_util.run_in_graph_and_eager_modes()
def test_random_features_properties(self, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
@ -110,7 +107,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('gaussian', 'gaussian', False),
('laplacian', 'laplacian', True),
('other', init_ops.ones_initializer, True))
@test_util.run_in_graph_and_eager_modes()
def test_call(self, initializer, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
@ -132,7 +128,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
kernel_layers.RandomFourierFeatures(output_dim=4, name='rff')(inputs)
kernel_layers.RandomFourierFeatures(output_dim=10, scale=2.0)(inputs)
@test_util.run_in_graph_and_eager_modes()
def test_output_shape(self):
inputs = random_ops.random_uniform((3, 2), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(
@ -173,7 +168,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('gaussian', 10, 'gaussian', 2.0),
('laplacian', 5, 'laplacian', None),
('other', 10, init_ops.ones_initializer, 1.0))
@test_util.run_in_graph_and_eager_modes()
def test_compute_output_shape(self, output_dim, initializer, scale):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim, initializer, scale=scale, name='rff')
@ -202,7 +196,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
('gaussian', 10, 'gaussian', 3.0, False),
('laplacian', 5, 'laplacian', 5.5, True),
('other', 7, init_ops.random_uniform_initializer(), None, True))
@test_util.run_in_graph_and_eager_modes()
def test_get_config(self, output_dim, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim,
@ -233,7 +226,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
('gaussian', 5, 'gaussian', None, True),
('laplacian', 5, 'laplacian', 5.5, False),
('other', 7, init_ops.ones_initializer(), 2.0, True))
@test_util.run_in_graph_and_eager_modes()
def test_from_config(self, output_dim, initializer, scale, trainable):
model_config = {
'output_dim': output_dim,
@ -262,7 +254,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
('gaussian', 10, 'gaussian', 3.0, True),
('laplacian', 5, 'laplacian', 5.5, False),
('other', 10, init_ops.random_uniform_initializer(), None, True))
@test_util.run_in_graph_and_eager_modes()
def test_same_random_features_params_reused(self, output_dim, initializer,
scale, trainable):
"""Applying the layer on the same input twice gives the same output."""
@ -281,7 +272,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0), ('laplacian', 'laplacian', 3.0),
('other', init_ops.random_uniform_initializer(), 5.0))
@test_util.run_in_graph_and_eager_modes()
def test_different_params_similar_approximation(self, initializer, scale):
random_seed.set_random_seed(12345)
rff_layer1 = kernel_layers.RandomFourierFeatures(
@ -314,7 +304,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
('laplacian', 'laplacian', 20.0, _exact_laplacian(stddev=20.0)))
@test_util.run_in_graph_and_eager_modes()
def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn):
"""Approximation is bad when output dimension is small."""
# Two distinct inputs.
@ -353,7 +342,6 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
('laplacian', 'laplacian', 10.0, _exact_laplacian(stddev=10.0)))
@test_util.run_in_graph_and_eager_modes()
def test_good_kernel_approximation_multiple_inputs(self, initializer, scale,
exact_kernel_fn):
# Parameters.

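A note on the kernelized-layer hunks above (reading aid, not part of the diff): with combinations.generate applied once at class level, the per-method @test_util.run_in_graph_and_eager_modes() decorators become redundant and are deleted; the class-level decorator composes with the existing @parameterized.named_parameters methods. A minimal sketch with hypothetical names:

    from absl.testing import parameterized
    from tensorflow.python.keras import combinations
    from tensorflow.python.platform import test

    @combinations.generate(combinations.combine(mode=['graph', 'eager']))
    class KernelApproximationTest(test.TestCase, parameterized.TestCase):

      # Each named parameter still fans out as before, and every resulting test
      # now runs once per mode without a per-method graph/eager decorator.
      @parameterized.named_parameters(('gaussian', 'gaussian'),
                                      ('laplacian', 'laplacian'))
      def test_kernel_name(self, initializer):
        self.assertIn(initializer, ('gaussian', 'laplacian'))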

@ -18,12 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
@ -80,7 +80,7 @@ _DATA_FORMAT_PADDING_IMPLEMENTATION = [{
}]
@tf_test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
@ -159,7 +159,7 @@ class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase):
self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
@ -266,7 +266,7 @@ class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):
self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnectedImplementationModeTest(test.TestCase,
parameterized.TestCase):


@ -24,10 +24,11 @@ import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
@ -250,7 +251,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertAllEqual(out_dense, out_ragged)
@tf_test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MergeLayersTestNoExecution(test.TestCase):
def test_merge_elementwise_errors(self):


@ -29,6 +29,7 @@ from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import normalization
@ -69,18 +70,8 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01),
'dtype': 'float64'
},
input_shape=(3, 2, 2, 2), input_dtype='float64')
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_weights(self):
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
@ -92,7 +83,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_regularization(self):
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
@ -163,7 +154,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
_run_batchnorm_correctness_test(
normalization_v2.BatchNormalization, dtype='float16')
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
@testing_utils.enable_v2_dtype_behavior
def test_batchnorm_policy(self):
norm = keras.layers.BatchNormalization(
@ -201,7 +192,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
train_loss = model.train_on_batch(test_data, test_targets)
self.assertAlmostEqual(test_loss, train_loss)
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_batchnorm_non_trainable_with_tf_function(self):
inputs = keras.Input((3,))
bn = normalization_v2.BatchNormalization()
@ -260,9 +251,9 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2)
class BatchNormalizationV1Test(test.TestCase):
class BatchNormalizationV1Test(keras_parameterized.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_v1_fused_attribute(self):
norm = normalization.BatchNormalization()
inp = keras.layers.Input((4, 4, 4))
@ -295,7 +286,7 @@ class BatchNormalizationV2Test(keras_parameterized.TestCase):
kwargs={'fused': None},
input_shape=(3, 3, 3))
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_v2_fused_attribute(self):
norm = normalization_v2.BatchNormalization()
self.assertEqual(norm.fused, None)
@ -567,7 +558,7 @@ class LayerNormalizationTest(keras_parameterized.TestCase):
kwargs={'axis': (-3, -1)},
input_shape=(2, 8, 8, 3))
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_layernorm_weights(self):
layer = keras.layers.LayerNormalization(scale=False, center=False)
layer.build((None, 3, 4))
@ -579,7 +570,7 @@ class LayerNormalizationTest(keras_parameterized.TestCase):
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 2)
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_layernorm_regularization(self):
layer = keras.layers.LayerNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
@ -622,25 +613,25 @@ class LayerNormalizationTest(keras_parameterized.TestCase):
_run_layernorm_correctness_test(
normalization.LayerNormalization, dtype='float16')
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testIncorrectAxisType(self):
with self.assertRaisesRegexp(
TypeError, r'Expected an int or a list/tuple of ints'):
_ = normalization.LayerNormalization(axis={'axis': -1})
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInvalidAxis(self):
with self.assertRaisesRegexp(ValueError, r'Invalid axis: 3'):
layer_norm = normalization.LayerNormalization(axis=3)
layer_norm.build(input_shape=(2, 2, 2))
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testDuplicateAxis(self):
with self.assertRaisesRegexp(ValueError, r'Duplicate axis:'):
layer_norm = normalization.LayerNormalization(axis=[-1, -1])
layer_norm.build(input_shape=(2, 2, 2))
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testFusedAttr(self):
layer_norm = normalization.LayerNormalization(axis=[-2, -1])
layer_norm.build(input_shape=(2, 2, 2))
@ -706,7 +697,7 @@ class LayerNormalizationNumericsTest(keras_parameterized.TestCase):
# some of the values are very close to zero.
self.assertAllClose(expected, actual, rtol=tol, atol=tol)
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_forward(self):
# For numeric stability, we ensure the axis's dimension(s) have at least 4
# elements.


@ -18,20 +18,21 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
class GlobalPoolingTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class GlobalPoolingTest(test.TestCase, parameterized.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d(self):
testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
input_shape=(3, 4, 5))
@ -44,7 +45,6 @@ class GlobalPoolingTest(test.TestCase):
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5))
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d_masking_support(self):
model = keras.Sequential()
model.add(keras.layers.Masking(mask_value=0., input_shape=(None, 4)))
@ -56,7 +56,6 @@ class GlobalPoolingTest(test.TestCase):
output = model.predict(model_input)
self.assertAllClose(output[0], model_input[0, 0, :])
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d_with_ragged(self):
ragged_data = ragged_factory_ops.constant([
[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
@ -76,7 +75,6 @@ class GlobalPoolingTest(test.TestCase):
self.assertAllEqual(output_ragged, output_dense)
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_2d_with_ragged(self):
ragged_data = ragged_factory_ops.constant([
[[[1.0], [1.0]], [[2.0], [2.0]], [[3.0], [3.0]]],
@ -95,7 +93,6 @@ class GlobalPoolingTest(test.TestCase):
self.assertAllEqual(output_ragged, output_dense)
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_3d_with_ragged(self):
ragged_data = ragged_factory_ops.constant([
[[[[1.0]], [[1.0]]], [[[2.0]], [[2.0]]], [[[3.0]], [[3.0]]]],
@ -110,7 +107,6 @@ class GlobalPoolingTest(test.TestCase):
expected_output = constant_op.constant([[2.0], [1.5]])
self.assertAllEqual(output_ragged, expected_output)
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_2d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling2D,
@ -129,7 +125,6 @@ class GlobalPoolingTest(test.TestCase):
kwargs={'data_format': 'channels_last'},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_3d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling3D,
@ -149,9 +144,9 @@ class GlobalPoolingTest(test.TestCase):
input_shape=(3, 4, 3, 4, 3))
class Pooling2DTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class Pooling2DTest(test.TestCase, parameterized.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_2d(self):
pool_size = (3, 3)
for strides in [(1, 1), (2, 2)]:
@ -164,7 +159,6 @@ class Pooling2DTest(test.TestCase):
},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_2d(self):
testing_utils.layer_test(
keras.layers.AveragePooling2D,
@ -196,9 +190,9 @@ class Pooling2DTest(test.TestCase):
input_shape=(3, 4, 5, 6))
class Pooling3DTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class Pooling3DTest(test.TestCase, parameterized.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_3d(self):
if test.is_built_with_rocm():
self.skipTest('Pooling with 3D tensors is not supported in ROCm')
@ -219,7 +213,6 @@ class Pooling3DTest(test.TestCase):
},
input_shape=(3, 4, 11, 12, 10))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_3d(self):
if test.is_built_with_rocm():
self.skipTest('Pooling with 3D tensors is not supported in ROCm')
@ -241,9 +234,9 @@ class Pooling3DTest(test.TestCase):
input_shape=(3, 4, 11, 12, 10))
class Pooling1DTest(test.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class Pooling1DTest(test.TestCase, parameterized.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:
@ -257,7 +250,6 @@ class Pooling1DTest(test.TestCase):
kwargs={'data_format': 'channels_first'},
input_shape=(3, 2, 6))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:


@ -23,7 +23,7 @@ import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import layers
from tensorflow.python.keras.layers import rnn_cell_wrapper_v2
from tensorflow.python.keras.utils import generic_utils
@ -35,9 +35,9 @@ from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testResidualWrapper(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor_v2(np.array([[1., 1., 1.]]), dtype="float32")
@ -60,7 +60,6 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
# States are left untouched
self.assertAllClose(res[2], res[3])
@test_util.run_in_graph_and_eager_modes
def testResidualWrapperWithSlice(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor_v2(
@ -102,7 +101,6 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
[[rnn_cell_impl.DropoutWrapper, rnn_cell_wrapper_v2.DropoutWrapper],
[rnn_cell_impl.ResidualWrapper, rnn_cell_wrapper_v2.ResidualWrapper]])
@test_util.run_in_graph_and_eager_modes
def testWrapperKerasStyle(self, wrapper, wrapper_v2):
"""Tests if wrapper cell is instantiated in keras style scope."""
wrapped_cell_v2 = wrapper_v2(rnn_cell_impl.BasicRNNCell(1))
@ -113,7 +111,6 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
@test_util.run_in_graph_and_eager_modes
def testWrapperWeights(self, wrapper):
"""Tests that wrapper weights contain wrapped cells weights."""
base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell")
@ -136,7 +133,6 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2Caller(self, wrapper):
"""Tests that wrapper V2 is using the LayerRNNCell's caller."""
@ -153,7 +149,6 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
@test_util.run_in_graph_and_eager_modes
def testWrapperV2Build(self, wrapper):
cell = rnn_cell_impl.LSTMCell(10)
wrapper = wrapper(cell)


@ -22,7 +22,7 @@ from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras.layers import normalization as batchnorm_v1
from tensorflow.python.keras.layers import normalization_v2 as batchnorm_v2
from tensorflow.python.keras.layers import recurrent as rnn_v1
@ -43,7 +43,7 @@ class SerializableInt(int):
return cls(**config)
@tf_test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LayerSerializationTest(parameterized.TestCase, test.TestCase):
def test_serialize_deserialize(self):


@ -28,6 +28,7 @@ from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer_utils
@ -90,7 +91,7 @@ class _ResidualLSTMCell(keras.layers.LSTMCell):
class TimeDistributedTest(keras_parameterized.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_timedistributed_dense(self):
model = keras.models.Sequential()
model.add(
@ -253,7 +254,7 @@ class TimeDistributedTest(keras_parameterized.TestCase):
self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
self.assertIs(mask_outputs[-1], None) # final layer
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_TimeDistributed_with_masking_layer(self):
# test with Masking layer
model = keras.models.Sequential()
@ -298,7 +299,7 @@ class TimeDistributedTest(keras_parameterized.TestCase):
'`TimeDistributed` Layer should be passed an `input_shape `'):
time_dist(ph)
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_TimeDistributed_reshape(self):
class NoReshapeLayer(keras.layers.Layer):
@ -319,7 +320,7 @@ class TimeDistributedTest(keras_parameterized.TestCase):
td3 = keras.layers.TimeDistributed(NoReshapeLayer())
self.assertFalse(td3._always_use_reshape)
@tf_test_util.run_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_TimeDistributed_output_shape_return_types(self):
class TestLayer(keras.layers.Layer):
@ -448,7 +449,7 @@ class TimeDistributedTest(keras_parameterized.TestCase):
self.assertAllEqual(output_ragged.to_tensor(), output_dense)
@tf_test_util.run_all_in_graph_and_eager_modes
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BidirectionalTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(['sum', 'concat', 'ave', 'mul'])
@ -1175,7 +1176,7 @@ class ExampleWrapper(keras.layers.Wrapper):
return self.layer(inputs, *args, **kwargs)
class WrapperTest(keras_parameterized.TestCase):
class WrapperTest(parameterized.TestCase):
def test_wrapper_from_config_no_mutation(self):
wrapper = ExampleWrapper(keras.layers.Dense(1))