Update tests under keras.mixed_precision to use combinations.

Change all test_util.run_all_in_graph_and_eager_modes usages to combinations.

PiperOrigin-RevId: 301311255
Change-Id: Icd4f80a3e84ab06ef7d3423298918d172d1338e6
Scott Zhu 2020-03-16 23:10:19 -07:00 committed by TensorFlower Gardener
parent e473416af2
commit c608d46195
6 changed files with 63 additions and 77 deletions
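
The change applied in each file below is mechanical: swap the test_util graph-and-eager decorators for the Keras combinations API, which generates one parameterized test case per execution mode. A minimal before/after sketch of the pattern (MyTest and test_something are hypothetical names, not taken from the diff):

# Before: the test_util decorator reruns each test method in graph and eager mode.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test

@test_util.run_all_in_graph_and_eager_modes
class MyTest(test.TestCase):

  def test_something(self):
    self.assertEqual(1 + 1, 2)

# After: combinations.generate emits one parameterized case per mode, so the
# test class must also be a parameterized.TestCase.
from absl.testing import parameterized
from tensorflow.python.keras import combinations
from tensorflow.python.platform import test

@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MyTest(test.TestCase, parameterized.TestCase):

  def test_something(self):
    self.assertEqual(1 + 1, 2)

if __name__ == '__main__':
  test.main()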

View File

@@ -63,6 +63,7 @@ py_test(
         "//tensorflow/python:client_testlib",
         "//tensorflow/python:platform_test",
         "//tensorflow/python/keras",
+        "//tensorflow/python/keras:combinations",
         "//tensorflow/python/keras/mixed_precision/experimental:loss_scale_optimizer",
         "//tensorflow/python/keras/optimizer_v2",
     ],
@@ -86,6 +87,7 @@ cuda_py_test(
     deps = [
         ":device_compatibility_check",
         "//tensorflow/python:client_testlib",
+        "//tensorflow/python/keras:combinations",
     ],
 )
@@ -137,6 +139,7 @@ py_test(
         "//tensorflow/python:platform_test",
         "//tensorflow/python/distribute:mirrored_strategy",
         "//tensorflow/python/eager:context",
+        "//tensorflow/python/keras:combinations",
         "//tensorflow/python/keras/optimizer_v2",
         "@absl_py//absl/testing:parameterized",
     ],
@@ -177,6 +180,7 @@ cuda_py_test(
         "//tensorflow/python/distribute:mirrored_strategy",
         "//tensorflow/python/distribute:one_device_strategy",
         "//tensorflow/python/keras",
+        "//tensorflow/python/keras:combinations",
     ],
 )
@@ -219,6 +223,7 @@ cuda_py_test(
         "//tensorflow/python/distribute:central_storage_strategy",
         "//tensorflow/python/distribute:mirrored_strategy",
         "//tensorflow/python/keras",
+        "//tensorflow/python/keras:combinations",
         "@absl_py//absl/testing:parameterized",
     ],
 )

View File

@@ -30,7 +30,7 @@ from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import indexed_slices
 from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
 from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
 from tensorflow.python.ops import array_ops
@@ -69,7 +69,7 @@ def get_var(val, dtype, name=None):
   return variables.VariableV1(val, use_resource=True, dtype=dtype, name=name)


-@test_util.run_all_in_graph_and_eager_modes
+@combinations.generate(combinations.combine(mode=['graph', 'eager']))
 class AutoCastVariableTest(test.TestCase, parameterized.TestCase):

   @parameterized.named_parameters(*TESTCASES)
@@ -156,7 +156,7 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase):
   def test_method_delegations(self, distribute):
     # Test AutoCastVariable correctly delegates Variable methods to the
     # underlying variable.
-    with get_distribute_scope(distribute):
+    with self.test_session(), get_distribute_scope(distribute):
       for read_dtype in (dtypes.float32, dtypes.float16):
         if distribute:
           # MirroredVariable.assign will (incorrectly) return a Mirrored value
@@ -383,18 +383,19 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase):
   @parameterized.named_parameters(*TESTCASES)
   def test_checkpoint(self, distribute):
-    with get_distribute_scope(distribute):
-      x = get_var(1., dtypes.float32)
-      x = autocast_variable.create_autocast_variable(x)
-      self.evaluate(x.initializer)
-      self.evaluate(x.assign(123.))
-
-      checkpoint = trackable_utils.Checkpoint(x=x)
-      prefix = os.path.join(self.get_temp_dir(), 'ckpt')
-      save_path = checkpoint.save(prefix)
-      self.evaluate(x.assign(234.))
-      checkpoint.restore(save_path).assert_consumed().run_restore_ops()
-      self.assertEqual(self.evaluate(x), 123.)
+    with self.test_session():
+      with get_distribute_scope(distribute):
+        x = get_var(1., dtypes.float32)
+        x = autocast_variable.create_autocast_variable(x)
+        self.evaluate(x.initializer)
+        self.evaluate(x.assign(123.))
+
+        checkpoint = trackable_utils.Checkpoint(x=x)
+        prefix = os.path.join(self.get_temp_dir(), 'ckpt')
+        save_path = checkpoint.save(prefix)
+        self.evaluate(x.assign(234.))
+        checkpoint.restore(save_path).assert_consumed().run_restore_ops()
+        self.assertEqual(self.evaluate(x), 123.)

   @parameterized.named_parameters(*TESTCASES)
   def test_invalid_wrapped_variable(self, distribute):
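
In addition to the decorator swap, several tests above that call self.evaluate() now open an explicit self.test_session() scope, presumably because the old decorator entered a cached test session for the graph-mode run while combinations.generate does not. A minimal sketch of the wrapping pattern (SessionScopeTest and test_assign are hypothetical names):

from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class SessionScopeTest(test.TestCase):

  def test_assign(self):
    # test_session() gives the evaluate() calls below a shared graph session;
    # under eager execution the context manager simply yields None.
    with self.test_session():
      v = variables.Variable(1.)
      self.evaluate(v.initializer)
      self.evaluate(v.assign(2.))
      self.assertEqual(self.evaluate(v), 2.)


if __name__ == '__main__':
  test.main()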

View File

@@ -21,7 +21,7 @@ from __future__ import print_function
 import re
 from tensorflow.core.framework import device_attributes_pb2
-from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
 from tensorflow.python.platform import test
 from tensorflow.python.platform import tf_logging
@@ -42,7 +42,7 @@ def _get_device_attrs(device_type, device_name=None, cc_major=None,
       device_type='GPU', physical_device_desc=physical_device_desc)


-@test_util.run_all_in_graph_and_eager_modes
+@combinations.generate(combinations.combine(mode=['graph', 'eager']))
 class DeviceCompatibilityCheckTest(test.TestCase):

   def _test_compat_check(self, device_attr_list, should_warn, expected_regex,

View File

@@ -31,8 +31,8 @@ from tensorflow.python.eager import context
 from tensorflow.python.eager import def_function
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import test_util
 from tensorflow.python.keras import backend
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras import keras_parameterized
 from tensorflow.python.keras import layers
 from tensorflow.python.keras import models
@@ -119,11 +119,11 @@ TESTCASES = ({
 })


+@combinations.generate(combinations.combine(mode=['graph', 'eager']))
 class KerasLayerTest(keras_parameterized.TestCase):
   """Test mixed precision with Keras layers."""

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_mixed_policies_(self, strategy_fn):
     for dtype in 'float16', 'bfloat16':
       x = constant_op.constant([1.])
@@ -142,7 +142,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
       self.evaluate(variables.global_variables_initializer())
       self.assertEqual(self.evaluate(y), 1.)

-  @test_util.run_in_graph_and_eager_modes
   def test_layer_with_int_variable(self):

     class LayerWithIntVar(base_layer.Layer):
@@ -159,7 +158,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
     self.assertEqual(layer(x).dtype, 'int32')

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_layer_with_non_autocast_variable(self, strategy_fn):
     x = constant_op.constant([1.])
     with strategy_fn().scope():
@@ -172,7 +170,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
       self.assertEqual(self.evaluate(y), 1.)

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_layer_calling_tf_function(self, strategy_fn):
     x = constant_op.constant([1.])
     with strategy_fn().scope():
@@ -185,7 +182,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
       self.assertEqual(self.evaluate(y), 1.)

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_layer_regularizer_runs_in_var_dtype(self, strategy_fn):
     x = constant_op.constant([1.])
     with strategy_fn().scope():
@@ -211,7 +207,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
     self.assertEqual(self.evaluate(regularizer_loss), 1.)

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_passing_policy_to_layer(self, strategy_fn):
     x = constant_op.constant([1.], dtype=dtypes.float16)
     with strategy_fn().scope():
@@ -230,7 +225,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
     self.assertEqual(layer(x).dtype, dtypes.float64)
     self.assertEqual(layer.v.dtype, dtypes.float64)

-  @test_util.run_in_graph_and_eager_modes
   def test_error_passing_policy_string_to_layer(self):
     with self.assertRaisesRegexp(
         TypeError, "Cannot convert value 'mixed_float16' to a "
@@ -240,7 +234,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
       mp_test_util.MultiplyLayer(dtype='mixed_float16')

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_gradient(self, strategy_fn):
     x = constant_op.constant([1.])
     with strategy_fn().scope() as strategy:
@@ -308,17 +301,19 @@ class KerasLayerTest(keras_parameterized.TestCase):
       self.assertEqual(self.evaluate(layer(x)), 100.)

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_checkpointing_layer_weights(self, strategy_fn):
-    self._test_checkpointing_layer_weights(
-        strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=True)
-    self._test_checkpointing_layer_weights(
-        strategy_fn, mixed_prec_when_saving=True, mixed_prec_when_loading=False)
-    self._test_checkpointing_layer_weights(
-        strategy_fn, mixed_prec_when_saving=False, mixed_prec_when_loading=True)
+    with self.test_session():
+      self._test_checkpointing_layer_weights(
+          strategy_fn, mixed_prec_when_saving=True,
+          mixed_prec_when_loading=True)
+      self._test_checkpointing_layer_weights(
+          strategy_fn, mixed_prec_when_saving=True,
+          mixed_prec_when_loading=False)
+      self._test_checkpointing_layer_weights(
+          strategy_fn, mixed_prec_when_saving=False,
+          mixed_prec_when_loading=True)

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def test_config(self, strategy_fn):
     x = constant_op.constant([1.], dtype=dtypes.float16)
     with strategy_fn().scope():
@@ -397,7 +392,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
     self.assertEqual(layer(x).dtype, 'float16')
     self.assertEqual(layer.v.dtype, 'float16')

-  @test_util.run_in_graph_and_eager_modes
   def test_delete_variable(self):
     layer = base_layer.Layer(dtype=policy.Policy('mixed_float16'))
     layer.x = layer.add_weight('x')
@@ -405,7 +399,6 @@ class KerasLayerTest(keras_parameterized.TestCase):
     del layer.x
     self.assertEqual(layer.trainable_weights, [])

-  @test_util.run_in_graph_and_eager_modes
   def test_build_and_call_layer_in_function(self):
     layer = mp_test_util.MultiplyLayer(dtype=policy.Policy('mixed_float16'))

     @def_function.function
@@ -833,7 +826,7 @@ class KerasModelTest(keras_parameterized.TestCase):
       model.fit(dataset)
       self.assertEqual(backend.eval(layer.v), -3)

-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_loss_scale_optimizer_overrides_policy_loss_scale(self):
     with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
       opt = gradient_descent.SGD(1.)
@@ -844,7 +837,7 @@ class KerasModelTest(keras_parameterized.TestCase):
       model.compile(opt, loss='mse')
       self.assertEqual(self.evaluate(model.optimizer.loss_scale()), 5.)

-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_pass_invalid_optimizer_with_loss_scaling(self):
     with policy.policy_scope(policy.Policy('float32', loss_scale=10.)):
       x = layers.Input(shape=(1,))
@@ -857,7 +850,7 @@ class KerasModelTest(keras_parameterized.TestCase):
       with self.assertRaisesRegexp(ValueError, error_msg):
         model.compile(optimizers.SGD(1.), 'mse')

-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_functional_model_loss_dtype(self):
     with policy.policy_scope('float16'):
       x = layers.Input(shape=(1,))
@@ -867,6 +860,7 @@ class KerasModelTest(keras_parameterized.TestCase):
       # The loss should not be casted to the policy's dtype.
       self.assertEqual(model.losses[0].dtype, 'float32')

+  @keras_parameterized.run_all_keras_modes
   @parameterized.named_parameters(
       {
           'testcase_name': 'base',
@@ -883,7 +877,6 @@ class KerasModelTest(keras_parameterized.TestCase):
           'strategy_fn': create_mirrored_strategy,
           'h5': True,
       })
-  @test_util.run_in_graph_and_eager_modes
   def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
     with strategy_fn().scope():
       with policy.policy_scope('mixed_float16'):

View File

@@ -28,6 +28,7 @@ from tensorflow.python.distribute import mirrored_strategy
 from tensorflow.python.eager import context
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras import optimizers
 from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
 from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util
@@ -68,6 +69,7 @@ TESTCASES = ({

 @test_util.with_control_flow_v2
+@combinations.generate(combinations.combine(mode=['graph', 'eager']))
 class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):

   def _run_if_in_graph_mode(self, val):
@@ -85,7 +87,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     return lambda: opt.minimize(loss, var_list=[var])

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def testFixedLossScaleAppliedToLossWithMinimize(self, strategy_fn):
     with strategy_fn().scope() as strategy:
       var = variables.Variable([5.0])
@@ -119,7 +120,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
       # mp_test_util.create_identity_with_grad_check_fn added an assertion op.
       self.evaluate(run_op)

-  @test_util.run_in_graph_and_eager_modes
   def testGetScaledLoss(self):
     opt = gradient_descent.SGD(2.0)
     opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2.)
@@ -130,7 +130,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss)))
     self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)()))

-  @test_util.run_in_graph_and_eager_modes
   def testGetUnscaledGradients(self):
     opt = gradient_descent.SGD(2.0)
     opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2)
@@ -142,7 +141,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     grads = [self.evaluate(g) if g is not None else g for g in grads]
     self.assertEqual([1.5, None, -2.], grads)

-  @test_util.run_in_graph_and_eager_modes
   def testGetUnscaledSparseGradients(self):
     opt = gradient_descent.SGD(2.0)
     opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2)
@@ -156,7 +154,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
                         self.evaluate(sparse_grad.values))

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def testDynamicLossScale(self, strategy_fn):
     strategy = strategy_fn()
     learning_rate = 2.
@@ -190,7 +187,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
       self.assertAllClose([1.], self.evaluate(var))

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def testDynamicUpdate(self, strategy_fn):
     with strategy_fn().scope() as strategy:
       var = variables.Variable([1.0, 2.0])
@@ -221,7 +217,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
       self.assertEqual(2., self.evaluate(opt.loss_scale()))

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def testDynamicLossScaleWithFloat16Loss(self, strategy_fn):
     strategy = strategy_fn()
     learning_rate = 2.
@@ -243,7 +238,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
       self.assertAllClose([3.], self.evaluate(var))

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def testDynamicLossScaleWithSlots(self, strategy_fn):
     strategy_obj = strategy_fn()
     if (isinstance(strategy_obj, mirrored_strategy.MirroredStrategy) and
@@ -283,7 +277,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(opt.get_slot_names(), ['momentum'])

-  @test_util.run_in_graph_and_eager_modes
   def testIterations(self):
     opt = gradient_descent.SGD(2.0)
     lso = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=10.)
@@ -291,27 +284,26 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(lso.iterations, 7)
     self.assertEqual(opt.iterations, 7)

-  @test_util.run_in_graph_and_eager_modes
   def testWeightMethods(self):
-    var = variables.Variable([1.0])
-    opt = gradient_descent.SGD(1.0)
-    initial_loss_scale = 2.
-    loss_scale = loss_scale_module.DynamicLossScale(
-        initial_loss_scale=initial_loss_scale, increment_period=1,
-        multiplier=4)
-    opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
-    run_op = opt.minimize(lambda: var * 2, [var])
-    self.evaluate(variables.global_variables_initializer())
-    self._run_if_in_graph_mode(run_op)
-
-    self.assertLen(opt.weights, 1)  # The 'iterations' weight
-    self.assertEqual(self.evaluate(opt.weights[0]), 1)
-    self.assertEqual(opt.get_weights()[0], 1)
-    self.assertEqual(self.evaluate(opt.variables()[0]), 1)
-    opt.set_weights([np.array(2.)])
-    self.assertEqual(self.evaluate(opt.variables()[0]), 2)
+    with self.test_session():
+      var = variables.Variable([1.0])
+      opt = gradient_descent.SGD(1.0)
+      initial_loss_scale = 2.
+      loss_scale = loss_scale_module.DynamicLossScale(
+          initial_loss_scale=initial_loss_scale, increment_period=1,
+          multiplier=4)
+      opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
+      run_op = opt.minimize(lambda: var * 2, [var])
+      self.evaluate(variables.global_variables_initializer())
+      self._run_if_in_graph_mode(run_op)
+
+      self.assertLen(opt.weights, 1)  # The 'iterations' weight
+      self.assertEqual(self.evaluate(opt.weights[0]), 1)
+      self.assertEqual(opt.get_weights()[0], 1)
+      self.assertEqual(self.evaluate(opt.variables()[0]), 1)
+      opt.set_weights([np.array(2.)])
+      self.assertEqual(self.evaluate(opt.variables()[0]), 2)

-  @test_util.run_in_graph_and_eager_modes
   def testSlotMethodErrors(self):
     opt = gradient_descent.SGD(1.0, momentum=1.0)
     opt = loss_scale_optimizer.LossScaleOptimizer(opt, 'dynamic')
@@ -332,9 +324,8 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
       loss_scale_optimizer.LossScaleOptimizer(opt, None)

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def testGettingAndSettingLearningRate(self, strategy_fn):
-    with strategy_fn().scope() as strategy:
+    with self.test_session(), strategy_fn().scope() as strategy:
       var = variables.Variable([5.0])
       opt = adam.Adam(learning_rate=1.0)
       loss = lambda: var * 2.0
@@ -357,7 +348,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     with self.assertRaises(AttributeError):
       opt.not_an_attr += 3

-  @test_util.run_in_graph_and_eager_modes
   def testArbitraryAttributesNotExposed(self):
     opt = adam.Adam(learning_rate=1.0)
     # Test that Adam has attributes 'epsilon' and 'beta1'
@@ -375,7 +365,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
         "'LossScaleOptimizer' object has no attribute 'beta_1'"):
       opt.beta_1  # pylint: disable=pointless-statement

-  @test_util.run_in_graph_and_eager_modes
   def testApplyGradientsGetsUnwrappedTensors(self):
     # Tests that gradients passed to apply_gradients are not wrapped in a
     # DistributionStrategy wrapper, such as PerReplica, but instead are raw
@@ -403,7 +392,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
       strategy.experimental_run(run_fn)

   @parameterized.named_parameters(*TESTCASES)
-  @test_util.run_in_graph_and_eager_modes
   def testCheckpoint(self, strategy_fn):
     strategy = strategy_fn()
     if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
@@ -447,7 +435,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
       self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1)
       self.assertAlmostEqual(self.evaluate(slot_var).item(), slot_value)

-  @test_util.run_in_graph_and_eager_modes
   def testGetConfig(self):
     opt = gradient_descent.SGD(2., momentum=0.5)
     loss_scale = loss_scale_module.DynamicLossScale(
@@ -466,7 +453,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(opt.loss_scale.increment_period, 3.)
     self.assertEqual(opt.loss_scale.multiplier, 4.)

-  @test_util.run_in_graph_and_eager_modes
   def testSerializationWithBuiltInOptimizer(self):
     opt = gradient_descent.SGD(2., momentum=0.5)
     loss_scale = loss_scale_module.DynamicLossScale(
@@ -485,7 +471,6 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(opt.loss_scale.increment_period, 3.)
     self.assertEqual(opt.loss_scale.multiplier, 4.)

-  @test_util.run_in_graph_and_eager_modes
   def testSerializationWithCustomOptimizer(self):

     class MySGD(gradient_descent.SGD):

View File

@@ -18,10 +18,12 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

+from absl.testing import parameterized
+
 from tensorflow.python.eager import context
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.keras.engine import base_layer_utils
 from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
@@ -33,8 +35,8 @@ from tensorflow.python.training.experimental import loss_scale as loss_scale_mod
 from tensorflow.python.training.experimental import mixed_precision


-@test_util.run_all_in_graph_and_eager_modes
-class PolicyTest(test.TestCase):
+@combinations.generate(combinations.combine(mode=['graph', 'eager']))
+class PolicyTest(test.TestCase, parameterized.TestCase):
   """Tests Policies."""

   @testing_utils.enable_v2_dtype_behavior