Update tests under keras.optimizer_v2 to use combinations.

Change all test_util.run_in_graph_and_eager_modes to combinations.

PiperOrigin-RevId: 301308528
Change-Id: I64d84ebf400c4903514eddb68b23a7b80ec8663d

commit e473416af2 (parent f18697daa1)
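The change is mechanical across every file below: each `@test_util.run_in_graph_and_eager_modes` method decorator becomes `@combinations.generate(combinations.combine(mode=["graph", "eager"]))`, and the affected test class additionally inherits `parameterized.TestCase`, which the combinations machinery requires. A minimal before/after sketch of the pattern (the class and test names here are illustrative, not from the commit):

from absl.testing import parameterized

from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.platform import test


class BeforeMigrationTest(test.TestCase):

  # Old style: the test_util decorator reruns the method in graph and
  # eager mode.
  @test_util.run_in_graph_and_eager_modes
  def testBasic(self):
    self.assertEqual(2, 1 + 1)  # hypothetical body


class AfterMigrationTest(test.TestCase, parameterized.TestCase):

  # New style: combinations generate one parameterized test per mode.
  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
  def testBasic(self):
    self.assertEqual(2, 1 + 1)  # hypothetical body


if __name__ == "__main__":
  test.main()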
@@ -75,6 +75,7 @@ cuda_py_test(
         "//tensorflow/python:resources",
         "//tensorflow/python:variables",
         "//tensorflow/python/eager:context",
+        "//tensorflow/python/keras:combinations",
     ],
 )
@@ -94,6 +95,7 @@ cuda_py_test(
         "//tensorflow/python:resources",
         "//tensorflow/python:variables",
         "//tensorflow/python/eager:context",
+        "//tensorflow/python/keras:combinations",
     ],
 )
@@ -113,6 +115,7 @@ cuda_py_test(
         "//tensorflow/python:resources",
         "//tensorflow/python:variables",
         "//tensorflow/python/eager:context",
+        "//tensorflow/python/keras:combinations",
     ],
 )
@@ -132,6 +135,7 @@ cuda_py_test(
         "//tensorflow/python:resources",
         "//tensorflow/python:variables",
         "//tensorflow/python/eager:context",
+        "//tensorflow/python/keras:combinations",
     ],
 )
@@ -170,6 +174,7 @@ cuda_py_test(
         "//tensorflow/python:resources",
         "//tensorflow/python:variables",
         "//tensorflow/python/eager:context",
+        "//tensorflow/python/keras:combinations",
     ],
 )
@@ -213,6 +218,7 @@ cuda_py_test(
         "//tensorflow/python:variables",
         "//tensorflow/python/eager:def_function",
         "//tensorflow/python/keras",
+        "//tensorflow/python/keras:combinations",
         "@absl_py//absl/testing:parameterized",
     ],
 )
@@ -226,6 +232,7 @@ cuda_py_test(
         ":optimizer_v2",
         "//tensorflow/python:client_testlib",
         "//tensorflow/python/keras",
+        "//tensorflow/python/keras:combinations",
         "//third_party/py/numpy",
         "@absl_py//absl/testing:parameterized",
     ],
@@ -248,6 +255,7 @@ cuda_py_test(
         "//tensorflow/python:state_ops",
         "//tensorflow/python:variables",
         "//tensorflow/python/eager:def_function",
+        "//tensorflow/python/keras:combinations",
         "@absl_py//absl/testing:parameterized",
     ],
 )
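Each migrated cuda_py_test target above gains a dependency on "//tensorflow/python/keras:combinations" so the new decorator is importable at test time. The hunks that follow make the matching source changes in the individual optimizer and learning-rate-schedule test files; each hunk's imports identify which file it belongs to.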
@@ -18,6 +18,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl.testing import parameterized
 import numpy as np
 
 from tensorflow.python.eager import context
@@ -25,6 +26,7 @@ from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.optimizer_v2 import adadelta
 from tensorflow.python.ops import embedding_ops
 from tensorflow.python.ops import math_ops
@@ -38,7 +40,7 @@ if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()):
   _DATA_TYPES += [dtypes.complex64, dtypes.complex128]
 
 
-class AdadeltaOptimizerTest(test.TestCase):
+class AdadeltaOptimizerTest(test.TestCase, parameterized.TestCase):
 
   def doTestBasic(self, use_resource=False, use_callable_params=False):
     num_updates = 4  # number of ADADELTA steps to perform
@@ -145,7 +147,7 @@ class AdadeltaOptimizerTest(test.TestCase):
         self.evaluate(var1),
         rtol=1e-5)
 
-  @test_util.run_in_graph_and_eager_modes(reset_test=True)
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testResourceBasic(self):
     self.doTestBasic(use_resource=True)
@@ -20,6 +20,7 @@ from __future__ import print_function
 
 import copy
 
+from absl.testing import parameterized
 import numpy as np
 
 from tensorflow.python.eager import context
@@ -27,6 +28,7 @@ from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.optimizer_v2 import adagrad
 from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
 from tensorflow.python.ops import embedding_ops
@@ -68,7 +70,7 @@ def sparse_adagrad_update_numpy(param,
   return param_t, accum_t
 
 
-class AdagradOptimizerTest(test.TestCase):
+class AdagradOptimizerTest(test.TestCase, parameterized.TestCase):
 
   def doTestBasic(self, use_callable_params=False):
     for dtype in _DATA_TYPES:
@@ -113,7 +115,7 @@ class AdagradOptimizerTest(test.TestCase):
       self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
       self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes(reset_test=True)
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasic(self):
     self.doTestBasic()
@@ -18,13 +18,14 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl.testing import parameterized
 import numpy as np
 
 from tensorflow.python.eager import context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras import optimizers
 from tensorflow.python.keras.optimizer_v2 import adam
 from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
@@ -108,7 +109,7 @@ def get_beta_accumulators(opt, dtype):
   return (beta_1_power, beta_2_power)
 
 
-class AdamOptimizerTest(test.TestCase):
+class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
 
   def testSparse(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
@@ -252,7 +253,7 @@ class AdamOptimizerTest(test.TestCase):
       self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
       self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes(reset_test=True)
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testResourceBasic(self):
     self.doTestBasic()
 
@@ -260,7 +261,7 @@ class AdamOptimizerTest(test.TestCase):
     with context.eager_mode():
       self.doTestBasic(use_callable_params=True)
 
-  @test_util.run_in_graph_and_eager_modes(reset_test=True)
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicWithAmsgrad(self):
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
       with self.cached_session(use_gpu=True):
@@ -304,7 +305,7 @@ class AdamOptimizerTest(test.TestCase):
       self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
       self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testSparseWithAmsgrad(self):
     # dtypes.half does not work on gpu + eager.
     for dtype in [dtypes.float32, dtypes.float64]:
@@ -18,13 +18,14 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl.testing import parameterized
 import numpy as np
 
 from tensorflow.python.eager import context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.optimizer_v2 import adamax
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
@@ -76,7 +77,7 @@ def get_beta_accumulators(opt, dtype):
   return beta_1_power
 
 
-class AdamaxOptimizerTest(test.TestCase):
+class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
 
   def testResourceSparse(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
@@ -171,7 +172,7 @@ class AdamaxOptimizerTest(test.TestCase):
     self.assertAllClose(aggregated_update_var.eval(),
                         repeated_index_update_var.eval())
 
-  @test_util.run_in_graph_and_eager_modes(reset_test=True)
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasic(self):
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
       with self.session(graph=ops.Graph(), use_gpu=True):
@@ -222,7 +223,7 @@ class AdamaxOptimizerTest(test.TestCase):
     self.assertAllCloseAccordingToType(
         var1_np, self.evaluate(var1), rtol=1e-2)
 
-  @test_util.run_in_graph_and_eager_modes(reset_test=True)
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicWithLearningRateDecay(self):
     for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
       with self.session(graph=ops.Graph(), use_gpu=True):
@@ -18,6 +18,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl.testing import parameterized
 import numpy as np
 
 from tensorflow.python.eager import backprop
@@ -26,7 +27,7 @@ from tensorflow.python.eager import function
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.optimizer_v2 import gradient_descent
 from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
 from tensorflow.python.ops import array_ops
@@ -37,9 +38,9 @@ from tensorflow.python.ops import variables
 from tensorflow.python.platform import test
 
 
-class GradientDescentOptimizerTest(test.TestCase):
+class GradientDescentOptimizerTest(test.TestCase, parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasic(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
       var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
@@ -88,7 +89,7 @@ class GradientDescentOptimizerTest(test.TestCase):
         [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01],
         self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicWithLearningRateDecay(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
       learning_rate = 3.0
@@ -96,7 +97,7 @@ class GradientDescentOptimizerTest(test.TestCase):
       sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay)
       self._test_basic_sgd_with_learning_rate_decay(sgd, dtype)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicWithLearningRateInverseTimeDecay(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
       learning_rate = learning_rate_schedule.InverseTimeDecay(
@@ -104,7 +105,7 @@ class GradientDescentOptimizerTest(test.TestCase):
       sgd = gradient_descent.SGD(learning_rate=learning_rate)
       self._test_basic_sgd_with_learning_rate_decay(sgd, dtype)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicWithLearningRateInverseTimeDecaySerializeAndDeserialize(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
       learning_rate = learning_rate_schedule.InverseTimeDecay(
@@ -113,7 +114,7 @@ class GradientDescentOptimizerTest(test.TestCase):
       sgd = gradient_descent.SGD.from_config(sgd.get_config())
       self._test_basic_sgd_with_learning_rate_decay(sgd, dtype)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasicCallableParams(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
       var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
@@ -132,7 +133,7 @@ class GradientDescentOptimizerTest(test.TestCase):
     self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                        self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testMinimizeResourceVariable(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
       var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
@@ -292,14 +293,14 @@ class GradientDescentOptimizerTest(test.TestCase):
     self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
 
 
-class MomentumOptimizerTest(test.TestCase):
+class MomentumOptimizerTest(test.TestCase, parameterized.TestCase):
 
   def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
     accum = accum * momentum - g * lr
     var += (accum * momentum - g * lr)
     return var, accum
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testBasic(self):
     for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
       var0 = resource_variable_ops.ResourceVariable([1.0, 2.0],
@@ -454,7 +455,7 @@ class MomentumOptimizerTest(test.TestCase):
     # Validate updated params
     self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
 
-  @test_util.run_in_graph_and_eager_modes(reset_test=True)
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testMinimizeWith2DIndicesForEmbeddingLookup(self):
     var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))
 
@@ -662,7 +663,7 @@ class MomentumOptimizerTest(test.TestCase):
         3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
     ]), self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=["graph", "eager"]))
   def testConfig(self):
     opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9, nesterov=True)
     config = opt.get_config()
@@ -27,6 +27,7 @@ from tensorflow.python.eager import context
 from tensorflow.python.eager import def_function
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.optimizer_v2 import gradient_descent
 from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
 # Import resource_variable_ops for the variables-to-tensor implicit conversion.
@@ -43,12 +44,13 @@ def _maybe_serialized(lr_decay, serialize_and_deserialize):
   return lr_decay
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+# @parameterized.named_parameters(
+#     ("NotSerialized", False),
+#     ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class LRDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
   def testContinuous(self, serialize):
     self.evaluate(variables.global_variables_initializer())
     step = 5
@@ -57,7 +59,6 @@ class LRDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     expected = .05 * 0.96**(5.0 / 10.0)
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testStaircase(self, serialize):
     if context.executing_eagerly():
       step = resource_variable_ops.ResourceVariable(0)
@@ -102,7 +103,6 @@ class LRDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     expected = .1 * 0.96**(100 // 3)
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testPiecewiseConstant(self, serialize):
     x = resource_variable_ops.ResourceVariable(-999)
     decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(
@@ -143,7 +143,6 @@ class LRDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     minimize()
     self.assertAllEqual(v.read_value(), -1.0)
 
-  @test_util.run_in_graph_and_eager_modes
   def testPiecewiseConstantEdgeCases(self, serialize):
     # Test casting boundaries from int32 to int64.
     x_int64 = resource_variable_ops.ResourceVariable(
@@ -165,12 +164,13 @@ class LRDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.7, 1e-6)
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+# @parameterized.named_parameters(
+#     ("NotSerialized", False),
+#     ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class LinearDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
   def testHalfWay(self, serialize):
     step = 5
     lr = 0.05
@@ -180,7 +180,6 @@ class LinearDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     expected = lr * 0.5
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testEnd(self, serialize):
     step = 10
     lr = 0.05
@@ -190,7 +189,6 @@ class LinearDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     expected = end_lr
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testHalfWayWithEnd(self, serialize):
     step = 5
     lr = 0.05
@@ -200,7 +198,6 @@ class LinearDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     expected = (lr + end_lr) * 0.5
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testBeyondEnd(self, serialize):
     step = 15
     lr = 0.05
@@ -210,7 +207,6 @@ class LinearDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     expected = end_lr
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testBeyondEndWithCycle(self, serialize):
     step = 15
     lr = 0.05
@@ -222,13 +218,14 @@ class LinearDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+# @parameterized.named_parameters(
+#     ("NotSerialized", False),
+#     ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class SqrtDecayTestV2(test_util.TensorFlowTestCase,
                       parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
   def testHalfWay(self, serialize):
     step = 5
     lr = 0.05
@@ -240,7 +237,6 @@ class SqrtDecayTestV2(test_util.TensorFlowTestCase,
     expected = lr * 0.5**power
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testEnd(self, serialize):
     step = 10
     lr = 0.05
@@ -252,7 +248,6 @@ class SqrtDecayTestV2(test_util.TensorFlowTestCase,
     expected = end_lr
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testHalfWayWithEnd(self, serialize):
     step = 5
     lr = 0.05
@@ -264,7 +259,6 @@ class SqrtDecayTestV2(test_util.TensorFlowTestCase,
     expected = (lr - end_lr) * 0.5**power + end_lr
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testBeyondEnd(self, serialize):
     step = 15
     lr = 0.05
@@ -276,7 +270,6 @@ class SqrtDecayTestV2(test_util.TensorFlowTestCase,
     expected = end_lr
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testBeyondEndWithCycle(self, serialize):
     step = 15
     lr = 0.05
@@ -289,13 +282,14 @@ class SqrtDecayTestV2(test_util.TensorFlowTestCase,
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+# @parameterized.named_parameters(
+#     ("NotSerialized", False),
+#     ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class PolynomialDecayTestV2(test_util.TensorFlowTestCase,
                             parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
   def testBeginWithCycle(self, serialize):
     lr = 0.001
     decay_steps = 10
@@ -307,12 +301,13 @@ class PolynomialDecayTestV2(test_util.TensorFlowTestCase,
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+# @parameterized.named_parameters(
+#     ("NotSerialized", False),
+#     ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class InverseDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
   def testDecay(self, serialize):
     initial_lr = 0.1
     k = 10
@@ -328,7 +323,6 @@ class InverseDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
       self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
       self.evaluate(step.assign_add(1))
 
-  @test_util.run_in_graph_and_eager_modes
   def testStaircase(self, serialize):
     initial_lr = 0.1
     k = 10
@@ -345,9 +339,8 @@ class InverseDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
       self.evaluate(step.assign_add(1))
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class CosineDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def np_cosine_decay(self, step, decay_steps, alpha=0.0):
@@ -356,7 +349,6 @@ class CosineDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
     decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
     return (1.0 - alpha) * decay + alpha
 
-  @test_util.run_in_graph_and_eager_modes
   def testDecay(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -367,7 +359,6 @@ class CosineDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
       expected = self.np_cosine_decay(step, num_training_steps)
       self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testAlpha(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -381,9 +372,8 @@ class CosineDecayTestV2(test_util.TensorFlowTestCase, parameterized.TestCase):
       self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase,
                                 parameterized.TestCase):
 
@@ -399,7 +389,6 @@ class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase,
     decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
     return (1.0 - alpha) * decay + alpha
 
-  @test_util.run_in_graph_and_eager_modes
   def testDecay(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -410,7 +399,6 @@ class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase,
      expected = self.np_cosine_decay_restarts(step, num_training_steps)
      self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testAlpha(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -423,7 +411,6 @@ class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase,
           step, num_training_steps, alpha=alpha)
       self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testMMul(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -436,7 +423,6 @@ class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase,
           step, num_training_steps, m_mul=m_mul)
      self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testTMul(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -450,9 +436,8 @@ class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase,
      self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class LinearCosineDecayTestV2(test_util.TensorFlowTestCase,
                               parameterized.TestCase):
 
@@ -468,7 +453,6 @@ class LinearCosineDecayTestV2(test_util.TensorFlowTestCase,
     cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
     return (alpha + linear_decayed) * cosine_decayed + beta
 
-  @test_util.run_in_graph_and_eager_modes
   def testDefaultDecay(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -479,7 +463,6 @@ class LinearCosineDecayTestV2(test_util.TensorFlowTestCase,
      expected = self.np_linear_cosine_decay(step, num_training_steps)
      self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
-  @test_util.run_in_graph_and_eager_modes
   def testNonDefaultDecay(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -496,13 +479,11 @@ class LinearCosineDecayTestV2(test_util.TensorFlowTestCase,
     self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6)
 
 
-@parameterized.named_parameters(
-    ("NotSerialized", False),
-    ("Serialized", True))
+@combinations.generate(combinations.combine(serialize=[False, True],
+                                            mode=["graph", "eager"]))
 class NoisyLinearCosineDecayTestV2(test_util.TensorFlowTestCase,
                                    parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
   def testDefaultNoisyLinearCosine(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
@@ -514,7 +495,6 @@ class NoisyLinearCosineDecayTestV2(test_util.TensorFlowTestCase,
     # Cannot be deterministically tested
     self.evaluate(decayed_lr(step))
 
-  @test_util.run_in_graph_and_eager_modes
   def testNonDefaultNoisyLinearCosine(self, serialize):
     num_training_steps = 1000
     initial_lr = 1.0
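In the schedule tests above, the old class-level @parameterized.named_parameters(("NotSerialized", False), ("Serialized", True)) decorator is folded into the same combinations.combine call as the mode axis, so each method runs once per (serialize, mode) pair and receives serialize as an argument. A minimal sketch of that shape (the class name and test body are hypothetical):

from absl.testing import parameterized

from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.platform import test


@combinations.generate(combinations.combine(serialize=[False, True],
                                            mode=["graph", "eager"]))
class ExampleScheduleTest(test_util.TensorFlowTestCase,
                          parameterized.TestCase):

  def testSerializeFlag(self, serialize):
    # Four variants run: (serialize, mode) in {False, True} x {graph, eager}.
    # `mode` is consumed by the combinations framework; `serialize` is
    # passed through to the test method.
    self.assertIn(serialize, [False, True])


if __name__ == "__main__":
  test.main()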
@@ -32,6 +32,7 @@ from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
 from tensorflow.python.keras import backend
 from tensorflow.python.keras import callbacks
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras import keras_parameterized
 from tensorflow.python.keras import losses
 from tensorflow.python.keras import optimizers
@@ -67,9 +68,9 @@ if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()):
   _DATA_TYPES += [dtypes.complex64, dtypes.complex128]
 
 
-class OptimizerTest(test.TestCase):
+class OptimizerTest(test.TestCase, parameterized.TestCase):
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testBasic(self):
     for dtype in _DATA_TYPES:
       with test_util.use_gpu():
@@ -90,9 +91,10 @@ class OptimizerTest(test.TestCase):
     self.assertAllClose([-14., -13.], self.evaluate(var0))
     self.assertAllClose([-6., -5.], self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testAdaptiveLearningRate(self):
     for dtype in _DATA_TYPES:
+      with self.test_session():
       var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
       var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
 
@@ -133,7 +135,7 @@ class OptimizerTest(test.TestCase):
       else:
         self.evaluate(opt_op)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testPrecomputedGradient(self):
     for dtype in _DATA_TYPES:
       with test_util.use_gpu():
@@ -157,7 +159,7 @@ class OptimizerTest(test.TestCase):
       self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
                           self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testNoGradients(self):
     for dtype in _DATA_TYPES:
       with test_util.use_gpu():
@@ -169,7 +171,7 @@ class OptimizerTest(test.TestCase):
         # var1 has no gradient
         sgd_op.minimize(loss, var_list=[var1])
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testNoGradientsForAnyVariables_Minimize(self):
     for dtype in _DATA_TYPES:
       with test_util.use_gpu():
@@ -182,7 +184,7 @@ class OptimizerTest(test.TestCase):
                                      'No gradients provided for any variable'):
           sgd_op.minimize(loss, var_list=[var0, var1])
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testNoGradientsForAnyVariables_ApplyGradients(self):
     for dtype in _DATA_TYPES:
       with test_util.use_gpu():
@@ -193,7 +195,7 @@ class OptimizerTest(test.TestCase):
                                      'No gradients provided for any variable'):
           sgd_op.apply_gradients([(None, var0), (None, var1)])
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testGradientsAsVariables(self):
     for i, dtype in enumerate(_DATA_TYPES):
       with test_util.use_gpu():
@@ -232,7 +234,7 @@ class OptimizerTest(test.TestCase):
     self.assertAllClose([-14., -13.], self.evaluate(var0))
     self.assertAllClose([-6., -5.], self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testComputeGradientsWithTensors(self):
     with test_util.use_gpu():
       x = ops.convert_to_tensor_v2(1.0)
@@ -250,7 +252,7 @@ class OptimizerTest(test.TestCase):
     with self.assertRaises(NotImplementedError):
       sgd.apply_gradients(grads_and_vars)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testConstraint(self):
     constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
     constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
@@ -274,14 +276,14 @@ class OptimizerTest(test.TestCase):
     self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
     self.assertAllClose([0., 0.], self.evaluate(var1))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testIterationWithoutMinimize(self):
     with test_util.use_gpu():
       sgd = gradient_descent.SGD(3.0)
       self.evaluate(sgd.iterations.initializer)
       self.assertEqual(0, self.evaluate(sgd.iterations))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testConfig(self):
     with test_util.use_gpu():
       opt = gradient_descent.SGD(learning_rate=1.0)
@@ -301,7 +303,7 @@ class OptimizerTest(test.TestCase):
       self.evaluate(variables.global_variables_initializer())
       self.assertEqual(self.evaluate(lr), self.evaluate(lr3))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testConfigWithLearningRateDecay(self):
     with test_util.use_gpu():
       var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
@@ -332,7 +334,7 @@ class OptimizerTest(test.TestCase):
           self.evaluate(opt._get_hyper('learning_rate')(step)),
           opt3._get_hyper('learning_rate')(step))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testGradClipValue(self):
     with test_util.use_gpu():
       var = resource_variable_ops.ResourceVariable([1.0, 2.0])
@@ -343,7 +345,7 @@ class OptimizerTest(test.TestCase):
       self.evaluate(opt_op)
       self.assertAllClose([0., 1.], self.evaluate(var))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testGradClipNorm(self):
     with test_util.use_gpu():
       var = resource_variable_ops.ResourceVariable([1.0])
@@ -354,17 +356,17 @@ class OptimizerTest(test.TestCase):
       self.evaluate(opt_op)
       self.assertAllClose([0.], self.evaluate(var))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testInvalidClipNorm(self):
     with self.assertRaisesRegexp(ValueError, '>= 0'):
       gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testInvalidKwargs(self):
     with self.assertRaisesRegexp(TypeError, 'Unexpected keyword argument'):
      gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testWeights(self):
     with test_util.use_gpu():
       opt1 = adam.Adam(learning_rate=1.0)
@@ -415,8 +417,9 @@ class OptimizerTest(test.TestCase):
     self.assertAllClose(
         self.evaluate([var3, var4]), self.evaluate([var5, var6]))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testGettingHyperParameters(self):
+    with self.test_session():
       opt = adam.Adam(learning_rate=1.0)
       var = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                    dtype=dtypes.float32)
@@ -439,8 +442,9 @@ class OptimizerTest(test.TestCase):
     with self.assertRaises(AttributeError):
       opt.not_an_attr += 3
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testGettingHyperParametersWithLrInConstructor(self):
+    with self.test_session():
       opt = gradient_descent.SGD(lr=3.0)
       var = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                    dtype=dtypes.float32)
@@ -449,9 +453,9 @@ class OptimizerTest(test.TestCase):
     self.evaluate(variables.global_variables_initializer())
     self.evaluate(opt_op)
 
-    self.assertTrue(isinstance(opt.lr, resource_variable_ops.ResourceVariable))
-    self.assertTrue(
-        isinstance(opt.learning_rate, resource_variable_ops.ResourceVariable))
+    self.assertIsInstance(opt.lr, resource_variable_ops.ResourceVariable)
+    self.assertIsInstance(opt.learning_rate,
+                          resource_variable_ops.ResourceVariable)
 
     lr = self.evaluate(opt.lr)
     self.assertEqual(3.0, lr)
@@ -464,7 +468,7 @@ class OptimizerTest(test.TestCase):
     lr = self.evaluate(opt.lr)
     self.assertEqual(4.0, lr)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testOptimizerWithKerasModel(self):
     a = input_layer.Input(shape=(3,), name='input_a')
     b = input_layer.Input(shape=(3,), name='input_b')
@@ -490,7 +494,7 @@ class OptimizerTest(test.TestCase):
         epochs=1,
         batch_size=5)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testOptimizerWithCallbacks(self):
     np.random.seed(1331)
     input_np = np.random.random((10, 3))
@@ -553,7 +557,7 @@ class OptimizerTest(test.TestCase):
     new_step_value = self.evaluate(global_step)
     self.assertEqual(new_step_value, init_step_value + 1)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testOptimizerWithCallableVarList(self):
     train_samples = 20
     input_dim = 1
@@ -615,13 +619,13 @@ class OptimizerTest(test.TestCase):
     self.assertLen(opt_vars, 5)
     self.assertEqual('outter/Adam/var_2/m:0', opt_vars[3].name)
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testEmptyVarList(self):
     opt = gradient_descent.SGD(1.)
     opt.minimize(lambda: constant_op.constant(1.), [])
     opt.apply_gradients([])
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testAggregationTrue(self):
     # Test that all_reduce_sum_gradients=True works without distributed
     # strategy.
@@ -636,7 +640,7 @@ class OptimizerTest(test.TestCase):
       self.evaluate(opt_op)
      self.assertAllClose([0.7, 1.7], self.evaluate(var))
 
-  @test_util.run_in_graph_and_eager_modes
+  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testAggregationFalse(self):
     # Test that all_reduce_sum_gradients=False works without distributed
     # strategy.
@@ -30,6 +30,7 @@ from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
+from tensorflow.python.keras import combinations
 from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
 from tensorflow.python.keras.optimizer_v2 import rmsprop
 from tensorflow.python.ops import embedding_ops
@@ -547,11 +548,11 @@ class RMSpropOptimizerTest(test.TestCase):
         self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
 
 
+@combinations.generate(combinations.combine(mode=["graph", "eager"]))
 class SlotColocationTest(test.TestCase, parameterized.TestCase):
 
   @parameterized.parameters([True, False])
   @test_util.run_gpu_only
-  @test_util.run_in_graph_and_eager_modes
   def testRunMinimizeOnGPUForCPUVariables(self, use_resource):
     with ops.device("/device:CPU:0"):
       if use_resource:
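A detail worth noting from this last hunk: a class-level @combinations.generate composes with a method-level @parameterized.parameters, so testRunMinimizeOnGPUForCPUVariables still receives its use_resource argument while the graph/eager axis moves to the class decorator and @test_util.run_gpu_only stays in place.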