Replace unnecessary () in run_in_graph_and_eager_modes().

PiperOrigin-RevId: 201652888
Tom Hennigan authored on 2018-06-22 01:46:03 -07:00; committed by TensorFlower Gardener
parent 9682324b40
commit 945d1a77ae
86 changed files with 727 additions and 727 deletions
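
For context on why the parentheses are unnecessary: run_in_graph_and_eager_modes is written so it can be applied either bare (@run_in_graph_and_eager_modes) or called first (@run_in_graph_and_eager_modes(...)). Below is a minimal sketch of that dual-use decorator pattern; it is not the actual TensorFlow implementation, and the wrapper body and parameter names are illustrative only.

# Sketch of a decorator that works both as `@deco` and `@deco()`.
import functools

def run_in_graph_and_eager_modes(func=None, **config):
  def decorator(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
      # A real implementation would run `f` once under a graph context
      # and once under an eager context; this sketch just calls it.
      return f(self, *args, **kwargs)
    return wrapper
  if func is not None:
    # Bare form: Python passed the test method itself, so decorate it now.
    return decorator(func)
  # Called form: return the decorator for Python to apply to the method.
  return decorator

With this pattern both spellings decorate a test identically, so the bare form is preferred and the trailing () is dropped throughout the diff below.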


@@ -32,7 +32,7 @@ from tensorflow.python.training.checkpointable import util as checkpointable_uti
class UniqueNameTrackerTests(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNames(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
@@ -65,7 +65,7 @@ class UniqueNameTrackerTests(test.TestCase):
self.assertEqual(4., self.evaluate(restore_slots.x_1_1))
self.assertEqual(5., self.evaluate(restore_slots.y))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testExample(self):
class SlotManager(checkpointable.Checkpointable):
@@ -97,7 +97,7 @@ class UniqueNameTrackerTests(test.TestCase):
dependency_names,
["x", "x_1", "y", "slot_manager", "slotdeps", "save_counter"])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLayers(self):
tracker = containers.UniqueNameTracker()
tracker.track(layers.Dense(3), "dense")


@@ -73,7 +73,7 @@ class OnlyOneDep(checkpointable.Checkpointable):
class SplitTests(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testSaveRestoreSplitDep(self):
save_checkpoint = checkpointable_utils.Checkpoint(
dep=SaveTensorSlicesAsDeps())


@@ -768,7 +768,7 @@ class CudnnRNNTestSaveRestoreCheckpointable(test_util.TensorFlowTestCase):
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLSTMCheckpointableSingleLayer(self):
num_units = 2
direction = CUDNN_RNN_UNIDIRECTION
@@ -781,7 +781,7 @@ class CudnnRNNTestSaveRestoreCheckpointable(test_util.TensorFlowTestCase):
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGRUCheckpointableSingleLayer(self):
num_units = 2
direction = CUDNN_RNN_UNIDIRECTION
@@ -826,7 +826,7 @@ class CudnnRNNTestSaveRestoreCheckpointable(test_util.TensorFlowTestCase):
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCudnnCompatibleLSTMCheckpointablMultiLayer(self):
num_units = 2
num_layers = 3


@@ -63,7 +63,7 @@ class ScanDatasetTest(test.TestCase):
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testFibonacci(self):
iterator = dataset_ops.Dataset.from_tensors(1).repeat(None).apply(
scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1]))


@@ -38,7 +38,7 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testAggregateTensors(self):
t0 = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]])
@@ -46,7 +46,7 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
result = cross_tower_utils.aggregate_tensors_or_indexed_slices([t0, t1])
self._assert_values_equal(total, result)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testAggregateIndexedSlices(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
@@ -57,7 +57,7 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
self.assertIsInstance(result, ops.IndexedSlices)
self._assert_values_equal(total, result)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testDivideTensor(self):
t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
n = 2
@@ -65,7 +65,7 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
result = cross_tower_utils.divide_by_n_tensors_or_indexed_slices(t, n)
self._assert_values_equal(expected, result)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testDivideIndexedSlices(self):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
@@ -75,13 +75,13 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
self.assertIsInstance(result, ops.IndexedSlices)
self._assert_values_equal(expected, result)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testIsIndexedSlices(self):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
self.assertTrue(cross_tower_utils.contains_indexed_slices(t))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testContainsIndexedSlices_List(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
@@ -89,7 +89,7 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
self.assertTrue(cross_tower_utils.contains_indexed_slices([t0, t1]))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testContainsIndexedSlices_Tuple(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
@@ -97,7 +97,7 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
self.assertTrue(cross_tower_utils.contains_indexed_slices((t0, t1)))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testContainsIndexedSlices_PerDevice(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
@@ -106,7 +106,7 @@ class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
per_device = value_lib.PerDevice({"/gpu:0": t0, "/cpu:0": t1})
self.assertTrue(cross_tower_utils.contains_indexed_slices(per_device))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testContainsIndexedSlices_PerDeviceMapOutput(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))


@@ -83,13 +83,13 @@ class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase):
self.skipTest("Not GPU test")
self.assertEqual(2, self._get_distribution_strategy().num_towers)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCallAndMergeExceptions(self):
if not GPU_TEST:
self.skipTest("Not GPU test")
self._test_call_and_merge_exceptions(self._get_distribution_strategy())
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testRunRegroupError(self):
def run_fn(device_id):
@@ -101,7 +101,7 @@ class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase):
with dist.scope(), self.assertRaises(AssertionError):
dist.call_for_each_tower(run_fn, dist.worker_device_index)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testReduceToCpu(self):
if not GPU_TEST:
self.skipTest("Not GPU test")


@@ -47,7 +47,7 @@ class MirroredOneCPUDistributionTest(strategy_test_lib.DistributionTestBase):
def testTowerId(self):
self._test_tower_id(self._get_distribution_strategy())
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCallAndMergeExceptions(self):
self._test_call_and_merge_exceptions(self._get_distribution_strategy())


@@ -44,7 +44,7 @@ class OneDeviceStrategyTest(strategy_test_lib.DistributionTestBase):
def testTowerId(self):
self._test_tower_id(self._get_distribution_strategy())
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCallAndMergeExceptions(self):
self._test_call_and_merge_exceptions(self._get_distribution_strategy())


@@ -46,7 +46,7 @@ class CanonicalizeVariableNameTest(test.TestCase):
class SharedVariableCreatorTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testSharedVariable(self):
shared_variable_store = {}


@@ -82,7 +82,7 @@ class DistributedValuesTest(test.TestCase):
class DistributedDelegateTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGetAttr(self):
with ops.device("/device:CPU:0"):
@@ -97,7 +97,7 @@ class DistributedDelegateTest(test.TestCase):
with self.assertRaises(AttributeError):
_ = v.y
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testOperatorOverride(self):
with ops.device("/device:CPU:0"):
v = values.DistributedDelegate({"/device:CPU:0": 7, "/device:GPU:0": 8})
@@ -363,7 +363,7 @@ class PerDeviceDatasetTest(test.TestCase):
self._test_iterator_no_prefetch(devices, dataset, expected_values)
self._test_iterator_with_prefetch(devices, dataset, expected_values)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testOneDevice(self):
devices = ["/device:CPU:0"]
dataset = dataset_ops.Dataset.range(10)


@@ -31,7 +31,7 @@ from tensorflow.python.platform import test
class FillTriangularBijectorTest(test.TestCase):
"""Tests the correctness of the FillTriangular bijector."""
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijector(self):
x = np.float32(np.array([1., 2., 3.]))
y = np.float32(np.array([[3., 0.],
@@ -51,7 +51,7 @@ class FillTriangularBijectorTest(test.TestCase):
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllClose(ildj, 0.)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testShape(self):
x_shape = tensor_shape.TensorShape([5, 4, 6])
y_shape = tensor_shape.TensorShape([5, 4, 3, 3])
@@ -76,7 +76,7 @@ class FillTriangularBijectorTest(test.TestCase):
b.inverse_event_shape_tensor(y_shape.as_list()))
self.assertAllEqual(x_shape_tensor, x_shape.as_list())
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testShapeError(self):
b = bijectors.FillTriangular(validate_args=True)


@@ -29,7 +29,7 @@ from tensorflow.python.platform import test
class MatrixInverseTriLBijectorTest(test.TestCase):
"""Tests the correctness of the Y = inv(tril) transformation."""
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testComputesCorrectValues(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
self.assertEqual("matrix_inverse_tril", inv.name)
@@ -51,7 +51,7 @@ class MatrixInverseTriLBijectorTest(test.TestCase):
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testOneByOneMatrix(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[5.]], dtype=np.float32)
@@ -70,7 +70,7 @@ class MatrixInverseTriLBijectorTest(test.TestCase):
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testZeroByZeroMatrix(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.eye(0, dtype=np.float32)
@@ -89,7 +89,7 @@ class MatrixInverseTriLBijectorTest(test.TestCase):
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBatch(self):
# Test batch computation with input shape (2, 1, 2, 2), i.e. batch shape
# (2, 1).
@@ -114,7 +114,7 @@ class MatrixInverseTriLBijectorTest(test.TestCase):
self.assertAllClose(expected_fldj_, fldj_, atol=0., rtol=1e-3)
self.assertAllClose(-expected_fldj_, ildj_, atol=0., rtol=1e-3)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testErrorOnInputRankTooLow(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([0.1], dtype=np.float32)
@@ -149,7 +149,7 @@ class MatrixInverseTriLBijectorTest(test.TestCase):
## square_error_msg):
## inv.inverse_log_det_jacobian(x_, event_ndims=2).eval()
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testErrorOnInputNotLowerTriangular(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[1., 2.],
@@ -169,7 +169,7 @@ class MatrixInverseTriLBijectorTest(test.TestCase):
triangular_error_msg):
inv.inverse_log_det_jacobian(x_, event_ndims=2).eval()
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testErrorOnInputSingular(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[1., 0.],


@@ -36,7 +36,7 @@ class OrderedBijectorTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijectorVector(self):
with self.test_session():
ordered = Ordered()
@@ -82,7 +82,7 @@ class OrderedBijectorTest(test.TestCase):
atol=0.,
rtol=1e-7)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testShapeGetters(self):
with self.test_session():
x = tensor_shape.TensorShape([4])


@@ -46,7 +46,7 @@ class ScaleTriLBijectorTest(test.TestCase):
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testInvertible(self):
# Generate random inputs from an unconstrained space, with


@@ -40,7 +40,7 @@ class SoftsignBijectorTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijectorBounds(self):
bijector = Softsign(validate_args=True)
with self.test_session():
@@ -54,7 +54,7 @@ class SoftsignBijectorTest(test.TestCase):
with self.assertRaisesOpError("less than 1"):
bijector.inverse_log_det_jacobian(3., event_ndims=0).eval()
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijectorForwardInverse(self):
bijector = Softsign(validate_args=True)
self.assertEqual("softsign", bijector.name)
@@ -64,7 +64,7 @@ class SoftsignBijectorTest(test.TestCase):
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijectorLogDetJacobianEventDimsZero(self):
bijector = Softsign(validate_args=True)
y = self._rng.rand(2, 10)
@@ -74,7 +74,7 @@ class SoftsignBijectorTest(test.TestCase):
self.assertAllClose(ildj, self.evaluate(
bijector.inverse_log_det_jacobian(y, event_ndims=0)))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijectorForwardInverseEventDimsOne(self):
bijector = Softsign(validate_args=True)
self.assertEqual("softsign", bijector.name)
@@ -83,7 +83,7 @@ class SoftsignBijectorTest(test.TestCase):
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijectorLogDetJacobianEventDimsOne(self):
bijector = Softsign(validate_args=True)
y = self._rng.rand(2, 10)


@@ -31,7 +31,7 @@ class TransformDiagonalBijectorTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBijector(self):
x = np.float32(np.random.randn(3, 4, 4))


@@ -544,7 +544,7 @@ class PadDynamicTest(_PadTest, test.TestCase):
class TestMoveDimension(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_move_dimension_static_shape(self):
x = random_ops.random_normal(shape=[200, 30, 4, 1, 6])
@@ -561,7 +561,7 @@ class TestMoveDimension(test.TestCase):
x_perm = distribution_util.move_dimension(x, 4, 2)
self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 6, 4, 1])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_move_dimension_dynamic_shape(self):
x_ = random_ops.random_normal(shape=[200, 30, 4, 1, 6])


@@ -206,7 +206,7 @@ class MetricsTest(test.TestCase):
sess.run(accumulate, feed_dict={p: 7})
self.assertAllEqual(m.result().eval(), 7)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGraphAndEagerTensor(self):
m = metrics.Mean()
inputs = ops.convert_to_tensor([1.0, 2.0])
@@ -254,7 +254,7 @@ class MetricsTest(test.TestCase):
self.assertAllEqual(m2.result().eval(), 2.0)
self.assertAllEqual(m1.result().eval(), 1.0)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")


@@ -126,7 +126,7 @@ class NetworkTest(test.TestCase):
self.assertAllEqual([[17.0], [34.0]], self.evaluate(result))
# TODO(allenl): This test creates garbage in some Python versions
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNetworkSaveRestoreAlreadyBuilt(self):
net = MyNetwork(name="abcd")
with self.assertRaisesRegexp(
@@ -138,7 +138,7 @@ class NetworkTest(test.TestCase):
self._save_modify_load_network_built(net, global_step=10)
# TODO(allenl): This test creates garbage in some Python versions
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testSaveRestoreDefaultGlobalStep(self):
net = MyNetwork(name="abcd")
net(constant_op.constant([[2.0]]))
@@ -149,7 +149,7 @@ class NetworkTest(test.TestCase):
self.assertIn("abcd-4242", save_path)
# TODO(allenl): This test creates garbage in some Python versions
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNetworkSaveAndRestoreIntoUnbuilt(self):
save_dir = self.get_temp_dir()
net1 = MyNetwork()
@@ -166,7 +166,7 @@ class NetworkTest(test.TestCase):
self.assertAllEqual(self.evaluate(net1.variables[0]),
self.evaluate(net2.variables[0]))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNetworkMatchesLayerVariableNames(self):
zero = constant_op.constant([[0.]])
layer_one = core.Dense(1, use_bias=False)
@@ -193,7 +193,7 @@ class NetworkTest(test.TestCase):
self.assertEqual("two_layer_net/" + layer_two.variables[0].name,
net.second.variables[0].name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLoadIntoUnbuiltSharedLayer(self):
class Owner(network.Network):
@@ -272,7 +272,7 @@ class NetworkTest(test.TestCase):
network.restore_network_checkpoint(
load_into, save_path, map_func=_restore_map_func)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testRestoreIntoSubNetwork(self):
class Parent(network.Network):
@@ -327,7 +327,7 @@ class NetworkTest(test.TestCase):
# The checkpoint is incompatible.
network.restore_network_checkpoint(save_into_parent, checkpoint)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCustomMapCollisionErrors(self):
class Parent(network.Network):
@@ -372,7 +372,7 @@ class NetworkTest(test.TestCase):
network.restore_network_checkpoint(
loader, checkpoint, map_func=lambda n: "foo")
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testDefaultMapCollisionErrors(self):
one = constant_op.constant([[1.]])
@@ -571,7 +571,7 @@ class NetworkTest(test.TestCase):
expected_start="my_network_1/dense/",
actual=outside_net_after.trainable_weights[0].name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testVariableScopeStripping(self):
with variable_scope.variable_scope("scope1"):
with variable_scope.variable_scope("scope2"):
@@ -596,7 +596,7 @@ class NetworkTest(test.TestCase):
self.assertAllEqual([[42.]],
self.evaluate(restore_net.variables[0]))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLayerNamesRespected(self):
class ParentNetwork(network.Network):
@@ -677,7 +677,7 @@ class NetworkTest(test.TestCase):
self.assertStartsWith(expected_start="my_network_1/dense/",
actual=net2.trainable_weights[0].name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNestableAnonymous(self):
# The case where no explicit names are specified. We make up unique names,
@@ -721,7 +721,7 @@ class NetworkTest(test.TestCase):
self.assertEqual("my_network", net2.first.name)
self.assertEqual("my_network_1", net2.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNestableExplicit(self):
# We have explicit network names and everything is globally unique.
@@ -750,7 +750,7 @@ class NetworkTest(test.TestCase):
self.assertEqual("first_unique_child_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLayerNetworkNameInteractions(self):
# Same base name as core.Dense; Networks and non-Network Layers with the
@@ -801,7 +801,7 @@ class NetworkTest(test.TestCase):
actual=net.trainable_weights[4].name)
self.assertEqual("mixed_layer_network", net.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNestableExplicitCollisions(self):
# We have explicit network names and they are unique within the layer
@@ -831,7 +831,7 @@ class NetworkTest(test.TestCase):
self.assertEqual("nonunique_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNestableExplicitWithAnonymousParent(self):
# A parent network is instantiated multiple times with explicitly named
@@ -873,7 +873,7 @@ class NetworkTest(test.TestCase):
self.assertEqual("first_unique_child_name", net2.first.name)
self.assertEqual("second_unique_child_name", net2.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNestableExplicitSameLayerCollisions(self):
# We have explicit network names and they are _not_ unique within the layer
@@ -891,7 +891,7 @@ class NetworkTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, "nonunique_name"):
ParentNetwork()
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testAnonymousVariableSharing(self):
# Two "owned" Networks
@@ -989,7 +989,7 @@ class NetworkTest(test.TestCase):
self.assertEqual("my_network", net4.first.name)
self.assertEqual("my_network", net4.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testRecursiveLayerRenaming(self):
core.Dense(1) # Under default Layer naming, would change subsequent names.
@@ -1041,7 +1041,7 @@ class NetworkTest(test.TestCase):
self.assertEqual("dense", net.second.first.name)
self.assertEqual("dense_1", net.second.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCallInDifferentOrderThanConstruct(self):
shared_network = MyNetwork()
@@ -1091,7 +1091,7 @@ class NetworkTest(test.TestCase):
self.assertTrue(net2.first is net1.first)
self.assertEqual("my_network", net2.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLayerCallInDifferentOrderThanConstruct(self):
# Same idea as testCallInDifferentOrderThanConstruct, but this time with a
# non-Network Layer shared between two Networks rather than a
@@ -1144,7 +1144,7 @@ class NetworkTest(test.TestCase):
self.assertTrue(net2.first is net1.first)
self.assertEqual("dense", net2.second.name)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLayerAlreadyBuilt(self):
one = constant_op.constant([[1.]])
core.Dense(1, use_bias=False) # pre-built layers use global naming


@@ -34,7 +34,7 @@ from tensorflow.python.platform import tf_logging as logging
class CriticalSectionTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCreateCriticalSection(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
@@ -53,7 +53,7 @@ class CriticalSectionTest(test.TestCase):
self.assertAllClose([2.0 * i for i in range(num_concurrent)],
sorted(r_value))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCriticalSectionWithControlFlow(self):
for outer_cond in [False, True]:
for inner_cond in [False, True]:
@@ -109,7 +109,7 @@ class CriticalSectionTest(test.TestCase):
with self.assertRaisesOpError("Error"):
self.evaluate(r)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testCreateCriticalSectionFnReturnsOp(self):
cs = critical_section_ops.CriticalSection(shared_name="cs")
v = resource_variable_ops.ResourceVariable(0.0, name="v")
@@ -332,7 +332,7 @@ class CriticalSectionTest(test.TestCase):
self.evaluate(v.initializer)
self.assertEqual(10, self.evaluate(out))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testInsideFunction(self):
cs = critical_section_ops.CriticalSection()
v = resource_variable_ops.ResourceVariable(1)


@@ -1397,7 +1397,7 @@ class KeyValueTensorInitializerTest(test.TestCase):
class IndexTableFromTensor(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
@@ -1670,7 +1670,7 @@ class InitializeTableFromFileOpTest(test.TestCase):
f.write("\n".join(values) + "\n")
return vocabulary_file
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1


@@ -34,7 +34,7 @@ def _GetExampleIter(inputs):
class FixedLossScaleManagerTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_basic(self):
itr = _GetExampleIter([True] * 10 + [False] * 10)
@@ -84,13 +84,13 @@ class ExponentialUpdateLossScaleManagerTest(test.TestCase):
actual_outputs.append(self.evaluate(lsm.get_loss_scale()))
self.assertEqual(actual_outputs, expected_outputs)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_increase_every_n_steps(self):
inputs = [True] * 6
expected_outputs = [1, 2, 2, 4, 4, 8]
self._test_helper(inputs, expected_outputs)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_keep_increasing_until_capped(self):
init_loss_scale = np.finfo(np.float32).max / 4 + 10
max_float = np.finfo(np.float32).max
@@ -104,7 +104,7 @@ class ExponentialUpdateLossScaleManagerTest(test.TestCase):
self._test_helper(inputs, expected_outputs, init_loss_scale)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_decrease_every_n_steps(self):
inputs = [False] * 6
init_loss_scale = 1024
@@ -112,7 +112,7 @@ class ExponentialUpdateLossScaleManagerTest(test.TestCase):
self._test_helper(inputs, expected_outputs, init_loss_scale)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_keep_decreasing_until_one(self):
inputs = [False] * 10
init_loss_scale = 16
@@ -120,19 +120,19 @@ class ExponentialUpdateLossScaleManagerTest(test.TestCase):
self._test_helper(inputs, expected_outputs, init_loss_scale)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_incr_bad_step_clear_good_step(self):
inputs = [True, True, True, False, True]
expected_outputs = [1, 2, 2, 2, 2]
self._test_helper(inputs, expected_outputs)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_incr_good_step_does_not_clear_bad_step(self):
inputs = [True, True, True, False, True, False]
expected_outputs = [1, 2, 2, 2, 2, 1]
self._test_helper(inputs, expected_outputs)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_trigger_loss_scale_update_each_step(self):
"""Test when incr_every_n_step and decr_every_n_nan_or_inf is 1."""
init_loss_scale = 1
@@ -145,7 +145,7 @@ class ExponentialUpdateLossScaleManagerTest(test.TestCase):
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_each_step(self):
init_loss_scale = 1
incr_every_n_step = 1
@@ -156,7 +156,7 @@ class ExponentialUpdateLossScaleManagerTest(test.TestCase):
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_incr_every_2steps(self):
init_loss_scale = 32
incr_every_n_step = 2
@@ -167,7 +167,7 @@ class ExponentialUpdateLossScaleManagerTest(test.TestCase):
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_random_mix_good_and_bad_gradients(self):
init_loss_scale = 4
inputs = [


@@ -54,7 +54,7 @@ class LossScaleOptimizerTest(test.TestCase):
opt = loss_scale_opt_fn(opt)
return x, loss, opt
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_float16_underflow_without_loss_scale(self):
lr = 1
init_val = 1.
@@ -73,7 +73,7 @@ class LossScaleOptimizerTest(test.TestCase):
rtol=0,
atol=min(symbolic_update, 1e-6))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_float16_with_loss_scale(self):
lr = 1.
init_val = 1.
@@ -95,7 +95,7 @@ class LossScaleOptimizerTest(test.TestCase):
rtol=0,
atol=min(expected_update, 1e-6))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_with_loss_scale(self):
lr = 1
init_val = 1.
@@ -115,7 +115,7 @@ class LossScaleOptimizerTest(test.TestCase):
# Gradients aren't applied.
self.assertAllClose(init_val, self.evaluate(x), rtol=0, atol=1e-6)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_compute_gradients_without_loss_scale(self):
lr = 1
init_val = 1.
@@ -127,7 +127,7 @@ class LossScaleOptimizerTest(test.TestCase):
g_v = self.evaluate(grads_and_vars[0][0])
self.assertAllClose(g_v, 0)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_apply_gradients(self):
x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
@@ -155,7 +155,7 @@ class LossScaleOptimizerTest(test.TestCase):
actual_output.append(self.evaluate(x))
self.assertAllClose(expected_output, actual_output)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_apply_gradients_loss_scale_is_updated(self):
class SimpleLossScaleManager(lsm_lib.LossScaleManager):


@@ -226,7 +226,7 @@ class CheckpointingTests(test.TestCase):
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
@@ -347,7 +347,7 @@ class CheckpointingTests(test.TestCase):
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
@@ -381,7 +381,7 @@ class CheckpointingTests(test.TestCase):
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
@@ -453,7 +453,7 @@ class CheckpointingTests(test.TestCase):
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
@@ -616,7 +616,7 @@ class CheckpointingTests(test.TestCase):
class TemplateTests(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore(self):
def _templated():
@@ -712,7 +712,7 @@ class CheckpointCompatibilityTests(test.TestCase):
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):


@@ -35,7 +35,7 @@ from tensorflow.python.platform import test
class OptimizerTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@@ -113,7 +113,7 @@ class OptimizerTest(test.TestCase):
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
var1.eval())
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNoVariables(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# pylint: disable=cell-var-from-loop
@@ -128,7 +128,7 @@ class OptimizerTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'No.*variables'):
sgd_op.minimize(loss)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@@ -146,7 +146,7 @@ class OptimizerTest(test.TestCase):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@@ -162,7 +162,7 @@ class OptimizerTest(test.TestCase):
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@@ -176,7 +176,7 @@ class OptimizerTest(test.TestCase):
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientsAsVariables(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@@ -216,7 +216,7 @@ class OptimizerTest(test.TestCase):
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testComputeGradientsWithTensors(self):
x = ops.convert_to_tensor(1.0)
def f():


@@ -443,7 +443,7 @@ class RNNCellTest(test.TestCase):
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) < 1e-6)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testWrapperCheckpointing(self):
for wrapper_type in [
rnn_cell_impl.DropoutWrapper,


@@ -921,7 +921,7 @@ class LSTMTest(test.TestCase):
# Smoke test, this should not raise an error
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithTupleStates(self):
num_units = 3
input_size = 5
@@ -997,7 +997,7 @@ class LSTMTest(test.TestCase):
self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithNestedTupleStates(self):
num_units = 3
input_size = 5
@@ -1285,7 +1285,7 @@ class LSTMTest(test.TestCase):
"Comparing individual variable gradients iteration %d" % i)
self.assertAllEqual(a, b)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testDynamicEquivalentToStaticRNN(self):
self._testDynamicEquivalentToStaticRNN(use_sequence_length=False)
self._testDynamicEquivalentToStaticRNN(use_sequence_length=False)


@@ -30,7 +30,7 @@ from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testRandomSeed(self):
zero_t = constant_op.constant(0, dtype=dtypes.int64, name='zero')
one_t = constant_op.constant(1, dtype=dtypes.int64, name='one')


@@ -46,7 +46,7 @@ from tensorflow.python.training import training
class BackpropTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testAggregateGradients(self):
def fn(x):
@@ -251,7 +251,7 @@ class BackpropTest(test.TestCase):
g, = backprop.gradients_function(loss, [0])(logits, labels)
self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientWithinTapeBlock(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
@@ -265,7 +265,7 @@ class BackpropTest(test.TestCase):
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNestedSelfContexts(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
@@ -435,7 +435,7 @@ class BackpropTest(test.TestCase):
self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=False) as g:
x = constant_op.constant(3.0)
@@ -445,7 +445,7 @@ class BackpropTest(test.TestCase):
self.assertEqual(self.evaluate(grad), [2.0, 2.0])
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testPersistentGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
@@ -459,7 +459,7 @@ class BackpropTest(test.TestCase):
self.assertEqual(self.evaluate(grad), [3.0, 11.0])
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientTapeStructure(self):
with backprop.GradientTape(persistent=True) as g:
# Using different constant values because constant tensors are
@@ -482,7 +482,7 @@ class BackpropTest(test.TestCase):
[1.0, {'x2': 2.0, 'x3': 3.0}])
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientTape(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
@@ -497,7 +497,7 @@ class BackpropTest(test.TestCase):
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientTapeWithCond(self):
x = constant_op.constant(3.0)
@@ -518,7 +518,7 @@ class BackpropTest(test.TestCase):
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 6.0)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientTapeWithWhileLoop(self):
i = constant_op.constant(1)
x = constant_op.constant(2.)
@@ -553,7 +553,7 @@ class BackpropTest(test.TestCase):
g.gradient(y, [x])
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testPersistentTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
@@ -567,7 +567,7 @@ class BackpropTest(test.TestCase):
del g
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testHigherOrderGradient(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
@@ -584,7 +584,7 @@ class BackpropTest(test.TestCase):
del g
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testPersistentNestedTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
@@ -605,7 +605,7 @@ class BackpropTest(test.TestCase):
del g
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testGradientTapeVariable(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
self.evaluate(v.initializer)
@@ -615,7 +615,7 @@ class BackpropTest(test.TestCase):
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.assert_no_new_tensors
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNestedGradients(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as g:


@@ -2873,7 +2873,7 @@ class EstimatorHookOrderingTest(test.TestCase):
class EstimatorIntegrationTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_complete_flow_with_a_simple_linear_model(self):
def _model_fn(features, labels, mode):


@@ -2607,7 +2607,7 @@ class _LinearModelTest(test.TestCase):
class InputLayerTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_retrieving_input(self):
features = {'a': [0.]}
input_layer = InputLayer(fc.numeric_column('a'))


@@ -1681,7 +1681,7 @@ class ControlDependenciesTest(test_util.TensorFlowTestCase):
# e should be dominated by c.
self.assertEqual(e.op.control_inputs, [])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
future.calls += 1
@@ -1866,7 +1866,7 @@ class ControlDependenciesTest(test_util.TensorFlowTestCase):
class OpScopeTest(test_util.TensorFlowTestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testNames(self):
with ops.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
@@ -1897,7 +1897,7 @@ class OpScopeTest(test_util.TensorFlowTestCase):
with ops.name_scope("a//b/c") as foo10:
self.assertEqual("a//b/c/", foo10)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default") as scope:
self.assertEqual(scope, "default/")


@@ -26,7 +26,7 @@ from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testRandomSeed(self):
test_cases = [
# Each test case is a tuple with input to get_seed:


@@ -941,7 +941,7 @@ class ConstantValueTest(test.TestCase):
class ConstantValueAsShapeTest(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testConstant(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = constant_op.constant(np_val)
@@ -954,13 +954,13 @@ class ConstantValueAsShapeTest(test.TestCase):
tensor_shape.TensorShape([]),
tensor_util.constant_value_as_shape(tf_val))
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testShape(self):
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.TensorShape([1, 2, 3]), c_val)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testMinusOneBecomesNone(self):
tf_val = constant_op.constant([-1, 1, -1], shape=[3])
c_val = tensor_util.constant_value_as_shape(tf_val)


@@ -587,7 +587,7 @@ class SubclassedModel(training.Model):
class TestWeightSavingAndLoadingTFFormat(test.TestCase):
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_tensorflow_format_overwrite(self):
with self.test_session() as session:
model = SubclassedModel()
@@ -676,7 +676,7 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase):
restore_on_create_y = self.evaluate(restore_on_create_y_tensor)
self.assertAllClose(ref_y, restore_on_create_y)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
@@ -686,7 +686,7 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase):
self._weight_loading_test_template(_make_graph_model)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model(self):
self._weight_loading_test_template(SubclassedModel)
@@ -720,7 +720,7 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase):
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
@@ -740,7 +740,7 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase):
_save_graph_model, _restore_graph_model,
_restore_init_fn)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_no_weight_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
@@ -761,7 +761,7 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase):
_save_graph_model, _restore_graph_model,
_restore_init_fn)
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):


@@ -33,7 +33,7 @@ class TestSequential(test.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`.
"""
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_basic_methods(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
@@ -44,7 +44,7 @@ class TestSequential(test.TestCase):
self.assertEqual(len(model.weights), 2 * 2)
self.assertEqual(model.get_layer(name='dp').name, 'dp')
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_sequential_pop(self):
num_hidden = 5
input_dim = 3
@@ -77,7 +77,7 @@ class TestSequential(test.TestCase):
with self.assertRaises(TypeError):
model.pop()
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_sequential_deferred_build_with_np_arrays(self):
num_hidden = 5
input_dim = 3
@@ -102,7 +102,7 @@ class TestSequential(test.TestCase):
[None, num_classes])
self.assertEqual(len(model.weights), 2 * 2)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_sequential_deferred_build_with_dataset_iterators(self):
if not context.executing_eagerly():
# TODO(psv/fchollet): Add support for this use case in graph mode.
@@ -136,7 +136,7 @@ class TestSequential(test.TestCase):
[None, num_classes])
self.assertEqual(len(model.weights), 2 * 2)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_invalid_use_cases(self):
# Added objects must be layer instances
with self.assertRaises(TypeError):
@@ -160,7 +160,7 @@ class TestSequential(test.TestCase):
model.add(keras.layers.Dense(1, input_dim=1))
model.add(MyLayer())
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_nested_sequential_trainability(self):
input_dim = 20
num_units = 10


@@ -922,7 +922,7 @@ class DeferredModeTest(test.TestCase):
self.assertEqual(repr(x),
'<DeferredTensor \'x\' shape=(?, 2) dtype=float32>')
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testSimpleNetworkBuilding(self):
inputs = keras.engine.Input(shape=(32,))
if context.executing_eagerly():
@@ -947,7 +947,7 @@ class DeferredModeTest(test.TestCase):
outputs = network(inputs)
self.assertEqual(outputs.shape.as_list(), [10, 4])
-@test_util.run_in_graph_and_eager_modes()
+@test_util.run_in_graph_and_eager_modes
def testMultiIONetworkbuilding(self):
input_a = keras.engine.Input(shape=(32,))
input_b = keras.engine.Input(shape=(16,))


@@ -647,7 +647,7 @@ class LossWeightingTest(test.TestCase):
class CorrectnessTest(test.TestCase):
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_loss_correctness(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
@@ -668,7 +668,7 @@ class CorrectnessTest(test.TestCase):
self.assertEqual(
np.around(history.history['loss'][-1], decimals=4), 0.6173)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_metrics_correctness(self):
model = keras.Sequential()
model.add(keras.layers.Dense(3,
@@ -689,7 +689,7 @@ class CorrectnessTest(test.TestCase):
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 0.)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_loss_correctness_with_iterator(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
@@ -712,7 +712,7 @@ class CorrectnessTest(test.TestCase):
history = model.fit(iterator, epochs=1, steps_per_epoch=10)
self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_metrics_correctness_with_iterator(self):
model = keras.Sequential()
model.add(


@@ -1696,7 +1696,7 @@ class TestTrainingWithDataTensors(test.TestCase):
model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_metric_names_are_identical_in_graph_and_eager(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
@@ -1723,7 +1723,7 @@ class TestTrainingWithDataTensors(test.TestCase):
class TestTrainingWithDatasetIterators(test.TestCase):
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_training_and_eval_methods_on_iterators_single_io(self):
with self.test_session():
x = keras.layers.Input(shape=(3,), name='input')
@@ -1813,7 +1813,7 @@ class TestTrainingWithDatasetIterators(test.TestCase):
ops.get_default_graph().finalize()
model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_iterators_running_out_of_data(self):
with self.test_session():
x = keras.layers.Input(shape=(3,), name='input')
@@ -1867,7 +1867,7 @@ class TestTrainingWithDataset(test.TestCase):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_training_and_eval_methods_on_dataset(self):
with self.test_session():
x = keras.layers.Input(shape=(3,), name='input')


@@ -45,7 +45,7 @@ class Convolution1DTest(test.TestCase):
kwargs=test_kwargs,
input_shape=(num_samples, length, stack_size))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_conv1d(self):
kwargs = {
'filters': 2,
@@ -117,7 +117,7 @@ class Conv2DTest(test.TestCase):
kwargs=test_kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_conv2d(self):
kwargs = {
'filters': 2,
@@ -192,7 +192,7 @@ class Conv2DTransposeTest(test.TestCase):
kwargs=test_kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_conv2dtranspose(self):
kwargs = {
'filters': 2,
@@ -258,7 +258,7 @@ class Conv3DTransposeTest(test.TestCase):
kwargs=test_kwargs,
input_shape=(num_samples, depth, num_row, num_col, stack_size))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_conv3dtranspose(self):
kwargs = {
'filters': 2,
@@ -322,7 +322,7 @@ class SeparableConv1DTest(test.TestCase):
kwargs=test_kwargs,
input_shape=(num_samples, length, stack_size))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_separable_conv1d(self):
kwargs = {
'filters': 2,
@@ -398,7 +398,7 @@ class SeparableConv2DTest(test.TestCase):
kwargs=test_kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_separable_conv2d(self):
kwargs = {
'filters': 2,
@@ -477,7 +477,7 @@ class Conv3DTest(test.TestCase):
kwargs=test_kwargs,
input_shape=(num_samples, depth, num_row, num_col, stack_size))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_conv3d(self):
kwargs = {
'filters': 2,
@@ -529,7 +529,7 @@ class Conv3DTest(test.TestCase):
class ZeroPaddingTest(test.TestCase):
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_zero_padding_1d(self):
num_samples = 2
input_dim = 2
@@ -581,7 +581,7 @@ class ZeroPaddingTest(test.TestCase):
with self.assertRaises(ValueError):
keras.layers.ZeroPadding1D(padding=None)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_zero_padding_2d(self):
num_samples = 2
stack_size = 2
@@ -660,7 +660,7 @@ class ZeroPaddingTest(test.TestCase):
with self.assertRaises(ValueError):
keras.layers.ZeroPadding2D(padding=None)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_zero_padding_3d(self):
num_samples = 2
stack_size = 2
@@ -702,13 +702,13 @@ class ZeroPaddingTest(test.TestCase):
class UpSamplingTest(test.TestCase):
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_upsampling_1d(self):
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_upsampling_2d(self):
num_samples = 2
stack_size = 2
@@ -758,7 +758,7 @@ class UpSamplingTest(test.TestCase):
np.testing.assert_allclose(np_output, expected_out)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_upsampling_3d(self):
num_samples = 2
stack_size = 2
@@ -818,7 +818,7 @@ class UpSamplingTest(test.TestCase):
class CroppingTest(test.TestCase):
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_cropping_1d(self):
num_samples = 2
time_length = 4
@@ -837,7 +837,7 @@ class CroppingTest(test.TestCase):
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=None)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_cropping_2d(self):
num_samples = 2
stack_size = 2
@@ -905,7 +905,7 @@ class CroppingTest(test.TestCase):
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=None)
-@tf_test_util.run_in_graph_and_eager_modes()
+@tf_test_util.run_in_graph_and_eager_modes
def test_cropping_3d(self):
num_samples = 2
stack_size = 2


@ -51,7 +51,7 @@ class CoreLayersTest(test.TestCase):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_spatial_dropout(self):
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
@ -78,7 +78,7 @@ class CoreLayersTest(test.TestCase):
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_activation(self):
# with string argument
testing_utils.layer_test(
@ -92,7 +92,7 @@ class CoreLayersTest(test.TestCase):
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_reshape(self):
testing_utils.layer_test(
keras.layers.Reshape,
@ -114,12 +114,12 @@ class CoreLayersTest(test.TestCase):
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_permute(self):
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_flatten(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
@ -134,7 +134,7 @@ class CoreLayersTest(test.TestCase):
np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
self.assertAllClose(outputs, target_outputs)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_repeat_vector(self):
testing_utils.layer_test(
keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
@ -173,7 +173,7 @@ class CoreLayersTest(test.TestCase):
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_dense(self):
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))


@ -30,7 +30,7 @@ from tensorflow.python.training.rmsprop import RMSPropOptimizer
class CuDNNTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_cudnn_rnn_basics(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
@ -58,7 +58,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
'go_backwards': go_backwards},
input_shape=(num_samples, timesteps, input_size))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_trainability(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
@ -288,7 +288,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
self.assertAllClose(
model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_cudnnrnn_bidirectional(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):


@ -29,7 +29,7 @@ from tensorflow.python.training.rmsprop import RMSPropOptimizer
class GRULayerTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
@ -41,7 +41,7 @@ class GRULayerTest(test.TestCase):
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
@ -55,7 +55,7 @@ class GRULayerTest(test.TestCase):
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
@ -68,7 +68,7 @@ class GRULayerTest(test.TestCase):
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_implementation_mode_GRU(self):
num_samples = 2
timesteps = 3


@ -28,7 +28,7 @@ from tensorflow.python.platform import test
class LocallyConnectedLayersTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_locallyconnected_1d(self):
num_samples = 2
num_steps = 8
@ -92,7 +92,7 @@ class LocallyConnectedLayersTest(test.TestCase):
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_locallyconnected_2d(self):
num_samples = 8
filters = 3
@ -118,7 +118,7 @@ class LocallyConnectedLayersTest(test.TestCase):
},
input_shape=(num_samples, num_row, num_col, stack_size))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_locallyconnected_2d_channels_first(self):
num_samples = 8
filters = 3


@ -29,7 +29,7 @@ from tensorflow.python.training.rmsprop import RMSPropOptimizer
class LSTMLayerTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
@ -56,7 +56,7 @@ class LSTMLayerTest(test.TestCase):
outputs = model.layers[-1].output
self.assertEquals(outputs.get_shape().as_list(), [None, timesteps, units])
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
@ -70,7 +70,7 @@ class LSTMLayerTest(test.TestCase):
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
@ -83,7 +83,7 @@ class LSTMLayerTest(test.TestCase):
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_implementation_mode_LSTM(self):
num_samples = 2
timesteps = 3


@ -28,7 +28,7 @@ from tensorflow.python.platform import test
class MergeLayersTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_add(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
@ -76,7 +76,7 @@ class MergeLayersTest(test.TestCase):
with self.assertRaises(ValueError):
keras.layers.add([i1])
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_multiply(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
@ -92,7 +92,7 @@ class MergeLayersTest(test.TestCase):
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_average(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
@ -106,7 +106,7 @@ class MergeLayersTest(test.TestCase):
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_maximum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
@ -120,7 +120,7 @@ class MergeLayersTest(test.TestCase):
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_minimum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
@ -134,7 +134,7 @@ class MergeLayersTest(test.TestCase):
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_concatenate(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
@ -169,7 +169,7 @@ class MergeLayersTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'called on a list'):
keras.layers.concatenate([i1], axis=-1)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_dot(self):
i1 = keras.layers.Input(shape=(4,))
i2 = keras.layers.Input(shape=(4,))
@ -215,7 +215,7 @@ class MergeLayersTest(test.TestCase):
dot = keras.layers.Dot(1)
dot.compute_output_shape(1)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_subtract(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))


@ -40,7 +40,7 @@ class NoiseLayersTest(test.TestCase):
kwargs={'rate': 0.5},
input_shape=(3, 2, 3))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_AlphaDropout(self):
testing_utils.layer_test(
keras.layers.AlphaDropout,


@ -27,14 +27,14 @@ from tensorflow.python.platform import test
class GlobalPoolingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d(self):
testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
input_shape=(3, 4, 5))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling1D, input_shape=(3, 4, 5))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_2d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling2D,
@ -53,7 +53,7 @@ class GlobalPoolingTest(test.TestCase):
kwargs={'data_format': 'channels_last'},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_3d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling3D,
@ -75,7 +75,7 @@ class GlobalPoolingTest(test.TestCase):
class Pooling2DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_2d(self):
pool_size = (3, 3)
for strides in [(1, 1), (2, 2)]:
@ -88,7 +88,7 @@ class Pooling2DTest(test.TestCase):
},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_2d(self):
testing_utils.layer_test(
keras.layers.AveragePooling2D,
@ -122,7 +122,7 @@ class Pooling2DTest(test.TestCase):
class Pooling3DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_3d(self):
pool_size = (3, 3, 3)
testing_utils.layer_test(
@ -141,7 +141,7 @@ class Pooling3DTest(test.TestCase):
},
input_shape=(3, 4, 11, 12, 10))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_3d(self):
pool_size = (3, 3, 3)
testing_utils.layer_test(
@ -163,7 +163,7 @@ class Pooling3DTest(test.TestCase):
class Pooling1DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:
@ -173,7 +173,7 @@ class Pooling1DTest(test.TestCase):
'padding': padding},
input_shape=(3, 5, 4))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:


@ -29,7 +29,7 @@ from tensorflow.python.training.rmsprop import RMSPropOptimizer
class SimpleRNNLayerTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_return_sequences_SimpleRNN(self):
num_samples = 2
timesteps = 3
@ -41,7 +41,7 @@ class SimpleRNNLayerTest(test.TestCase):
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_dynamic_behavior_SimpleRNN(self):
num_samples = 2
timesteps = 3
@ -55,7 +55,7 @@ class SimpleRNNLayerTest(test.TestCase):
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_dropout_SimpleRNN(self):
num_samples = 2
timesteps = 3
@ -68,7 +68,7 @@ class SimpleRNNLayerTest(test.TestCase):
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_implementation_mode_SimpleRNN(self):
num_samples = 2
timesteps = 3


@ -71,7 +71,7 @@ class _RNNCellWithConstants(keras.layers.Layer):
class TimeDistributedTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
@tf_test_util.run_in_graph_and_eager_modes
def test_timedistributed_dense(self):
model = keras.models.Sequential()
model.add(


@ -173,7 +173,7 @@ def get_nested_model_3(input_dim, num_classes):
class ModelSubclassingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
@ -192,7 +192,7 @@ class ModelSubclassingTest(test.TestCase):
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
@ -251,7 +251,7 @@ class ModelSubclassingTest(test.TestCase):
model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_single_io_workflow_with_dataset_iterators(self):
num_classes = 2
num_samples = 10
@ -325,7 +325,7 @@ class ModelSubclassingTest(test.TestCase):
self.assertEqual(len(model.inputs), 2)
self.assertEqual(len(model.outputs), 2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_updates(self):
# test that updates get run during training
num_samples = 100
@ -419,7 +419,7 @@ class ModelSubclassingTest(test.TestCase):
self.assertEqual(len(model.get_updates_for(x)), 2)
self.assertEqual(len(model.get_losses_for(x)), 1)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
@ -447,7 +447,7 @@ class ModelSubclassingTest(test.TestCase):
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
@ -500,14 +500,14 @@ class ModelSubclassingTest(test.TestCase):
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_trainable_mutation(self):
# test that you can change `trainable` on a model or layer, and that
# it freezes the model state during training
# TODO(fchollet): add test after we unify BN behavior in eager and symbolic.
pass
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_saving(self):
num_classes = (2, 3)
@ -549,7 +549,7 @@ class ModelSubclassingTest(test.TestCase):
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_summary(self):
class ToString(object):
@ -575,7 +575,7 @@ class ModelSubclassingTest(test.TestCase):
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 587' in print_fn.contents)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
@ -598,7 +598,7 @@ class ModelSubclassingTest(test.TestCase):
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
@ -621,7 +621,7 @@ class ModelSubclassingTest(test.TestCase):
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
@ -643,7 +643,7 @@ class ModelSubclassingTest(test.TestCase):
len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_support_for_manual_training_arg(self):
# In most cases, the `training` argument is left unspecified, in which
# case it defaults to value corresponding to the Model method being used
@ -752,7 +752,7 @@ class CustomCallModel(keras.Model):
class CustomCallSignatureTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_no_inputs_in_signature(self):
model = CustomCallModel()
first = array_ops.ones([2, 3])
@ -766,7 +766,7 @@ class CustomCallSignatureTests(test.TestCase):
output = model(first, second=second, training=False)
self.assertAllClose(expected_output, self.evaluate(output))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_inputs_in_signature(self):
class HasInputsAndOtherPositional(keras.Model):
@ -783,7 +783,7 @@ class CustomCallSignatureTests(test.TestCase):
x1, x2 = keras.Input((1, 1)), keras.Input((1, 1))
model(x1, x2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_kwargs_in_signature(self):
class HasKwargs(keras.Model):
@ -797,7 +797,7 @@ class CustomCallSignatureTests(test.TestCase):
if not context.executing_eagerly():
six.assertCountEqual(self, [arg], model.inputs)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_args_in_signature(self):
class HasArgs(keras.Model):


@ -129,7 +129,7 @@ class TestModelCloning(test.TestCase):
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_optimizer_dependency(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=(4,)))


@ -1006,7 +1006,7 @@ class SliceAssignTest(test_util.TensorFlowTestCase):
class ShapeSizeRankTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDenseShape(self):
t_value = [[0, 42], [24, 0]]
self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
@ -1018,7 +1018,7 @@ class ShapeSizeRankTest(test_util.TensorFlowTestCase):
self.assertEqual(4, self.evaluate(array_ops.size(t)))
self.assertEqual(2, self.evaluate(array_ops.rank(t)))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSparseShape(self):
sp_value = sparse_tensor.SparseTensorValue(
indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
@ -1031,7 +1031,7 @@ class ShapeSizeRankTest(test_util.TensorFlowTestCase):
self.assertEqual(4, self.evaluate(array_ops.size(sp)))
self.assertEqual(2, self.evaluate(array_ops.rank(sp)))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSizeDtype(self):
tensor = [1]
self.assertEqual(dtypes.int32, self.evaluate(array_ops.size(tensor)).dtype)
@ -1123,7 +1123,7 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
class ConcatSliceResourceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConcatSlice(self):
r1 = test_ops.stub_resource_handle_op(container="a", shared_name="b")
r2 = test_ops.stub_resource_handle_op(container="a", shared_name="c")


@ -124,7 +124,7 @@ class AtrousConvolutionTest(test.TestCase):
x, w, "VALID", dilation_rate=[2, 2], data_format="NCHW")
self.assertEqual(y.shape.as_list(), [1, 20, None, None])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution2D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
@ -139,7 +139,7 @@ class AtrousConvolutionTest(test.TestCase):
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution3D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
@ -158,7 +158,7 @@ class AtrousConvolutionTest(test.TestCase):
dilation_rate=dilation_rate,
)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolution1D(self):
with self._delay_checks() as add_check:
for padding in ["SAME", "VALID"]:
@ -173,7 +173,7 @@ class AtrousConvolutionTest(test.TestCase):
dilation_rate=[rate],
)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAtrousConvolutionNC(self):
if test.is_gpu_available(cuda_only=True):
# "NCW" and "NCHW" formats are currently supported only on CUDA.
@ -197,7 +197,7 @@ class AtrousConvolutionTest(test.TestCase):
data_format="NCHW",
)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.


@ -34,45 +34,45 @@ from tensorflow.python.platform import test
class AssertProperIterableTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_single_tensor_raises(self):
tensor = constant_op.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(tensor)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_single_sparse_tensor_raises(self):
ten = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(ten)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(array)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(mystr)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
check_ops.assert_proper_iterable(non_iterable)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_list_does_not_raise(self):
list_of_stuff = [
constant_op.constant([11, 22]), constant_op.constant([1, 2])
]
check_ops.assert_proper_iterable(list_of_stuff)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_generator_does_not_raise(self):
generator_of_stuff = (constant_op.constant([11, 22]), constant_op.constant(
[1, 2]))
@ -81,14 +81,14 @@ class AssertProperIterableTest(test.TestCase):
class AssertEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies([check_ops.assert_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_scalar_comparison(self):
const_true = constant_op.constant(True, name="true")
const_false = constant_op.constant(False, name="false")
@ -101,7 +101,7 @@ class AssertEqualTest(test.TestCase):
x = check_ops.assert_equal(small, small)
assert x is None
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_greater(self):
# Static check
static_small = constant_op.constant([1, 2], name="small")
@ -179,7 +179,7 @@ First 2 elements of y:
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less(self):
# Static check
static_small = constant_op.constant([3, 1], name="small")
@ -196,7 +196,7 @@ First 2 elements of y:
with self.assertRaisesOpError("small.*big"):
out.eval(feed_dict={small: [3, 1], big: [4, 2]})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
small = constant_op.constant([[1, 2], [1, 2]], name="small")
small_2 = constant_op.constant([1, 2], name="small_2")
@ -204,7 +204,7 @@ First 2 elements of y:
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
small_2 = constant_op.constant([1, 1], name="small_2")
@ -219,13 +219,13 @@ First 2 elements of y:
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_not_equal_and_broadcastable_shapes(self):
cond = constant_op.constant([True, False], name="small")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(cond, False, message="fail")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
@ -236,7 +236,7 @@ First 2 elements of y:
class AssertNoneEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_not_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([10, 20], name="small")
@ -245,7 +245,7 @@ class AssertNoneEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_equal(self):
small = constant_op.constant([3, 1], name="small")
with self.assertRaisesOpError("x != y did not hold"):
@ -254,7 +254,7 @@ class AssertNoneEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_not_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3], name="big")
@ -263,7 +263,7 @@ class AssertNoneEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_not_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = constant_op.constant([1, 1, 1], name="small")
@ -280,7 +280,7 @@ class AssertNoneEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = constant_op.constant([])
@ -300,7 +300,7 @@ class AssertNoneEqualTest(test.TestCase):
class AssertAllCloseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1., name="y")
@ -309,7 +309,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_rtol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
@ -320,7 +320,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_atol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
@ -331,7 +331,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_rtol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
@ -342,7 +342,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_atol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
@ -353,7 +353,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_due_to_custom_rtol(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1.1, name="y")
@ -363,7 +363,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_close_enough_due_to_custom_atol(self):
x = constant_op.constant(0., name="x")
y = constant_op.constant(0.1, name="y", dtype=np.float32)
@ -373,7 +373,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
@ -381,7 +381,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(larry)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_atol_violated(self):
x = constant_op.constant(10., name="x")
y = constant_op.constant(10.2, name="y")
@ -392,7 +392,7 @@ class AssertAllCloseTest(test.TestCase):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_default_rtol_violated(self):
x = constant_op.constant(0.1, name="x")
y = constant_op.constant(0.0, name="y")
@ -412,7 +412,7 @@ class AssertAllCloseTest(test.TestCase):
class AssertLessTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("failure message.*\n*.* x < y did not hold"):
@ -422,7 +422,7 @@ class AssertLessTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
@ -431,7 +431,7 @@ class AssertLessTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
@ -439,7 +439,7 @@ class AssertLessTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
@ -447,7 +447,7 @@ class AssertLessTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
@ -462,7 +462,7 @@ class AssertLessTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
@ -480,7 +480,7 @@ class AssertLessTest(test.TestCase):
class AssertLessEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
@ -488,7 +488,7 @@ class AssertLessEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
@ -499,7 +499,7 @@ class AssertLessEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
@ -507,7 +507,7 @@ class AssertLessEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
@ -515,7 +515,7 @@ class AssertLessEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([1, 1, 1], name="big")
@ -531,7 +531,7 @@ class AssertLessEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
@ -543,7 +543,7 @@ class AssertLessEqualTest(test.TestCase):
class AssertGreaterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("fail"):
@ -553,7 +553,7 @@ class AssertGreaterTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
@ -562,7 +562,7 @@ class AssertGreaterTest(test.TestCase):
out = array_ops.identity(big)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
@ -570,7 +570,7 @@ class AssertGreaterTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
@ -578,7 +578,7 @@ class AssertGreaterTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_greater_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
@ -593,7 +593,7 @@ class AssertGreaterTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
@ -604,7 +604,7 @@ class AssertGreaterTest(test.TestCase):
class AssertGreaterEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
@ -612,7 +612,7 @@ class AssertGreaterEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
@ -623,7 +623,7 @@ class AssertGreaterEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
@ -632,7 +632,7 @@ class AssertGreaterEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
@ -641,7 +641,7 @@ class AssertGreaterEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="big")
big = constant_op.constant([3, 1], name="small")
@ -657,7 +657,7 @@ class AssertGreaterEqualTest(test.TestCase):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
@ -669,14 +669,14 @@ class AssertGreaterEqualTest(test.TestCase):
class AssertNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_negative(self):
frank = constant_op.constant([-1, -2], name="frank")
with ops.control_dependencies([check_ops.assert_negative(frank)]):
out = array_ops.identity(frank)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_positive(self):
doug = constant_op.constant([1, 2], name="doug")
with self.assertRaisesOpError("fail"):
@ -686,7 +686,7 @@ class AssertNegativeTest(test.TestCase):
out = array_ops.identity(doug)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_zero(self):
claire = constant_op.constant([0], name="claire")
with self.assertRaisesOpError("x < 0 did not hold"):
@ -694,7 +694,7 @@ class AssertNegativeTest(test.TestCase):
out = array_ops.identity(claire)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
@ -708,7 +708,7 @@ class AssertNegativeTest(test.TestCase):
class AssertPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_negative(self):
freddie = constant_op.constant([-1, -2], name="freddie")
with self.assertRaisesOpError("fail"):
@ -718,14 +718,14 @@ class AssertPositiveTest(test.TestCase):
out = array_ops.identity(freddie)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_positive(self):
remmy = constant_op.constant([1, 2], name="remmy")
with ops.control_dependencies([check_ops.assert_positive(remmy)]):
out = array_ops.identity(remmy)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_zero(self):
meechum = constant_op.constant([0], name="meechum")
with self.assertRaisesOpError("x > 0 did not hold"):
@ -733,7 +733,7 @@ class AssertPositiveTest(test.TestCase):
out = array_ops.identity(meechum)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
@ -747,7 +747,7 @@ class AssertPositiveTest(test.TestCase):
class AssertRankTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
@ -768,7 +768,7 @@ class AssertRankTest(test.TestCase):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
@ -784,7 +784,7 @@ class AssertRankTest(test.TestCase):
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
@ -802,7 +802,7 @@ class AssertRankTest(test.TestCase):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
@ -818,7 +818,7 @@ class AssertRankTest(test.TestCase):
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
@ -836,7 +836,7 @@ class AssertRankTest(test.TestCase):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
@ -852,7 +852,7 @@ class AssertRankTest(test.TestCase):
[check_ops.assert_rank(tensor, rank_tensor)]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
@ -873,7 +873,7 @@ class AssertRankTest(test.TestCase):
class AssertRankInTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_raises_if_rank_mismatch_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
with self.assertRaisesRegexp(
@ -890,7 +890,7 @@ class AssertRankInTest(test.TestCase):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
@ -906,7 +906,7 @@ class AssertRankInTest(test.TestCase):
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank1 = constant_op.constant([42, 43], name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
@ -924,7 +924,7 @@ class AssertRankInTest(test.TestCase):
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_mismatches_static_rank(self):
tensor_rank1 = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(ValueError, "rank"):
@ -942,7 +942,7 @@ class AssertRankInTest(test.TestCase):
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
desired_ranks = (
@ -966,7 +966,7 @@ class AssertRankInTest(test.TestCase):
desired_ranks[1]: [2, 1],
})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(TypeError,
@ -987,7 +987,7 @@ class AssertRankInTest(test.TestCase):
class AssertRankAtLeastTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
@ -1005,7 +1005,7 @@ class AssertRankAtLeastTest(test.TestCase):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
@ -1021,7 +1021,7 @@ class AssertRankAtLeastTest(test.TestCase):
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_ten_doesnt_raise_raise_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
@ -1037,7 +1037,7 @@ class AssertRankAtLeastTest(test.TestCase):
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
@ -1053,7 +1053,7 @@ class AssertRankAtLeastTest(test.TestCase):
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
@ -1074,7 +1074,7 @@ class AssertRankAtLeastTest(test.TestCase):
class AssertNonNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_negative(self):
zoe = constant_op.constant([-1, -2], name="zoe")
with self.assertRaisesOpError("x >= 0 did not hold"):
@ -1082,14 +1082,14 @@ class AssertNonNegativeTest(test.TestCase):
out = array_ops.identity(zoe)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_zero_and_positive(self):
lucas = constant_op.constant([0, 2], name="lucas")
with ops.control_dependencies([check_ops.assert_non_negative(lucas)]):
out = array_ops.identity(lucas)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-negative when it satisfies:
# For every element x_i in x, x_i >= 0
@ -1103,14 +1103,14 @@ class AssertNonNegativeTest(test.TestCase):
class AssertNonPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_zero_and_negative(self):
tom = constant_op.constant([0, -2], name="tom")
with ops.control_dependencies([check_ops.assert_non_positive(tom)]):
out = array_ops.identity(tom)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_positive(self):
rachel = constant_op.constant([0, 2], name="rachel")
with self.assertRaisesOpError("x <= 0 did not hold"):
@ -1118,7 +1118,7 @@ class AssertNonPositiveTest(test.TestCase):
out = array_ops.identity(rachel)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-positive when it satisfies:
# For every element x_i in x, x_i <= 0
@ -1132,14 +1132,14 @@ class AssertNonPositiveTest(test.TestCase):
class AssertIntegerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_integer(self):
integers = constant_op.constant([1, 2], name="integers")
with ops.control_dependencies([check_ops.assert_integer(integers)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_float(self):
floats = constant_op.constant([1.0, 2.0], name="floats")
with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
@ -1148,7 +1148,7 @@ class AssertIntegerTest(test.TestCase):
class AssertTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_correct_type(self):
integers = constant_op.constant([1, 2], dtype=dtypes.int64)
with ops.control_dependencies([
@ -1156,7 +1156,7 @@ class AssertTypeTest(test.TestCase):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_raises_when_wrong_type(self):
floats = constant_op.constant([1.0, 2.0], dtype=dtypes.float16)
with self.assertRaisesRegexp(TypeError, "must be of type.*float32"):
@ -1165,74 +1165,74 @@ class AssertTypeTest(test.TestCase):
class IsStrictlyIncreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_constant_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(check_ops.is_strictly_increasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(
check_ops.is_strictly_increasing([1, 0, -1])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(
self.evaluate(check_ops.is_strictly_increasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_increasing_tensor_is_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_increasing_rank_two_tensor(self):
self.assertTrue(
self.evaluate(check_ops.is_strictly_increasing([[-1, 2], [3, 4]])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_tensor_with_one_element_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([])))
class IsNonDecreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_constant_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(check_ops.is_non_decreasing([3, 2, 1])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_2d_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(
check_ops.is_non_decreasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_increasing_rank_one_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_increasing_rank_two_tensor(self):
self.assertTrue(self.evaluate(
check_ops.is_non_decreasing([[-1, 2], [3, 3]])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_tensor_with_one_element_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1])))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([])))
class FloatDTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_assert_same_float_dtype(self):
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(None, None))
@ -1286,7 +1286,7 @@ class FloatDTypeTest(test.TestCase):
class AssertScalarTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_assert_scalar(self):
check_ops.assert_scalar(constant_op.constant(3))
check_ops.assert_scalar(constant_op.constant("foo"))


@ -34,7 +34,7 @@ from tensorflow.python.platform import test
class ConfusionMatrixTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testExample(self):
"""This is a test of the example provided in pydoc."""
with self.test_session():


@ -345,7 +345,7 @@ class Conv2DTest(test.TestCase):
self.assertAllClose(expected, np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
@ -358,7 +358,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter2x1Dilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 4, 1],
@ -367,7 +367,7 @@ class Conv2DTest(test.TestCase):
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(
@ -377,7 +377,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[0, 2, 3, 3],
@ -386,7 +386,7 @@ class Conv2DTest(test.TestCase):
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
@ -397,7 +397,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
@ -406,7 +406,7 @@ class Conv2DTest(test.TestCase):
dilations=[1, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
@ -420,7 +420,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
@ -429,7 +429,7 @@ class Conv2DTest(test.TestCase):
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
@ -439,7 +439,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
@ -449,7 +449,7 @@ class Conv2DTest(test.TestCase):
padding="SAME",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(
@ -459,7 +459,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(
@ -469,7 +469,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
@ -492,7 +492,7 @@ class Conv2DTest(test.TestCase):
padding="SAME",
expected=[44, 28, 41, 16])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
@ -501,7 +501,7 @@ class Conv2DTest(test.TestCase):
padding="VALID",
expected=[50, 60])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 3, 1],
@ -589,7 +589,7 @@ class Conv2DTest(test.TestCase):
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
@ -604,7 +604,7 @@ class Conv2DTest(test.TestCase):
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
@ -619,7 +619,7 @@ class Conv2DTest(test.TestCase):
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
@ -639,7 +639,7 @@ class Conv2DTest(test.TestCase):
use_gpu=use_gpu,
err=1e-4)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
@ -657,7 +657,7 @@ class Conv2DTest(test.TestCase):
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
@ -675,7 +675,7 @@ class Conv2DTest(test.TestCase):
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
@ -759,7 +759,7 @@ class Conv2DTest(test.TestCase):
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
@ -773,7 +773,7 @@ class Conv2DTest(test.TestCase):
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
@ -787,7 +787,7 @@ class Conv2DTest(test.TestCase):
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
@ -801,7 +801,7 @@ class Conv2DTest(test.TestCase):
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
@ -820,7 +820,7 @@ class Conv2DTest(test.TestCase):
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
@ -834,7 +834,7 @@ class Conv2DTest(test.TestCase):
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
@ -848,7 +848,7 @@ class Conv2DTest(test.TestCase):
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
@ -1897,19 +1897,19 @@ if __name__ == "__main__":
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(GetShrunkInceptionShapes()):
setattr(Conv2DTest, "testInceptionFwd_" + str(index),
test_util.run_in_graph_and_eager_modes()(
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(input_size_, filter_size_, stride_,
padding_)))
setattr(
Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
test_util.run_in_graph_and_eager_modes()(GetInceptionFwdDilatedConvTest(
test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
input_size_, filter_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
test_util.run_in_graph_and_eager_modes()(
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(input_size_, filter_size_,
output_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
test_util.run_in_graph_and_eager_modes()(
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(input_size_, filter_size_,
output_size_, [stride_, stride_],
padding_)))
@ -1924,17 +1924,17 @@ if __name__ == "__main__":
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes()(
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes()(
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes()(
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
gpu_only=True)))
setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes()(
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
gpu_only=True)))
test.main()
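
Note: the `setattr` registrations above call `run_in_graph_and_eager_modes` directly on a generated test function, so the decorator has to work both bare and parenthesized. A minimal sketch of that dual-form decorator pattern follows; the name and internals are illustrative assumptions, not the actual `test_util` code.

```python
import functools


def run_in_both_modes_sketch(func=None, **config):
  """Assumed sketch of a decorator usable with or without parentheses."""
  def decorator(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
      # A real implementation would run `f` once under graph construction
      # and once under eager execution; this stub just calls it once.
      return f(self, *args, **kwargs)
    return wrapper
  if func is not None:
    # Bare form: @run_in_both_modes_sketch, or a direct call such as the
    # setattr() registrations above.
    return decorator(func)
  # Parenthesized form: @run_in_both_modes_sketch(**config).
  return decorator
```

Under this pattern, `@dec`, `@dec()`, and `dec(fn)` all resolve to the same wrapped function, which is what makes the mechanical parenthesis removal in this change safe.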


@ -58,14 +58,14 @@ def entropy(p):
class BernoulliTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testP(self):
p = [0.2, 0.4]
dist = bernoulli.Bernoulli(probs=p)
with self.test_session():
self.assertAllClose(p, self.evaluate(dist.probs))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLogits(self):
logits = [-42., 42.]
dist = bernoulli.Bernoulli(logits=logits)
@ -83,7 +83,7 @@ class BernoulliTest(test.TestCase):
with self.test_session():
self.assertAllClose(special.logit(p), self.evaluate(dist.logits))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInvalidP(self):
invalid_ps = [1.01, 2.]
for p in invalid_ps:
@ -105,7 +105,7 @@ class BernoulliTest(test.TestCase):
dist = bernoulli.Bernoulli(probs=p)
self.assertEqual(p, self.evaluate(dist.probs)) # Should not fail
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
@ -116,7 +116,7 @@ class BernoulliTest(test.TestCase):
self.assertAllEqual([], dist.event_shape.as_list())
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDtype(self):
dist = make_bernoulli([])
self.assertEqual(dist.dtype, dtypes.int32)
@ -134,7 +134,7 @@ class BernoulliTest(test.TestCase):
self.assertEqual(dist64.dtype, dist64.sample(5).dtype)
self.assertEqual(dist64.dtype, dist64.mode().dtype)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def _testPmf(self, **kwargs):
dist = bernoulli.Bernoulli(**kwargs)
with self.test_session():
@ -175,7 +175,7 @@ class BernoulliTest(test.TestCase):
p: [0.2, 0.3, 0.4]
}), [[0.2, 0.7, 0.4]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPmfInvalid(self):
p = [0.1, 0.2, 0.7]
with self.test_session():
@ -185,7 +185,7 @@ class BernoulliTest(test.TestCase):
with self.assertRaisesOpError("Elements cannot exceed 1."):
self.evaluate(dist.prob([2, 0, 1]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPmfWithP(self):
p = [[0.2, 0.4], [0.3, 0.6]]
self._testPmf(probs=p)
@ -227,21 +227,21 @@ class BernoulliTest(test.TestCase):
dist = bernoulli.Bernoulli(probs=[[0.5], [0.5]])
self.assertEqual((2, 1), dist.log_prob(1).get_shape())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBoundaryConditions(self):
with self.test_session():
dist = bernoulli.Bernoulli(probs=1.0)
self.assertAllClose(np.nan, self.evaluate(dist.log_prob(0)))
self.assertAllClose([np.nan], [self.evaluate(dist.log_prob(1))])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEntropyNoBatch(self):
p = 0.2
dist = bernoulli.Bernoulli(probs=p)
with self.test_session():
self.assertAllClose(self.evaluate(dist.entropy()), entropy(p))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEntropyWithBatch(self):
p = [[0.1, 0.7], [0.2, 0.6]]
dist = bernoulli.Bernoulli(probs=p, validate_args=False)
@ -251,7 +251,7 @@ class BernoulliTest(test.TestCase):
[[entropy(0.1), entropy(0.7)], [entropy(0.2),
entropy(0.6)]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSampleN(self):
with self.test_session():
p = [0.2, 0.6]
@ -273,7 +273,7 @@ class BernoulliTest(test.TestCase):
dist = bernoulli.Bernoulli(np.log([.2, .4]))
self.assertAllEqual((1, 2), dist.sample(1, seed=42).get_shape().as_list())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNotReparameterized(self):
p = constant_op.constant([0.2, 0.6])
with backprop.GradientTape() as tape:
@ -297,14 +297,14 @@ class BernoulliTest(test.TestCase):
feed_dict={n: 1000})
self.assertAllEqual(sample, sample)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMean(self):
with self.test_session():
p = np.array([[0.2, 0.7], [0.5, 0.4]], dtype=np.float32)
dist = bernoulli.Bernoulli(probs=p)
self.assertAllEqual(self.evaluate(dist.mean()), p)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testVarianceAndStd(self):
var = lambda p: p * (1. - p)
with self.test_session():
@ -321,7 +321,7 @@ class BernoulliTest(test.TestCase):
[np.sqrt(var(0.5)), np.sqrt(var(0.4))]],
dtype=np.float32))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBernoulliBernoulliKL(self):
batch_size = 6
a_p = np.array([0.5] * batch_size, dtype=np.float32)


@ -78,20 +78,20 @@ class NormalTest(test.TestCase):
self.assertEqual(expected, mu_shape)
self.assertEqual(expected, sigma_shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalWithSoftplusScale(self):
with self.test_session():
mu = array_ops.zeros((10, 3))
@ -101,7 +101,7 @@ class NormalTest(test.TestCase):
self.assertAllEqual(
self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDF(self):
with self.test_session():
batch_size = 6
@ -135,7 +135,7 @@ class NormalTest(test.TestCase):
self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
@ -173,7 +173,7 @@ class NormalTest(test.TestCase):
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalCDF(self):
with self.test_session():
batch_size = 50
@ -195,7 +195,7 @@ class NormalTest(test.TestCase):
expected_cdf = stats.norm(mu, sigma).cdf(x)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalSurvivalFunction(self):
with self.test_session():
batch_size = 50
@ -218,7 +218,7 @@ class NormalTest(test.TestCase):
expected_sf = stats.norm(mu, sigma).sf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalLogCDF(self):
with self.test_session():
batch_size = 50
@ -262,7 +262,7 @@ class NormalTest(test.TestCase):
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalLogSurvivalFunction(self):
with self.test_session():
batch_size = 50
@ -286,7 +286,7 @@ class NormalTest(test.TestCase):
expected_sf = stats.norm(mu, sigma).logsf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalEntropyWithScalarInputs(self):
# Scipy.stats.norm cannot deal with the shapes in the other test.
with self.test_session():
@ -308,7 +308,7 @@ class NormalTest(test.TestCase):
expected_entropy = stats.norm(mu_v, sigma_v).entropy()
self.assertAllClose(expected_entropy, self.evaluate(entropy))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalEntropy(self):
with self.test_session():
mu_v = np.array([1.0, 1.0, 1.0])
@ -329,7 +329,7 @@ class NormalTest(test.TestCase):
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalMeanAndMode(self):
with self.test_session():
# Mu will be broadcast to [7, 7, 7].
@ -344,7 +344,7 @@ class NormalTest(test.TestCase):
self.assertAllEqual((3,), normal.mode().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.mode()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalQuantile(self):
with self.test_session():
batch_size = 52
@ -396,7 +396,7 @@ class NormalTest(test.TestCase):
def testQuantileFiniteGradientAtDifficultPointsFloat64(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float64)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalVariance(self):
with self.test_session():
# sigma will be broadcast to [7, 7, 7]
@ -408,7 +408,7 @@ class NormalTest(test.TestCase):
self.assertAllEqual((3,), normal.variance().get_shape())
self.assertAllEqual([49., 49, 49], self.evaluate(normal.variance()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalStandardDeviation(self):
with self.test_session():
# sigma will be broadcast to [7, 7, 7]
@ -420,7 +420,7 @@ class NormalTest(test.TestCase):
self.assertAllEqual((3,), normal.stddev().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.stddev()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalSample(self):
with self.test_session():
mu = constant_op.constant(3.0)
@ -466,7 +466,7 @@ class NormalTest(test.TestCase):
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalSampleMultiDimensional(self):
with self.test_session():
batch_size = 2
@ -502,7 +502,7 @@ class NormalTest(test.TestCase):
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNegativeSigmaFails(self):
with self.test_session():
with self.assertRaisesOpError("Condition x > 0 did not hold"):
@ -510,7 +510,7 @@ class NormalTest(test.TestCase):
loc=[1.], scale=[-5.], validate_args=True, name="G")
self.evaluate(normal.mean())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalShape(self):
with self.test_session():
mu = constant_op.constant([-3.0] * 5)
@ -537,7 +537,7 @@ class NormalTest(test.TestCase):
feed_dict={mu: 5.0,
sigma: [1.0, 2.0]}), [2])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNormalNormalKL(self):
batch_size = 6
mu_a = np.array([3.0] * batch_size)


@ -89,7 +89,7 @@ class NdtriTest(test.TestCase):
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNdtri(self):
"""Verifies that ndtri computation is correct."""
with self.test_session():
@ -138,11 +138,11 @@ class NdtriTest(test.TestCase):
lambda x: special_math.ndtri(x), p) # pylint: disable=unnecessary-lambda
self.assertAllFinite(self.evaluate(grads[0]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat32(self):
self._baseNdtriFiniteGradientTest(np.float32)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat64(self):
self._baseNdtriFiniteGradientTest(np.float64)


@ -48,7 +48,7 @@ stats = try_import("scipy.stats")
class UniformTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformRange(self):
with self.test_session():
a = 3.0
@ -58,7 +58,7 @@ class UniformTest(test.TestCase):
self.assertAllClose(b, self.evaluate(uniform.high))
self.assertAllClose(b - a, self.evaluate(uniform.range()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformPDF(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5 + [15.0])
@ -84,7 +84,7 @@ class UniformTest(test.TestCase):
log_pdf = uniform.log_prob(x)
self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformShape(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5)
@ -96,7 +96,7 @@ class UniformTest(test.TestCase):
self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformPDFWithScalarEndpoint(self):
with self.test_session():
a = constant_op.constant([0.0, 5.0])
@ -109,7 +109,7 @@ class UniformTest(test.TestCase):
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformCDF(self):
with self.test_session():
batch_size = 6
@ -133,7 +133,7 @@ class UniformTest(test.TestCase):
log_cdf = uniform.log_cdf(x)
self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformEntropy(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0])
@ -143,7 +143,7 @@ class UniformTest(test.TestCase):
expected_entropy = np.log(b_v - a_v)
self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformAssertMaxGtMin(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
@ -154,7 +154,7 @@ class UniformTest(test.TestCase):
uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
self.evaluate(uniform.low)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformSample(self):
with self.test_session():
a = constant_op.constant([3.0, 4.0])
@ -177,7 +177,7 @@ class UniformTest(test.TestCase):
self.assertFalse(
np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def _testUniformSampleMultiDimensional(self):
# DISABLED: Please enable this test once b/issues/30149644 is resolved.
with self.test_session():
@ -208,7 +208,7 @@ class UniformTest(test.TestCase):
self.assertAllClose(
sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformMean(self):
with self.test_session():
a = 10.0
@ -219,7 +219,7 @@ class UniformTest(test.TestCase):
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.mean()), s_uniform.mean())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformVariance(self):
with self.test_session():
a = 10.0
@ -230,7 +230,7 @@ class UniformTest(test.TestCase):
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.variance()), s_uniform.var())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformStd(self):
with self.test_session():
a = 10.0
@ -241,7 +241,7 @@ class UniformTest(test.TestCase):
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.stddev()), s_uniform.std())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformNans(self):
with self.test_session():
a = 10.0
@ -259,7 +259,7 @@ class UniformTest(test.TestCase):
self.assertFalse(is_nan[0])
self.assertTrue(is_nan[1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformSamplePdf(self):
with self.test_session():
a = 10.0
@ -269,7 +269,7 @@ class UniformTest(test.TestCase):
self.evaluate(
math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformBroadcasting(self):
with self.test_session():
a = 10.0
@ -280,7 +280,7 @@ class UniformTest(test.TestCase):
expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUniformSampleWithShape(self):
with self.test_session():
a = 10.0


@ -91,21 +91,21 @@ class AssertCloseTest(test.TestCase):
class MaybeGetStaticTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetStaticInt(self):
x = 2
self.assertEqual(x, du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetStaticNumpyArray(self):
x = np.array(2, dtype=np.int32)
self.assertEqual(x, du.maybe_get_static_value(x))
self.assertAllClose(
np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetStaticConstant(self):
x = constant_op.constant(2, dtype=dtypes.int32)
self.assertEqual(np.array(2, dtype=np.int32), du.maybe_get_static_value(x))
@ -120,7 +120,7 @@ class MaybeGetStaticTest(test.TestCase):
class GetLogitsAndProbsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testImproperArguments(self):
with self.test_session():
with self.assertRaises(ValueError):
@ -129,7 +129,7 @@ class GetLogitsAndProbsTest(test.TestCase):
with self.assertRaises(ValueError):
du.get_logits_and_probs(logits=[0.1], probs=[0.1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLogits(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
logits = _logit(p)
@ -141,7 +141,7 @@ class GetLogitsAndProbsTest(test.TestCase):
self.assertAllClose(p, self.evaluate(new_p), rtol=1e-5, atol=0.)
self.assertAllClose(logits, self.evaluate(new_logits), rtol=1e-5, atol=0.)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLogitsMultidimensional(self):
p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
logits = np.log(p)
@ -153,7 +153,7 @@ class GetLogitsAndProbsTest(test.TestCase):
self.assertAllClose(self.evaluate(new_p), p)
self.assertAllClose(self.evaluate(new_logits), logits)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testProbability(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
@ -164,7 +164,7 @@ class GetLogitsAndProbsTest(test.TestCase):
self.assertAllClose(_logit(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testProbabilityMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
@ -175,7 +175,7 @@ class GetLogitsAndProbsTest(test.TestCase):
self.assertAllClose(np.log(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgs(self):
p = [0.01, 0.2, 0.5, 0.7, .99]
# Component less than 0.
@ -206,7 +206,7 @@ class GetLogitsAndProbsTest(test.TestCase):
probs=p3, validate_args=False)
self.evaluate(prob)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgsMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component less than 0. Still sums to 1.
@ -308,7 +308,7 @@ class EmbedCheckCategoricalEventShapeTest(test.TestCase):
param)
checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUnsupportedDtype(self):
with self.test_session():
with self.assertRaises(TypeError):
@ -493,7 +493,7 @@ class RotateTransposeTest(test.TestCase):
x = np.array(x)
return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testRollStatic(self):
with self.test_session():
if context.executing_eagerly():


@ -126,14 +126,14 @@ class FIFOQueueTest(test.TestCase):
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))


@ -56,7 +56,7 @@ def simple_scoped_fn(a, x):
class FunctionalOpsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFoldl_Simple(self):
with self.test_session():
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
@ -72,7 +72,7 @@ class FunctionalOpsTest(test.TestCase):
initializer=10)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFoldl_SingleInputMultiOutput(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -83,7 +83,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputSingleOutput(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -111,7 +111,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFoldr_Simple(self):
with self.test_session():
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
@ -127,7 +127,7 @@ class FunctionalOpsTest(test.TestCase):
initializer=10)
self.assertAllEqual(1282, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFoldr_SingleInputMultiOutput(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -138,7 +138,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFoldr_MultiInputSingleOutput(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -182,7 +182,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual(720.0, self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMap_Simple(self):
with self.test_session():
nums = [1, 2, 3, 4, 5, 6]
@ -202,7 +202,7 @@ class FunctionalOpsTest(test.TestCase):
values=constant_op.constant([0, 1, 2]),
dense_shape=[2, 2]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMapOverScalarErrors(self):
with self.assertRaisesRegexp(ValueError, "not scalars"):
functional_ops.map_fn(lambda x: x, [1, 2])
@ -251,7 +251,7 @@ class FunctionalOpsTest(test.TestCase):
r = gradients_impl.gradients(y, elems)[0]
self.assertAllEqual([4.0, 8.0, 12.0, 16.0, 20.0, 24.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMap_SimpleNotTensor(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
@ -260,7 +260,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual(
np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMap_SingleInputMultiOutput(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
@ -275,7 +275,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual((nums + 3) * 2, received[0])
self.assertAllEqual(-(nums + 3) * 2, received[1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMap_MultiOutputMismatchedDtype(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
@ -287,7 +287,7 @@ class FunctionalOpsTest(test.TestCase):
nums,
dtype=[dtypes.int64, dtypes.int64])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSingleOutput(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
@ -298,7 +298,7 @@ class FunctionalOpsTest(test.TestCase):
received = self.evaluate(r)
self.assertAllEqual(nums * nums + (-nums), received)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMap_MultiInputSameStructureOutput(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
@ -313,7 +313,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual(-nums, received[1])
self.assertAllEqual(nums, received[2])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScan_Simple(self):
with self.test_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
@ -328,7 +328,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScan_Reverse(self):
with self.test_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
@ -345,7 +345,7 @@ class FunctionalOpsTest(test.TestCase):
self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScan_SingleInputMultiOutput(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -357,7 +357,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
self.assertAllEqual([1.0, -2.0, 6.0, -24.0, 120.0, -720.0], r_value[1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSingleOutput(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -367,7 +367,7 @@ class FunctionalOpsTest(test.TestCase):
(elems + 1, -elems), initializer)
self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSameTypeOutput(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -377,7 +377,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual(np.cumsum(elems), r_value[0])
self.assertAllEqual(np.cumsum(-elems), r_value[1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScan_MultiOutputMismatchedInitializer(self):
with self.test_session():
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@ -408,7 +408,7 @@ class FunctionalOpsTest(test.TestCase):
results = np.array([6, 16, 38, 84, 178, 368])
self.assertAllEqual(results, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScanFoldl_Nested(self):
with self.test_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
@ -467,7 +467,7 @@ class FunctionalOpsTest(test.TestCase):
variables.global_variables_initializer().run()
sess.run(grad)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFoldShape(self):
with self.test_session():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
@ -479,7 +479,7 @@ class FunctionalOpsTest(test.TestCase):
y = functional_ops.foldl(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMapShape(self):
with self.test_session():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
@ -491,7 +491,7 @@ class FunctionalOpsTest(test.TestCase):
y = functional_ops.map_fn(lambda e: e, x)
self.assertIs(None, y.get_shape().dims)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMapEmptyScalar(self):
with self.test_session():
map_return = functional_ops.map_fn(lambda x: 1, constant_op.constant([]))
@ -507,7 +507,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual([0, 3, 2], map_return.get_shape().dims)
self.assertAllEqual([0, 3, 2], self.evaluate(map_return).shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScanShape(self):
with self.test_session():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])


@ -46,7 +46,7 @@ def scalar_shape():
@test_util.with_c_shapes
class ListOpsTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPushPop(self):
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
element_shape=scalar_shape())
@ -54,14 +54,14 @@ class ListOpsTest(test_util.TensorFlowTestCase):
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPushPopGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testPushPop()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testStack(self):
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
element_shape=scalar_shape())
@ -70,14 +70,14 @@ class ListOpsTest(test_util.TensorFlowTestCase):
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testStackGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testStack()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorListFromTensor(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=scalar_shape())
@ -87,14 +87,14 @@ class ListOpsTest(test_util.TensorFlowTestCase):
self.assertAllEqual(self.evaluate(e), 1.0)
self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testFromTensorGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testTensorListFromTensor()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetSetItem(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=scalar_shape())
@ -104,14 +104,14 @@ class ListOpsTest(test_util.TensorFlowTestCase):
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [3.0, 2.0])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetSetGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testGetSetItem()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUnknownShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=-1)
@ -122,7 +122,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCPUGPUCopy(self):
if not context.num_gpus():
return
@ -140,7 +140,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
list_ops.tensor_list_pop_back(
l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGraphStack(self):
with context.graph_mode(), self.test_session():
tl = list_ops.empty_tensor_list(
@ -152,7 +152,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
list_ops.tensor_list_stack(tl, element_dtype=dtypes.int32)),
[[1]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGraphStackInLoop(self):
with context.graph_mode(), self.test_session():
t1 = list_ops.empty_tensor_list(
@ -170,7 +170,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.int32)
self.assertAllEqual(self.evaluate(s1), [0, 1, 2, 3])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGraphStackSwitchDtype(self):
with context.graph_mode(), self.test_session():
list_ = list_ops.empty_tensor_list(
@ -192,7 +192,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
np_s1 = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllEqual(self.evaluate(s1), np_s1)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGraphStackInLoopSwitchDtype(self):
with context.graph_mode(), self.test_session():
t1 = list_ops.empty_tensor_list(
@ -216,7 +216,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
np_s1 = np.vstack([np.arange(1, 4) * i for i in range(4)])
self.assertAllEqual(self.evaluate(s1), np_s1)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSerialize(self):
# pylint: disable=g-import-not-at-top
try:
@ -248,7 +248,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
worker_e = array_ops.identity(e)
self.assertAllEqual(self.evaluate(worker_e), [2.0])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPushPopGradients(self):
with backprop.GradientTape() as tape:
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
@ -260,7 +260,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
e = 2 * e
self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testStackFromTensorGradients(self):
with backprop.GradientTape() as tape:
c = constant_op.constant([1.0, 2.0])
@ -272,7 +272,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
grad = tape.gradient(result, [c])[0]
self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetSetGradients(self):
with backprop.GradientTape() as tape:
c = constant_op.constant([1.0, 2.0])
@ -288,14 +288,14 @@ class ListOpsTest(test_util.TensorFlowTestCase):
self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])
self.assertAllEqual(self.evaluate(grad_c2), 6.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSetOutOfBounds(self):
c = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testResourceVariableScatterGather(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
@ -319,7 +319,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
[[1.0, 2.0]] * 4)
self.assertAllEqual(self.evaluate(updated_v_stacked), expected)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConcat(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
@ -379,7 +379,7 @@ class ListOpsTest(test_util.TensorFlowTestCase):
list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,
element_dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPushBackBatch(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())


@ -59,7 +59,7 @@ class LoggingOpsTest(test.TestCase):
class PrintGradientTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPrintShape(self):
inp = constant_op.constant(2.0, shape=[100, 32])
inp_printed = logging_ops.Print(inp, [inp])


@ -460,7 +460,7 @@ class PyFuncTest(test.TestCase):
self.assertEqual(initial_size, script_ops._py_funcs.size())
# ----- Tests for eager_py_func -----
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputInt32(self):
a = array_ops.ones((3, 3), dtype=dtypes.int32)
x = array_ops.ones((3, 1), dtype=dtypes.int32)
@ -468,7 +468,7 @@ class PyFuncTest(test.TestCase):
ret = self.evaluate(output)
self.assertAllEqual(ret, [[3], [3], [3]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputFloat32(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
@ -477,7 +477,7 @@ class PyFuncTest(test.TestCase):
ret = self.evaluate(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerArrayOutput(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
@ -487,7 +487,7 @@ class PyFuncTest(test.TestCase):
ret = self.evaluate(output)
self.assertAllEqual(ret, [[[3.0], [3.0], [3.0]]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerReturnNone(self):
with test_util.device(use_gpu=True):
def no_return_value():
@ -500,7 +500,7 @@ class PyFuncTest(test.TestCase):
else:
self.assertIsNone(ret)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncInDefun(self):
with test_util.device(use_gpu=True):
def wrapper():
@ -512,7 +512,7 @@ class PyFuncTest(test.TestCase):
ret = self.evaluate(wrapped())
self.assertAllEqual(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerExceptionHandling(self):
with test_util.device(use_gpu=True):
self._testExceptionHandling(
@ -531,7 +531,7 @@ class PyFuncTest(test.TestCase):
self._testExceptionHandling(WeirdError, errors.UnknownError, eager=True)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerReturningVariableRaisesError(self):
def return_variable():
return resource_variable_ops.ResourceVariable(0.0)
@ -542,7 +542,7 @@ class PyFuncTest(test.TestCase):
return_variable, inp=[], Tout=dtypes.float32)
self.evaluate(output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTape(self):
def f(x):
@ -565,7 +565,7 @@ class PyFuncTest(test.TestCase):
dy_dx = gradients_impl.gradients(y, x)[0]
self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTapeMultipleArgs(self):
def f(x, y):


@ -54,7 +54,7 @@ native_sampler = random_ops.multinomial
class MultinomialTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSmallEntropy(self):
random_seed.set_random_seed(1618)
for output_dtype in [np.int32, np.int64]:


@ -145,7 +145,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
@ -156,7 +156,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
@ -165,7 +165,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
@ -183,7 +183,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
@ -194,7 +194,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -207,7 +207,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -220,7 +220,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -233,7 +233,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -246,7 +246,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = resource_variable_ops.var_handle_op(
@ -283,7 +283,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -296,7 +296,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -309,7 +309,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -322,7 +322,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -335,7 +335,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -348,7 +348,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -361,7 +361,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
@ -426,7 +426,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
@ -466,32 +466,32 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
@ -509,7 +509,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
@ -561,7 +561,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
with self.test_session():
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
@ -583,7 +583,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.assertEquals(v._handle, w._handle)
self.assertEquals(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
@ -601,7 +601,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
@ -619,7 +619,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
@ -708,7 +708,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
@ -842,7 +842,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
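Every hunk in this diff swaps @test_util.run_in_graph_and_eager_modes() for the bare @test_util.run_in_graph_and_eager_modes. Both spellings work because the decorator dispatches on whether it was handed a function directly. A minimal sketch of that optional-call pattern follows — hypothetical and simplified; the real test_util.run_in_graph_and_eager_modes also accepts options such as config and use_gpu:

import functools

from tensorflow.python.eager import context


def run_in_graph_and_eager_modes(func=None, **kwargs):
  """Sketch: run a test method once in graph mode, once eagerly."""

  def decorator(f):
    @functools.wraps(f)
    def decorated(self, *args, **test_kwargs):
      # First pass: build and run the test body inside a graph/session.
      with context.graph_mode():
        with self.test_session():
          f(self, *args, **test_kwargs)
      # Second pass: execute the same test body eagerly.
      with context.eager_mode():
        f(self, *args, **test_kwargs)
    return decorated

  if func is not None:
    # Bare use: @run_in_graph_and_eager_modes
    return decorator(func)
  # Called use: @run_in_graph_and_eager_modes(...)
  return decorator

Either spelling yields the same wrapped test method, which is why the trailing parentheses can be dropped throughout without changing behavior.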
@ -127,7 +127,7 @@ class RNNTest(test.TestCase):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
if context.executing_eagerly():
@ -141,7 +141,7 @@ class RNNTest(test.TestCase):
dtype=dtypes.float32,
sequence_length=[[4]])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBatchSizeFromInput(self):
cell = Plus1RNNCell()
in_eager_mode = context.executing_eagerly()
@ -181,7 +181,7 @@ class RNNTest(test.TestCase):
self.assertEqual(None, outputs.shape[0].value)
self.assertEqual(None, state.shape[0].value)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScalarStateIsAccepted(self):
cell = ScalarStateRNNCell()
in_eager_mode = context.executing_eagerly()
@ -201,7 +201,7 @@ class RNNTest(test.TestCase):
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testUnbalancedOutputIsAccepted(self):
cell = UnbalancedOutputRNNCell()
in_eager_mode = context.executing_eagerly()
@ -223,7 +223,7 @@ class RNNTest(test.TestCase):
self.assertAllEqual([[[1, 1], [2, 2], [3, 3], [4, 4]]], outputs[1])
self.assertAllEqual(4, state)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayStateIsAccepted(self):
cell = TensorArrayStateRNNCell()
in_eager_mode = context.executing_eagerly()
@ -256,7 +256,7 @@ class RNNTest(test.TestCase):
cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCellsBuild(self):
f32 = dtypes.float32
f64 = dtypes.float64
@ -369,7 +369,7 @@ class ScatterNdTest(test.TestCase):
del input_ # input_ is not used in scatter_nd
return array_ops.scatter_nd(indices, updates, shape)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
# TODO(apassos) figure out how to unify these errors
with self.assertRaises(errors.InvalidArgumentError
@ -95,7 +95,7 @@ class SplitOpTest(test.TestCase):
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertTrue("Cannot infer num from shape" in str(context.exception))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testExplicitNum(self):
size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@ -109,7 +109,7 @@ class SplitOpTest(test.TestCase):
self.assertAllEqual(r[1], value[2:4])
self.assertAllEqual(r[2], value[4:])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.to_int32(5)
b = math_ops.to_int32(6)
@ -168,7 +168,7 @@ class SplitOpTest(test.TestCase):
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSpecialCasesVariable(self):
self._testSpecialCasesVariable()
for dtype in _TEST_DTYPES:
@ -210,13 +210,13 @@ class SplitOpTest(test.TestCase):
self.assertAllEqual(np_ans[i], out[i])
self.assertShapeEqual(np_ans[i], tf_ans[i])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSplitRows(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 0, 4)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSplitCols(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
@ -232,7 +232,7 @@ class SplitOpTest(test.TestCase):
self.assertEqual(out[i].shape, expected_shape)
self.assertEqual(expected_shape, tf_ans[i].get_shape())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEmpty(self):
# Note: np.split returns a rank-0 empty ndarray
# if the input ndarray is empty.
@ -244,7 +244,7 @@ class SplitOpTest(test.TestCase):
self._testEmpty(inp, 2, 3, (8, 0, 7))
self._testEmpty(inp, 2, 7, (8, 0, 3))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testIdentity(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((2, 2, 2), dtype)
@ -252,7 +252,7 @@ class SplitOpTest(test.TestCase):
self._compare(inp, 1, 1)
self._compare(inp, 2, 1)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSplitDim0(self):
for dtype in _TEST_DTYPES:
self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
@ -281,7 +281,7 @@ class SplitOpTest(test.TestCase):
offset += length
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testRandom(self):
for dtype in _TEST_DTYPES:
for _ in range(5):
@ -150,7 +150,7 @@ class TemplateTest(test.TestCase):
# Parameters are tied, so the loss should have gone down after training.
self.assertLess(final_test_loss.numpy(), initial_test_loss.numpy())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_skip_stack_frames(self):
first = traceback.format_stack()
second = traceback.format_stack()
@ -158,7 +158,7 @@ class TemplateTest(test.TestCase):
self.assertEqual(1, len(result))
self.assertNotEqual(len(first), len(result))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_template_with_name(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
@ -204,7 +204,7 @@ class TemplateTest(test.TestCase):
self.assertEqual(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_template_in_scope(self):
tmpl1 = template.make_template("s1", variable_scoped_function)
tmpl2 = template.make_template("s1", variable_scoped_function)
@ -221,7 +221,7 @@ class TemplateTest(test.TestCase):
self.assertEqual("scope/s1/dummy:0", v1.name)
self.assertEqual("scope/s1_1/dummy:0", v3.name)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_template_with_internal_reuse(self):
tmpl1 = template.make_template("s1", internally_variable_scoped_function)
tmpl2 = template.make_template("s1", internally_variable_scoped_function)
@ -237,13 +237,13 @@ class TemplateTest(test.TestCase):
with self.assertRaises(ValueError):
tmpl1("not_test")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_template_without_name(self):
with self.assertRaisesRegexp(
ValueError, "name cannot be None."):
template.make_template(None, variable_scoped_function)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_make_template(self):
# Test both that we can call it with positional and keywords.
tmpl1 = template.make_template(
@ -266,7 +266,7 @@ class TemplateTest(test.TestCase):
with self.assertRaises(ValueError):
tmpl()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_enforces_no_extra_trainable_variables_eager(self):
tmpl = template.make_template("s",
function_with_side_create,
@ -287,7 +287,7 @@ class TemplateTest(test.TestCase):
trainable=False)
self.assertEqual(tmpl(name="1"), tmpl(name="2"))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_internal_variable_reuse(self):
def nested():
@ -310,7 +310,7 @@ class TemplateTest(test.TestCase):
self.assertEqual("s1/nested/x:0", v1.name)
self.assertEqual("s1_1/nested/x:0", v3.name)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_nested_templates(self):
def nested_template():
@ -360,7 +360,7 @@ class TemplateTest(test.TestCase):
self.assertEqual("nested", tmpl1._checkpoint_dependencies[0].name)
self.assertEqual("nested_1", tmpl1._checkpoint_dependencies[1].name)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_nested_templates_with_defun(self):
def variable_scoped_function_no_return_value(trainable=True):
@ -429,7 +429,7 @@ class TemplateTest(test.TestCase):
"a", partial, create_graph_function_=True)
self.assertAllEqual(tmpl(ops.convert_to_tensor(1.0)), 2.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_immediate_scope_creation(self):
# Create templates in scope a then call in scope b. make_template should
# capture the scope the first time it is called, and make_immediate_template
@ -454,7 +454,7 @@ class TemplateTest(test.TestCase):
self.assertEqual("ctor_scope/a/dummy:0", inner_imm_var.name)
self.assertEqual("call_scope/b/dummy:0", inner_defer_var.name)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_scope_access(self):
# Ensure that we can access the scope inside the template, because the name
# of that scope may be different from the name we pass to make_template, due
@ -479,7 +479,7 @@ class TemplateTest(test.TestCase):
# Template is called at the top level, so there is no preceding "foo_2".
self.assertEqual(tc.variable_scope.name, "blah")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_custom_getter(self):
# Custom getter that maintains call count and forwards to true getter
custom_getter_count = [0]
@ -512,7 +512,7 @@ class TemplateTest(test.TestCase):
tmpl2()
self.assertEqual(custom_getter_count[0], 2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_fails_gracefully(self):
for create_scope_now in [True, False]:
def module_function_with_one_arg(inputs):
@ -535,7 +535,7 @@ class TemplateTest(test.TestCase):
templatized_function(data)
self.assertTrue(templatized_function._variables_created)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_name_scopes_for_variable_scopes(self):
# Test that name scopes are not unnecessarily uniquified (but are
# still uniquified when necessary).
@ -586,7 +586,7 @@ class TemplateTest(test.TestCase):
"Second application of template should also get "
"a freshly uniquified name scope.")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_global_variables(self):
# Make sure global_variables are created.
with variable_scope.variable_scope("foo"):
@ -608,7 +608,7 @@ class TemplateTest(test.TestCase):
self.assertEqual(1, len(ta.global_variables))
self.assertEqual(2, len(tb.global_variables))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_trainable_variables(self):
# Make sure trainable_variables are created.
with variable_scope.variable_scope("foo2"):
@ -632,7 +632,7 @@ class TemplateTest(test.TestCase):
self.assertEqual(1, len(ta.variables))
self.assertEqual(1, len(tb.variables))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_non_trainable_variables(self):
# Make sure non_trainable_variables are created.
with variable_scope.variable_scope("foo2"):
@ -675,7 +675,7 @@ class TemplateTest(test.TestCase):
self.assertEqual(0, len(ta.local_variables))
self.assertEqual(1, len(tb.local_variables))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_make_template_with_defun(self):
def variable_scoped_function_no_return_value(scope_name):
@ -75,7 +75,7 @@ class TensorArrayTest(test.TestCase):
super(TensorArrayTest, cls).tearDownClass()
session_lib.Session.reset(cls._workers[0].target)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteRead(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@ -123,11 +123,11 @@ class TensorArrayTest(test.TestCase):
self._testTensorArrayWritePack(dtypes.complex128)
self._testTensorArrayWritePack(dtypes.string)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWritePack(self):
self._testTensorArrayWritePackMaybeLegacy()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEmptyTensorArrayPack(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@ -161,7 +161,7 @@ class TensorArrayTest(test.TestCase):
convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0]]), c0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(dtypes.float32)
self._testTensorArrayWriteConcat(dtypes.float64)
@ -184,7 +184,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros()
@ -200,7 +200,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros()
@ -251,7 +251,7 @@ class TensorArrayTest(test.TestCase):
self._testTensorArrayUnpackRead(dtypes.complex128)
self._testTensorArrayUnpackRead(dtypes.string)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
@ -297,7 +297,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(dtypes.float32)
self._testTensorArraySplitRead(dtypes.float64)
@ -397,7 +397,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
@ -416,7 +416,7 @@ class TensorArrayTest(test.TestCase):
"resizeable and size is: 3"):
self.evaluate(ta.write(3, 3.0).flow)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
@ -450,7 +450,7 @@ class TensorArrayTest(test.TestCase):
"it has already been written to."):
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@ -482,7 +482,7 @@ class TensorArrayTest(test.TestCase):
with self.assertRaisesOpError("shape"):
self.evaluate(w3.concat())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(use_gpu=True):
in_eager_mode = context.executing_eagerly()
@ -603,7 +603,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMultiTensorArray(self):
with self.test_session(use_gpu=True):
h1 = tensor_array_ops.TensorArray(
@ -706,7 +706,7 @@ class TensorArrayTest(test.TestCase):
def testTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayReadTwice(self):
with self.test_session(use_gpu=True):
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
@ -811,14 +811,14 @@ class TensorArrayTest(test.TestCase):
def testTensorArrayGradientDynamicUnpackRead(self):
self._testTensorArrayGradientDynamicUnpackRead()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCloseTensorArray(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
self.evaluate(ta.close())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSizeTensorArray(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@ -826,7 +826,7 @@ class TensorArrayTest(test.TestCase):
s = ta.size()
self.assertAllEqual(3, self.evaluate(s))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testWriteCloseTensorArray(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@ -924,7 +924,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=dtypes.float32)
@ -936,7 +936,7 @@ class TensorArrayTest(test.TestCase):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=dtypes.float32)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGradSerialTwoLoops(self):
with self.test_session(use_gpu=True):
def loop(x):
@ -1113,7 +1113,7 @@ class TensorArrayTest(test.TestCase):
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def _testUnpackShape(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@ -1147,7 +1147,7 @@ class TensorArrayTest(test.TestCase):
def testUnpackShape(self):
self._testUnpackShape()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSplitShape(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@ -1289,7 +1289,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteGatherAndGradients(self):
with self.test_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
@ -1433,7 +1433,7 @@ class TensorArrayTest(test.TestCase):
self.assertFalse(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTensorArrayIdentity(self):
with self.test_session(use_gpu=True):
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
@ -57,7 +57,7 @@ class VariableScopeTest(test.TestCase):
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
@ -87,7 +87,7 @@ class VariableScopeTest(test.TestCase):
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
@ -100,7 +100,7 @@ class VariableScopeTest(test.TestCase):
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
@ -117,7 +117,7 @@ class VariableScopeTest(test.TestCase):
variables_lib.global_variables_initializer().run()
self.assertAllEqual(compat.as_bytes(v.eval()), b"")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
@ -197,7 +197,7 @@ class VariableScopeTest(test.TestCase):
self.assertAllEqual([v1, v2], [v3, v4])
f()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerVariablesStoreAddsToCollections(self):
store = variable_scope.EagerVariableStore()
with store.as_default():
@ -214,7 +214,7 @@ class VariableScopeTest(test.TestCase):
self.assertEqual(
ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerVariablesOutsideStoreNotAddedToCollections(self):
if not context.executing_eagerly():
return
@ -223,7 +223,7 @@ class VariableScopeTest(test.TestCase):
self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
@ -239,7 +239,7 @@ class VariableScopeTest(test.TestCase):
with self.assertRaises(error):
variable_scope.get_variable("x4", initializer={})
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInitFromNonInitializer(self):
# Test various dtypes with zeros initializer as following:
types = [
@ -294,7 +294,7 @@ class VariableScopeTest(test.TestCase):
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
@ -339,7 +339,7 @@ class VariableScopeTest(test.TestCase):
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
@ -428,7 +428,7 @@ class VariableScopeTest(test.TestCase):
sess.run(v0.initializer)
sess.run(add)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
@ -449,7 +449,7 @@ class VariableScopeTest(test.TestCase):
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
@ -468,7 +468,7 @@ class VariableScopeTest(test.TestCase):
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
@ -961,7 +961,7 @@ class VariableScopeTest(test.TestCase):
self.assertEqual(
constant_op.constant([], name="c").name, "another/inner/c:0")
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
@ -39,7 +39,7 @@ from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertEqual(layer.variables, [])
@ -53,13 +53,13 @@ class BaseLayerTest(test.TestCase):
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInt64Layer(self):
layer = base_layers.Layer(name='my_layer', dtype='int64')
layer.add_variable('my_var', [2, 2])
self.assertEqual(layer.name, 'my_layer')
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAddWeight(self):
layer = base_layers.Layer(name='my_layer')
@ -116,7 +116,7 @@ class BaseLayerTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'activity_regularizer'):
core_layers.Dense(1, activity_regularizer=lambda *args, **kwargs: 0.)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCall(self):
class MyLayer(base_layers.Layer):
@ -132,7 +132,7 @@ class BaseLayerTest(test.TestCase):
# op is only supported in GRAPH mode
self.assertEqual(outputs.op.name, 'my_layer/Square')
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
@ -155,7 +155,7 @@ class BaseLayerTest(test.TestCase):
self.assertEqual(layer_copy._graph, layer._graph)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testScopeNaming(self):
class PrivateLayer(base_layers.Layer):
@ -203,7 +203,7 @@ class BaseLayerTest(test.TestCase):
my_layer_scoped1.apply(inputs)
self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
@ -230,7 +230,7 @@ class BaseLayerTest(test.TestCase):
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
@ -258,7 +258,7 @@ class BaseLayerTest(test.TestCase):
layer = CustomerLayer()
layer.apply(constant_op.constant([[[1], [2]]]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
@ -286,7 +286,7 @@ class BaseLayerTest(test.TestCase):
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
@ -306,7 +306,7 @@ class BaseLayerTest(test.TestCase):
layer = CustomerLayer()
layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
@ -328,7 +328,7 @@ class BaseLayerTest(test.TestCase):
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
@ -348,7 +348,7 @@ class BaseLayerTest(test.TestCase):
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
@ -369,7 +369,7 @@ class BaseLayerTest(test.TestCase):
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_count_params(self):
dense = core_layers.Dense(16)
dense.build((None, 4))
@ -379,7 +379,7 @@ class BaseLayerTest(test.TestCase):
with self.assertRaises(ValueError):
dense.count_params()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDictInputOutput(self):
class DictLayer(base_layers.Layer):
@ -41,7 +41,7 @@ from tensorflow.python.platform import test
class DenseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(dense.units, 2)
@ -91,14 +91,14 @@ class DenseTest(test.TestCase):
core_layers.Dense(5)(inputs)
core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCallTensorDot(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
@ -112,7 +112,7 @@ class DenseTest(test.TestCase):
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias, None)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
@ -125,7 +125,7 @@ class DenseTest(test.TestCase):
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testOutputShape(self):
dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 3), seed=1)
@ -165,7 +165,7 @@ class DenseTest(test.TestCase):
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testActivation(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
inputs = random_ops.random_uniform((5, 3), seed=1)
@ -325,7 +325,7 @@ class DenseTest(test.TestCase):
var_key = 'test2/dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testComputeOutputShape(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
ts = tensor_shape.TensorShape
@ -347,7 +347,7 @@ class DenseTest(test.TestCase):
dense.compute_output_shape(ts([None, 4, 3])).as_list())
# pylint: enable=protected-access
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
@ -369,7 +369,7 @@ def _get_variable_dict_from_varstore():
class DropoutTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDropoutProperties(self):
dp = core_layers.Dropout(0.5, name='dropout')
self.assertEqual(dp.rate, 0.5)
@ -377,7 +377,7 @@ class DropoutTest(test.TestCase):
dp.apply(array_ops.ones(()))
self.assertEqual(dp.name, 'dropout')
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBooleanLearningPhase(self):
dp = core_layers.Dropout(0.5)
inputs = array_ops.ones((5, 3))
@ -402,7 +402,7 @@ class DropoutTest(test.TestCase):
np_output = sess.run(dropped, feed_dict={training: False})
self.assertAllClose(np.ones((5, 5)), np_output)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDynamicNoiseShape(self):
inputs = array_ops.ones((5, 3, 2))
noise_shape = [None, 1, None]
@ -939,7 +939,7 @@ class CaseTest(test_util.TensorFlowTestCase):
class WhileLoopTestCase(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testWhileLoopWithSingleVariable(self):
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, 10)
@ -948,7 +948,7 @@ class WhileLoopTestCase(test_util.TensorFlowTestCase):
self.assertEqual(self.evaluate(r), 10)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEagerWhileLoopWithSingleVariable_bodyReturnsTuple(self):
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, 10)
@ -37,14 +37,14 @@ log = np.log
class ReduceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
y_tf = self.evaluate(math_ops.reduce_sum(x))
self.assertEqual(y_tf, 21)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testReduceExplicitAxes(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
@ -57,7 +57,7 @@ class ReduceTest(test_util.TensorFlowTestCase):
for axis in (None, (0, 1), (-1, -2), (-2, -1, 0, 1)):
self.assertEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)), 21)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testReduceInvalidAxis(self):
if context.executing_eagerly():
# The shape check is run at graph construction time. In eager mode,
@ -150,7 +150,7 @@ class LogSumExpTest(test_util.TensorFlowTestCase):
class RoundTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testRounding(self):
x = np.arange(-5.0, 5.0, .25)
for dtype in [np.float32, np.double, np.int32]:
@ -194,7 +194,7 @@ class ModTest(test_util.TensorFlowTestCase):
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSquaredDifference(self):
for dtype in [np.int32, np.float16]:
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
@ -207,7 +207,7 @@ class SquaredDifferenceTest(test_util.TensorFlowTestCase):
class ApproximateEqualTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testApproximateEqual(self):
for dtype in [np.float32, np.double]:
x = dtype(1)
@ -238,7 +238,7 @@ class ApproximateEqualTest(test_util.TensorFlowTestCase):
class ScalarMulTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAcceptsRefs(self):
if context.executing_eagerly():
var = resource_variable_ops.ResourceVariable(10, name="var")
@ -250,14 +250,14 @@ class ScalarMulTest(test_util.TensorFlowTestCase):
self.evaluate(init)
self.assertEqual(30, self.evaluate(result))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAcceptsConstant(self):
const = constant_op.constant(10)
result = math_ops.scalar_mul(3, const)
with test_util.device(use_gpu=True):
self.assertEqual(30, self.evaluate(result))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAcceptsTensor(self):
tensor = array_ops.ones([10, 10])
result = math_ops.scalar_mul(3, tensor)
@ -266,7 +266,7 @@ class ScalarMulTest(test_util.TensorFlowTestCase):
with test_util.device(use_gpu=True):
self.assertAllEqual(self.evaluate(expected), self.evaluate(result))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAcceptsIndexedSlices(self):
values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
indices = constant_op.constant([0, 2, 5])
@ -76,7 +76,7 @@ class SoftmaxTest(test_lib.TestCase):
z = u.sum(1)[:, np.newaxis]
return u / z
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
@ -123,7 +123,7 @@ class LogPoissonLossTest(test_lib.TestCase):
lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
return lpl
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLogPoissonLoss(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
@ -164,7 +164,7 @@ class LogSoftmaxTest(test_lib.TestCase):
u = x - m
return u - np.log(np.sum(np.exp(u), 1, keepdims=True))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLogSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
@ -201,7 +201,7 @@ class LogSoftmaxTest(test_lib.TestCase):
class L2LossTest(test_lib.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testL2Loss(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant(
@ -235,7 +235,7 @@ class L2NormalizeTest(test_lib.TestCase):
norm = np.apply_along_axis(np.linalg.norm, dim, x)
return x / np.expand_dims(norm, dim)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testL2Normalize(self):
x_shape = [20, 7, 3]
np.random.seed(1)
@ -246,7 +246,7 @@ class L2NormalizeTest(test_lib.TestCase):
y_tf = nn_impl.l2_normalize(x_tf, dim)
self.assertAllClose(y_np, self.evaluate(y_tf))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testL2NormalizeDimArray(self):
x_shape = [20, 7, 3]
np.random.seed(1)
@ -66,7 +66,7 @@ class HasList(training.Model):
class ListTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTracking(self):
model = HasList()
output = model(array_ops.ones([32, 2]))
@ -106,7 +106,7 @@ class ListTests(test.TestCase):
model(model_input)
self.assertEqual(0, len(model.updates))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLossesForwarded(self):
model = HasList()
model_input = array_ops.ones([32, 2])
@ -190,7 +190,7 @@ class HasMapping(training.Model):
class MappingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTracking(self):
model = HasMapping()
output = model(array_ops.ones([32, 2]))
@ -355,7 +355,7 @@ class CheckpointingTests(test.TestCase):
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
checkpoint = checkpointable_utils.Checkpoint(v=v)
@ -375,7 +375,7 @@ class CheckpointingTests(test.TestCase):
self.assertEqual(44., self.evaluate(v.non_dep_variable))
self.assertEqual(44., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturnedWithGlobalName(self):
# The same object can also be saved using the name-based saver.
v = _OwnsMirroredVariables()
@ -391,7 +391,7 @@ class CheckpointingTests(test.TestCase):
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
@ -512,7 +512,7 @@ class CheckpointingTests(test.TestCase):
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
@ -546,7 +546,7 @@ class CheckpointingTests(test.TestCase):
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
@ -619,7 +619,7 @@ class CheckpointingTests(test.TestCase):
root, saveables_cache=None)
self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLocalNameValidation(self):
root = checkpointable.Checkpointable()
leaf = checkpointable.Checkpointable()
@ -660,7 +660,7 @@ class CheckpointingTests(test.TestCase):
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLateDependencyTracking(self):
class Dependency(checkpointable.Checkpointable):
@ -692,7 +692,7 @@ class CheckpointingTests(test.TestCase):
status.run_restore_ops()
self.assertEqual(123., self.evaluate(load_into.dep.var))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDepAfterVar(self):
class Dependency(checkpointable.Checkpointable):
@ -724,7 +724,7 @@ class CheckpointingTests(test.TestCase):
status.run_restore_ops()
self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
@ -789,7 +789,7 @@ class CheckpointingTests(test.TestCase):
self.evaluate(train_op)
slot_status.assert_consumed()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testOverlappingRestores(self):
checkpoint_directory = self.get_temp_dir()
save_root = checkpointable.Checkpointable()
@ -840,7 +840,7 @@ class CheckpointingTests(test.TestCase):
second_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAmbiguousLoad(self):
# Not OK to split one checkpoint object into two
checkpoint_directory = self.get_temp_dir()
@ -866,7 +866,7 @@ class CheckpointingTests(test.TestCase):
with self.assertRaises(AssertionError):
status.assert_consumed()
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testObjectsCombined(self):
# Currently fine to load two checkpoint objects into one Python object
checkpoint_directory = self.get_temp_dir()
@ -893,7 +893,7 @@ class CheckpointingTests(test.TestCase):
self.assertEqual(32., self.evaluate(v1))
self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDependencyLoop(self):
# Note: this test creates garbage during eager execution because it
# purposefully creates a reference cycle.
@ -939,7 +939,7 @@ class CheckpointingTests(test.TestCase):
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testRestoreOnAssign(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
@ -989,7 +989,7 @@ class CheckpointingTests(test.TestCase):
saver.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCheckpointCleanup(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
@ -1009,7 +1009,7 @@ class CheckpointingTests(test.TestCase):
expected_filenames,
os.listdir(checkpoint_directory))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testCheckpointCleanupChangingVarList(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
@ -1132,7 +1132,7 @@ class CheckpointingTests(test.TestCase):
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_sequential(self):
model = sequential.Sequential()
checkpoint = checkpointable_utils.Checkpoint(model=model)
@ -1164,7 +1164,7 @@ class CheckpointingTests(test.TestCase):
self.assertAllEqual([1., 2., 3., 4., 5.],
self.evaluate(deferred_second_dense.bias))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_initialize_if_not_restoring(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
@ -1257,7 +1257,7 @@ class _ManualScope(checkpointable.Checkpointable):
class TemplateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore(self):
def _templated():
@ -1308,7 +1308,7 @@ class TemplateTests(test.TestCase):
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore_nested(self):
def _inner_template():
@ -1409,7 +1409,7 @@ class CheckpointCompatibilityTests(test.TestCase):
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
@ -1471,7 +1471,7 @@ class CheckpointCompatibilityTests(test.TestCase):
class PythonMetadataTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSaveLoad(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
@ -31,7 +31,7 @@ from tensorflow.python.training import learning_rate_decay
class LRDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testContinuous(self):
self.evaluate(variables.global_variables_initializer())
step = 5
@ -39,7 +39,7 @@ class LRDecayTest(test_util.TensorFlowTestCase):
expected = .05 * 0.96**(5.0 / 10.0)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
if context.executing_eagerly():
step = resource_variable_ops.ResourceVariable(0)
@ -80,7 +80,7 @@ class LRDecayTest(test_util.TensorFlowTestCase):
expected = .1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPiecewiseConstant(self):
x = resource_variable_ops.ResourceVariable(-999)
decayed_lr = learning_rate_decay.piecewise_constant(
@ -100,7 +100,7 @@ class LRDecayTest(test_util.TensorFlowTestCase):
self.evaluate(x.assign(999))
self.assertAllClose(self.evaluate(decayed_lr), 0.001, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testPiecewiseConstantEdgeCases(self):
x_int = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int32)
@ -147,7 +147,7 @@ class LRDecayTest(test_util.TensorFlowTestCase):
class LinearDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
@ -156,7 +156,7 @@ class LinearDecayTest(test_util.TensorFlowTestCase):
expected = lr * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
@ -165,7 +165,7 @@ class LinearDecayTest(test_util.TensorFlowTestCase):
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
@ -174,7 +174,7 @@ class LinearDecayTest(test_util.TensorFlowTestCase):
expected = (lr + end_lr) * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
@ -183,7 +183,7 @@ class LinearDecayTest(test_util.TensorFlowTestCase):
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
@ -196,7 +196,7 @@ class LinearDecayTest(test_util.TensorFlowTestCase):
class SqrtDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
@ -207,7 +207,7 @@ class SqrtDecayTest(test_util.TensorFlowTestCase):
expected = lr * 0.5**power
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
@ -218,7 +218,7 @@ class SqrtDecayTest(test_util.TensorFlowTestCase):
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
@ -229,7 +229,7 @@ class SqrtDecayTest(test_util.TensorFlowTestCase):
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
@ -240,7 +240,7 @@ class SqrtDecayTest(test_util.TensorFlowTestCase):
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
@ -254,7 +254,7 @@ class SqrtDecayTest(test_util.TensorFlowTestCase):
class PolynomialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBeginWithCycle(self):
lr = 0.001
decay_steps = 10
@ -267,7 +267,7 @@ class PolynomialDecayTest(test_util.TensorFlowTestCase):
class ExponentialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
@ -282,7 +282,7 @@ class ExponentialDecayTest(test_util.TensorFlowTestCase):
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
@ -300,7 +300,7 @@ class ExponentialDecayTest(test_util.TensorFlowTestCase):
class InverseDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
@ -315,7 +315,7 @@ class InverseDecayTest(test_util.TensorFlowTestCase):
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
@ -339,7 +339,7 @@ class CosineDecayTest(test_util.TensorFlowTestCase):
decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
@ -349,7 +349,7 @@ class CosineDecayTest(test_util.TensorFlowTestCase):
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
@ -375,7 +375,7 @@ class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
@ -385,7 +385,7 @@ class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
@ -397,7 +397,7 @@ class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
step, num_training_steps, alpha=alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMMul(self):
num_training_steps = 1000
initial_lr = 1.0
@ -409,7 +409,7 @@ class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
step, num_training_steps, m_mul=m_mul)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testTMul(self):
num_training_steps = 1000
initial_lr = 1.0
@ -436,7 +436,7 @@ class LinearCosineDecayTest(test_util.TensorFlowTestCase):
cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
return (alpha + linear_decayed) * cosine_decayed + beta
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
@ -446,7 +446,7 @@ class LinearCosineDecayTest(test_util.TensorFlowTestCase):
expected = self.np_linear_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNonDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
@ -465,7 +465,7 @@ class LinearCosineDecayTest(test_util.TensorFlowTestCase):
class NoisyLinearCosineDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
@ -476,7 +476,7 @@ class NoisyLinearCosineDecayTest(test_util.TensorFlowTestCase):
# Cannot be deterministically tested
self.evaluate(decayed_lr)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNonDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
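
A quick worked example of the reference math these decay tests check. The np_cosine_decay helper visible in the hunks above computes decay = 0.5 * (1 + cos(pi * step / decay_steps)), floored at alpha; the other expectations (e.g. (lr - end_lr) * 0.5**power + end_lr at the halfway point of the sqrt schedule) follow the same plug-in-the-completed-fraction pattern. A minimal standalone sketch of the cosine reference (the function name here is illustrative, not the test file's own helper):

import math

def cosine_decay_reference(step, decay_steps, alpha=0.0):
  # Mirrors the np_cosine_decay helper shown in the diff context above.
  completed_fraction = float(step) / float(decay_steps)
  decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
  return (1.0 - alpha) * decay + alpha

# Halfway through training, cos(pi/2) == 0, so the multiplier is exactly 0.5.
assert abs(cosine_decay_reference(500, 1000) - 0.5) < 1e-12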

View File

@ -34,7 +34,7 @@ from tensorflow.python.training import gradient_descent
class OptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@ -112,7 +112,7 @@ class OptimizerTest(test.TestCase):
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
var1.eval())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNoVariables(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# pylint: disable=cell-var-from-loop
@ -127,7 +127,7 @@ class OptimizerTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'No.*variables'):
sgd_op.minimize(loss)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@ -145,7 +145,7 @@ class OptimizerTest(test.TestCase):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@ -161,7 +161,7 @@ class OptimizerTest(test.TestCase):
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@ -175,7 +175,7 @@ class OptimizerTest(test.TestCase):
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testGradientsAsVariables(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
@ -215,7 +215,7 @@ class OptimizerTest(test.TestCase):
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testComputeGradientsWithTensors(self):
x = ops.convert_to_tensor(1.0)
def f():
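
Every hunk in this file makes the same mechanical change: the decorator is applied bare instead of being called with empty parentheses. As a hedged sketch of what an affected test reads like after the change (the test class, values, and assertions here are hypothetical, not taken from the file):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class ExampleTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes  # no trailing () after this commit
  def testAdd(self):
    total = constant_op.constant(1.0) + constant_op.constant(2.0)
    # self.evaluate() resolves tensors in both graph and eager mode, so the
    # test body stays mode-agnostic.
    self.assertAllClose(3.0, self.evaluate(total))


if __name__ == "__main__":
  test.main()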

View File

@ -171,7 +171,7 @@ class SaverTest(test.TestCase):
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
@ -252,7 +252,7 @@ class SaverTest(test.TestCase):
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.test_session(graph=ops_lib.Graph()) as sess:
@ -671,7 +671,7 @@ class SaverTest(test.TestCase):
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testSaveWithGlobalStep(self, pad_step_number=False):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
@ -1395,7 +1395,7 @@ class KeepCheckpointEveryNHoursTest(test.TestCase):
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
@ -1515,7 +1515,7 @@ class SaveRestoreWithVariableNameMap(test.TestCase):
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
@ -3021,7 +3021,7 @@ class MyModel(training.Model):
class CheckpointableCompatibilityTests(test.TestCase):
# TODO(allenl): Track down python3 reference cycles in these tests.
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testNotSaveableButIsCheckpointable(self):
v = _OwnsAVariableSimple()
saver = saver_module.Saver(var_list=[v])
@ -3034,7 +3034,7 @@ class CheckpointableCompatibilityTests(test.TestCase):
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
saver = saver_module.Saver(var_list=[v])
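
The saver hunks above all exercise the same save-then-restore round trip. A minimal sketch of that pattern using the internal APIs this file already imports (variable names and values are illustrative; this is a simplified example, not one of the file's tests):

import os

from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_module


class SaverRoundTripExample(test.TestCase):

  def testRoundTrip(self):
    save_path = os.path.join(self.get_temp_dir(), "round_trip")
    # Save a variable in one graph...
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v = variables.Variable(42.0, name="v")
      saver = saver_module.Saver(var_list=[v])
      sess.run(variables.global_variables_initializer())
      saver.save(sess, save_path)
    # ...then restore it into a fresh graph and check the value survived.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v = variables.Variable(0.0, name="v")
      saver = saver_module.Saver(var_list=[v])
      saver.restore(sess, save_path)
      self.assertEqual(42.0, self.evaluate(v))


if __name__ == "__main__":
  test.main()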

View File

@ -47,7 +47,7 @@ class SerializationTests(test.TestCase):
self.assertIs(round_trip[0], None)
self.assertEqual(round_trip[1], 2)
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_serialize_sequential(self):
model = sequential.Sequential()
model.add(core.Dense(4))
@ -61,7 +61,7 @@ class SerializationTests(test.TestCase):
self.assertAllEqual([1, 1],
input_round_trip[0]["config"]["batch_input_shape"])
@test_util.run_in_graph_and_eager_modes()
@test_util.run_in_graph_and_eager_modes
def test_serialize_model(self):
x = input_layer.Input(shape=[3])
y = core.Dense(10)(x)
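
For readers wondering why both spellings of the decorator are valid in the first place: a decorator can be written to accept either the test function directly (bare form) or optional configuration arguments (called form). A simplified sketch of that general shape, which is what makes this whole mass replacement possible (this is not the actual test_util implementation, just the standard pattern):

import functools

def run_in_graph_and_eager_modes(func=None, **decorator_kwargs):
  """Sketch of a decorator usable both bare and called with ()."""
  def decorator(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
      # The real decorator would run `f` once under a graph context and once
      # under an eager context; this sketch just forwards the call.
      return f(*args, **kwargs)
    return wrapper
  if func is not None:
    # Used bare (@run_in_graph_and_eager_modes): `func` is the test itself.
    return decorator(func)
  # Used with parentheses (@run_in_graph_and_eager_modes(...)).
  return decorator


@run_in_graph_and_eager_modes          # bare form (post-commit style)
def test_bare(self):
  return "bare"

@run_in_graph_and_eager_modes()        # called form (pre-commit style)
def test_called(self):
  return "called"

assert test_bare(None) == "bare" and test_called(None) == "called"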