Update private API usage of test_util.deprecated_graph_mode_only.

PiperOrigin-RevId: 321583859
Change-Id: I05d02f8f41792099fa9b225a09b4c109c981bfa7
Authored by Scott Zhu on 2020-07-16 09:38:12 -07:00; committed by TensorFlower Gardener
parent 960358aaa2
commit 11b8948857
3 changed files with 16 additions and 17 deletions
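All three files apply the same migration: the private decorators test_util.deprecated_graph_mode_only and test_util.run_all_in_deprecated_graph_mode_only are dropped, and each affected test instead builds its ops under an explicit ops.Graph().as_default() context. A minimal sketch of the resulting test shape, assuming an illustrative test class and body that are not part of this diff:

from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class ExampleGraphModeTest(test.TestCase):

  def test_runs_in_graph_mode(self):
    # Previously this method would have carried the private
    # @test_util.deprecated_graph_mode_only decorator; now it opts into
    # graph mode explicitly. Ops created inside the block are added to the
    # Graph and evaluated through the cached session.
    with ops.Graph().as_default(), self.cached_session():
      var = variables.Variable([2.0])
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose([2.0], self.evaluate(var))


if __name__ == '__main__':
  test.main()

Class-level usage (run_all_in_deprecated_graph_mode_only) is handled the same way, by moving the Graph context into the individual test methods, as in the multi_gpu_utils change below.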
tensorflow/python/keras


@@ -23,7 +23,6 @@ import numpy as np
 from tensorflow.python import keras
 from tensorflow.python.eager import context
-from tensorflow.python.framework import test_util
 from tensorflow.python.keras import combinations
 from tensorflow.python.keras.layers import core
 from tensorflow.python.keras.layers import dense_attention
@@ -361,7 +360,6 @@ class AttentionTest(test.TestCase, parameterized.TestCase):
     attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
     self.assertAllClose(1., attention_layer.scale.value())
 
-  @test_util.deprecated_graph_mode_only
   def test_scale_init_graph(self):
     """Tests that scale initializes to 1 when use_scale=True."""
     with self.cached_session() as sess:


@@ -106,13 +106,14 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     # and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
     self.assertAllClose([3.], self.evaluate(var))
 
-  @test_util.deprecated_graph_mode_only
   def testFixedLossScaleAppliedToLossWithGetGradients(self):
-    var = variables.Variable([2.0])
-    opt = gradient_descent.SGD(1.0)
-    loss_scale = 10.
-    opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
-    grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(loss_scale)
-    loss = grad_check_fn(var)
-    run_op = opt.get_gradients(loss, [var])
-    self.evaluate(variables.global_variables_initializer())
+    with ops.Graph().as_default():
+      var = variables.Variable([2.0])
+      opt = gradient_descent.SGD(1.0)
+      loss_scale = 10.
+      opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
+      grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
+          loss_scale)
+      loss = grad_check_fn(var)
+      run_op = opt.get_gradients(loss, [var])
+      self.evaluate(variables.global_variables_initializer())


@@ -23,7 +23,7 @@ from tensorflow.python import data
 from tensorflow.python import keras
 from tensorflow.python.eager import context
 from tensorflow.python.framework import config
-from tensorflow.python.framework import test_util
+from tensorflow.python.framework import ops
 from tensorflow.python.keras.utils import multi_gpu_utils
 from tensorflow.python.keras.utils import np_utils
 from tensorflow.python.platform import test
@@ -38,7 +38,7 @@ def check_if_compatible_devices(gpus=2):
     return False
   return True
 
-@test_util.run_all_in_deprecated_graph_mode_only
 class TestMultiGPUModel(test.TestCase):
 
   def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
@@ -161,7 +161,7 @@ class TestMultiGPUModel(test.TestCase):
     if not check_if_compatible_devices(gpus=gpus):
       self.skipTest('multi gpu only')
 
-    with self.cached_session():
+    with ops.Graph().as_default(), self.cached_session():
       input_shape = (num_samples,) + shape
       x_train = np.random.randint(0, 255, input_shape)
       y_train = np.random.randint(0, num_classes, (input_shape[0],))