Remove run_v1_only annotation from proximal_adagrad_test, saved_model_experimental_test, legacy base layer test
PiperOrigin-RevId: 320522977
Change-Id: I671006714ffb961be0ddbd33fe8f58f28571cfb0
commit d13f4ec326 (parent 2015320832)

Changed paths: tensorflow/python/keras, tensorflow/python/training
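Every hunk below follows the same pattern: a test that builds V1-style graph ops used to be skipped outside V1 via @test_util.run_v1_only, and now builds those ops inside an explicit ops.Graph() scope (or a graph-backed cached_session()) so it can also run when V2 eager behavior is the global default. A minimal sketch of the pattern, assuming TF's internal test conventions; the class, method, and tensor names are illustrative, not from the diff:

# Sketch only: ExampleTest/testBuildsGraphOps are illustrative names.
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class ExampleTest(test.TestCase):

  # Before: skipped unless V1 behavior was active.
  #   @test_util.run_v1_only('Feature X is supported only in V1.')
  #   def testBuildsGraphOps(self):
  #     x = array_ops.placeholder('int32')  # fails under eager execution
  # After: graph-only ops live inside an explicit Graph scope.
  def testBuildsGraphOps(self):
    with ops.Graph().as_default():
      x = array_ops.placeholder('int32')  # legal inside a graph context
      self.assertEqual('int32', x.dtype.name)


if __name__ == '__main__':
  test.main()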
legacy base layer test:

@@ -28,7 +28,6 @@ from tensorflow.python.eager import def_function
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
 from tensorflow.python.keras import combinations
 from tensorflow.python.keras.engine import base_layer as keras_base_layer
 from tensorflow.python.keras.engine import input_spec
@@ -162,21 +161,22 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
           synchronization=variable_scope.VariableSynchronization.ON_READ,
           trainable=True)
 
-  @test_util.run_v1_only('Legacy TF Base layer is supported only in V1.')
   def testReusePartitionedVariablesAndRegularizers(self):
-    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
-    partitioner = partitioned_variables.fixed_size_partitioner(3)
-    for reuse in [False, True]:
-      with variable_scope.variable_scope(variable_scope.get_variable_scope(),
-                                         partitioner=partitioner,
-                                         reuse=reuse):
-        layer = base_layers.Layer(name='my_layer')
-        _ = layer.add_variable(
-            'reg_part_var', [4, 4],
-            initializer=init_ops.zeros_initializer(),
-            regularizer=regularizer)
-    self.assertEqual(
-        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)
+    with ops.Graph().as_default():
+      regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
+      partitioner = partitioned_variables.fixed_size_partitioner(3)
+      for reuse in [False, True]:
+        with variable_scope.variable_scope(
+            variable_scope.get_variable_scope(),
+            partitioner=partitioner,
+            reuse=reuse):
+          layer = base_layers.Layer(name='my_layer')
+          _ = layer.add_variable(
+              'reg_part_var', [4, 4],
+              initializer=init_ops.zeros_initializer(),
+              regularizer=regularizer)
+      self.assertEqual(
+          len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testCall(self):
@@ -464,13 +464,13 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
     self.assertTrue(isinstance(result, dict))
     self.assertEqual(set(['label', 'logits']), set(result.keys()))
 
-  @test_util.run_v1_only('Legacy TF Base layer is supported only in V1.')
   def testActivityRegularizer(self):
-    regularizer = math_ops.reduce_sum
-    layer = base_layers.Layer(activity_regularizer=regularizer)
-    x = array_ops.placeholder('int32')
-    layer.apply(x)
-    self.assertEqual(len(layer.get_losses_for(x)), 1)
+    with ops.Graph().as_default():
+      regularizer = math_ops.reduce_sum
+      layer = base_layers.Layer(activity_regularizer=regularizer)
+      x = array_ops.placeholder('int32')
+      layer.apply(x)
+      self.assertEqual(len(layer.get_losses_for(x)), 1)
 
   def testNameScopeIsConsistentWithVariableScope(self):
     # Github issue 13429.
@@ -553,7 +553,6 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(len(layer.trainable_variables), 1)
     self.assertEqual(layer.variables[0].graph, outer_graph)
 
-  @test_util.run_v1_only('Legacy TF Base layer is supported only in V1.')
   def testGetUpdateFor(self):
 
     class MyLayer(base_layers.Layer):
@@ -575,30 +574,30 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
                         inputs=True)
         return inputs + 1
 
-    layer = MyLayer()
-    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
-    intermediate_inputs = inputs + 1
-    outputs = layer.apply(intermediate_inputs)
+    with ops.Graph().as_default():
+      layer = MyLayer()
+      inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
+      intermediate_inputs = inputs + 1
+      outputs = layer.apply(intermediate_inputs)
 
-    self.assertEqual(len(layer.updates), 2)
-    self.assertEqual(len(layer.get_updates_for(None)), 1)
-    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
-    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
-    self.assertEqual(len(layer.get_updates_for([outputs])), 0)
+      self.assertEqual(len(layer.updates), 2)
+      self.assertEqual(len(layer.get_updates_for(None)), 1)
+      self.assertEqual(len(layer.get_updates_for([inputs])), 1)
+      self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
+      self.assertEqual(len(layer.get_updates_for([outputs])), 0)
 
-    # Call same layer on new input, creating one more conditional update
-    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
-    intermediate_inputs = inputs + 1
-    outputs = layer.apply(intermediate_inputs)
+      # Call same layer on new input, creating one more conditional update
+      inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
+      intermediate_inputs = inputs + 1
+      outputs = layer.apply(intermediate_inputs)
 
-    self.assertEqual(len(layer.updates), 3)
-    self.assertEqual(len(layer.get_updates_for(None)), 1)
-    # Check that we are successfully filtering out irrelevant updates
-    self.assertEqual(len(layer.get_updates_for([inputs])), 1)
-    self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
-    self.assertEqual(len(layer.get_updates_for([outputs])), 0)
+      self.assertEqual(len(layer.updates), 3)
+      self.assertEqual(len(layer.get_updates_for(None)), 1)
+      # Check that we are successfully filtering out irrelevant updates
+      self.assertEqual(len(layer.get_updates_for([inputs])), 1)
+      self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
+      self.assertEqual(len(layer.get_updates_for([outputs])), 0)
 
-  @test_util.run_v1_only('Legacy TF Base layer is supported only in V1.')
   def testGetLossesFor(self):
 
     class MyLayer(base_layers.Layer):
@@ -619,28 +618,29 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
         self.add_loss(inputs, inputs=True)
         return inputs + 1
 
-    layer = MyLayer()
-    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
-    intermediate_inputs = inputs + 1
-    outputs = layer.apply(intermediate_inputs)
+    with ops.Graph().as_default():
+      layer = MyLayer()
+      inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
+      intermediate_inputs = inputs + 1
+      outputs = layer.apply(intermediate_inputs)
 
-    self.assertEqual(len(layer.losses), 2)
-    self.assertEqual(len(layer.get_losses_for(None)), 1)
-    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
-    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
-    self.assertEqual(len(layer.get_losses_for([outputs])), 0)
+      self.assertEqual(len(layer.losses), 2)
+      self.assertEqual(len(layer.get_losses_for(None)), 1)
+      self.assertEqual(len(layer.get_losses_for([inputs])), 1)
+      self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
+      self.assertEqual(len(layer.get_losses_for([outputs])), 0)
 
-    # Call same layer on new input, creating one more conditional loss
-    inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
-    intermediate_inputs = inputs + 1
-    outputs = layer.apply(intermediate_inputs)
+      # Call same layer on new input, creating one more conditional loss
+      inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
+      intermediate_inputs = inputs + 1
+      outputs = layer.apply(intermediate_inputs)
 
-    self.assertEqual(len(layer.losses), 3)
-    self.assertEqual(len(layer.get_losses_for(None)), 1)
-    # Check that we are successfully filtering out irrelevant losses
-    self.assertEqual(len(layer.get_losses_for([inputs])), 1)
-    self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
-    self.assertEqual(len(layer.get_losses_for([outputs])), 0)
+      self.assertEqual(len(layer.losses), 3)
+      self.assertEqual(len(layer.get_losses_for(None)), 1)
+      # Check that we are successfully filtering out irrelevant losses
+      self.assertEqual(len(layer.get_losses_for([inputs])), 1)
+      self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
+      self.assertEqual(len(layer.get_losses_for([outputs])), 0)
 
 
 class IdentityLayer(base_layers.Layer):
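Note on the testGetUpdateFor/testGetLossesFor assertions above: get_updates_for(None) and get_losses_for(None) return only unconditional updates/losses, while get_updates_for([inputs]) returns those conditioned on the given tensors. The test's full MyLayer body is not visible in these hunks, so the sketch below is an assumed reconstruction of such a layer:

# Assumed reconstruction; the real MyLayer body is only partially shown above.
# CountingLayer and the variable name 'a' are illustrative.
from tensorflow.python.framework import dtypes
from tensorflow.python.layers import base as base_layers  # legacy TF layers
from tensorflow.python.ops import state_ops


class CountingLayer(base_layers.Layer):

  def build(self, input_shape):
    self.a = self.add_variable('a', (), dtypes.float32, trainable=False)
    # Unconditional update: surfaced by layer.get_updates_for(None).
    self.add_update(state_ops.assign_add(self.a, 1.))

  def call(self, inputs):
    # Input-conditional update: surfaced by layer.get_updates_for([inputs]).
    self.add_update(state_ops.assign_add(self.a, inputs), inputs=True)
    return inputs + 1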
saved_model_experimental_test:

@@ -31,7 +31,6 @@ from tensorflow.python.eager import context
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_spec
-from tensorflow.python.framework import test_util
 from tensorflow.python.keras.engine import training as model_lib
 from tensorflow.python.keras.optimizer_v2 import adadelta
 from tensorflow.python.keras.optimizer_v2 import rmsprop
@@ -45,8 +44,6 @@ from tensorflow.python.saved_model import model_utils
 from tensorflow.python.training import training as training_module
 
 
-@test_util.run_v1_only(
-    'keras.experimental.load_from_saved_model is supported only in V1.')
 class TestModelSavingandLoading(parameterized.TestCase, test.TestCase):
 
   def _save_model_dir(self, dirname='saved_model'):
@@ -280,8 +277,6 @@ def load_model(sess, path, mode):
   return inputs, outputs, meta_graph_def
 
 
-@test_util.run_v1_only(
-    'keras.experimental.export_saved_model is supported only in V1.')
 class TestModelSavedModelExport(test.TestCase, parameterized.TestCase):
 
   def _save_model_dir(self, dirname='saved_model'):
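The two test classes above exercise keras.experimental.export_saved_model and keras.experimental.load_from_saved_model. A hedged round-trip sketch of those endpoints (both were deprecated experimental APIs and were later removed; the model and path are illustrative):

# Usage sketch only; assumes a TF release that still ships these
# deprecated experimental endpoints. Model and path are illustrative.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))])
path = '/tmp/keras_saved_model'  # illustrative path

tf.keras.experimental.export_saved_model(model, path)
restored = tf.keras.experimental.load_from_saved_model(path)
print(restored.predict(np.zeros((1, 3), dtype=np.float32)))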
proximal_adagrad_test:

@@ -23,7 +23,6 @@ import numpy as np
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
 from tensorflow.python.ops import embedding_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import resource_variable_ops
@@ -36,7 +35,8 @@ from tensorflow.python.training import proximal_adagrad
 class ProximalAdagradOptimizerTest(test.TestCase):
 
   def doTestProximalAdagradwithoutRegularization(self, use_resource=False):
-    with self.cached_session() as sess:
+    # ProximalAdagradOptimizer is supported only in V1.
+    with ops.Graph().as_default(), self.cached_session():
       var0 = variables.Variable([0.0, 0.0])
       var1 = variables.Variable([0.0, 0.0])
       grads0 = constant_op.constant([0.1, 0.2])
@@ -65,17 +65,15 @@ class ProximalAdagradOptimizerTest(test.TestCase):
       self.assertStartsWith(opt_vars[1].name, var1._shared_name)
       self.assertEqual(2, len(opt_vars))
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testProximalAdagradwithoutRegularization(self):
     self.doTestProximalAdagradwithoutRegularization(use_resource=False)
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testResourceProximalAdagradwithoutRegularization(self):
     self.doTestProximalAdagradwithoutRegularization(use_resource=True)
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testProximalAdagradwithoutRegularization2(self):
-    with self.cached_session() as sess:
+    # ProximalAdagradOptimizer is supported only in V1.
+    with ops.Graph().as_default(), self.cached_session():
       var0 = variables.Variable([1.0, 2.0])
       var1 = variables.Variable([4.0, 3.0])
       grads0 = constant_op.constant([0.1, 0.2])
@@ -100,10 +98,10 @@ class ProximalAdagradOptimizerTest(test.TestCase):
       self.assertAllClose(np.array([-1.60261, -2.296985]), v0_val)
       self.assertAllClose(np.array([3.715679, 2.433051]), v1_val)
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testMinimizeSparseResourceVariable(self):
     for dtype in [dtypes.float32, dtypes.float64]:
-      with self.cached_session():
+      # ProximalAdagradOptimizer is supported only in V1.
+      with ops.Graph().as_default(), self.cached_session():
         var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
         x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
         pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
@@ -119,9 +117,9 @@ class ProximalAdagradOptimizerTest(test.TestCase):
           self.evaluate(var0),
           atol=0.01)
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testProximalAdagradWithL1(self):
-    with self.cached_session() as sess:
+    # ProximalAdagradOptimizer is supported only in V1.
+    with ops.Graph().as_default(), self.cached_session():
       var0 = variables.Variable([1.0, 2.0])
       var1 = variables.Variable([4.0, 3.0])
       grads0 = constant_op.constant([0.1, 0.2])
@@ -146,9 +144,9 @@ class ProximalAdagradOptimizerTest(test.TestCase):
       self.assertAllClose(np.array([-6.663634, -9.190331]), v0_val)
       self.assertAllClose(np.array([2.959304, 1.029232]), v1_val)
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testProximalAdagradWithL1_L2(self):
-    with self.cached_session() as sess:
+    # ProximalAdagradOptimizer is supported only in V1.
+    with ops.Graph().as_default(), self.cached_session():
       var0 = variables.Variable([1.0, 2.0])
       var1 = variables.Variable([4.0, 3.0])
       grads0 = constant_op.constant([0.1, 0.2])
@@ -213,9 +211,9 @@ class ProximalAdagradOptimizerTest(test.TestCase):
       v0_val, v1_val = self.evaluate([var0, var1])
       return v0_val, v1_val
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testEquivAdagradwithoutRegularization(self):
-    with self.cached_session():
+    # ProximalAdagradOptimizer is supported only in V1.
+    with ops.Graph().as_default(), self.cached_session():
       val0, val1 = self.applyOptimizer(
           proximal_adagrad.ProximalAdagradOptimizer(
               3.0,
@@ -223,7 +221,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
               l1_regularization_strength=0.0,
               l2_regularization_strength=0.0))
 
-    with self.cached_session():
+    with ops.Graph().as_default(), self.cached_session():
       val2, val3 = self.applyOptimizer(
           adagrad.AdagradOptimizer(
               3.0, initial_accumulator_value=0.1))
@@ -231,9 +229,9 @@ class ProximalAdagradOptimizerTest(test.TestCase):
     self.assertAllClose(val0, val2)
     self.assertAllClose(val1, val3)
 
-  @test_util.run_v1_only("ProximalAdagradOptimizer is supported only in V1.")
   def testEquivSparseAdagradwithoutRegularization(self):
-    with self.cached_session():
+    # ProximalAdagradOptimizer is supported only in V1.
+    with ops.Graph().as_default(), self.cached_session():
       val0, val1 = self.applyOptimizer(
           proximal_adagrad.ProximalAdagradOptimizer(
               3.0,
@@ -242,7 +240,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
               l2_regularization_strength=0.0),
           is_sparse=True)
 
-    with self.cached_session():
+    with ops.Graph().as_default(), self.cached_session():
       val2, val3 = self.applyOptimizer(
           adagrad.AdagradOptimizer(
               3.0, initial_accumulator_value=0.1),
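The testEquiv* hunks rely on ProximalAdagrad degenerating to plain Adagrad when l1_regularization_strength and l2_regularization_strength are both 0.0, so the two optimizers must land on identical variable values. A standalone sketch of that equivalence, assuming the public tf.compat.v1 endpoints; the variable and gradient values are illustrative, not the test's:

# Equivalence sketch; values are illustrative.
import numpy as np
import tensorflow.compat.v1 as tf


def one_step(opt):
  # Apply a single gradient step in a fresh graph and return the result.
  with tf.Graph().as_default(), tf.Session() as sess:
    var = tf.Variable([1.0, 2.0])
    grad = tf.constant([0.1, 0.2])
    step = opt.apply_gradients([(grad, var)])
    sess.run(tf.global_variables_initializer())
    sess.run(step)
    return sess.run(var)


proximal = one_step(tf.train.ProximalAdagradOptimizer(
    3.0,
    initial_accumulator_value=0.1,
    l1_regularization_strength=0.0,
    l2_regularization_strength=0.0))
plain = one_step(tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1))
np.testing.assert_allclose(proximal, plain)  # no regularization: identical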