Annotate tests as @run_v1_only

Skip individual test cases or entire test suites that do not
run under v1. Also replace some @run_deprecated_v1 annotations,
since simply running the test in graph mode was not enough.

PiperOrigin-RevId: 224604547
Gaurav Jain 2018-12-07 17:36:37 -08:00 committed by TensorFlower Gardener
parent d35b41c261
commit 4890b781b6
160 changed files with 827 additions and 480 deletions
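
The annotation pattern applied throughout this commit, sketched on a minimal,
illustrative test (the test name and body below are not taken from the diffs):
test_util.run_v1_only takes a reason string (here the tracking bug) and skips the
decorated test method, or every test in a decorated TestCase class, when TensorFlow
v2 behavior is enabled, so tests that rely on graph-mode constructs only run under v1.

    from tensorflow.python.framework import dtypes
    from tensorflow.python.framework import test_util
    from tensorflow.python.ops import array_ops
    from tensorflow.python.platform import test


    class ExampleV1OnlyTest(test.TestCase):

      @test_util.run_v1_only("b/120545219")  # Skipped when v2 behavior is active.
      def testPlaceholderShape(self):
        # Placeholders exist only in graph mode, so this check is v1-only.
        x = array_ops.placeholder(shape=[1, 3], dtype=dtypes.float32)
        self.assertEqual([1, 3], x.shape.as_list())


    if __name__ == "__main__":
      test.main()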

View File

@@ -21,6 +21,7 @@ from __future__ import print_function
 import tensorflow as tf
 from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op
+from tensorflow.python.framework import test_util
 SAMPLE_RATE = 1000
 WINDOW_SIZE = 25
@@ -33,6 +34,7 @@ SMOOTHING_BITS = 10
 class AudioFeatureGenerationTest(tf.test.TestCase):
+  @test_util.run_v1_only("b/120545219")
   def testSimple(self):
     with self.test_session():
       audio = tf.constant(
@@ -51,6 +53,7 @@ class AudioFeatureGenerationTest(tf.test.TestCase):
       self.assertAllEqual(filterbanks.eval(),
                           [[479, 425], [436, 378], [410, 350], [391, 325]])
+  @test_util.run_v1_only("b/120545219")
   def testSimpleFloatScaled(self):
     with self.test_session():
       audio = tf.constant(
@@ -72,6 +75,7 @@ class AudioFeatureGenerationTest(tf.test.TestCase):
                           [[7.484375, 6.640625], [6.8125, 5.90625],
                            [6.40625, 5.46875], [6.109375, 5.078125]])
+  @test_util.run_v1_only("b/120545219")
   def testStacking(self):
     with self.test_session():
       audio = tf.constant(
@@ -114,6 +118,7 @@ class AudioFeatureGenerationTest(tf.test.TestCase):
          [[479, 425, 479, 425, 436, 378], [479, 425, 436, 378, 410, 350],
           [436, 378, 410, 350, 391, 325], [410, 350, 391, 325, 391, 325]])
+  @test_util.run_v1_only("b/120545219")
   def testStackingDropFrame(self):
     with self.test_session():
       audio = tf.constant(

View File

@@ -39,6 +39,7 @@ from tensorflow.python.saved_model import tag_constants
 class TensorFunctionsTest(test_util.TensorFlowTestCase):
+  @test_util.run_v1_only("b/120545219")
   def testGetTensorsValid(self):
     in_tensor = array_ops.placeholder(
         shape=[1, 16, 16, 3], dtype=dtypes.float32)
@@ -49,6 +50,7 @@ class TensorFunctionsTest(test_util.TensorFlowTestCase):
         sess.graph, ["Placeholder"])
     self.assertEqual("Placeholder:0", tensors[0].name)
+  @test_util.run_v1_only("b/120545219")
   def testGetTensorsInvalid(self):
     in_tensor = array_ops.placeholder(
         shape=[1, 16, 16, 3], dtype=dtypes.float32)
@@ -61,6 +63,7 @@ class TensorFunctionsTest(test_util.TensorFlowTestCase):
     self.assertEqual("Invalid tensors 'invalid-input' were found.",
                      str(error.exception))
+  @test_util.run_v1_only("b/120545219")
   def testSetTensorShapeValid(self):
     tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
     self.assertEqual([None, 3, 5], tensor.shape.as_list())
@@ -68,6 +71,7 @@ class TensorFunctionsTest(test_util.TensorFlowTestCase):
     convert_saved_model.set_tensor_shapes([tensor], {"Placeholder": [5, 3, 5]})
     self.assertEqual([5, 3, 5], tensor.shape.as_list())
+  @test_util.run_v1_only("b/120545219")
   def testSetTensorShapeNoneValid(self):
     tensor = array_ops.placeholder(dtype=dtypes.float32)
     self.assertEqual(None, tensor.shape)
@@ -75,6 +79,7 @@ class TensorFunctionsTest(test_util.TensorFlowTestCase):
     convert_saved_model.set_tensor_shapes([tensor], {"Placeholder": [1, 3, 5]})
     self.assertEqual([1, 3, 5], tensor.shape.as_list())
+  @test_util.run_v1_only("b/120545219")
   def testSetTensorShapeArrayInvalid(self):
     # Tests set_tensor_shape where the tensor name passed in doesn't exist.
     tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
@@ -88,6 +93,7 @@ class TensorFunctionsTest(test_util.TensorFlowTestCase):
                      str(error.exception))
     self.assertEqual([None, 3, 5], tensor.shape.as_list())
+  @test_util.run_v1_only("b/120545219")
   def testSetTensorShapeDimensionInvalid(self):
     # Tests set_tensor_shape where the shape passed in is incompatiable.
     tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
@@ -101,6 +107,7 @@ class TensorFunctionsTest(test_util.TensorFlowTestCase):
                      "(?, 3, 5) to [1, 5, 5].", str(error.exception))
     self.assertEqual([None, 3, 5], tensor.shape.as_list())
+  @test_util.run_v1_only("b/120545219")
   def testSetTensorShapeEmpty(self):
     tensor = array_ops.placeholder(shape=[None, 3, 5], dtype=dtypes.float32)
     self.assertEqual([None, 3, 5], tensor.shape.as_list())

View File

@@ -34,6 +34,7 @@ from tensorflow.python.ops import math_ops
 from tensorflow.python.platform import test
+@test_util.run_v1_only("b/120545219")
 class ConvertTest(test_util.TensorFlowTestCase):
   def testBasic(self):
@@ -176,6 +177,7 @@ class ConvertTest(test_util.TensorFlowTestCase):
                      "QUANTIZED_UINT8.", str(error.exception))
+@test_util.run_v1_only("b/120545219")
 class ConvertTestOpHint(test_util.TensorFlowTestCase):
   """Test the hint to stub functionality."""

View File

@@ -80,6 +80,7 @@ class FromConstructor(test_util.TensorFlowTestCase):
     self.assertTrue(converter._has_valid_tensors())
+@test_util.run_v1_only('b/120545219')
 class FromSessionTest(test_util.TensorFlowTestCase):
   def testFloat(self):
@@ -497,6 +498,7 @@ class FromSessionTest(test_util.TensorFlowTestCase):
     interpreter.allocate_tensors()
+@test_util.run_v1_only('b/120545219')
 class FromFrozenGraphFile(test_util.TensorFlowTestCase):
   def testFloat(self):
@@ -744,6 +746,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
     interpreter.allocate_tensors()
+@test_util.run_v1_only('b/120545219')
 class FromSavedModelTest(test_util.TensorFlowTestCase):
   def _createSavedModel(self, shape):
@@ -888,6 +891,7 @@ class FromSavedModelTest(test_util.TensorFlowTestCase):
     interpreter.allocate_tensors()
+@test_util.run_v1_only('b/120545219')
 class FromKerasFile(test_util.TensorFlowTestCase):
   def setUp(self):

View File

@@ -28,6 +28,7 @@ from tensorflow.python import keras
 from tensorflow.python.client import session
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.platform import test
@@ -42,6 +43,7 @@ class EvaluateFrozenGraph(test.TestCase):
       write_graph(sess.graph_def, '', graph_def_file, False)
     return graph_def_file
+  @test_util.run_v1_only('b/120545219')
   def testFloat(self):
     with session.Session().as_default() as sess:
       in_tensor = array_ops.placeholder(
@@ -51,6 +53,7 @@ class EvaluateFrozenGraph(test.TestCase):
     model_coverage.test_frozen_graph(filename, ['Placeholder'], ['add'])
+  @test_util.run_v1_only('b/120545219')
   def testMultipleOutputs(self):
     with session.Session().as_default() as sess:
       in_tensor_1 = array_ops.placeholder(
@@ -84,15 +87,18 @@ class EvaluateFrozenGraph(test.TestCase):
       filename = self._saveFrozenGraph(sess)
     return filename
+  @test_util.run_v1_only('b/120545219')
   def testQuantized(self):
     filename = self._getQuantizedModel()
     model_coverage.test_frozen_graph_quant(filename, ['inputA'], ['output'])
+  @test_util.run_v1_only('b/120545219')
   def testQuantizedInputShapes(self):
     filename = self._getQuantizedModel()
     model_coverage.test_frozen_graph_quant(
         filename, ['inputA'], ['output'], input_shapes={'inputA': [33, 33]})
+  @test_util.run_v1_only('b/120545219')
   def testQuantizedFlexAll(self):
     filename = self._getQuantizedModel()
     model_coverage.test_frozen_graph_quant(
@@ -102,6 +108,7 @@ class EvaluateFrozenGraph(test.TestCase):
 class EvaluateSavedModel(test.TestCase):
+  @test_util.run_v1_only('b/120545219')
   def testFloat(self):
     saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
     with session.Session().as_default() as sess:
@@ -139,18 +146,21 @@ class EvaluateKerasModel(test.TestCase):
     os.close(fd)
     return keras_file
+  @test_util.run_v1_only('b/120545219')
   def testFloat(self):
     model = self._getSingleInputKerasModel()
     keras_file = self._saveKerasModel(model)
     model_coverage.test_keras_model(keras_file)
+  @test_util.run_v1_only('b/120545219')
   def testPostTrainingQuantize(self):
     model = self._getSingleInputKerasModel()
     keras_file = self._saveKerasModel(model)
     model_coverage.test_keras_model(keras_file, post_training_quantize=True)
+  @test_util.run_v1_only('b/120545219')
   def testTargetOps(self):
     model = self._getSingleInputKerasModel()
     keras_file = self._saveKerasModel(model)

View File

@@ -218,6 +218,7 @@ class ApiTest(test.TestCase):
                      constant_op.constant(-1))
     self.assertEqual(1, self.evaluate(x))
+  @test_util.run_v1_only('b/120545219')
   def test_converted_call_functools_partial(self):
     def test_fn(x, y, z):

View File

@@ -312,6 +312,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       self.assertEqual(None, res[2])
       self.assertEqual(44.0, res[1])
+  @test_util.run_v1_only('b/120545219')
   def testFetchAttrs(self):
     if attr is None:
       self.skipTest('attr module is unavailable.')
@@ -340,6 +341,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       self.assertAllEqual(val3, result.field1)
       self.assertAllEqual(val2, result.field2)
+  @test_util.run_v1_only('b/120545219')
   def testFetchNestedAttrs(self):
     if attr is None:
       self.skipTest('attr module is unavailable.')
@@ -1024,6 +1026,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
       self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
+  @test_util.run_v1_only('b/120545219')
   def testOperationRunMethod(self):
     with session.Session():
       a = constant_op.constant(1.0, shape=[1, 2])
@@ -1154,6 +1157,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       else:
         importer.import_graph_def(gdef, name='import')
+  @test_util.run_v1_only('b/120545219')
   def testParallelRunAndSingleBuild(self):
     with session.Session() as sess:
       c = constant_op.constant(5.0)
@@ -1174,6 +1178,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       for t in threads:
         t.join()
+  @test_util.run_v1_only('b/120545219')
   def testParallelRunAndParallelBuild(self):
     with session.Session() as sess:
       c = constant_op.constant(5.0)
@@ -1274,6 +1279,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
         sess.run({})
+  @test_util.run_v1_only('b/120545219')
   def testNotEntered(self):
     # pylint: disable=protected-access
     self.assertEqual(ops._default_session_stack.get_default(), None)
@@ -1289,6 +1295,7 @@ class SessionTest(test_util.TensorFlowTestCase):
         ValueError, lambda e: 'No default session is registered.' in str(e)):
       c_2.eval()
+  @test_util.run_v1_only('b/120545219')
   def testInteractive(self):
     with ops.device('/cpu:0'):
       sess = session.InteractiveSession()
@@ -1301,6 +1308,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       self.assertAllEqual([[24.0]], e.eval())
       sess.close()
+  @test_util.run_v1_only('b/120545219')
   def testMultipleInteractiveSessionsWarning(self):
     # Reinitialize the global state to ensure that the expected warnings will
     # be emitted.
@@ -1328,6 +1336,7 @@ class SessionTest(test_util.TensorFlowTestCase):
     sess2.close()
     sess.close()
+  @test_util.run_v1_only('b/120545219')
   def testInteractivePlacePrunedGraph(self):
     sess = session.InteractiveSession()
@@ -1349,6 +1358,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       a.eval()
     sess.close()
+  @test_util.run_v1_only('b/120545219')
   def testDefaultSessionPlacePrunedGraph(self):
     sess = session.Session()
@@ -1769,9 +1779,11 @@ class SessionTest(test_util.TensorFlowTestCase):
       sess.run(a, run_metadata=run_metadata)
       self.assertEqual(len(run_metadata.partition_graphs), 0)
+  @test_util.run_v1_only('b/120545219')
   def testOutputPartitionGraphsDirect(self):
     self.runTestOutputPartitionGraphs(session.Session())
+  @test_util.run_v1_only('b/120545219')
   def testOutputPartitionGraphsDistributed(self):
     server = server_lib.Server.create_local_server()
     self.runTestOutputPartitionGraphs(session.Session(server.target))
@@ -1796,6 +1808,7 @@ class SessionTest(test_util.TensorFlowTestCase):
     del sess1
     del sess2
+  @test_util.run_v1_only('b/120545219')
   def testAsDefault(self):
     c = constant_op.constant(37)
     sess = session.Session()
@@ -1821,6 +1834,7 @@ class SessionTest(test_util.TensorFlowTestCase):
     with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
       session.Session(graph=37)
+  @test_util.run_v1_only('b/120545219')
   def testTimeoutWithShortOperations(self):
     num_epochs = 5
     q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
@@ -1834,6 +1848,7 @@ class SessionTest(test_util.TensorFlowTestCase):
         sess.run(enqueue_op)
       self.assertEqual(sess.run(q.size()), num_epochs * 2)
+  @test_util.run_v1_only('b/120545219')
   def testRegisterFetchAndFeedConversionFunctions(self):
     class SquaredTensor(object):
@@ -1865,6 +1880,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       squared_eval = sess.partial_run(partial_run, squared_tensor)
       self.assertAllClose(np2 * np2, squared_eval)
+  @test_util.run_v1_only('b/120545219')
   def testDefaultLogDevicePlacement(self):
     class CaptureStderr(str):
@@ -1914,6 +1930,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in str(log),
                       str(log))
+  @test_util.run_v1_only('b/120545219')
   def testLocalMasterSessionTimeout(self):
     # Test that the timeout passed in a config to the session works correctly.
     config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
@@ -1927,6 +1944,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       with self.assertRaises(errors.DeadlineExceededError):
         sess.run(dequeued_t)
+  @test_util.run_v1_only('b/120545219')
   def testDefaultServerTimeout(self):
     # Test that the default server config timeout gets used when no Session
     # config is provided.
@@ -1952,9 +1970,11 @@ class SessionTest(test_util.TensorFlowTestCase):
       with self.assertRaisesOpError('has inputs from different frames'):
         sess.run(res, feed_dict={data: 1.0})
+  @test_util.run_v1_only('b/120545219')
   def testBuildGraphErrorDirect(self):
     self.runTestBuildGraphError(session.Session())
+  @test_util.run_v1_only('b/120545219')
   def testBuildGraphErrorDist(self):
     server = server_lib.Server.create_local_server()
     self.runTestBuildGraphError(session.Session(server.target))
@@ -1993,9 +2013,11 @@ class SessionTest(test_util.TensorFlowTestCase):
       result = sess.run(f)
       self.assertEqual(result, 2.0)
+  @test_util.run_v1_only('b/120545219')
   def testAddFunctionToSession(self):
     self.runTestAddFunctionToSession()
+  @test_util.run_v1_only('b/120545219')
   def testAddFunctionToGrpcSession(self):
     server = server_lib.Server.create_local_server()
     self.runTestAddFunctionToSession(server.target)
@@ -2009,6 +2031,7 @@ class SessionTest(test_util.TensorFlowTestCase):
     with session.Session():
       pass
+  @test_util.run_v1_only('b/120545219')
   def testAutoConvertAndCheckData(self):
     with self.cached_session() as sess:
       a = array_ops.placeholder(dtype=dtypes.string)

View File

@@ -264,6 +264,7 @@ class InterleaveTest(test_base.DatasetTestBase, parameterized.TestCase):
       ("8", np.int64([4, 0, 6]), 2, 3, 1),
       ("9", np.int64([4, 0, 6]), 2, 3, 2),
   )
+  @test_util.run_v1_only("b/120545219")
   def testSkipEagerSloppyInterleaveInOrder(self, input_values, cycle_length,
                                            block_length, num_parallel_calls):
     get_next, coordination_events = _make_coordinated_sloppy_dataset(
@@ -286,6 +287,7 @@ class InterleaveTest(test_base.DatasetTestBase, parameterized.TestCase):
       ("3", np.int64([4, 5, 6]), 3, 2, 3),
       ("4", np.int64([4, 0, 6]), 2, 3, 2),
   )
+  @test_util.run_v1_only("b/120545219")
   def testSkipEagerSloppyInterleaveOutOfOrder(self, input_values, cycle_length,
                                               block_length, num_parallel_calls):
     get_next, coordination_events = _make_coordinated_sloppy_dataset(

View File

@@ -39,6 +39,7 @@ from tensorflow.python.platform import test
 class IteratorClusterTest(test.TestCase):
+  @test_util.run_v1_only("b/120545219")
   def testRemoteIteratorWithoutRemoteCallFail(self):
     worker_config = config_pb2.ConfigProto()
     worker_config.device_count["CPU"] = 2
@@ -92,6 +93,7 @@ class IteratorClusterTest(test.TestCase):
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(remote_op, feed_dict={target_placeholder: device1})
+  @test_util.run_v1_only("b/120545219")
   def testRemoteIteratorUsingRemoteCallOp(self):
     worker_config = config_pb2.ConfigProto()
     worker_config.device_count["CPU"] = 2
@@ -102,6 +104,7 @@ class IteratorClusterTest(test.TestCase):
         "/job:worker/replica:0/task:0/cpu:1",
         worker[0].target)
+  @test_util.run_v1_only("b/120545219")
   def testRemoteIteratorUsingRemoteCallOpCrossProcess(self):
     workers, _ = test_util.create_local_cluster(2, 1)
@@ -109,6 +112,7 @@ class IteratorClusterTest(test.TestCase):
         "/job:worker/replica:0/task:1/cpu:0",
         workers[0].target)
+  @test_util.run_v1_only("b/120545219")
   def testCaptureHashTableInSharedIterator(self):
     worker, _ = test_util.create_local_cluster(1, 1)
@@ -143,6 +147,7 @@ class IteratorClusterTest(test.TestCase):
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(get_next)
+  @test_util.run_v1_only("b/120545219")
   def testImplicitDisposeParallelMapDataset(self):
     # Tests whether a parallel map dataset will be cleaned up correctly when
     # the pipeline does not run it until exhaustion.

View File

@@ -34,6 +34,7 @@ from tensorflow.python.framework import errors
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import sparse_tensor
 from tensorflow.python.framework import tensor_util
+from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import data_flow_ops
@@ -82,6 +83,7 @@ def _make_coordinated_sloppy_dataset(num_elements, num_parallel_calls):
   return next_element, coordination_events
+@test_util.run_v1_only("b/120545219")
 class MapDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
   def _buildMapDataset(self, components, count):

View File

@@ -35,6 +35,7 @@ from tensorflow.python.platform import test
 # TODO(b/117581999): Add eager coverage.
 class MultiDeviceIteratorTest(test_base.DatasetTestBase):
+  @test_util.run_v1_only("b/120545219")
   def testNoGetNext(self):
     dataset = dataset_ops.Dataset.range(10)
     multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
@@ -44,6 +45,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
     with self.test_session(config=config) as sess:
       self.evaluate(multi_device_iterator.initializer)
+  @test_util.run_v1_only("b/120545219")
   def testBasic(self):
     dataset = dataset_ops.Dataset.range(10)
     multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
@@ -60,6 +62,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
         self.evaluate(elem_on_1)
         self.evaluate(elem_on_2)
+  @test_util.run_v1_only("b/120545219")
   def testOneOnSameDevice(self):
     with ops.device("/cpu:0"):
       dataset = dataset_ops.Dataset.range(10)
@@ -77,6 +80,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
+  @test_util.run_v1_only("b/120545219")
   def testRepeatDevices(self):
     with ops.device("/cpu:0"):
       dataset = dataset_ops.Dataset.range(20)
@@ -99,6 +103,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
         self.evaluate(elem_on_3)
         self.evaluate(elem_on_4)
+  @test_util.run_v1_only("b/120545219")
   def testNotFullyDivisible(self):
     dataset = dataset_ops.Dataset.range(9)
     multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
@@ -116,6 +121,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
+  @test_util.run_v1_only("b/120545219")
   def testGetNextAsOptional(self):
     dataset = dataset_ops.Dataset.range(9)
     multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
@@ -149,6 +155,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_2_t)
+  @test_util.run_v1_only("b/120545219")
   def testUneven(self):
     dataset = dataset_ops.Dataset.range(10)
     multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
@@ -166,6 +173,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
+  @test_util.run_v1_only("b/120545219")
   def testMultipleInitializations(self):
     with ops.device("/cpu:0"):
       epoch = array_ops.placeholder(dtypes.int64, shape=[])
@@ -259,6 +267,7 @@ class MultiDeviceIteratorTest(test_base.DatasetTestBase):
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_2_t)
+  @test_util.run_v1_only("b/120545219")
   def testOptimization(self):
     dataset = dataset_ops.Dataset.range(10)
     dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))

View File

@@ -573,6 +573,7 @@ def create_analyzer_cli(dump):
   return analyzer, registry
+@test_util.run_v1_only("b/120545219")
 class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
   @classmethod
@@ -645,7 +646,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
     self.assertEqual(len("Op type") + 1, op_type_col_width)
-  @test_util.run_deprecated_v1
   def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):
     dump = self._debug_dump.dumped_tensor_data[0]
     self.assertLess(dump.dump_size_bytes, 1000)
@@ -661,7 +661,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     # column should be determined by the length of "VariableV2".
     self.assertEqual(len("VariableV2") + 1, op_type_col_width)
-  @test_util.run_deprecated_v1
   def testListTensors(self):
     # Use shorthand alias for the command prefix.
     out = self._registry.dispatch_command("lt", [])
@@ -675,7 +674,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     # Check the main menu.
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorsInReverseTimeOrderWorks(self):
     # Use shorthand alias for the command prefix.
     out = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
@@ -691,7 +689,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         reverse=True)
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorsInDumpSizeOrderWorks(self):
     out = self._registry.dispatch_command("lt", ["-s", "dump_size"])
     assert_listed_tensors(
@@ -705,7 +702,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         sort_by="dump_size")
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorsInReverseDumpSizeOrderWorks(self):
     out = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
     assert_listed_tensors(
@@ -725,7 +721,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
                   out.lines)
-  @test_util.run_deprecated_v1
   def testListTensorsInOpTypeOrderWorks(self):
     # Use shorthand alias for the command prefix.
     out = self._registry.dispatch_command("lt", ["-s", "op_type"])
@@ -741,7 +736,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         reverse=False)
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorsInReverseOpTypeOrderWorks(self):
     # Use shorthand alias for the command prefix.
     out = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
@@ -757,7 +751,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         reverse=True)
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorsInTensorNameOrderWorks(self):
     # Use shorthand alias for the command prefix.
     out = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
@@ -773,7 +766,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         reverse=False)
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorsInReverseTensorNameOrderWorks(self):
     # Use shorthand alias for the command prefix.
     out = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
@@ -789,7 +781,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         reverse=True)
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorsFilterByNodeNameRegex(self):
     out = self._registry.dispatch_command("list_tensors",
                                           ["--node_name_filter", ".*read.*"])
@@ -803,7 +794,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     assert_listed_tensors(self, out, [], [], node_name_regex="^read")
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorFilterByOpTypeRegex(self):
     out = self._registry.dispatch_command("list_tensors",
                                           ["--op_type_filter", "Identity"])
@@ -832,7 +822,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         op_type_regex="(Add|MatMul)")
     check_main_menu(self, out, list_tensors_enabled=False)
-  @test_util.run_deprecated_v1
   def testListTensorWithFilterAndNodeNameExclusionWorks(self):
     # First, create and register the filter.
     def is_2x1_vector(datum, tensor):
@@ -889,7 +878,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     out = self._registry.dispatch_command("list_tensors", ["--bar"])
     check_syntax_error_output(self, out, "list_tensors")
-  @test_util.run_deprecated_v1
   def testNodeInfoByNodeName(self):
     node_name = "simple_mul_add/matmul"
     out = self._registry.dispatch_command("node_info", [node_name])
@@ -914,7 +902,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         [(len(out.lines[0]) - len(node_name), len(out.lines[0]), "bold")],
         out.font_attr_segs[0])
-  @test_util.run_deprecated_v1
   def testNodeInfoShowAttributes(self):
     node_name = "simple_mul_add/matmul"
     out = self._registry.dispatch_command("node_info", ["-a", node_name])
@@ -938,7 +925,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         print_tensor_node_name=node_name,
         list_outputs_node_name=node_name)
-  @test_util.run_deprecated_v1
   def testNodeInfoShowDumps(self):
     node_name = "simple_mul_add/matmul"
     out = self._registry.dispatch_command("node_info", ["-d", node_name])
@@ -963,7 +949,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         len(out.lines[16]) - len(out.lines[16].strip()),
         len(out.lines[16]), "pt %s:0 -n 0" % node_name)
-  @test_util.run_deprecated_v1
   def testNodeInfoShowStackTraceUnavailableIsIndicated(self):
     self._debug_dump.set_python_graph(None)
@@ -987,7 +972,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         print_tensor_node_name=node_name,
         list_outputs_node_name=node_name)
-  @test_util.run_deprecated_v1
   def testNodeInfoShowStackTraceAvailableWorks(self):
     self._debug_dump.set_python_graph(self._sess.graph)
@@ -1011,7 +995,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         print_tensor_node_name=node_name,
         list_outputs_node_name=node_name)
-  @test_util.run_deprecated_v1
   def testNodeInfoByTensorName(self):
     node_name = "simple_mul_add/u/read"
     tensor_name = node_name + ":0"
@@ -1381,7 +1364,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
         break
     return index
-  @test_util.run_deprecated_v1
   def testPrintSourceForOpNamesWholeFileWorks(self):
     self._debug_dump.set_python_graph(self._sess.graph)
     out = self._registry.dispatch_command(
@@ -1434,7 +1416,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     self.assertEqual("pt simple_mul_add/add",
                      out.font_attr_segs[index + 1][0][2].content)
-  @test_util.run_deprecated_v1
   def testPrintSourceForTensorNamesWholeFileWorks(self):
     self._debug_dump.set_python_graph(self._sess.graph)
     out = self._registry.dispatch_command(
@@ -1455,7 +1436,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     self.assertEqual("pt simple_mul_add/u:0",
                      out.font_attr_segs[index + 2][0][2].content)
-  @test_util.run_deprecated_v1
   def testPrintSourceForOpNamesStartingAtSpecifiedLineWorks(self):
     self._debug_dump.set_python_graph(self._sess.graph)
     out = self._registry.dispatch_command(
@@ -1482,7 +1462,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     self.assertEqual("pt simple_mul_add/u/read",
                      out.font_attr_segs[index + 3][0][2].content)
-  @test_util.run_deprecated_v1
   def testPrintSourceForOpNameSettingMaximumElementCountWorks(self):
     self._debug_dump.set_python_graph(self._sess.graph)
     out = self._registry.dispatch_command(
@@ -1527,7 +1506,6 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
       self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
                       attr_seg[2] == cli_shared.COLOR_GRAY)
-  @test_util.run_deprecated_v1
   def testListSourceWithNodeNameFilterWithMatchesWorks(self):
     self._debug_dump.set_python_graph(self._sess.graph)
     out = self._registry.dispatch_command("list_source", ["-n", ".*/read"])
@@ -1691,6 +1669,7 @@ class AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase):
     self.assertNotIn("...,", out.lines[4])
+@test_util.run_v1_only("b/120545219")
 class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
   @classmethod
@@ -1742,7 +1721,6 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
     # Tear down temporary dump directory.
     shutil.rmtree(cls._dump_root)
-  @test_util.run_deprecated_v1
   def testNodeInfoWithControlDependencies(self):
     # Call node_info on a node with control inputs.
     out = self._registry.dispatch_command("node_info",
@@ -1783,7 +1761,6 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
         len(out.lines[z_line]),
         "ni -a -d -t control_deps/ctrl_dep_z")
-  @test_util.run_deprecated_v1
   def testListInputsNonRecursiveNoControl(self):
     """List inputs non-recursively, without any control inputs."""
@@ -1826,7 +1803,6 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
         len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
         len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
-  @test_util.run_deprecated_v1
   def testListInputsNonRecursiveNoControlUsingTensorName(self):
     """List inputs using the name of an output tensor of the node."""
@@ -1855,7 +1831,6 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
         len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
         len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
-  @test_util.run_deprecated_v1
   def testListInputsNonRecursiveWithControls(self):
     """List inputs non-recursively, with control inputs."""
     node_name = "control_deps/ctrl_dep_z"
@@ -1886,7 +1861,6 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
         len(out.lines[5]) - len("control_deps/x"),
         len(out.lines[5]), "li -c -r control_deps/x")
-  @test_util.run_deprecated_v1
   def testListInputsRecursiveWithControls(self):
     """List inputs recursively, with control inputs."""
     node_name = "control_deps/ctrl_dep_z"
@@ -1932,7 +1906,6 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
         len(out.lines[18]) - len("control_deps/x"),
         len(out.lines[18]), "li -c -r control_deps/x")
-  @test_util.run_deprecated_v1
   def testListInputsRecursiveWithControlsWithDepthLimit(self):
     """List inputs recursively, with control inputs and a depth limit."""
     node_name = "control_deps/ctrl_dep_z"
@@ -1992,7 +1965,6 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
         "ERROR: There is no node named \"control_deps/z/foo\" in the "
         "partition graphs"], out.lines)
-  @test_util.run_deprecated_v1
   def testListRecipientsRecursiveWithControlsWithDepthLimit(self):
     """List recipients recursively, with control inputs and a depth limit."""
@@ -2025,6 +1997,7 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
         out.font_attr_segs[0])
+@test_util.run_v1_only("b/120545219")
 class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
   @classmethod
@@ -2064,7 +2037,6 @@ class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
     # Tear down temporary dump directory.
     shutil.rmtree(cls._dump_root)
-  @test_util.run_deprecated_v1
   def testMultipleDumpsPrintTensorNoNumber(self):
     output = self._registry.dispatch_command("pt", ["while/Identity:0"])
@@ -2082,7 +2054,6 @@ class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
     self.assertEqual("For example:", output.lines[-2])
     self.assertEqual(" print_tensor while/Identity:0 -n 0", output.lines[-1])
-  @test_util.run_deprecated_v1
   def testMultipleDumpsPrintTensorWithNumber(self):
     for i in xrange(5):
       output = self._registry.dispatch_command(
@@ -2096,7 +2067,6 @@ class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
       self.assertTrue(output.lines[4].startswith("array(%d" % i))
      self.assertTrue(output.lines[4].endswith(")"))
-  @test_util.run_deprecated_v1
   def testMultipleDumpsPrintTensorInvalidNumber(self):
     output = self._registry.dispatch_command("pt",
                                              ["while/Identity:0", "-n", "10"])

View File

@@ -105,6 +105,7 @@ class TimeToReadableStrTest(test_util.TensorFlowTestCase):
       cli_shared.time_to_readable_str(100, force_time_unit="ks")
+@test_util.run_v1_only("b/120545219")
 class GetRunStartIntroAndDescriptionTest(test_util.TensorFlowTestCase):
   def setUp(self):
@@ -334,6 +335,7 @@ class GetRunStartIntroAndDescriptionTest(test_util.TensorFlowTestCase):
     self.assertEqual("run #1: 1 fetch (a:0); 1 feed (foo)", short_description)
+@test_util.run_v1_only("b/120545219")
 class GetErrorIntroTest(test_util.TensorFlowTestCase):
   def setUp(self):
@@ -342,7 +344,6 @@ class GetErrorIntroTest(test_util.TensorFlowTestCase):
   def tearDown(self):
     ops.reset_default_graph()
-  @test_util.run_deprecated_v1
   def testShapeError(self):
     tf_error = errors.OpError(None, self.var_a.initializer, "foo description",
                               None)

View File

@@ -70,6 +70,7 @@ def _assert_no_lines_match(pattern, lines):
                   "%s matched at least one line in %s." % (pattern, str(lines)))
+@test_util.run_v1_only("b/120545219")
 class ProfileAnalyzerListProfileTest(test_util.TensorFlowTestCase):
   def testNodeInfoEmpty(self):
@@ -321,6 +322,7 @@ class ProfileAnalyzerListProfileTest(test_util.TensorFlowTestCase):
     _assert_at_least_one_line_matches(r"Device Total.*0\.009ms", prof_output)
+@test_util.run_v1_only("b/120545219")
 class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
   def setUp(self):
@@ -348,7 +350,6 @@ class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
     ops.reset_default_graph()
     super(ProfileAnalyzerPrintSourceTest, self).tearDown()
-  @test_util.run_deprecated_v1
   def testPrintSourceForWhileLoop(self):
     prof_output = self.prof_analyzer.print_source([__file__])
@@ -362,7 +363,6 @@ class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
         r"\[(\|)+(\s)*\] .*us .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
         prof_output.lines)
-  @test_util.run_deprecated_v1
   def testPrintSourceOutputContainsClickableLinks(self):
     prof_output = self.prof_analyzer.print_source([__file__])
     any_match, line_index = _at_least_one_line_matches(
@@ -379,7 +379,6 @@ class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
         break
     self.assertTrue(any_menu_item_match)
-  @test_util.run_deprecated_v1
   def testPrintSourceWithNonDefaultTimeUnit(self):
     prof_output = self.prof_analyzer.print_source([
         __file__, "--time_unit", "ms"])
@@ -394,7 +393,6 @@ class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
        r"\[(\|)+(\s)*\] .*ms .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
        prof_output.lines)
-  @test_util.run_deprecated_v1
   def testPrintSourceWithNodeNameFilter(self):
     prof_output = self.prof_analyzer.print_source([
         __file__, "--node_name_filter", "x$"])
@@ -427,7 +425,6 @@ class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
         break
     self.assertTrue(any_menu_item_match)
-  @test_util.run_deprecated_v1
   def testPrintSourceWithOpTypeFilter(self):
     prof_output = self.prof_analyzer.print_source([
         __file__, "--op_type_filter", "Less"])


@ -129,6 +129,7 @@ def _parse_updated(lines):
return updated
@test_util.run_v1_only("b/120545219")
class NodeStepperSimpleGraphTest(test_util.TensorFlowTestCase):
def setUp(self):


@ -36,6 +36,7 @@ from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
@test_util.run_v1_only("b/120545219")
class IdentifyGradientTest(test_util.TensorFlowTestCase):
def setUp(self):
@ -54,7 +55,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
ops.reset_default_graph()
debug_gradients.clear_gradient_debuggers()
@test_util.run_deprecated_v1
def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
@ -85,7 +85,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
@test_util.run_deprecated_v1
def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
@ -117,7 +116,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
@test_util.run_deprecated_v1
def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.identify_gradient(self.w)
@ -125,7 +123,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
"The graph already contains an op named .*"):
grad_debugger.identify_gradient(self.w)
@test_util.run_deprecated_v1
def testIdentifyGradientWorksOnMultipleLosses(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
@ -154,7 +151,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
@test_util.run_deprecated_v1
def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
@ -175,7 +171,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_2.gradient_tensor(self.w)
@test_util.run_deprecated_v1
def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
grad_debugger = debug_gradients.GradientsDebugger()
with self.assertRaisesRegexp(
@ -184,7 +179,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
r"has type .*Operation.*"):
grad_debugger.gradient_tensor(variables.global_variables_initializer())
@test_util.run_deprecated_v1
def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
@ -200,7 +194,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
@test_util.run_deprecated_v1
def testWatchGradientsByXTensorNamesWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
@ -227,7 +220,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
@test_util.run_deprecated_v1
def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):
y = math_ops.add(self.w, -1.0, name="y")
@ -254,7 +246,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
@test_util.run_deprecated_v1
def testWatchGradientsWorksOnRefTensor(self):
y = math_ops.add(self.w, -1.0, name="y")
@ -273,7 +264,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertAllClose(3.0, self.sess.run(
grad_debugger.gradient_tensor("u:0")))
@test_util.run_deprecated_v1
def testWatchGradientsWorksOnMultipleTensors(self):
y = math_ops.add(self.w, -1.0, name="y")
@ -294,7 +284,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertAllClose(3.0, self.sess.run(
grad_debugger.gradient_tensor("u:0")))
@test_util.run_deprecated_v1
def testWatchGradientsByXTensorsWorks(self):
y = math_ops.add(self.w, -1.0, name="foo/y")
z = math_ops.square(y, name="foo/z")
@ -317,7 +306,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertAllClose(10.0, self.sess.run(w_grad))
self.assertAllClose(30.0, self.sess.run(u_grad))
@test_util.run_deprecated_v1
def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):
y = math_ops.add(self.w, -1.0, name="y")
z1 = math_ops.square(y, name="z1")
@ -343,7 +331,6 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0**-0.5), self.sess.run(dz2_dy))
@test_util.run_deprecated_v1
def testGradientsValuesFromDumpWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
z = math_ops.square(y, name="z")


@ -185,7 +185,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
self.assertEqual(["file:///tmp/tfdbg_1", "file:///tmp/tfdbg_2"],
watch_0.debug_urls)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_allNodes(self):
debug_utils.watch_graph(
self._run_options,
@ -217,7 +217,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
self.assertTrue("p1" in node_names)
self.assertTrue("s" in node_names)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameWhitelist(self):
debug_utils.watch_graph(
self._run_options,
@ -232,7 +232,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
sorted(["a1_init", "a1", "a1/Assign", "a1/read", "p1"]),
sorted(node_names))
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_opTypeWhitelist(self):
debug_utils.watch_graph(
self._run_options,
@ -258,7 +258,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["p1"], node_names)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_tensorDTypeWhitelist(self):
debug_utils.watch_graph(
self._run_options,
@ -271,7 +271,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign", "b", "b/Assign"], node_names)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameAndTensorDTypeWhitelists(self):
debug_utils.watch_graph(
self._run_options,
@ -285,7 +285,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign"], node_names)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameBlacklist(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
@ -300,7 +300,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
sorted(["b_init", "b", "b/Assign", "b/read", "c", "s"]),
sorted(node_names))
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_opTypeBlacklist(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
@ -313,7 +313,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["p1", "s"]), sorted(node_names))
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameAndOpTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
@ -327,7 +327,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["s"], node_names)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_tensorDTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,
@ -344,7 +344,7 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
self.assertNotIn("b/Assign", node_names)
self.assertIn("s", node_names)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWatchGraph_nodeNameAndTensorDTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
self._run_options,


@ -44,6 +44,7 @@ from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("b/120545219")
class DistributedSessionDebugTest(test_util.TensorFlowTestCase):
"""Test the debugging of distributed sessions."""


@ -34,6 +34,7 @@ from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
@test_util.run_v1_only("b/120545219")
class SessionDebugFileTest(session_debug_testlib.SessionDebugTestBase):
def _debug_urls(self, run_number=None):
@ -45,7 +46,6 @@ class SessionDebugFileTest(session_debug_testlib.SessionDebugTestBase):
else:
return os.path.join(self._dump_root, "run_%d" % run_number)
@test_util.run_deprecated_v1
def testAllowsDifferentWatchesOnDifferentRuns(self):
"""Test watching different tensors on different runs of the same graph."""


@ -91,6 +91,7 @@ class GrpcDebugServerTest(test_util.TensorFlowTestCase):
server.stop_server().wait()
@test_util.run_v1_only("b/120545219")
class SessionDebugGrpcTest(session_debug_testlib.SessionDebugTestBase):
@classmethod
@ -353,6 +354,7 @@ class SessionDebugConcurrentTest(
return urls
@test_util.run_v1_only("b/120545219")
class SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase):
"""Test server gating of debug ops."""
@ -730,6 +732,7 @@ class SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase):
self.assertEqual("DebugNumericSummary", debug_watch.debug_op)
@test_util.run_v1_only("b/120545219")
class DelayedDebugServerTest(test_util.TensorFlowTestCase):
def testDebuggedSessionRunWorksWithDelayedDebugServerStartup(self):


@ -84,6 +84,7 @@ class _RNNCellForTest(rnn_cell_impl.RNNCell):
return (math_ops.multiply(self._w, input_), state)
@test_util.run_v1_only("b/120545219")
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""


@ -216,6 +216,7 @@ class SourceHelperTest(test_util.TensorFlowTestCase):
os.remove(unrelated_source_path)
@test_util.run_v1_only("b/120545219")
class ListSourceAgainstDumpTest(test_util.TensorFlowTestCase):
def createAndRunGraphWithWhileLoop(self):


@ -33,6 +33,7 @@ from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
@test_util.run_v1_only("b/120545219")
class StepperTest(test_util.TensorFlowTestCase):
def setUp(self):
@ -443,6 +444,7 @@ class StepperTest(test_util.TensorFlowTestCase):
self.assertAllClose(-4.0, result["fz"]["z"])
@test_util.run_v1_only("b/120545219")
class StepperTestWithPlaceHolders(test_util.TensorFlowTestCase):
def setUp(self):
@ -577,6 +579,7 @@ class StepperTestWithPlaceHolders(test_util.TensorFlowTestCase):
self.assertAllClose([[-1.0], [6.0]], stepper.finalize())
@test_util.run_v1_only("b/120545219")
class StepperAssignAddTest(test_util.TensorFlowTestCase):
def setUp(self):
@ -692,6 +695,7 @@ class StepperAssignAddTest(test_util.TensorFlowTestCase):
self.assertAllClose(12.0, stepper.cont(self.v))
@test_util.run_v1_only("b/120545219")
class StepperBackwardRunTest(test_util.TensorFlowTestCase):
def setUp(self):


@ -32,6 +32,7 @@ from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_v1_only("b/120545219")
class DumpingDebugWrapperDiskUsageLimitTest(test_util.TensorFlowTestCase):
@classmethod


@ -41,6 +41,7 @@ from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
@test_util.run_v1_only("b/120545219")
class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):


@ -141,6 +141,7 @@ class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
return framework.OnRunEndResponse()
@test_util.run_v1_only("b/120545219")
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def _no_rewrite_session_config(self):


@ -127,6 +127,7 @@ class LocalCLIDebuggerWrapperSessionForTest(
return e.exit_token
@test_util.run_v1_only("b/120545219")
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):


@ -427,6 +427,7 @@ class DistributeCoordinatorTestStandaloneMode(DistributeCoordinatorTestBase):
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
@test_util.run_v1_only("b/120545219")
def testBetweenGraphWithMonitoredSession(self):
"""Test monitored session in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
@ -600,6 +601,7 @@ class DistributeCoordinatorTestInpendentWorkerMode(
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
@test_util.run_v1_only("b/120545219")
def testBetweenGraphWithMonitoredSession(self):
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, num_ps=NUM_PS)


@ -648,6 +648,7 @@ class BackpropTest(test.TestCase):
g.gradient(x, y)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testGradientTapeWithCond(self):
x = constant_op.constant(3.0)
@ -669,6 +670,7 @@ class BackpropTest(test.TestCase):
self.assertEqual(self.evaluate(dy), 6.0)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testGradientTapeWithWhileLoop(self):
i = constant_op.constant(1)
x = constant_op.constant(2.)
@ -704,6 +706,7 @@ class BackpropTest(test.TestCase):
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
@ -1243,16 +1246,19 @@ class JacobianTest(test.TestCase):
answer = [array_ops.diag(2 * x * y), array_ops.diag(x * x)]
return jacobian, answer
@test_util.run_v1_only('b/120545219')
def testPfor(self):
jacobian, answer = self._jacobian(experimental_use_pfor=True)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoop(self):
jacobian, answer = self._jacobian(experimental_use_pfor=False)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPforDefun(self):
@function.defun
@ -1263,6 +1269,7 @@ class JacobianTest(test.TestCase):
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoopDefun(self):
@function.defun
@ -1273,6 +1280,7 @@ class JacobianTest(test.TestCase):
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
if not context.executing_eagerly():
return
@ -1283,6 +1291,7 @@ class JacobianTest(test.TestCase):
with self.assertRaisesRegexp(RuntimeError, 'persistent'):
g.jacobian(y, x, experimental_use_pfor=False)
@test_util.run_v1_only('b/120545219')
def testPforException(self):
var = variables.Variable([1.])
@ -1303,6 +1312,7 @@ class JacobianTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'No converter'):
g.jacobian(y, x, experimental_use_pfor=True)
@test_util.run_v1_only('b/120545219')
def test_parallel_iterations(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant([[1., 2], [3, 4]])
@ -1328,14 +1338,17 @@ class BatchJacobianTest(test.TestCase):
array_ops.diag(2 * x[1] * y[1])])
return batch_jacobian, answer
@test_util.run_v1_only('b/120545219')
def testPfor(self):
batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=True)
self.assertAllEqual(answer, batch_jacobian)
@test_util.run_v1_only('b/120545219')
def testWhileLoop(self):
batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=False)
self.assertAllEqual(answer, batch_jacobian)
@test_util.run_v1_only('b/120545219')
def testPforDefun(self):
@function.defun
@ -1345,6 +1358,7 @@ class BatchJacobianTest(test.TestCase):
batch_jacobian, answer = _f()
self.assertAllEqual(answer, batch_jacobian)
@test_util.run_v1_only('b/120545219')
def testWhileLoopDefun(self):
@function.defun
@ -1354,6 +1368,7 @@ class BatchJacobianTest(test.TestCase):
batch_jacobian, answer = _f()
self.assertAllEqual(answer, batch_jacobian)
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
if not context.executing_eagerly():
return
@ -1364,6 +1379,7 @@ class BatchJacobianTest(test.TestCase):
with self.assertRaisesRegexp(RuntimeError, 'persistent'):
g.batch_jacobian(y, x, experimental_use_pfor=False)
@test_util.run_v1_only('b/120545219')
def testBadShape(self):
x = random_ops.random_uniform([2, 3])
with backprop.GradientTape() as g:
@ -1371,6 +1387,7 @@ class BatchJacobianTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'Need first dimension'):
g.batch_jacobian(y, x)
@test_util.run_v1_only('b/120545219')
def testBadInputRank(self):
x = random_ops.random_uniform([2])
with backprop.GradientTape() as g:
@ -1385,6 +1402,7 @@ class BatchJacobianTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'):
g.batch_jacobian(y, x)
@test_util.run_v1_only('b/120545219')
def testPforException(self):
var = variables.Variable([1.])
@ -1405,6 +1423,7 @@ class BatchJacobianTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'No converter'):
g.batch_jacobian(y, x, experimental_use_pfor=True)
@test_util.run_v1_only('b/120545219')
def test_parallel_iterations(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant([[1., 2], [3, 4]])


@ -187,7 +187,7 @@ class FunctionGradientsTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(2, g(constant_op.constant(2.)))
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def testGraphModeEagerGradError(self):
with context.graph_mode():
def f():


@ -963,6 +963,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
# construction. Eager's configuration is controlled in `__main__`.
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
@test_util.run_v1_only('b/120545219')
def testDeviceAnnotationsRespected(self):
def multi_device_fn():
@ -1001,6 +1002,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 2}))
@test_util.run_v1_only('b/120545219')
def testCallingGraphFunctionOnDifferentDevice(self):
def func():


@ -24,6 +24,7 @@ from tensorflow.python.framework import auto_control_deps as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
@ -46,6 +47,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(), 4.0)
@test_util.run_v1_only("b/120545219")
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
@ -67,6 +69,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
@ -90,6 +93,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
one.eval(feed_dict={p: True})
self.assertAllEqual(v.read_value().eval(), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
@ -124,6 +128,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
@ -144,6 +149,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
@ -165,6 +171,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)


@ -23,6 +23,7 @@ import os
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import traceable_stack
from tensorflow.python.platform import test
from tensorflow.python.util import tf_stack
@ -112,6 +113,7 @@ class ComputeColocationSummaryFromOpTest(test.TestCase):
self.assertIn("No node-device colocations", summary)
@test_util.run_v1_only("b/120545219")
class InterpolateFilenamesAndLineNumbersTest(test.TestCase):
def setUp(self):
@ -193,6 +195,7 @@ class InterpolateFilenamesAndLineNumbersTest(test.TestCase):
self.assertRegexpMatches(interpolated_string, "constant_op.py:[0-9]+.*")
@test_util.run_v1_only("b/120545219")
class InterpolateDeviceSummaryTest(test.TestCase):
def _fancy_device_function(self, unused_op):
@ -236,6 +239,7 @@ class InterpolateDeviceSummaryTest(test.TestCase):
self.assertRegexpMatches(result, expected_re)
@test_util.run_v1_only("b/120545219")
class InterpolateColocationSummaryTest(test.TestCase):
def setUp(self):
@ -260,11 +264,13 @@ class InterpolateColocationSummaryTest(test.TestCase):
self.graph = node_three.graph
@test_util.run_v1_only("b/120545219")
def testNodeThreeHasColocationInterpolation(self):
message = "{{colocation_node Three_with_one}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertIn("colocate_with(One)", result)
@test_util.run_v1_only("b/120545219")
def testNodeFourHasColocationInterpolationForNodeThreeOnly(self):
message = "{{colocation_node Four_with_three}}"
result = error_interpolation.interpolate(message, self.graph)
@ -273,12 +279,14 @@ class InterpolateColocationSummaryTest(test.TestCase):
"One", result,
"Node One should not appear in Four_with_three's summary:\n%s" % result)
@test_util.run_v1_only("b/120545219")
def testNodeFiveHasColocationInterpolationForNodeOneAndTwo(self):
message = "{{colocation_node Five_with_one_with_two}}"
result = error_interpolation.interpolate(message, self.graph)
self.assertIn("colocate_with(One)", result)
self.assertIn("colocate_with(Two)", result)
@test_util.run_v1_only("b/120545219")
def testColocationInterpolationForNodeLackingColocation(self):
message = "{{colocation_node One}}"
result = error_interpolation.interpolate(message, self.graph)


@ -103,7 +103,7 @@ class DeviceFunctionsTest(test.TestCase):
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testNestedDeviceFunctions(self):
with ops.Graph().as_default():
var_0 = variables.VariableV1(0)


@ -528,7 +528,7 @@ class ScopedMetaGraphTest(test.TestCase):
actual_grad_value = self.evaluate(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testImportWhileLoopInWhileLoop(self):
# Create a simple while loop.
with ops.Graph().as_default():


@ -605,6 +605,7 @@ class OperationTest(test_util.TensorFlowTestCase):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
@eager_function.defun
def test():
@ -780,7 +781,7 @@ class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
self.assertEqual(op3.name, "myop_2")
self.assertEqual(op4.name, "myop_1_1")
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testCond(self):
g = ops.Graph()
with g.as_default():
@ -810,7 +811,7 @@ class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
"cond/cond_text")
# pylint: enable=protected-access
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
g = ops.Graph()
with g.as_default():
@ -840,7 +841,7 @@ class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
"myloop/while_context")
# pylint: enable=protected-access
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
g = ops.Graph()
with g.as_default():
@ -864,7 +865,7 @@ class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
# Internal control dep is preserved
self.assertEqual(op.control_inputs, [c])
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
@ -2283,7 +2284,7 @@ class InitScopeTest(test_util.TensorFlowTestCase):
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
@ -2994,7 +2995,7 @@ class TracebackTest(test_util.TensorFlowTestCase):
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
@test_util.run_v1_only("b/120545219")
def testBadArgumentsToEnableEagerExecution(self):
with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)


@ -215,7 +215,7 @@ class SubscribeTest(test_util.TensorFlowTestCase):
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.VariableV1(0.0)
@ -254,7 +254,7 @@ class SubscribeTest(test_util.TensorFlowTestCase):
# Make sure the values read from the variable match the expected ones.
self.assertEqual([0.0, 3.0], shared)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
@ -344,7 +344,7 @@ class SubscribeTest(test_util.TensorFlowTestCase):
self.assertEqual(add.device, add_sub.device)
self.assertEqual(mul.device, mul_sub.device)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_subscribe_tensors_within_control_flow_context(self):
"""Side effect ops are added with the same control flow context."""
c1 = constant_op.constant(10)


@ -108,7 +108,7 @@ class ItemTest(test.TestCase):
newest_tf_item = grappler_item.tf_item
self.assertEqual(new_tf_item, newest_tf_item)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def testColocationContraints(self):
with ops.Graph().as_default() as g:
c = constant_op.constant([10])


@ -62,7 +62,7 @@ class MemoryOptimizerSwapTest(test.TestCase):
self.assertEqual(len(graph.node), graph_size)
self.assertItemsEqual([node.name for node in graph.node], nodes)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def testSimpleSwap(self):
"""Check that the swap annotations are followed."""
a = variables.VariableV1(10, name='a')


@ -57,7 +57,7 @@ class PyWrapOptimizeGraphTest(test.TestCase):
self.assertEqual(len(graph.node), 1)
self.assertItemsEqual([node.name for node in graph.node], ['d'])
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def testKeepNodes(self):
g = ops.Graph()
with g.as_default():
@ -86,7 +86,7 @@ class PyWrapOptimizeGraphTest(test.TestCase):
self.assertEqual(len(optimized_graph_nodes), len(expected_nodes))
self.assertAllInSet(optimized_graph_nodes, expected_nodes)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def testLoops(self):
g = ops.Graph()
with g.as_default():


@ -1422,7 +1422,7 @@ class TestCTC(test.TestCase):
decode_truth[i] == keras.backend.eval(decode_pred_tf[i])))
self.assertAllClose(log_prob_truth, log_prob_pred)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_ctc_batch_cost(self):
with self.cached_session():
label_lens = np.expand_dims(np.asarray([5, 4]), 1)


@ -403,7 +403,7 @@ class KerasCallbacksTest(test.TestCase):
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
@ -675,7 +675,7 @@ class KerasCallbacksTest(test.TestCase):
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_TensorBoard(self):
np.random.seed(1337)
@ -779,7 +779,7 @@ class KerasCallbacksTest(test.TestCase):
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
@ -851,7 +851,7 @@ class KerasCallbacksTest(test.TestCase):
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
@ -929,7 +929,7 @@ class KerasCallbacksTest(test.TestCase):
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()


@ -332,7 +332,7 @@ class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
class TestWholeModelSaving(test.TestCase):
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_sequential_model_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
@ -635,7 +635,7 @@ class TestWholeModelSaving(test.TestCase):
os.close(fd)
os.remove(fname)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_saving_model_with_long_weights_names(self):
if h5py is None:
self.skipTest('h5py required to run this test')
@ -756,7 +756,7 @@ class SubclassedModel(training.Model):
class TestWeightSavingAndLoadingTFFormat(test.TestCase):
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_keras_optimizer_warning(self):
graph = ops.Graph()
with graph.as_default(), self.session(graph):


@ -226,7 +226,7 @@ class TestSequential(keras_parameterized.TestCase):
inner_model.trainable = True
self.assertEqual(len(model.trainable_weights), 4)
@tf_test_util.run_deprecated_v1
@tf_test_util.run_v1_only('b/120545219')
def test_sequential_update_disabling(self):
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))


@ -107,6 +107,7 @@ class TopologyConstructionTest(test.TestCase):
self.assertEqual(len(network.updates), 5)
self.assertEqual(len(network.get_updates_for(x4)), 2)
@test_util.run_v1_only('b/120545219')
def test_get_updates_bn(self):
x1 = input_layer_lib.Input(shape=(1,))
layer = keras.layers.BatchNormalization()
@ -833,7 +834,7 @@ class TopologyConstructionTest(test.TestCase):
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_explicit_training_argument(self):
with self.cached_session():
a = keras.layers.Input(shape=(2,))


@ -471,7 +471,7 @@ class TrainingTest(keras_parameterized.TestCase):
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
@tf_test_util.run_deprecated_v1
@tf_test_util.run_v1_only('b/120545219')
def test_that_trainable_disables_updates(self):
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
@ -864,6 +864,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
self.assertLess(score[0], ref_score[0])
@keras_parameterized.run_all_keras_modes
@tf_test_util.run_v1_only('b/120545219')
def test_sample_weights(self):
num_classes = 5
batch_size = 5
@ -961,6 +962,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
self.assertTrue(msg_found)
@keras_parameterized.run_all_keras_modes
@tf_test_util.run_v1_only('b/120545219')
# TODO(b/120562577): Test failing with assertion error.
def DISABLED_test_temporal_sample_weights(self):
num_classes = 5
@ -1283,7 +1285,7 @@ class LossMaskingTest(keras_parameterized.TestCase):
class TestDynamicTrainability(keras_parameterized.TestCase):
@tf_test_util.run_deprecated_v1
@tf_test_util.run_v1_only('b/120545219')
def test_trainable_warning(self):
with self.cached_session():
x = np.random.random((5, 3))
@ -1297,7 +1299,7 @@ class TestDynamicTrainability(keras_parameterized.TestCase):
model.train_on_batch(x, y)
self.assertRaises(Warning)
@tf_test_util.run_deprecated_v1
@tf_test_util.run_v1_only('b/120545219')
def test_trainable_argument(self):
with self.cached_session():
x = np.random.random((5, 3))


@ -35,7 +35,7 @@ class KerasIntegrationTest(test.TestCase):
def test_version(self):
self.assertTrue(keras.__version__.endswith('-tf'))
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_vector_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
@ -134,6 +134,7 @@ class KerasIntegrationTest(test.TestCase):
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_v1_only('b/120545219')
def test_image_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
@ -168,7 +169,7 @@ class KerasIntegrationTest(test.TestCase):
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_video_classification_functional(self):
with self.cached_session():
np.random.seed(1337)
@ -197,7 +198,7 @@ class KerasIntegrationTest(test.TestCase):
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_vector_classification_shared_sequential(self):
# Test that Sequential models that feature internal updates
# and internal losses can be shared.
@ -232,7 +233,7 @@ class KerasIntegrationTest(test.TestCase):
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
@test_util.run_v1_only('b/120545219')
def test_vector_classification_shared_model(self):
# Test that functional models that feature internal updates
# and internal losses can be shared.


@ -115,7 +115,7 @@ class LSTMLayerTest(test.TestCase, parameterized.TestCase):
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint)
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_with_masking_layer_LSTM(self): def test_with_masking_layer_LSTM(self):
layer_class = keras.layers.LSTM layer_class = keras.layers.LSTM
inputs = np.random.random((2, 3, 4)) inputs = np.random.random((2, 3, 4))
@ -128,7 +128,7 @@ class LSTMLayerTest(test.TestCase, parameterized.TestCase):
optimizer=RMSPropOptimizer(0.01)) optimizer=RMSPropOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_masking_with_stacking_LSTM(self): def test_masking_with_stacking_LSTM(self):
inputs = np.random.random((2, 3, 4)) inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5))) targets = np.abs(np.random.random((2, 3, 5)))
@ -314,7 +314,7 @@ class LSTMLayerTest(test.TestCase, parameterized.TestCase):
class LSTMLayerGraphOnlyTest(test.TestCase): class LSTMLayerGraphOnlyTest(test.TestCase):
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_statefulness_LSTM(self): def test_statefulness_LSTM(self):
num_samples = 2 num_samples = 2
timesteps = 3 timesteps = 3


@ -29,6 +29,7 @@ from tensorflow.python.training import gradient_descent
@tf_test_util.run_all_in_graph_and_eager_modes @tf_test_util.run_all_in_graph_and_eager_modes
@tf_test_util.run_v1_only('b/120545219')
class NormalizationLayersTest(test.TestCase): class NormalizationLayersTest(test.TestCase):
def test_basic_batchnorm(self): def test_basic_batchnorm(self):
@ -227,6 +228,7 @@ class NormalizationLayersTest(test.TestCase):
norm(inp) norm(inp)
@tf_test_util.run_v1_only('b/120545219')
class NormalizationLayersGraphModeOnlyTest(test.TestCase): class NormalizationLayersGraphModeOnlyTest(test.TestCase):
def test_shared_batchnorm(self): def test_shared_batchnorm(self):
@ -301,7 +303,6 @@ class NormalizationLayersGraphModeOnlyTest(test.TestCase):
x2 = model.predict(val_a) x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7) self.assertAllClose(x1, x2, atol=1e-7)
@tf_test_util.run_deprecated_v1
def test_batchnorm_trainable(self): def test_batchnorm_trainable(self):
"""Tests that batchnorm layer is trainable when learning phase is enabled. """Tests that batchnorm layer is trainable when learning phase is enabled.


@ -98,7 +98,7 @@ class SimpleRNNLayerTest(test.TestCase):
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint) self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint) self.assertEqual(layer.cell.bias.constraint, b_constraint)
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_with_masking_layer_SimpleRNN(self): def test_with_masking_layer_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN layer_class = keras.layers.SimpleRNN
inputs = np.random.random((2, 3, 4)) inputs = np.random.random((2, 3, 4))
@ -121,7 +121,7 @@ class SimpleRNNLayerTest(test.TestCase):
class SimpleRNNLayerGraphOnlyTest(test.TestCase): class SimpleRNNLayerGraphOnlyTest(test.TestCase):
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_statefulness_SimpleRNN(self): def test_statefulness_SimpleRNN(self):
num_samples = 2 num_samples = 2
timesteps = 3 timesteps = 3


@ -56,9 +56,9 @@ _graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
_config = config_pb2.ConfigProto(graph_options=_graph_options) _config = config_pb2.ConfigProto(graph_options=_graph_options)
@test_util.run_v1_only('b/120545219')
class UnifiedLSTMTest(test.TestCase, parameterized.TestCase): class UnifiedLSTMTest(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def test_unifiedLSTM(self): def test_unifiedLSTM(self):
input_shape = 10 input_shape = 10
rnn_state_size = 8 rnn_state_size = 8
@ -103,7 +103,6 @@ class UnifiedLSTMTest(test.TestCase, parameterized.TestCase):
self.assertNotEqual(existing_loss, loss_value) self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value existing_loss = loss_value
@test_util.run_deprecated_v1
def test_unifiedLSTM_with_cond(self): def test_unifiedLSTM_with_cond(self):
# This test is to demonstrate the graph rewrite of grappler plugin under # This test is to demonstrate the graph rewrite of grappler plugin under
# the condition that the function returns different number of internal # the condition that the function returns different number of internal
@ -692,6 +691,7 @@ class UnifiedLSTMTest(test.TestCase, parameterized.TestCase):
model.train_on_batch([main_inputs] + initial_state, targets) model.train_on_batch([main_inputs] + initial_state, targets)
@test_util.run_v1_only('b/120545219')
class LSTMLayerGraphOnlyTest(test.TestCase): class LSTMLayerGraphOnlyTest(test.TestCase):
def test_statefulness_LSTM(self): def test_statefulness_LSTM(self):
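The hunks above also show the second common pattern: when an entire suite is graph-only, the annotation is hoisted onto the TestCase class (UnifiedLSTMTest, LSTMLayerGraphOnlyTest) and the now-redundant per-method @test_util.run_deprecated_v1 lines are deleted. A sketch of the resulting shape, with illustrative class and method names:

from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


@test_util.run_v1_only('b/120545219')
class GraphOnlySuiteExample(test.TestCase):
  # The class-level decorator covers every test method, so the individual
  # methods no longer carry their own @run_deprecated_v1 annotations.

  def test_first_case(self):
    with self.cached_session():
      self.assertAllEqual([1, 2], [1, 2])

  def test_second_case(self):
    with self.cached_session():
      self.assertEqual(3, 3)


if __name__ == '__main__':
  test.main()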


@ -165,6 +165,7 @@ class TimeDistributedTest(test.TestCase):
y = model.predict(np.random.random((10, 3, 2))) y = model.predict(np.random.random((10, 3, 2)))
self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1) self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1)
@tf_test_util.run_v1_only('b/120545219')
def test_TimeDistributed_batchnorm(self): def test_TimeDistributed_batchnorm(self):
with self.cached_session(): with self.cached_session():
# test that wrapped BN updates still work. # test that wrapped BN updates still work.
@ -187,6 +188,7 @@ class TimeDistributedTest(test.TestCase):
# Verify input_map has one mapping from inputs to reshaped inputs. # Verify input_map has one mapping from inputs to reshaped inputs.
self.assertEqual(len(td._input_map.keys()), 1) self.assertEqual(len(td._input_map.keys()), 1)
@tf_test_util.run_v1_only('b/120545219')
def test_TimeDistributed_trainable(self): def test_TimeDistributed_trainable(self):
# test layers that need learning_phase to be set # test layers that need learning_phase to be set
x = keras.layers.Input(shape=(3, 2)) x = keras.layers.Input(shape=(3, 2))
@ -201,7 +203,7 @@ class TimeDistributedTest(test.TestCase):
assert len(layer.updates) == 2 assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2 assert len(layer.trainable_weights) == 2
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self): def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
with self.cached_session(): with self.cached_session():
# test with unspecified shape and Embeddings with mask_zero # test with unspecified shape and Embeddings with mask_zero
@ -234,7 +236,7 @@ class TimeDistributedTest(test.TestCase):
self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i]) self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
self.assertIs(mask_outputs[-1], None) # final layer self.assertIs(mask_outputs[-1], None) # final layer
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_TimeDistributed_with_masking_layer(self): def test_TimeDistributed_with_masking_layer(self):
with self.cached_session(): with self.cached_session():
# test with Masking layer # test with Masking layer
@ -377,7 +379,7 @@ class BidirectionalTest(test.TestCase):
model.compile(loss='mse', optimizer='sgd') model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1) model.fit(x, y, epochs=1, batch_size=1)
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_Bidirectional_merged_value(self): def test_Bidirectional_merged_value(self):
rnn = keras.layers.LSTM rnn = keras.layers.LSTM
samples = 2 samples = 2
@ -508,7 +510,7 @@ class BidirectionalTest(test.TestCase):
layer.trainable = True layer.trainable = True
assert len(layer.trainable_weights) == 6 assert len(layer.trainable_weights) == 6
@tf_test_util.run_deprecated_v1 @tf_test_util.run_v1_only('b/120545219')
def test_Bidirectional_updates(self): def test_Bidirectional_updates(self):
with self.cached_session(): with self.cached_session():
x = keras.layers.Input(shape=(3, 2)) x = keras.layers.Input(shape=(3, 2))


@ -187,6 +187,7 @@ def get_nested_model_3(input_dim, num_classes):
@test_util.run_all_in_graph_and_eager_modes @test_util.run_all_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
class ModelSubclassingTest(test.TestCase): class ModelSubclassingTest(test.TestCase):
def test_custom_build(self): def test_custom_build(self):
@ -915,6 +916,7 @@ class ModelSubclassingTest(test.TestCase):
self.assertEqual(1, len(model.get_updates_for(x))) self.assertEqual(1, len(model.get_updates_for(x)))
@test_util.run_v1_only('b/120545219')
class GraphSpecificModelSubclassingTests(test.TestCase): class GraphSpecificModelSubclassingTests(test.TestCase):
@test_util.run_deprecated_v1 @test_util.run_deprecated_v1
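The subclassing hunks above, like the earlier normalization-layer ones, stack a class-level @test_util.run_v1_only on top of @test_util.run_all_in_graph_and_eager_modes. The likely effect is that the suite still exercises both graph and eager code paths while v1 behavior is active, but is skipped wholesale under TF 2.x; a sketch with an illustrative test body:

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


@test_util.run_all_in_graph_and_eager_modes
@test_util.run_v1_only('b/120545219')
class StackedDecoratorExample(test.TestCase):

  def test_add(self):
    # Runs once in graph mode and once eagerly when v1 behavior is enabled;
    # skipped entirely under TF 2.x behavior.
    total = constant_op.constant(2.0) + constant_op.constant(3.0)
    self.assertEqual(self.evaluate(total), 5.0)


if __name__ == '__main__':
  test.main()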


@ -69,7 +69,7 @@ def sequential_model(add_input_layer, include_input_shape=True):
class TestModelCloning(test.TestCase): class TestModelCloning(test.TestCase):
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_clone_sequential_model(self): def test_clone_sequential_model(self):
with self.cached_session(): with self.cached_session():
val_a = np.random.random((10, 4)) val_a = np.random.random((10, 4))
@ -102,10 +102,9 @@ class TestModelCloning(test.TestCase):
new_model.compile('rmsprop', 'mse') new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out) new_model.train_on_batch(None, val_out)
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_clone_sequential_model_input_layer(self): def test_clone_sequential_model_input_layer(self):
@test_util.run_deprecated_v1
def test_input_layer(include_inputs): def test_input_layer(include_inputs):
with self.cached_session(): with self.cached_session():
val_a = np.random.random((10, 4)) val_a = np.random.random((10, 4))
@ -142,7 +141,7 @@ class TestModelCloning(test.TestCase):
test_input_layer(True) test_input_layer(True)
test_input_layer(False) test_input_layer(False)
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_clone_functional_model(self): def test_clone_functional_model(self):
with self.cached_session(): with self.cached_session():
val_a = np.random.random((10, 4)) val_a = np.random.random((10, 4))
@ -318,6 +317,7 @@ class TestModelDeepCopy(test.TestCase):
model_copy.get_weights()[0])) model_copy.get_weights()[0]))
@test_util.run_v1_only('b/120545219')
class TestCloneAndBuildModel(test.TestCase): class TestCloneAndBuildModel(test.TestCase):
def test_clone_and_build_non_compiled_model(self): def test_clone_and_build_non_compiled_model(self):
@ -404,7 +404,6 @@ class TestCloneAndBuildModel(test.TestCase):
new_model.train_on_batch(inp, out) new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out) new_model.evaluate(inp, out)
@test_util.run_deprecated_v1
def test_clone_and_build_compiled_sequential_model(self): def test_clone_and_build_compiled_sequential_model(self):
with self.cached_session(): with self.cached_session():
model = keras.models.Sequential() model = keras.models.Sequential()
@ -417,7 +416,6 @@ class TestCloneAndBuildModel(test.TestCase):
self._clone_and_build_test_helper(model) self._clone_and_build_test_helper(model)
@test_util.run_deprecated_v1
def test_clone_and_build_functional_model(self): def test_clone_and_build_functional_model(self):
with self.cached_session(): with self.cached_session():
input_a = keras.Input(shape=(4,)) input_a = keras.Input(shape=(4,))
@ -434,7 +432,6 @@ class TestCloneAndBuildModel(test.TestCase):
self._clone_and_build_test_helper(model) self._clone_and_build_test_helper(model)
@test_util.run_deprecated_v1
def test_clone_and_build_subclassed_model(self): def test_clone_and_build_subclassed_model(self):
class SubclassedModel(keras.Model): class SubclassedModel(keras.Model):
@ -483,11 +480,9 @@ class TestCloneAndBuildModel(test.TestCase):
def test_replace_tf_optimizer_iterations_variable(self): def test_replace_tf_optimizer_iterations_variable(self):
self.assert_optimizer_iterations_increases(adam.AdamOptimizer(0.01)) self.assert_optimizer_iterations_increases(adam.AdamOptimizer(0.01))
@test_util.run_deprecated_v1
def test_replace_keras_optimizer_iterations_variable(self): def test_replace_keras_optimizer_iterations_variable(self):
self.assert_optimizer_iterations_increases('adam') self.assert_optimizer_iterations_increases('adam')
@test_util.run_deprecated_v1
def test_clone_and_build_sequential_model_without_inputs_defined(self): def test_clone_and_build_sequential_model_without_inputs_defined(self):
with self.cached_session(): with self.cached_session():
model = sequential_model(False, False) model = sequential_model(False, False)
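One detail worth noting in models_test: test_clone_sequential_model_input_layer previously carried a second @run_deprecated_v1 on its nested test_input_layer helper; once the enclosing test method is annotated with @test_util.run_v1_only, the inner decorator is dropped. A sketch of that shape, with an invented helper body:

from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class CloneAnnotationExample(test.TestCase):

  @test_util.run_v1_only('b/120545219')
  def test_clone_with_and_without_input_layer(self):
    # The annotation sits on the test method; nested helpers run inside the
    # same v1 context and do not need their own decorators.
    def check(include_inputs):
      with self.cached_session():
        self.assertIn(include_inputs, (True, False))

    check(True)
    check(False)


if __name__ == '__main__':
  test.main()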


@ -91,26 +91,26 @@ def _test_optimizer(optimizer, target=0.75):
class KerasOptimizersTest(test.TestCase): class KerasOptimizersTest(test.TestCase):
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_sgd(self): def test_sgd(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.SGD(lr=0.01, _test_optimizer(keras.optimizers.SGD(lr=0.01,
momentum=0.9, momentum=0.9,
nesterov=True)) nesterov=True))
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_rmsprop(self): def test_rmsprop(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.RMSprop()) _test_optimizer(keras.optimizers.RMSprop())
_test_optimizer(keras.optimizers.RMSprop(decay=1e-3)) _test_optimizer(keras.optimizers.RMSprop(decay=1e-3))
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_adagrad(self): def test_adagrad(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.Adagrad()) _test_optimizer(keras.optimizers.Adagrad())
_test_optimizer(keras.optimizers.Adagrad(decay=1e-3)) _test_optimizer(keras.optimizers.Adagrad(decay=1e-3))
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_adadelta(self): def test_adadelta(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.Adadelta(), target=0.6) _test_optimizer(keras.optimizers.Adadelta(), target=0.6)
@ -119,32 +119,32 @@ class KerasOptimizersTest(test.TestCase):
# the accuracy. # the accuracy.
_test_optimizer(keras.optimizers.Adadelta(decay=1e-3), target=0.4) _test_optimizer(keras.optimizers.Adadelta(decay=1e-3), target=0.4)
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_adam(self): def test_adam(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.Adam()) _test_optimizer(keras.optimizers.Adam())
_test_optimizer(keras.optimizers.Adam(decay=1e-3)) _test_optimizer(keras.optimizers.Adam(decay=1e-3))
_test_optimizer(keras.optimizers.Adam(amsgrad=True)) _test_optimizer(keras.optimizers.Adam(amsgrad=True))
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_adamax(self): def test_adamax(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.Adamax()) _test_optimizer(keras.optimizers.Adamax())
_test_optimizer(keras.optimizers.Adamax(decay=1e-3)) _test_optimizer(keras.optimizers.Adamax(decay=1e-3))
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_nadam(self): def test_nadam(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.Nadam()) _test_optimizer(keras.optimizers.Nadam())
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_clipnorm(self): def test_clipnorm(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.SGD(lr=0.01, _test_optimizer(keras.optimizers.SGD(lr=0.01,
momentum=0.9, momentum=0.9,
clipnorm=0.5)) clipnorm=0.5))
@test_util.run_deprecated_v1 @test_util.run_v1_only('b/120545219')
def test_clipvalue(self): def test_clipvalue(self):
with self.cached_session(): with self.cached_session():
_test_optimizer(keras.optimizers.SGD(lr=0.01, _test_optimizer(keras.optimizers.SGD(lr=0.01,


@ -110,6 +110,7 @@ class AtrousConvolutionTest(test.TestCase):
add_check(check, y1, y2) add_check(check, y1, y2)
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_last_format(self): def test_unknown_spatial_dims_for_channel_last_format(self):
x = array_ops.placeholder(dtypes.float32, [1, None, None, 10]) x = array_ops.placeholder(dtypes.float32, [1, None, None, 10])
w = array_ops.zeros([3, 3, 10, 20]) w = array_ops.zeros([3, 3, 10, 20])
@ -117,6 +118,7 @@ class AtrousConvolutionTest(test.TestCase):
x, w, "VALID", dilation_rate=[2, 2], data_format="NHWC") x, w, "VALID", dilation_rate=[2, 2], data_format="NHWC")
self.assertEqual(y.shape.as_list(), [1, None, None, 20]) self.assertEqual(y.shape.as_list(), [1, None, None, 20])
@test_util.run_v1_only("b/120545219")
def test_unknown_spatial_dims_for_channel_first_format(self): def test_unknown_spatial_dims_for_channel_first_format(self):
x = array_ops.placeholder(dtypes.float32, [1, 10, None, None]) x = array_ops.placeholder(dtypes.float32, [1, 10, None, None])
w = array_ops.zeros([3, 3, 10, 20]) w = array_ops.zeros([3, 3, 10, 20])
@ -262,6 +264,7 @@ class AtrousConvolutionTest(test.TestCase):
err_tolerance = 1e-3 err_tolerance = 1e-3
self.assertLess(err, err_tolerance) self.assertLess(err, err_tolerance)
@test_util.run_v1_only("b/120545219")
def testGradient(self): def testGradient(self):
with self.cached_session(): with self.cached_session():
for padding in ["SAME", "VALID"]: for padding in ["SAME", "VALID"]:
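The AtrousConvolution cases above are typical of the kernel tests annotated below: they build graphs with array_ops.placeholder and evaluate inside self.cached_session(), which only makes sense with v1 graph-and-session semantics, hence @test_util.run_v1_only. A minimal sketch of that style of test (shape and assertion are illustrative):

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class PlaceholderGraphTestExample(test.TestCase):

  @test_util.run_v1_only("b/120545219")
  def test_placeholder_shape_inference(self):
    # Placeholders only exist in graph mode, hence the v1-only annotation.
    x = array_ops.placeholder(dtypes.float32, [1, None, None, 10])
    self.assertEqual(x.shape.as_list(), [1, None, None, 10])


if __name__ == '__main__':
  test.main()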


@ -31,6 +31,7 @@ from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test from tensorflow.python.platform import test
@test_util.run_v1_only("b/120545219")
class Base64OpsTest(test_util.TensorFlowTestCase): class Base64OpsTest(test_util.TensorFlowTestCase):
def setUp(self): def setUp(self):


@ -21,6 +21,7 @@ from __future__ import print_function
import numpy as np import numpy as np
from tensorflow.python import tf2 from tensorflow.python import tf2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2 from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops from tensorflow.python.ops import math_ops
@ -135,6 +136,7 @@ class BatchMatmulOpTest(test.TestCase):
def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape): def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
@test_util.run_v1_only("b/120545219")
def Test(self): def Test(self):
np.random.seed(42) np.random.seed(42)
self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape) self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
@ -184,6 +186,7 @@ class BatchMatmulGradientTest(test.TestCase):
def _GetBatchMatmulGradientTest(dtype, adjoint_a, adjoint_b): def _GetBatchMatmulGradientTest(dtype, adjoint_a, adjoint_b):
@test_util.run_v1_only("b/120545219")
def Test(self): def Test(self):
self._compare(1, 2, 3, 5, dtype, adjoint_a, adjoint_b) self._compare(1, 2, 3, 5, dtype, adjoint_a, adjoint_b)
self._compare(3, 4, 7, 10, dtype, adjoint_a, adjoint_b) self._compare(3, 4, 7, 10, dtype, adjoint_a, adjoint_b)
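batch_matmul_op_test generates its test methods from a factory, so the change both adds the missing test_util import and decorates the generated Test closure before it is attached to the test class. A rough sketch of the same pattern, with a simplified stand-in factory and assertion:

import numpy as np

from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class GeneratedMatmulTestExample(test.TestCase):
  pass


def _make_case(adjoint_a, adjoint_b):

  @test_util.run_v1_only("b/120545219")
  def Test(self):
    # Simplified stand-in for the generated batch-matmul check.
    a = np.random.rand(2, 3, 4).astype(np.float32)
    b = np.random.rand(2, 4, 5).astype(np.float32)
    out = math_ops.matmul(a, b, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
    self.assertEqual(self.evaluate(out).shape, (2, 3, 5))

  return Test


# Attach the generated method, mirroring how _GetBatchMatmulOpTest is used.
setattr(GeneratedMatmulTestExample, "testMatmulPlain", _make_case(False, False))


if __name__ == '__main__':
  test.main()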


@ -82,7 +82,7 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
self.max_elements = 1 << 16 self.max_elements = 1 << 16
self.num_quantiles = constant_op.constant(3, dtype=dtypes.int64) self.num_quantiles = constant_op.constant(3, dtype=dtypes.int64)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testBasicQuantileBucketsSingleResource(self): def testBasicQuantileBucketsSingleResource(self):
with self.cached_session() as sess: with self.cached_session() as sess:
quantile_accumulator_handle = self.create_resource("floats", self.eps, quantile_accumulator_handle = self.create_resource("floats", self.eps,
@ -107,7 +107,7 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval()) self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval()) self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testBasicQuantileBucketsMultipleResources(self): def testBasicQuantileBucketsMultipleResources(self):
with self.cached_session() as sess: with self.cached_session() as sess:
quantile_accumulator_handle_0 = self.create_resource("float_0", self.eps, quantile_accumulator_handle_0 = self.create_resource("float_0", self.eps,
@ -142,7 +142,7 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval()) self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval()) self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testSaveRestoreAfterFlush(self): def testSaveRestoreAfterFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore") save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash") save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
@ -175,7 +175,7 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
self.assertAllClose(self._feature_0_boundaries, buckets[0].eval()) self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
self.assertAllClose(self._feature_1_boundaries, buckets[1].eval()) self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testSaveRestoreBeforeFlush(self): def testSaveRestoreBeforeFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore") save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash") save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")


@ -105,6 +105,7 @@ class GenerateVocabRemappingTest(test.TestCase):
self.assertAllEqual(expected_num_present, self.evaluate(num_present)) self.assertAllEqual(expected_num_present, self.evaluate(num_present))
@test_util.run_v1_only('b/120545219')
class LoadAndRemapMatrixTest(test.TestCase): class LoadAndRemapMatrixTest(test.TestCase):
"""Tests for the load_and_remap_matrix() op.""" """Tests for the load_and_remap_matrix() op."""


@ -155,6 +155,7 @@ class CholeskyOpTest(test.TestCase):
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]] np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
])) ]))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self): def testWrongDimensions(self):
tensor3 = constant_op.constant([1., 2.]) tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
@ -233,6 +234,7 @@ class CholeskyGradTest(test.TestCase):
self.runFiniteDifferences( self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True) shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
@test_util.run_v1_only("b/120545219")
def testTwoBlockMatrixComplexFloat(self): def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0) np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1]) shapes = self.getShapes([2 * self._backprop_block_size + 1])


@ -170,6 +170,7 @@ class CondV2Test(test.TestCase):
self.assertRegexpMatches( self.assertRegexpMatches(
cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*") cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
@test_util.run_v1_only("b/120545219")
def testDefunInCond(self): def testDefunInCond(self):
x = constant_op.constant(1.0, name="x") x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y") y = constant_op.constant(2.0, name="y")
@ -189,6 +190,7 @@ class CondV2Test(test.TestCase):
self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y]) self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNestedDefunInCond(self): def testNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x") x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y") y = constant_op.constant(2.0, name="y")
@ -213,6 +215,7 @@ class CondV2Test(test.TestCase):
self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y]) self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testDoubleNestedDefunInCond(self): def testDoubleNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x") x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y") y = constant_op.constant(2.0, name="y")
@ -773,6 +776,7 @@ class CondV2Test(test.TestCase):
self.assertAllEqual( self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16]) self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.run_deprecated_v1
def testForwardPassRewrite(self): def testForwardPassRewrite(self):
x = constant_op.constant(1.0, name="x") x = constant_op.constant(1.0, name="x")
output = cond_v2.cond_v2(constant_op.constant(True), output = cond_v2.cond_v2(constant_op.constant(True),
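cond_v2_test ends up with both annotations in the same suite: testDefunInCond is marked @test_util.run_v1_only, while the nested-defun and forward-pass-rewrite cases gain @test_util.run_deprecated_v1, so they keep running but do so under a graph. A small sketch of a suite carrying both decorators (the bodies are placeholders):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test


class MixedAnnotationExample(test.TestCase):

  @test_util.run_v1_only("b/120545219")
  def test_graph_only_case(self):
    # Skipped entirely when TF 2.x behavior is enabled.
    pred = constant_op.constant(True)
    out = control_flow_ops.cond(pred,
                                lambda: constant_op.constant(1.0),
                                lambda: constant_op.constant(2.0))
    self.assertEqual(self.evaluate(out), 1.0)

  @test_util.run_deprecated_v1
  def test_runs_under_a_graph(self):
    # Still executed, but inside a v1-style graph.
    out = control_flow_ops.cond(constant_op.constant(False),
                                lambda: constant_op.constant(1.0),
                                lambda: constant_op.constant(2.0))
    self.assertEqual(self.evaluate(out), 2.0)


if __name__ == '__main__':
  test.main()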


@ -199,7 +199,7 @@ class ConditionalAccumulatorTest(test.TestCase):
is_all_equal &= (val[i][j] == elems_ave[i][j]) is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal) self.assertTrue(is_all_equal)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAccumulatorWrongDynamicShape(self): def testAccumulatorWrongDynamicShape(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator( q = data_flow_ops.ConditionalAccumulator(
@ -321,7 +321,7 @@ class ConditionalAccumulatorTest(test.TestCase):
shape=tensor_shape.TensorShape([1]), shape=tensor_shape.TensorShape([1]),
reduction_type="Invalid") reduction_type="Invalid")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAccumulatorInvalidTakeGrad(self): def testAccumulatorInvalidTakeGrad(self):
with self.cached_session(): with self.cached_session():
q = data_flow_ops.ConditionalAccumulator( q = data_flow_ops.ConditionalAccumulator(
@ -435,7 +435,7 @@ class ConditionalAccumulatorTest(test.TestCase):
if x >= ls) / sum(1 for x in local_steps if x >= ls) / sum(1 for x in local_steps
if x >= ls), val) if x >= ls), val)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testParallelApplyGrad(self): def testParallelApplyGrad(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator( q = data_flow_ops.ConditionalAccumulator(
@ -461,7 +461,7 @@ class ConditionalAccumulatorTest(test.TestCase):
self.assertEqual(val, sum(elems) / len(elems)) self.assertEqual(val, sum(elems) / len(elems))
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testParallelTakeGrad(self): def testParallelTakeGrad(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator( q = data_flow_ops.ConditionalAccumulator(
@ -494,7 +494,7 @@ class ConditionalAccumulatorTest(test.TestCase):
self.assertItemsEqual(elems, results) self.assertItemsEqual(elems, results)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAccumulatorApplyAndBlockingTake(self): def testAccumulatorApplyAndBlockingTake(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator( q = data_flow_ops.ConditionalAccumulator(
@ -528,7 +528,7 @@ class ConditionalAccumulatorTest(test.TestCase):
with self.assertRaisesOpError("was cancelled"): with self.assertRaisesOpError("was cancelled"):
self.evaluate(takeg_op) self.evaluate(takeg_op)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAccumulatorCancel(self): def testAccumulatorCancel(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.ConditionalAccumulator( q = data_flow_ops.ConditionalAccumulator(


@ -129,6 +129,7 @@ def isum(s, maximum_iterations=None):
@test_util.with_control_flow_v2 @test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase): class ControlFlowTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testRefIdentity(self): def testRefIdentity(self):
with self.cached_session(): with self.cached_session():
v = variables.VariableV1(7) v = variables.VariableV1(7)
@ -141,7 +142,7 @@ class ControlFlowTest(test.TestCase):
variables.global_variables_initializer().run() variables.global_variables_initializer().run()
self.assertEqual(9, self.evaluate(v2)) self.assertEqual(9, self.evaluate(v2))
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testRefEnter(self): def testRefEnter(self):
with self.cached_session(): with self.cached_session():
v = variables.VariableV1(7) v = variables.VariableV1(7)
@ -155,7 +156,7 @@ class ControlFlowTest(test.TestCase):
variables.global_variables_initializer().run() variables.global_variables_initializer().run()
self.assertEqual(9, self.evaluate(v3)) self.assertEqual(9, self.evaluate(v3))
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testRefSwitch(self): def testRefSwitch(self):
with self.cached_session(): with self.cached_session():
v = variables.VariableV1(7) v = variables.VariableV1(7)
@ -193,6 +194,7 @@ class ControlFlowTest(test.TestCase):
v, "frame2", is_constant=False) v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None) self.assertEqual(enter_v_non_constant.shape, None)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeIndexedSlices(self): def testSwitchMergeIndexedSlices(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6]) values = constant_op.constant([1, 2, 3, 4, 5, 6])
@ -207,6 +209,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(np.arange(1, 7), val) self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind) self.assertAllEqual(np.arange(0, 12, 2), ind)
@test_util.run_v1_only("b/120545219")
def testSwitchDeadBranch(self): def testSwitchDeadBranch(self):
with self.cached_session(): with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
@ -219,6 +222,7 @@ class ControlFlowTest(test.TestCase):
lambda e: "Retval[0] does not have value" in str(e)): lambda e: "Retval[0] does not have value" in str(e)):
self.evaluate(dead_branch) self.evaluate(dead_branch)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeLess(self): def testSwitchMergeLess(self):
with self.cached_session(): with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
@ -231,6 +235,7 @@ class ControlFlowTest(test.TestCase):
result = self.evaluate(merge_op) result = self.evaluate(merge_op)
self.assertAllEqual(np.arange(1, 7), result) self.assertAllEqual(np.arange(1, 7), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddIdentity(self): def testSwitchMergeAddIdentity(self):
with self.cached_session(): with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
@ -244,6 +249,7 @@ class ControlFlowTest(test.TestCase):
result = self.evaluate(merge_op) result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result) self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddMul(self): def testSwitchMergeAddMul(self):
with self.cached_session(): with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
@ -258,6 +264,7 @@ class ControlFlowTest(test.TestCase):
result = self.evaluate(merge_op) result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result) self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testLoop_false(self): def testLoop_false(self):
with self.cached_session(): with self.cached_session():
false = ops.convert_to_tensor(False) false = ops.convert_to_tensor(False)
@ -302,6 +309,7 @@ class ControlFlowTest(test.TestCase):
result = self.evaluate(exit_i) result = self.evaluate(exit_i)
self.assertAllEqual(10, result) self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testLoop_2(self): def testLoop_2(self):
with self.cached_session(): with self.cached_session():
zero = constant_op.constant(0) zero = constant_op.constant(0)
@ -328,6 +336,7 @@ class ControlFlowTest(test.TestCase):
result = self.evaluate(exit_i) result = self.evaluate(exit_i)
self.assertAllEqual(10, result) self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testDifferentFrame(self): def testDifferentFrame(self):
with self.cached_session(): with self.cached_session():
data = array_ops.placeholder(dtypes.float32, shape=[]) data = array_ops.placeholder(dtypes.float32, shape=[])
@ -362,6 +371,7 @@ class ControlFlowTest(test.TestCase):
lambda: math_ops.subtract(x, 1.)) lambda: math_ops.subtract(x, 1.))
self.assertEqual(b.shape, tensor_shape.scalar()) self.assertEqual(b.shape, tensor_shape.scalar())
@test_util.run_v1_only("b/120545219")
def testFetchable(self): def testFetchable(self):
with self.cached_session() as sess: with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32) x = array_ops.placeholder(dtypes.float32)
@ -378,6 +388,7 @@ class ControlFlowTest(test.TestCase):
sess.run(t, feed_dict={x: 3}) sess.run(t, feed_dict={x: 3})
@test_util.disable_control_flow_v2("Not relevant") @test_util.disable_control_flow_v2("Not relevant")
@test_util.run_v1_only("b/120545219")
def testFeedable(self): def testFeedable(self):
with self.cached_session() as sess: with self.cached_session() as sess:
c = constant_op.constant(2) c = constant_op.constant(2)
@ -395,6 +406,7 @@ class ControlFlowTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, "may not be fed"): with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3}) sess.run(r, feed_dict={t: 3})
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlices(self): def testCondIndexedSlices(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant(10) values = constant_op.constant(10)
@ -410,6 +422,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(11, val) self.assertAllEqual(11, val)
self.assertAllEqual(0, ind) self.assertAllEqual(0, ind)
@test_util.run_v1_only("b/120545219")
def testCondSparseTensor(self): def testCondSparseTensor(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values") values = constant_op.constant([2.0, 4.0], name="values")
@ -427,6 +440,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual([[1], [4]], r.indices.eval()) self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,)) self.assertAllEqual(r.values.get_shape(), (2,))
@test_util.run_v1_only("b/120545219")
def testCondResource(self): def testCondResource(self):
with self.cached_session(): with self.cached_session():
@ -441,6 +455,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval()) self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())
@test_util.run_v1_only("b/120545219")
def testCondWithTensorArrayGrad(self): def testCondWithTensorArrayGrad(self):
with self.cached_session() as sess: with self.cached_session() as sess:
with ops.device(test.gpu_device_name()): with ops.device(test.gpu_device_name()):
@ -455,6 +470,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0]) self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0])
@test_util.disable_control_flow_v2("b/113293074") @test_util.disable_control_flow_v2("b/113293074")
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlicesDifferentTypes(self): def testCondIndexedSlicesDifferentTypes(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant(10) values = constant_op.constant(10)
@ -472,6 +488,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(0, ind) self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64) self.assertTrue(ind.dtype == np.int64)
@test_util.run_v1_only("b/120545219")
def testCondColocation(self): def testCondColocation(self):
with self.session(use_gpu=True): with self.session(use_gpu=True):
with ops.device("/cpu:0"): with ops.device("/cpu:0"):
@ -576,6 +593,7 @@ class ControlFlowTest(test.TestCase):
alive, count = body(i) alive, count = body(i)
self.assertAllEqual(4, self.evaluate(count)) self.assertAllEqual(4, self.evaluate(count))
@test_util.run_v1_only("b/120545219")
def testCond_6(self): def testCond_6(self):
with self.cached_session(): with self.cached_session():
v1 = variables.Variable([7]) v1 = variables.Variable([7])
@ -671,6 +689,7 @@ class ControlFlowTest(test.TestCase):
test_result = self.evaluate(r) test_result = self.evaluate(r)
self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result) self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result)
@test_util.run_v1_only("b/120545219")
def testCheckNestedOutputStruct(self): def testCheckNestedOutputStruct(self):
with self.cached_session() as sess: with self.cached_session() as sess:
x = constant_op.constant(10) x = constant_op.constant(10)
@ -701,7 +720,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual([2.0], self.evaluate(r)) self.assertAllEqual([2.0], self.evaluate(r))
@test_util.disable_control_flow_v2("b/79881896 (control deps)") @test_util.disable_control_flow_v2("b/79881896 (control deps)")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testCondWithControl(self): def testCondWithControl(self):
with self.cached_session(): with self.cached_session():
control_holder = array_ops.placeholder(dtypes.float32, shape=()) control_holder = array_ops.placeholder(dtypes.float32, shape=())
@ -717,6 +736,7 @@ class ControlFlowTest(test.TestCase):
lambda: constant_op.constant(1)) lambda: constant_op.constant(1))
self.assertEqual(5, self.evaluate(r)) self.assertEqual(5, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testUninitializedRefIdentity(self): def testUninitializedRefIdentity(self):
with self.cached_session() as sess: with self.cached_session() as sess:
v = gen_state_ops.variable( v = gen_state_ops.variable(
@ -771,6 +791,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(pred, fn1, fn2) r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r) self.evaluate(r)
@test_util.run_v1_only("b/120545219")
def testCondGrad_1(self): def testCondGrad_1(self):
with self.cached_session(): with self.cached_session():
x = constant_op.constant(10.0, name="x") x = constant_op.constant(10.0, name="x")
@ -845,6 +866,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0) self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0)
self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0) self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
@test_util.run_v1_only("b/120545219")
def testNestedCond_Simple(self): def testNestedCond_Simple(self):
with self.cached_session(): with self.cached_session():
x = constant_op.constant(0., name="X") x = constant_op.constant(0., name="X")
@ -861,7 +883,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(1.0, self.evaluate(result)) self.assertEqual(1.0, self.evaluate(result))
@test_util.disable_control_flow_v2("b/113327884") @test_util.disable_control_flow_v2("b/113327884")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testCondGrad_Gather(self): def testCondGrad_Gather(self):
with self.cached_session() as sess: with self.cached_session() as sess:
v1 = variables.Variable([1.0, 42.0]) v1 = variables.Variable([1.0, 42.0])
@ -885,6 +907,7 @@ class ControlFlowTest(test.TestCase):
] ]
self.assertAllEqual(dense_gv, [0.0, 2.0]) self.assertAllEqual(dense_gv, [0.0, 2.0])
@test_util.run_v1_only("b/120545219")
def testCondPredicateTensor(self): def testCondPredicateTensor(self):
"""Regression test for lowering predicate from non-first output of an op.""" """Regression test for lowering predicate from non-first output of an op."""
@ -1011,6 +1034,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(10000, self.evaluate(r)) self.assertEqual(10000, self.evaluate(r))
@test_util.disable_control_flow_v2("b/79881896 (control deps)") @test_util.disable_control_flow_v2("b/79881896 (control deps)")
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependencies(self): def testWhileExternalControlDependencies(self):
with self.cached_session(): with self.cached_session():
v = variables.Variable(0.0) v = variables.Variable(0.0)
@ -1027,6 +1051,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(v.eval(), 1.0) self.assertAllEqual(v.eval(), 1.0)
@test_util.disable_control_flow_v2("b/79881896 (control deps)") @test_util.disable_control_flow_v2("b/79881896 (control deps)")
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependenciesNoInput(self): def testWhileExternalControlDependenciesNoInput(self):
with self.cached_session(): with self.cached_session():
v = variables.Variable(0.0) v = variables.Variable(0.0)
@ -1043,7 +1068,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(v.eval(), 1.0) self.assertAllEqual(v.eval(), 1.0)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testWhileWithRefs_1(self): def testWhileWithRefs_1(self):
with self.cached_session() as sess: with self.cached_session() as sess:
x = variables.VariableV1(0)._ref() # pylint: disable=protected-access x = variables.VariableV1(0)._ref() # pylint: disable=protected-access
@ -1080,6 +1105,7 @@ class ControlFlowTest(test.TestCase):
r = isum(s, maximum_iterations=3) r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], self.evaluate(r)) self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self): def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session(): with self.cached_session():
r = control_flow_ops.while_loop( r = control_flow_ops.while_loop(
@ -1087,6 +1113,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(1, self.evaluate(r)) self.assertEqual(1, self.evaluate(r))
@test_util.disable_control_flow_v2("b/115776323 (max_iters)") @test_util.disable_control_flow_v2("b/115776323 (max_iters)")
@test_util.run_v1_only("b/120545219")
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self): def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0) v = constant_op.constant(1.0)
@ -1112,6 +1139,7 @@ class ControlFlowTest(test.TestCase):
# Should execute without issue. # Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute)) self.assertEqual(3, self.evaluate(loop_execute))
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self): def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0) v = constant_op.constant(1.0)
@ -1172,6 +1200,7 @@ class ControlFlowTest(test.TestCase):
r"context '.*' \(currently defined in '.*'\)"): r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v) _ = gradients_impl.gradients(loop_with_maxiter, v)
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self): def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
v = constant_op.constant(1.0) v = constant_op.constant(1.0)
@ -1215,6 +1244,7 @@ class ControlFlowTest(test.TestCase):
_ = gradients_impl.gradients(loop, v) _ = gradients_impl.gradients(loop, v)
@test_util.disable_control_flow_v2("b/118457764") @test_util.disable_control_flow_v2("b/118457764")
@test_util.run_v1_only("b/120545219")
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self): def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
v = constant_op.constant(1.0) v = constant_op.constant(1.0)
@ -1326,6 +1356,7 @@ class ControlFlowTest(test.TestCase):
result = r[3].eval() result = r[3].eval()
self.assertAllEqual(42, result) self.assertAllEqual(42, result)
@test_util.run_v1_only("b/120545219")
def testWhile_5(self): def testWhile_5(self):
with self.cached_session(): with self.cached_session():
@ -1351,6 +1382,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result) self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
@test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)") @test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
@test_util.run_v1_only("b/120545219")
def testBufferForwarding(self): def testBufferForwarding(self):
run_options = config_pb2.RunOptions( run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE) trace_level=config_pb2.RunOptions.FULL_TRACE)
@ -1435,6 +1467,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20) r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], self.evaluate(r)) self.assertEqual([10000], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileShapeInference(self): def testWhileShapeInference(self):
with self.cached_session(): with self.cached_session():
i = constant_op.constant(0) i = constant_op.constant(0)
@ -1461,6 +1494,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(c, b, [i, m]) r = control_flow_ops.while_loop(c, b, [i, m])
@test_util.disable_control_flow_v2("b/116328420 (SparseTensor)") @test_util.disable_control_flow_v2("b/116328420 (SparseTensor)")
@test_util.run_v1_only("b/120545219")
def testWhileShapeInferenceSparseTensor(self): def testWhileShapeInferenceSparseTensor(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values") values = constant_op.constant([2.0, 4.0], name="values")
@ -1493,7 +1527,7 @@ class ControlFlowTest(test.TestCase):
[i.get_shape(), tensor_shape.TensorShape([5])]) [i.get_shape(), tensor_shape.TensorShape([5])])
@test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)") @test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testWhileShapeInferenceIndexedSlices(self): def testWhileShapeInferenceIndexedSlices(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values") values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
@ -1584,6 +1618,7 @@ class ControlFlowTest(test.TestCase):
self._testNestedWhile_2(use_gpu=False) self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True) self._testNestedWhile_2(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_1(self): def testWhileWithControl_1(self):
with self.cached_session(): with self.cached_session():
n = constant_op.constant(0) n = constant_op.constant(0)
@ -1615,6 +1650,7 @@ class ControlFlowTest(test.TestCase):
condition, body, [r], parallel_iterations=1) condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, self.evaluate(res)) self.assertAllEqual(12, self.evaluate(res))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_3(self): def testWhileWithControl_3(self):
with self.cached_session() as sess: with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool) b = array_ops.placeholder(dtypes.bool)
@ -1624,6 +1660,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0]) r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True})) self.assertEqual(10, sess.run(r, {b: True}))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_4(self): def testWhileWithControl_4(self):
with self.cached_session() as sess: with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool) b = array_ops.placeholder(dtypes.bool)
@ -1635,6 +1672,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(10, sess.run(r, {b: True})) self.assertEqual(10, sess.run(r, {b: True}))
@test_util.disable_control_flow_v2("b/79881896 (control_deps)") @test_util.disable_control_flow_v2("b/79881896 (control_deps)")
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_5(self): def testWhileWithControl_5(self):
with self.cached_session() as sess: with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool) b = array_ops.placeholder(dtypes.bool)
@ -1663,6 +1701,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(0, self.evaluate(loop)) self.assertEqual(0, self.evaluate(loop))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)") @test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondWithControl_1(self): def testWhileCondWithControl_1(self):
with self.cached_session(): with self.cached_session():
v = variable_scope.get_variable( v = variable_scope.get_variable(
@ -1686,6 +1725,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(65536.0, self.evaluate(v)) self.assertAllClose(65536.0, self.evaluate(v))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)") @test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondExitControl(self): def testWhileCondExitControl(self):
with self.cached_session(): with self.cached_session():
@ -1855,6 +1895,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result) self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_2(self): def testWhileUpdateVariable_2(self):
with self.cached_session(): with self.cached_session():
select1 = variables.Variable([3.0, 4.0, 5.0]) select1 = variables.Variable([3.0, 4.0, 5.0])
@ -1905,7 +1946,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result) self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_4(self): def testWhileUpdateVariable_4(self):
with self.cached_session(): with self.cached_session():
var_a = variables.Variable(0, name="a") var_a = variables.Variable(0, name="a")
@ -1934,7 +1975,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(10, self.evaluate(var_b)) self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_5(self): def testWhileUpdateVariable_5(self):
with self.cached_session(): with self.cached_session():
# Create some variables. # Create some variables.
@ -1965,6 +2006,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(10, self.evaluate(var_b)) self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_6(self): def testWhileUpdateVariable_6(self):
with self.cached_session(): with self.cached_session():
# Create some variables. # Create some variables.
@ -1994,6 +2036,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(55, self.evaluate(var_b)) self.assertEqual(55, self.evaluate(var_b))
self.assertEqual(10, self.evaluate(var_a)) self.assertEqual(10, self.evaluate(var_a))
@test_util.run_v1_only("b/120545219")
def testWhileQueue_1(self): def testWhileQueue_1(self):
with self.cached_session(): with self.cached_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32) q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
@ -2012,6 +2055,7 @@ class ControlFlowTest(test.TestCase):
for i in xrange(10): for i in xrange(10):
self.assertEqual([i], q.dequeue().eval()) self.assertEqual([i], q.dequeue().eval())
@test_util.run_v1_only("b/120545219")
def testWhileTimeOut(self): def testWhileTimeOut(self):
run_options = config_pb2.RunOptions(timeout_in_ms=1) run_options = config_pb2.RunOptions(timeout_in_ms=1)
with self.cached_session() as sess: with self.cached_session() as sess:
@ -2023,6 +2067,7 @@ class ControlFlowTest(test.TestCase):
sess.run(r, options=run_options) sess.run(r, options=run_options)
@test_util.disable_control_flow_v2("b/117119329 (stack)") @test_util.disable_control_flow_v2("b/117119329 (stack)")
@test_util.run_v1_only("b/120545219")
def testWhileStack_1(self): def testWhileStack_1(self):
with self.cached_session(): with self.cached_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo") s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
@ -2092,10 +2137,12 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(1024.0, self.evaluate(r)) self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/116351701 (colocation)") @test_util.disable_control_flow_v2("b/116351701 (colocation)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ColocateGradients(self): def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False) self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True) self._testWhileGrad_ColocateGradients(colocate=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Square(self): def testWhileGrad_Square(self):
with self.cached_session(): with self.cached_session():
v = constant_op.constant(2.0, name="v") v = constant_op.constant(2.0, name="v")
@ -2107,6 +2154,7 @@ class ControlFlowTest(test.TestCase):
r = gradients_impl.gradients(r, v)[0] r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, self.evaluate(r)) self.assertAllClose(1024.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Shape(self): def testWhileGrad_Shape(self):
with self.cached_session(): with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[None]) x = array_ops.placeholder(dtypes.float32, shape=[None])
@ -2137,6 +2185,7 @@ class ControlFlowTest(test.TestCase):
r = gradients_impl.gradients([r, y], x)[0] r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]})) self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_MultipleUses(self): def testWhileGrad_MultipleUses(self):
with self.cached_session(): with self.cached_session():
v = constant_op.constant(2.0, name="v") v = constant_op.constant(2.0, name="v")
@ -2148,6 +2197,7 @@ class ControlFlowTest(test.TestCase):
r = gradients_impl.gradients(r, v)[0] r = gradients_impl.gradients(r, v)[0]
self.assertEqual(524288.0, self.evaluate(r)) self.assertEqual(524288.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_LoopAdd(self): def testWhileGrad_LoopAdd(self):
with self.cached_session(): with self.cached_session():
v = constant_op.constant(2.0, name="v") v = constant_op.constant(2.0, name="v")
@ -2211,6 +2261,7 @@ class ControlFlowTest(test.TestCase):
def testNestedWhileCondWhileGradGpu(self): def testNestedWhileCondWhileGradGpu(self):
self._testNestedWhileCondWhileGrad(use_gpu=True) self._testNestedWhileCondWhileGrad(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Variable(self): def testWhileGrad_Variable(self):
with self.cached_session(): with self.cached_session():
a = variables.Variable(3.0) a = variables.Variable(3.0)
@ -2236,6 +2287,7 @@ class ControlFlowTest(test.TestCase):
variables.global_variables_initializer().run() variables.global_variables_initializer().run()
self.assertAllClose(216.0, g[0].eval()) self.assertAllClose(216.0, g[0].eval())
@test_util.run_v1_only("b/120545219")
def testWhileGradInCond(self): def testWhileGradInCond(self):
with self.cached_session(): with self.cached_session():
@ -2253,7 +2305,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0})) self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.disable_control_flow_v2("b/116340060") @test_util.disable_control_flow_v2("b/116340060")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testGradInWhileWrtInitialLoopVal(self): def testGradInWhileWrtInitialLoopVal(self):
with self.cached_session(): with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x") x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
@ -2271,6 +2323,7 @@ class ControlFlowTest(test.TestCase):
"loop invariants or wrt the input parameters to the loop body."): "loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y]) control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
@test_util.run_v1_only("b/120545219")
def testWhileGradInWhile(self): def testWhileGradInWhile(self):
with self.cached_session(): with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n") n = ops.convert_to_tensor(1.0, name="n")
@ -2287,6 +2340,7 @@ class ControlFlowTest(test.TestCase):
[tensor_shape.unknown_shape()]) [tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0})) self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.run_v1_only("b/120545219")
def testCondGradInNestedWhiles(self): def testCondGradInNestedWhiles(self):
def outer_body(i, x): def outer_body(i, x):
@ -2375,6 +2429,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0], self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
self.evaluate(r_flattened)) self.evaluate(r_flattened))
@test_util.run_v1_only("b/120545219")
def testWhile_NestedBadArityFails(self): def testWhile_NestedBadArityFails(self):
with self.cached_session(): with self.cached_session():
named = collections.namedtuple("named", ("a", "b")) named = collections.namedtuple("named", ("a", "b"))
@ -2391,6 +2446,7 @@ class ControlFlowTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, "the same number of elements"): with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars) control_flow_ops.while_loop(c, b, loop_vars)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ys_xs(self): def testWhileGrad_ys_xs(self):
with self.cached_session(): with self.cached_session():
x = constant_op.constant(3.0, name="x") x = constant_op.constant(3.0, name="x")
@ -2435,6 +2491,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(1024.0, r[0].eval()) self.assertAllClose(1024.0, r[0].eval())
@test_util.disable_control_flow_v2("b/116355153 (back_prop flag)") @test_util.disable_control_flow_v2("b/116355153 (back_prop flag)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoGradient(self): def testWhileGrad_NoGradient(self):
with self.cached_session(): with self.cached_session():
v = constant_op.constant(2.0, name="v") v = constant_op.constant(2.0, name="v")
@ -2446,6 +2503,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(1.0, r[0].eval()) self.assertAllClose(1.0, r[0].eval())
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoDependency(self): def testWhileGrad_NoDependency(self):
with self.cached_session() as sess: with self.cached_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3])) variable = variables.Variable(array_ops.ones([2, 3]))
@ -2486,6 +2544,7 @@ class ControlFlowTest(test.TestCase):
grad = gradients_impl.gradients(cost, [c0]) grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0])) self.assertAllClose(0.0, sess.run(grad[0]))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_SerialTwoLoops(self): def testWhileGrad_SerialTwoLoops(self):
with self.cached_session(): with self.cached_session():
i = constant_op.constant(0, name="i") i = constant_op.constant(0, name="i")
@ -2504,6 +2563,7 @@ class ControlFlowTest(test.TestCase):
r = gradients_impl.gradients([rx], x) r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval()) self.assertAllClose(1024.0, r[0].eval())
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ParallelTwoLoops(self): def testWhileGrad_ParallelTwoLoops(self):
with self.cached_session(): with self.cached_session():
i = constant_op.constant(0, name="i") i = constant_op.constant(0, name="i")
@ -2523,6 +2583,7 @@ class ControlFlowTest(test.TestCase):
r = gradients_impl.gradients([rx], x) r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval()) self.assertAllClose(64.0, r[0].eval())
@test_util.run_v1_only("b/120545219")
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self): def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.cached_session(): with self.cached_session():
i = constant_op.constant(0, name="i") i = constant_op.constant(0, name="i")
@ -2566,6 +2627,7 @@ class ControlFlowTest(test.TestCase):
self._testNestedWhileGrad_Simple(use_gpu=False) self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True) self._testNestedWhileGrad_Simple(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_SerialInner(self): def testNestedWhileGrad_SerialInner(self):
with self.cached_session(): with self.cached_session():
v = constant_op.constant(1.0) v = constant_op.constant(1.0)
@ -2613,6 +2675,7 @@ class ControlFlowTest(test.TestCase):
r = gradients_impl.gradients(r, v)[0] r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, self.evaluate(r)) self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_ParallelIterations(self): def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in # Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop. # the sequential order of the iterations of its outer loop.
@ -2702,6 +2765,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x)) self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefsWithGradients_1(self): def testWhileWithRefsWithGradients_1(self):
with self.cached_session() as sess: with self.cached_session() as sess:
x = variables.VariableV1(0.)._ref() # pylint: disable=protected-access x = variables.VariableV1(0.)._ref() # pylint: disable=protected-access
@ -2731,6 +2795,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(73, value_x_grad) self.assertEqual(73, value_x_grad)
@test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)") @test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_IndexedSlices(self): def testWhileGrad_IndexedSlices(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values") values = constant_op.constant([2.0, 4.0], name="values")
@ -2753,7 +2818,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r)) self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.disable_control_flow_v2("b/116328420 (SparseTensor)") @test_util.disable_control_flow_v2("b/116328420 (SparseTensor)")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testWhileGrad_SparseTensor(self): def testWhileGrad_SparseTensor(self):
with self.cached_session(): with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values") values = constant_op.constant([2.0, 4.0], name="values")
@ -2777,6 +2842,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r)) self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.disable_control_flow_v2("b/115920078 (gradients)") @test_util.disable_control_flow_v2("b/115920078 (gradients)")
@test_util.run_v1_only("b/120545219")
def testCallGradInLoop(self): def testCallGradInLoop(self):
with self.cached_session() as sess: with self.cached_session() as sess:
i0 = constant_op.constant(0) i0 = constant_op.constant(0)
@ -2959,6 +3025,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose([0., 0.], self.evaluate(dy_dq)) self.assertAllClose([0., 0.], self.evaluate(dy_dq))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)") @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGradientWithNontrainablePath2(self): def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.]) q = variables.Variable([7., 8.])
@ -2977,6 +3044,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose([1., 1.], self.evaluate(dy_dq)) self.assertAllClose([1., 1.], self.evaluate(dy_dq))
@test_util.disable_control_flow_v2("b/115920078 (gradients)") @test_util.disable_control_flow_v2("b/115920078 (gradients)")
@test_util.run_v1_only("b/120545219")
def testIssue16504(self): def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32) c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable( w = variables.Variable(
@ -3000,6 +3068,7 @@ class ControlFlowTest(test.TestCase):
grad, = gradients_impl.gradients(w, c) grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad) self.assertIsNotNone(grad)
@test_util.run_v1_only("b/120545219")
def testStopGradMultiFlows(self): def testStopGradMultiFlows(self):
with self.cached_session(): with self.cached_session():
@ -3026,6 +3095,7 @@ class ControlFlowTest(test.TestCase):
variables.global_variables_initializer().run() variables.global_variables_initializer().run()
self.assertEqual(5.0, self.evaluate(result)) self.assertEqual(5.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testOneValueCond(self): def testOneValueCond(self):
with self.cached_session(): with self.cached_session():
@ -3059,6 +3129,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose(4.0, i.eval(feed_dict={d: 1})) self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2})) self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
@test_util.run_v1_only("b/120545219")
def testCase(self): def testCase(self):
with self.cached_session(): with self.cached_session():
x = constant_op.constant(1) x = constant_op.constant(1)
@ -3111,6 +3182,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(r6.eval(), 0) self.assertAllEqual(r6.eval(), 0)
@test_util.run_v1_only("b/120545219")
def testCaseSideEffects(self): def testCaseSideEffects(self):
with self.cached_session() as sess: with self.cached_session() as sess:
v0 = variables.Variable(-1) v0 = variables.Variable(-1)
@ -3147,6 +3219,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1]) self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)") @test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testOneOpCond(self): def testOneOpCond(self):
with self.cached_session(): with self.cached_session():
v = variables.Variable(0) v = variables.Variable(0)
@ -3175,6 +3248,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(2, i.eval(feed_dict={c.name: 0})) self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, self.evaluate(v)) self.assertEqual(2, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithOpsDependencies(self): def testWithOpsDependencies(self):
with self.cached_session() as sess: with self.cached_session() as sess:
v = variables.VariableV1(0.0) v = variables.VariableV1(0.0)
@ -3198,6 +3272,7 @@ class ControlFlowTest(test.TestCase):
# Ensure that 'v' is initialized # Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val) self.assertAllClose(0.0, real_v_val)
@test_util.run_v1_only("b/120545219")
def testWithTensorDependencies(self): def testWithTensorDependencies(self):
with self.cached_session(): with self.cached_session():
v = variables.VariableV1(0.0) v = variables.VariableV1(0.0)
@ -3224,6 +3299,7 @@ class ControlFlowTest(test.TestCase):
# Ensure that 'v' is initialized # Ensure that 'v' is initialized
self.assertAllClose(0.0, self.evaluate(v)) self.assertAllClose(0.0, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithIndexedSlicesDependencies(self): def testWithIndexedSlicesDependencies(self):
with self.cached_session(): with self.cached_session():
v = variables.VariableV1( v = variables.VariableV1(
@ -3270,6 +3346,7 @@ class ControlFlowTest(test.TestCase):
self.assertDeviceEqual("", with_vdef_dep.device) self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups()) self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testGroup(self): def testGroup(self):
with self.cached_session() as sess: with self.cached_session() as sess:
v1 = variables.VariableV1([0.0]) v1 = variables.VariableV1([0.0])
@ -3289,6 +3366,7 @@ class ControlFlowTest(test.TestCase):
self.assertAllClose([0.0], v1_val) self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val) self.assertAllClose([1.0], v2_val)
@test_util.run_v1_only("b/120545219")
def testGroupEmpty(self): def testGroupEmpty(self):
op = control_flow_ops.group() op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp") self.assertEqual(op.type, "NoOp")
@ -3349,7 +3427,7 @@ class ControlFlowTest(test.TestCase):
self.assertEqual([None, None], m.get_shape().as_list()) self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape()) self.assertEqual([], index.get_shape())
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testRefSelect(self): def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32) index = array_ops.placeholder(dtypes.int32)
@ -3404,6 +3482,7 @@ class ControlFlowTest(test.TestCase):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
sess.run(tensor_list[0]) sess.run(tensor_list[0])
@test_util.run_v1_only("b/120545219")
def testWhilePyFuncBasic(self): def testWhilePyFuncBasic(self):
def func(x): def func(x):
@ -3417,6 +3496,7 @@ class ControlFlowTest(test.TestCase):
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()]) [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0) self.assertEqual(r[1].eval(), 65536.0)
@test_util.run_v1_only("b/120545219")
def testWhileFuncBasic(self): def testWhileFuncBasic(self):
@function.Defun(dtypes.float32) @function.Defun(dtypes.float32)
@ -3440,6 +3520,7 @@ class ControlFlowTest(test.TestCase):
]), 1) ]), 1)
@test_util.run_v1_only("b/120545219")
def testQIntSwitchMerge(self): def testQIntSwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess: with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_qint = constant_op.constant(np.array([42]), dtypes.qint8) constant_qint = constant_op.constant(np.array([42]), dtypes.qint8)
@ -3448,6 +3529,7 @@ class ControlFlowTest(test.TestCase):
result = control_flow_ops.merge([v_f, v_t]) result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result) self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testQIntRefSwitchMerge(self): def testQIntRefSwitchMerge(self):
with self.cached_session(use_gpu=test.is_gpu_available()) as sess: with self.cached_session(use_gpu=test.is_gpu_available()) as sess:
var_qint = gen_state_ops.variable( var_qint = gen_state_ops.variable(
@ -3461,6 +3543,7 @@ class ControlFlowTest(test.TestCase):
result = control_flow_ops.ref_merge([v_f, v_t]) result = control_flow_ops.ref_merge([v_f, v_t])
self.evaluate(result) self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testUInt64SwitchMerge(self): def testUInt64SwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess: with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64) constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64)
@ -3508,6 +3591,7 @@ class ControlFlowContextCheckTest(test.TestCase):
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0)) math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0] return cond_tensor[0]
@test_util.run_v1_only("b/120545219")
def testInvalidContext(self): def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal. # Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor() while_tensor = self._getWhileTensor()
@ -3517,7 +3601,7 @@ class ControlFlowContextCheckTest(test.TestCase):
"is in a while loop. See info log for more details."): "is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor) math_ops.add(1, while_tensor)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testInvalidContextInCond(self): def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal. # Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor() while_tensor = self._getWhileTensor()
@ -3530,6 +3614,7 @@ class ControlFlowContextCheckTest(test.TestCase):
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor), math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0)) lambda: constant_op.constant(0))
@test_util.run_v1_only("b/120545219")
def testInvalidContextInWhile(self): def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal. # Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor() while_tensor = self._getWhileTensor()
@ -3564,6 +3649,7 @@ class ControlFlowContextCheckTest(test.TestCase):
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn) control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
@test_util.run_v1_only("b/120545219")
def testValidWhileContext(self): def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK. # Accessing a tensor in a nested while is OK.
def body(_): def body(_):
@ -3572,6 +3658,7 @@ class ControlFlowContextCheckTest(test.TestCase):
control_flow_ops.while_loop(lambda i: i < 5, body, [0]) control_flow_ops.while_loop(lambda i: i < 5, body, [0])
@test_util.run_v1_only("b/120545219")
def testValidNestedContexts(self): def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an # Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK. # outer while context, is OK.
@ -3586,7 +3673,7 @@ class ControlFlowContextCheckTest(test.TestCase):
control_flow_ops.while_loop(lambda i: i < 5, body, [0]) control_flow_ops.while_loop(lambda i: i < 5, body, [0])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testInvalidNestedContexts(self): def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all # Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal. # inside a cond context, is illegal.
@ -3605,6 +3692,7 @@ class ControlFlowContextCheckTest(test.TestCase):
class TupleTest(test.TestCase): class TupleTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testTensors(self): def testTensors(self):
for v1_first in [True, False]: for v1_first in [True, False]:
with self.cached_session(): with self.cached_session():
@ -3635,7 +3723,7 @@ class TupleTest(test.TestCase):
self.assertAllClose([30.0], self.evaluate(t2)) self.assertAllClose([30.0], self.evaluate(t2))
self.assertAllClose([1.0], self.evaluate(v1)) self.assertAllClose([1.0], self.evaluate(v1))
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testIndexedSlices(self): def testIndexedSlices(self):
for v1_first in [True, False]: for v1_first in [True, False]:
with self.cached_session(): with self.cached_session():
@ -3887,6 +3975,7 @@ class EagerTest(test.TestCase):
isum(tensor, maximum_iterations=3).numpy(), isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3]) [1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self): def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode(): with context.eager_mode():
tensor = constant_op.constant(0) tensor = constant_op.constant(0)
@ -3909,6 +3998,7 @@ class EagerTest(test.TestCase):
self.assertAllEqual(t1.numpy(), tup1.numpy()) self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy()) self.assertAllEqual(t2.numpy(), tup2.numpy())
@test_util.run_v1_only("b/120545219")
def testCase(self): def testCase(self):
with context.eager_mode(): with context.eager_mode():
x = constant_op.constant(1) x = constant_op.constant(1)


@ -22,6 +22,7 @@ from __future__ import print_function
from tensorflow.python.framework import dtypes from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import control_flow_util
@ -32,6 +33,7 @@ from tensorflow.python.platform import test
class ControlFlowUtilTest(test.TestCase): class ControlFlowUtilTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testIsSwitch(self): def testIsSwitch(self):
switch_false, _ = control_flow_ops.switch(1, True) switch_false, _ = control_flow_ops.switch(1, True)
switch = switch_false.op switch = switch_false.op
@ -44,6 +46,7 @@ class ControlFlowUtilTest(test.TestCase):
self.assertFalse(control_flow_util.IsSwitch(test_ops.int_output().op)) self.assertFalse(control_flow_util.IsSwitch(test_ops.int_output().op))
@test_util.run_v1_only("b/120545219")
def testIsLoopEnter(self): def testIsLoopEnter(self):
enter = gen_control_flow_ops.enter(1, frame_name="name").op enter = gen_control_flow_ops.enter(1, frame_name="name").op
self.assertTrue(control_flow_util.IsLoopEnter(enter)) self.assertTrue(control_flow_util.IsLoopEnter(enter))
@ -61,6 +64,7 @@ class ControlFlowUtilTest(test.TestCase):
self.assertFalse(control_flow_util.IsLoopEnter(test_ops.int_output().op)) self.assertFalse(control_flow_util.IsLoopEnter(test_ops.int_output().op))
@test_util.run_v1_only("b/120545219")
def testIsLoopExit(self): def testIsLoopExit(self):
exit_op = control_flow_ops.exit(1).op exit_op = control_flow_ops.exit(1).op
self.assertTrue(control_flow_util.IsLoopExit(exit_op)) self.assertTrue(control_flow_util.IsLoopExit(exit_op))


@ -106,7 +106,7 @@ class CTCLossTest(test.TestCase):
with self.assertRaisesOpError(expected_err_re): with self.assertRaisesOpError(expected_err_re):
self.evaluate([loss, grad]) self.evaluate([loss, grad])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testBasic(self): def testBasic(self):
"""Test two batch entries.""" """Test two batch entries."""
# Input and ground truth from Alex Graves' implementation. # Input and ground truth from Alex Graves' implementation.
@ -242,7 +242,7 @@ class CTCLossTest(test.TestCase):
self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth) self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def test_time_major(self): def test_time_major(self):
"""Testing time_major param. """Testing time_major param.
@ -272,7 +272,7 @@ class CTCLossTest(test.TestCase):
(tf_loss, tf_loss_transposed) = self.evaluate([loss, loss_transposed]) (tf_loss, tf_loss_transposed) = self.evaluate([loss, loss_transposed])
self.assertAllEqual(tf_loss, tf_loss_transposed) self.assertAllEqual(tf_loss, tf_loss_transposed)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testInvalidSecondGradient(self): def testInvalidSecondGradient(self):
inputs = np.random.randn(2, 2, 3).astype(np.float32) inputs = np.random.randn(2, 2, 3).astype(np.float32)
inputs_t = constant_op.constant(inputs) inputs_t = constant_op.constant(inputs)
@ -289,7 +289,7 @@ class CTCLossTest(test.TestCase):
"explicitly disabled"): "explicitly disabled"):
_ = gradients_impl._hessian_vector_product(loss, [inputs_t], v) _ = gradients_impl._hessian_vector_product(loss, [inputs_t], v)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testEmptyBatch(self): def testEmptyBatch(self):
inputs = constant_op.constant([], dtype=dtypes.float32, shape=(1, 0, 2)) inputs = constant_op.constant([], dtype=dtypes.float32, shape=(1, 0, 2))
sequence_lengths = constant_op.constant([], dtype=dtypes.int32) sequence_lengths = constant_op.constant([], dtype=dtypes.int32)
@ -306,7 +306,7 @@ class CTCLossTest(test.TestCase):
class CTCLossTestV2(test.TestCase): class CTCLossTestV2(test.TestCase):
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCtcLossV2(self): def testCtcLossV2(self):
random_seed.set_random_seed(5) random_seed.set_random_seed(5)
@ -351,7 +351,7 @@ class CTCLossTestV2(test.TestCase):
logit_length=logit_length, logit_length=logit_length,
blank_index=0)) blank_index=0))
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCtcLossDenseIsSameAsCtcLoss(self): def testCtcLossDenseIsSameAsCtcLoss(self):
with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"): with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"):
random_seed.set_random_seed(5) random_seed.set_random_seed(5)
@ -405,7 +405,7 @@ class CTCLossTestV2(test.TestCase):
rtol=2e-06, rtol=2e-06,
atol=2e-06) atol=2e-06)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCtcLossDenseUniqueFastPathIsSameAsCtcLoss(self): def testCtcLossDenseUniqueFastPathIsSameAsCtcLoss(self):
random_seed.set_random_seed(5) random_seed.set_random_seed(5)
@ -459,7 +459,7 @@ class CTCLossTestV2(test.TestCase):
rtol=2e-06, rtol=2e-06,
atol=2e-06) atol=2e-06)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCtcLossDenseWithBlankIndexIsSameAsCtcLoss(self): def testCtcLossDenseWithBlankIndexIsSameAsCtcLoss(self):
random_seed.set_random_seed(5) random_seed.set_random_seed(5)
@ -516,7 +516,7 @@ class CTCLossTestV2(test.TestCase):
rtol=2e-06, rtol=2e-06,
atol=2e-06) atol=2e-06)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCtcLossDenseWithNegativeBlankIndexIsSameAsCtcLoss(self): def testCtcLossDenseWithNegativeBlankIndexIsSameAsCtcLoss(self):
with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"): with ops.device("/GPU:0" if test.is_gpu_available() else "/CPU:0"):
random_seed.set_random_seed(5) random_seed.set_random_seed(5)
@ -565,7 +565,7 @@ class CTCLossTestV2(test.TestCase):
rtol=2e-06, rtol=2e-06,
atol=2e-06) atol=2e-06)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCollapseRepeated(self): def testCollapseRepeated(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated( collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 3, 3, 3, 0], labels=[[1, 3, 3, 3, 0],
@ -579,7 +579,7 @@ class CTCLossTestV2(test.TestCase):
[1, 4, 0, 0], [1, 4, 0, 0],
[4, 2, 9, 4]]) [4, 2, 9, 4]])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCollapseRepeatedPreservesDtypes(self): def testCollapseRepeatedPreservesDtypes(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated( collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=constant_op.constant( labels=constant_op.constant(
@ -597,7 +597,7 @@ class CTCLossTestV2(test.TestCase):
[1, 4, 0, 0], [1, 4, 0, 0],
[4, 2, 9, 4]]) [4, 2, 9, 4]])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCollapseRepeatedExtraPadding(self): def testCollapseRepeatedExtraPadding(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated( collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 3, 3, 3, 0, 0, 0], labels=[[1, 3, 3, 3, 0, 0, 0],
@ -611,7 +611,7 @@ class CTCLossTestV2(test.TestCase):
[1, 4, 0, 0], [1, 4, 0, 0],
[4, 2, 9, 4]]) [4, 2, 9, 4]])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCollapseRepeatedFrontRepeats(self): def testCollapseRepeatedFrontRepeats(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated( collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 1, 1, 2, 2], labels=[[1, 1, 1, 2, 2],
@ -625,7 +625,7 @@ class CTCLossTestV2(test.TestCase):
[1, 2], [1, 2],
[1, 0]]) [1, 0]])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testCollapseRepeatedAllLabelsTheSame(self): def testCollapseRepeatedAllLabelsTheSame(self):
collapsed, new_seq_lengths = ctc_ops.collapse_repeated( collapsed, new_seq_lengths = ctc_ops.collapse_repeated(
labels=[[1, 1, 1, 1, 1], labels=[[1, 1, 1, 1, 1],
@ -658,7 +658,7 @@ class CTCLossTestV2(test.TestCase):
self.assertAllEqual(padded_dense, new_dense) self.assertAllEqual(padded_dense, new_dense)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testUnique(self): def testUnique(self):
labels = [ labels = [
[3, 4, 4, 3], [3, 4, 4, 3],
@ -674,7 +674,7 @@ class CTCLossTestV2(test.TestCase):
[0, 0, 0, 1], [0, 0, 0, 1],
], idx) ], idx)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testSumStates(self): def testSumStates(self):
idx = [ idx = [
[0, 1, 0, 1], [0, 1, 0, 1],
@ -694,7 +694,7 @@ class CTCLossTestV2(test.TestCase):
[1.8, 0.8, 0.0, 0.0]] [1.8, 0.8, 0.0, 0.0]]
], sum_of_states) ], sum_of_states)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testStateToOlabel(self): def testStateToOlabel(self):
labels = [ labels = [
[3, 4, 3, 4], [3, 4, 3, 4],
@ -733,7 +733,7 @@ class CTCLossTestV2(test.TestCase):
[22.0 + 23.0 + 24.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], [22.0 + 23.0 + 24.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
]) ])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testStateToOlabelUnique(self): def testStateToOlabelUnique(self):
labels = [ labels = [
[3, 4, 3, 4], [3, 4, 3, 4],


@ -33,7 +33,7 @@ class AssignOpTest(test.TestCase):
# NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testParallelUpdateWithoutLocking(self): def testParallelUpdateWithoutLocking(self):
with self.cached_session() as sess: with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0) ones_t = array_ops.fill([1024, 1024], 1.0)
@ -61,7 +61,7 @@ class AssignOpTest(test.TestCase):
self.assertTrue((vals >= ones).all()) self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all()) self.assertTrue((vals <= ones * 20).all())
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testParallelAssignWithoutLocking(self): def testParallelAssignWithoutLocking(self):
with self.cached_session() as sess: with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1)) ones_t = array_ops.fill([1024, 1024], float(1))
@ -94,7 +94,7 @@ class AssignOpTest(test.TestCase):
# contain non-benign but known data races between the variable assignment and # contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new # returning the output tensors. This issue will be resolved with the new
# resource variables. # resource variables.
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testParallelUpdateWithLocking(self): def testParallelUpdateWithLocking(self):
with self.cached_session() as sess: with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0) zeros_t = array_ops.fill([1024, 1024], 0.0)
@ -122,7 +122,7 @@ class AssignOpTest(test.TestCase):
ones = np.ones((1024, 1024)).astype(np.float32) ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20) self.assertAllEqual(vals, ones * 20)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testParallelAssignWithLocking(self): def testParallelAssignWithLocking(self):
with self.cached_session() as sess: with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0) zeros_t = array_ops.fill([1024, 1024], 0.0)


@ -86,7 +86,7 @@ class AssignOpTest(test.TestCase):
def testBasic(self): def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5])) self._testTypes(np.arange(0, 20).reshape([4, 5]))
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testAssignNonStrictShapeChecking(self): def testAssignNonStrictShapeChecking(self):
with self.cached_session(): with self.cached_session():
data = array_ops.fill([1024, 1024], 0) data = array_ops.fill([1024, 1024], 0)
@ -101,7 +101,7 @@ class AssignOpTest(test.TestCase):
a2.op.run() a2.op.run()
self.assertAllEqual(p.eval(), self.evaluate(data2)) self.assertAllEqual(p.eval(), self.evaluate(data2))
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testInitRequiredAssignAdd(self): def testInitRequiredAssignAdd(self):
with self.cached_session(): with self.cached_session():
p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32) p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32)
@ -109,7 +109,7 @@ class AssignOpTest(test.TestCase):
with self.assertRaisesOpError("use uninitialized"): with self.assertRaisesOpError("use uninitialized"):
a.op.run() a.op.run()
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testInitRequiredAssignSub(self): def testInitRequiredAssignSub(self):
with self.cached_session(): with self.cached_session():
p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32) p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32)


@ -24,6 +24,7 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors from tensorflow.python.framework import errors
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl from tensorflow.python.ops import nn_impl
@ -185,6 +186,7 @@ class DepthwiseConv2DTest(test.TestCase):
self.assertShapeEqual(native_result, conv_native) self.assertShapeEqual(native_result, conv_native)
self.assertShapeEqual(native_result, conv_interface) self.assertShapeEqual(native_result, conv_interface)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2D(self): def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride, for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()): padding) in enumerate(ConfigsToTest()):
@ -428,6 +430,7 @@ class DepthwiseConv2DTest(test.TestCase):
use_gpu, grouped_conv, err) use_gpu, grouped_conv, err)
self.assertLess(err, tolerance) self.assertLess(err, tolerance)
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DInputGrad(self): def testDepthwiseConv2DInputGrad(self):
for index, (input_size, filter_size, output_size, stride, for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()): padding) in enumerate(CheckGradConfigsToTest()):
@ -477,6 +480,7 @@ class DepthwiseConv2DTest(test.TestCase):
use_gpu=True, use_gpu=True,
data_format="NCHW") data_format="NCHW")
@test_util.run_v1_only("b/120545219")
def testDepthwiseConv2DFilterGrad(self): def testDepthwiseConv2DFilterGrad(self):
for index, (input_size, filter_size, output_size, stride, for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()): padding) in enumerate(CheckGradConfigsToTest()):


@ -133,6 +133,7 @@ class DeterminantOpTest(test.TestCase):
huge_matrix = np.array([[max_double, 0.0], [0.0, max_double]]) huge_matrix = np.array([[max_double, 0.0], [0.0, max_double]])
self._compareDeterminant(huge_matrix) self._compareDeterminant(huge_matrix)
@test_util.run_v1_only("b/120545219")
def testNonSquareMatrix(self): def testNonSquareMatrix(self):
# When the determinant of a non-square matrix is attempted we should return # When the determinant of a non-square matrix is attempted we should return
# an error # an error
@ -140,6 +141,7 @@ class DeterminantOpTest(test.TestCase):
linalg_ops.matrix_determinant( linalg_ops.matrix_determinant(
np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32)) np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self): def testWrongDimensions(self):
# The input to the determinant should be a 2-dimensional tensor. # The input to the determinant should be a 2-dimensional tensor.
tensor1 = constant_op.constant([1., 2.]) tensor1 = constant_op.constant([1., 2.])
@ -150,6 +152,7 @@ class DeterminantOpTest(test.TestCase):
self._compareDeterminant(np.empty([0, 2, 2])) self._compareDeterminant(np.empty([0, 2, 2]))
self._compareDeterminant(np.empty([2, 0, 0])) self._compareDeterminant(np.empty([2, 0, 0]))
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self): def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess: with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42) matrix1 = random_ops.random_normal([5, 5], seed=42)


@ -22,6 +22,7 @@ from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import multinomial from tensorflow.python.ops.distributions import multinomial
@ -33,6 +34,7 @@ class MultinomialTest(test.TestCase):
def setUp(self): def setUp(self):
self._rng = np.random.RandomState(42) self._rng = np.random.RandomState(42)
@test_util.run_v1_only("b/120545219")
def testSimpleShapes(self): def testSimpleShapes(self):
with self.cached_session(): with self.cached_session():
p = [.1, .3, .6] p = [.1, .3, .6]
@ -42,6 +44,7 @@ class MultinomialTest(test.TestCase):
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape) self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape) self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testComplexShapes(self): def testComplexShapes(self):
with self.cached_session(): with self.cached_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32) p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
@ -52,6 +55,7 @@ class MultinomialTest(test.TestCase):
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape) self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape) self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@test_util.run_v1_only("b/120545219")
def testN(self): def testN(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]] p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]] n = [[3.], [4]]
@ -60,6 +64,7 @@ class MultinomialTest(test.TestCase):
self.assertEqual((2, 1), dist.total_count.get_shape()) self.assertEqual((2, 1), dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count.eval()) self.assertAllClose(n, dist.total_count.eval())
@test_util.run_v1_only("b/120545219")
def testP(self): def testP(self):
p = [[0.1, 0.2, 0.7]] p = [[0.1, 0.2, 0.7]]
with self.cached_session(): with self.cached_session():
@ -68,6 +73,7 @@ class MultinomialTest(test.TestCase):
self.assertEqual((1, 3), dist.logits.get_shape()) self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.probs.eval()) self.assertAllClose(p, dist.probs.eval())
@test_util.run_v1_only("b/120545219")
def testLogits(self): def testLogits(self):
p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32) p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32)
logits = np.log(p) - 50. logits = np.log(p) - 50.
@ -78,6 +84,7 @@ class MultinomialTest(test.TestCase):
self.assertAllClose(p, multinom.probs.eval()) self.assertAllClose(p, multinom.probs.eval())
self.assertAllClose(logits, multinom.logits.eval()) self.assertAllClose(logits, multinom.logits.eval())
@test_util.run_v1_only("b/120545219")
def testPmfUnderflow(self): def testPmfUnderflow(self):
logits = np.array([[-200, 0]], dtype=np.float32) logits = np.array([[-200, 0]], dtype=np.float32)
with self.cached_session(): with self.cached_session():
@ -85,6 +92,7 @@ class MultinomialTest(test.TestCase):
lp = dist.log_prob([1., 0.]).eval()[0] lp = dist.log_prob([1., 0.]).eval()[0]
self.assertAllClose(-200, lp, atol=0, rtol=1e-6) self.assertAllClose(-200, lp, atol=0, rtol=1e-6)
@test_util.run_v1_only("b/120545219")
def testPmfandCountsAgree(self): def testPmfandCountsAgree(self):
p = [[0.1, 0.2, 0.7]] p = [[0.1, 0.2, 0.7]]
n = [[5.]] n = [[5.]]
@ -97,6 +105,7 @@ class MultinomialTest(test.TestCase):
with self.assertRaisesOpError("counts must sum to `self.total_count`"): with self.assertRaisesOpError("counts must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval() dist.prob([3., 3, 0]).eval()
@test_util.run_v1_only("b/120545219")
def testPmfNonIntegerCounts(self): def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]] p = [[0.1, 0.2, 0.7]]
n = [[5.]] n = [[5.]]
@ -157,6 +166,7 @@ class MultinomialTest(test.TestCase):
self.assertAllClose([0.1, 0.9], self.evaluate(pmf)) self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
self.assertEqual((2), pmf.get_shape()) self.assertEqual((2), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenSameRank(self): def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.cached_session(): with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]] p = [[0.1, 0.9], [0.7, 0.3]]
@ -165,6 +175,7 @@ class MultinomialTest(test.TestCase):
self.assertAllClose(pmf.eval(), [0.1, 0.7]) self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual((2), pmf.get_shape()) self.assertEqual((2), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testPmfCountsStretchedInBroadcastWhenLowerRank(self): def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.cached_session(): with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]] p = [[0.1, 0.9], [0.7, 0.3]]
@ -194,6 +205,7 @@ class MultinomialTest(test.TestCase):
self.evaluate(pmf) self.evaluate(pmf)
self.assertEqual((4, 3), pmf.get_shape()) self.assertEqual((4, 3), pmf.get_shape())
@test_util.run_v1_only("b/120545219")
def testMultinomialMean(self): def testMultinomialMean(self):
with self.cached_session(): with self.cached_session():
n = 5. n = 5.
@ -203,6 +215,7 @@ class MultinomialTest(test.TestCase):
self.assertEqual((3,), dist.mean().get_shape()) self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean().eval()) self.assertAllClose(expected_means, dist.mean().eval())
@test_util.run_v1_only("b/120545219")
def testMultinomialCovariance(self): def testMultinomialCovariance(self):
with self.cached_session(): with self.cached_session():
n = 5. n = 5.
@ -214,6 +227,7 @@ class MultinomialTest(test.TestCase):
self.assertEqual((3, 3), dist.covariance().get_shape()) self.assertEqual((3, 3), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval()) self.assertAllClose(expected_covariances, dist.covariance().eval())
@test_util.run_v1_only("b/120545219")
def testMultinomialCovarianceBatch(self): def testMultinomialCovarianceBatch(self):
with self.cached_session(): with self.cached_session():
# Shape [2] # Shape [2]
@ -246,6 +260,7 @@ class MultinomialTest(test.TestCase):
self.assertEqual((3, 5, 4, 4), covariance.get_shape()) self.assertEqual((3, 5, 4, 4), covariance.get_shape())
self.assertEqual((6, 3, 3, 3), covariance2.get_shape()) self.assertEqual((6, 3, 3, 3), covariance2.get_shape())
@test_util.run_v1_only("b/120545219")
def testCovarianceFromSampling(self): def testCovarianceFromSampling(self):
# We will test mean, cov, var, stddev on a DirichletMultinomial constructed # We will test mean, cov, var, stddev on a DirichletMultinomial constructed
# via broadcast between alpha, n. # via broadcast between alpha, n.
@ -288,6 +303,7 @@ class MultinomialTest(test.TestCase):
self.assertAllClose(sample_var_, analytic_var, atol=0.01, rtol=0.01) self.assertAllClose(sample_var_, analytic_var, atol=0.01, rtol=0.01)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.01, rtol=0.01) self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.01, rtol=0.01)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedNonScalarBatch(self): def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess: with self.cached_session() as sess:
dist = multinomial.Multinomial( dist = multinomial.Multinomial(
@ -317,6 +333,7 @@ class MultinomialTest(test.TestCase):
self.assertAllClose( self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20) actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
@test_util.run_v1_only("b/120545219")
def testSampleUnbiasedScalarBatch(self): def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess: with self.cached_session() as sess:
dist = multinomial.Multinomial( dist = multinomial.Multinomial(


@ -39,6 +39,7 @@ from tensorflow.python.platform import test
from tensorflow.python.util import compat from tensorflow.python.util import compat
@test_util.run_v1_only("b/120545219")
class FIFOQueueTest(test.TestCase): class FIFOQueueTest(test.TestCase):
def testConstructor(self): def testConstructor(self):
@ -1423,6 +1424,7 @@ class FIFOQueueTest(test.TestCase):
session.run([a, c]) session.run([a, c])
@test_util.run_v1_only("b/120545219")
class FIFOQueueDictTest(test.TestCase): class FIFOQueueDictTest(test.TestCase):
def testConstructor(self): def testConstructor(self):
@ -1583,6 +1585,7 @@ class FIFOQueueDictTest(test.TestCase):
self.assertTrue([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s)) self.assertTrue([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
@test_util.run_v1_only("b/120545219")
class FIFOQueueWithTimeoutTest(test.TestCase): class FIFOQueueWithTimeoutTest(test.TestCase):
def testDequeueWithTimeout(self): def testDequeueWithTimeout(self):
@ -1617,6 +1620,7 @@ class FIFOQueueWithTimeoutTest(test.TestCase):
self.assertEqual(37, self.evaluate(dequeued_t)) self.assertEqual(37, self.evaluate(dequeued_t))
@test_util.run_v1_only("b/120545219")
class QueueContainerTest(test.TestCase): class QueueContainerTest(test.TestCase):
def testContainer(self): def testContainer(self):


@ -494,7 +494,7 @@ class FunctionalOpsTest(test.TestCase):
@test_util.disable_control_flow_v2("b/119323354") @test_util.disable_control_flow_v2("b/119323354")
@test_util.run_in_graph_and_eager_modes @test_util.run_in_graph_and_eager_modes
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testMapEmptyScalar(self): def testMapEmptyScalar(self):
map_return = functional_ops.map_fn(lambda x: 1, constant_op.constant([])) map_return = functional_ops.map_fn(lambda x: 1, constant_op.constant([]))
self.assertAllEqual([0], map_return.get_shape().dims) self.assertAllEqual([0], map_return.get_shape().dims)
@ -503,7 +503,7 @@ class FunctionalOpsTest(test.TestCase):
# TODO(akshayka): this test fails in eager: the iterable is of length 0 so # TODO(akshayka): this test fails in eager: the iterable is of length 0 so
# so the body of the while loop never executes # so the body of the while loop never executes
@test_util.disable_control_flow_v2("b/119323354") @test_util.disable_control_flow_v2("b/119323354")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testMapEmptyTensor(self): def testMapEmptyTensor(self):
with self.cached_session(): with self.cached_session():
map_return = functional_ops.map_fn(lambda x: array_ops.zeros([3, 2]), map_return = functional_ops.map_fn(lambda x: array_ops.zeros([3, 2]),
@ -797,7 +797,7 @@ class FunctionalOpsTest(test.TestCase):
self.assertAllEqual(Run(100., False), 5050.) self.assertAllEqual(Run(100., False), 5050.)
self.assertAllEqual(Run(100., True), 5050.) self.assertAllEqual(Run(100., True), 5050.)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testWhileError(self): def testWhileError(self):
for use_gpu in (True, False): for use_gpu in (True, False):
with ops.Graph().as_default() as g: with ops.Graph().as_default() as g:
@ -1027,7 +1027,7 @@ class FunctionalOpsTest(test.TestCase):
def testForMLPWhile(self): def testForMLPWhile(self):
self._testForMLP(True) self._testForMLP(True)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testForError(self): def testForError(self):
@function.Defun(dtypes.int32, dtypes.float32) @function.Defun(dtypes.int32, dtypes.float32)
@ -1233,7 +1233,7 @@ class PartitionedCallTest(test.TestCase):
self.assertAllEqual(expected, result) self.assertAllEqual(expected, result)
# Use an invalid executor name to test the plumbing of the executor_type attr. # Use an invalid executor name to test the plumbing of the executor_type attr.
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testExecutorTypeAttrExecutorNotFound(self): def testExecutorTypeAttrExecutorNotFound(self):
@function.Defun(dtypes.int32) @function.Defun(dtypes.int32)
def AddFive(x): def AddFive(x):


@ -62,7 +62,7 @@ class IdentityOpTest(test.TestCase):
self.assertEquals(shape, self.assertEquals(shape,
array_ops.identity(np.array(array_2x3)).get_shape()) array_ops.identity(np.array(array_2x3)).get_shape())
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testRefIdentityShape(self): def testRefIdentityShape(self):
with self.cached_session(): with self.cached_session():
shape = [2, 3] shape = [2, 3]


@ -214,7 +214,7 @@ class LinearOperatorTest(test.TestCase):
operator = LinearOperatorMatmulSolve(matrix, is_square=True) operator = LinearOperatorMatmulSolve(matrix, is_square=True)
self.assertTrue(operator.is_square) self.assertTrue(operator.is_square)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def test_linear_operator_matmul_hints_closed(self): def test_linear_operator_matmul_hints_closed(self):
matrix = array_ops.placeholder(dtypes.float32) matrix = array_ops.placeholder(dtypes.float32)
operator1 = LinearOperatorMatmulSolve(matrix) operator1 = LinearOperatorMatmulSolve(matrix)
@ -241,7 +241,7 @@ class LinearOperatorTest(test.TestCase):
self.assertTrue(operator_matmul.is_self_adjoint) self.assertTrue(operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite) self.assertEqual(None, operator_matmul.is_positive_definite)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def test_linear_operator_matmul_hints_false(self): def test_linear_operator_matmul_hints_false(self):
matrix = array_ops.placeholder(dtypes.float32) matrix = array_ops.placeholder(dtypes.float32)
operator1 = LinearOperatorMatmulSolve( operator1 = LinearOperatorMatmulSolve(
@ -274,7 +274,7 @@ class LinearOperatorTest(test.TestCase):
self.assertEqual(None, operator_matmul.is_self_adjoint) self.assertEqual(None, operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite) self.assertEqual(None, operator_matmul.is_positive_definite)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def test_linear_operator_matmul_hint_infer_square(self): def test_linear_operator_matmul_hint_infer_square(self):
matrix1 = array_ops.placeholder(shape=[2, 3], dtype=dtypes.float32) matrix1 = array_ops.placeholder(shape=[2, 3], dtype=dtypes.float32)
matrix2 = array_ops.placeholder(shape=[3, 2], dtype=dtypes.float32) matrix2 = array_ops.placeholder(shape=[3, 2], dtype=dtypes.float32)


@ -61,6 +61,7 @@ class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_): def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self): def Test(self):
with self.session(use_gpu=True): with self.session(use_gpu=True):
np.random.seed(1) np.random.seed(1)
@ -103,6 +104,7 @@ def _GetMatrixBinaryFunctorGradientTest(functor_,
float32_tol_fudge=1.0, float32_tol_fudge=1.0,
**kwargs_): **kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self): def Test(self):
# TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable # TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable
# GPU test for matrix_solve. # GPU test for matrix_solve.


@ -51,26 +51,26 @@ class AbsoluteDifferenceLossTest(test.TestCase):
losses.absolute_difference( losses.absolute_difference(
self._predictions, self._predictions, weights=None) self._predictions, self._predictions, weights=None)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAllCorrectNoLossWeight(self): def testAllCorrectNoLossWeight(self):
loss = losses.absolute_difference(self._predictions, self._predictions) loss = losses.absolute_difference(self._predictions, self._predictions)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3) self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLoss(self): def testNonZeroLoss(self):
loss = losses.absolute_difference(self._labels, self._predictions) loss = losses.absolute_difference(self._labels, self._predictions)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(5.5, self.evaluate(loss), 3) self.assertAlmostEqual(5.5, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithPythonScalarWeight(self): def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3 weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions, weights) loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3) self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeight(self): def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3 weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions, loss = losses.absolute_difference(self._labels, self._predictions,
@ -148,7 +148,7 @@ class SoftmaxCrossEntropyLossTest(test.TestCase):
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value') self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3) self.assertAlmostEqual(loss.eval(), 10.0, 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithPythonScalarWeight(self): def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]]) [0.0, 0.0, 10.0]])
@ -158,7 +158,7 @@ class SoftmaxCrossEntropyLossTest(test.TestCase):
loss = losses.softmax_cross_entropy(labels, logits, weights) loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3) self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeight(self): def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]]) [0.0, 0.0, 10.0]])
@ -311,7 +311,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value') self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3) self.assertAlmostEqual(loss.eval(), 10.0, 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithPythonScalarWeight(self): def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]]) [0.0, 0.0, 10.0]])
@ -321,7 +321,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights) loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3) self.assertAlmostEqual(weights * 10.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeight(self): def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]]) [0.0, 0.0, 10.0]])
@ -677,13 +677,13 @@ class LogLossTest(test.TestCase):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
losses.log_loss(self._labels, self._labels, weights=None) losses.log_loss(self._labels, self._labels, weights=None)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAllCorrectNoLossWeight(self): def testAllCorrectNoLossWeight(self):
loss = losses.log_loss(self._labels, self._labels) loss = losses.log_loss(self._labels, self._labels)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3) self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAllCorrectNoLossWeightWithPlaceholder(self): def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = array_ops.placeholder( tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_labels.shape) dtypes.float32, shape=self._np_labels.shape)
@ -692,14 +692,14 @@ class LogLossTest(test.TestCase):
self.assertAlmostEqual( self.assertAlmostEqual(
0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3) 0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLoss(self): def testNonZeroLoss(self):
loss = losses.log_loss(self._labels, self._predictions) loss = losses.log_loss(self._labels, self._predictions)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0, self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
self.evaluate(loss), 3) self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithPythonScalarWeight(self): def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3 weights = 2.3
loss = losses.log_loss(self._labels, self._predictions, weights) loss = losses.log_loss(self._labels, self._predictions, weights)
@ -707,7 +707,7 @@ class LogLossTest(test.TestCase):
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0, self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
self.evaluate(loss), 3) self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeight(self): def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3 weights = 2.3
loss = losses.log_loss(self._labels, self._predictions, loss = losses.log_loss(self._labels, self._predictions,
@ -716,7 +716,7 @@ class LogLossTest(test.TestCase):
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0, self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
self.evaluate(loss), 3) self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self): def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = array_ops.placeholder( tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_predictions.shape) dtypes.float32, shape=self._np_predictions.shape)
@ -728,7 +728,7 @@ class LogLossTest(test.TestCase):
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0, self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3) loss, 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self): def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None]) tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
weights = 2.3 weights = 2.3
@ -788,7 +788,7 @@ class LogLossTest(test.TestCase):
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, self.assertAlmostEqual(-np.sum(expected_losses) / 5.0,
self.evaluate(loss), 3) self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self): def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3)) weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights) expected_losses = np.multiply(self._expected_losses, weights)
@ -816,7 +816,7 @@ class LogLossTest(test.TestCase):
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(-np.sum(expected_losses), self.evaluate(loss), 3) self.assertAlmostEqual(-np.sum(expected_losses), self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self): def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3)) weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights) expected_losses = np.multiply(self._expected_losses, weights)
@ -955,26 +955,26 @@ class MeanSquaredErrorTest(test.TestCase):
losses.mean_squared_error(predictions=constant_op.constant(0), losses.mean_squared_error(predictions=constant_op.constant(0),
labels=constant_op.constant(0)).eval()) labels=constant_op.constant(0)).eval())
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAllCorrectNoLossWeight(self): def testAllCorrectNoLossWeight(self):
loss = losses.mean_squared_error(self._predictions, self._predictions) loss = losses.mean_squared_error(self._predictions, self._predictions)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(0.0, self.evaluate(loss), 3) self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLoss(self): def testNonZeroLoss(self):
loss = losses.mean_squared_error(self._labels, self._predictions) loss = losses.mean_squared_error(self._labels, self._predictions)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(49.5, self.evaluate(loss), 3) self.assertAlmostEqual(49.5, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithPythonScalarWeight(self): def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3 weights = 2.3
loss = losses.mean_squared_error(self._labels, self._predictions, weights) loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.cached_session(): with self.cached_session():
self.assertAlmostEqual(49.5 * weights, self.evaluate(loss), 3) self.assertAlmostEqual(49.5 * weights, self.evaluate(loss), 3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeight(self): def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3 weights = 2.3
loss = losses.mean_squared_error(self._labels, self._predictions, loss = losses.mean_squared_error(self._labels, self._predictions,
@ -1068,12 +1068,12 @@ class MeanPairwiseSquaredErrorTest(test.TestCase):
self.assertAlmostEqual( self.assertAlmostEqual(
expected_loss, dynamic_inputs_op.eval(feed_dict=feed_dict), places=3) expected_loss, dynamic_inputs_op.eval(feed_dict=feed_dict), places=3)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testAllCorrectNoLossWeight(self): def testAllCorrectNoLossWeight(self):
self._test_valid_weights( self._test_valid_weights(
self._labels, self._labels, expected_loss=0.0) self._labels, self._labels, expected_loss=0.0)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLoss(self): def testNonZeroLoss(self):
self._test_valid_weights( self._test_valid_weights(
self._labels, self._predictions, self._labels, self._predictions,
@ -1104,7 +1104,7 @@ class MeanPairwiseSquaredErrorTest(test.TestCase):
np_grad = self.evaluate(grad) np_grad = self.evaluate(grad)
self.assertFalse(np.isnan(np_grad).any()) self.assertFalse(np.isnan(np_grad).any())
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithPythonScalarWeight(self): def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3 weight = 2.3
self._test_valid_weights( self._test_valid_weights(
@ -1112,7 +1112,7 @@ class MeanPairwiseSquaredErrorTest(test.TestCase):
expected_loss=weight * np.sum(self._expected_losses), expected_loss=weight * np.sum(self._expected_losses),
weights=weight) weights=weight)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testNonZeroLossWithScalarTensorWeight(self): def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3 weights = 2.3
loss = losses.mean_pairwise_squared_error( loss = losses.mean_pairwise_squared_error(
@ -1179,7 +1179,7 @@ class MeanPairwiseSquaredErrorTest(test.TestCase):
weights_placeholder: weights, weights_placeholder: weights,
}) })
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testInvalid3dWeighted2x0(self): def testInvalid3dWeighted2x0(self):
labels = np.array([ labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]], [[1, 9, 2], [12, 11, 10], [9, 8, 7]],

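Many of the loss tests above exercise array_ops.placeholder, feed_dict and Tensor.eval(), which only exist in graph mode. A minimal illustrative sketch of the failure mode behind the v1-only guard; this snippet is not taken from the change:

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

# Inside an explicit graph, building a placeholder works as usual.
with ops.Graph().as_default():
  x = array_ops.placeholder(dtypes.float32, shape=[None, 3])

# With eager execution enabled (the TF2 default), the same call raises
# RuntimeError, so a test built around feed_dict/eval cannot run eagerly.
if context.executing_eagerly():
  try:
    array_ops.placeholder(dtypes.float32, shape=[None, 3])
  except RuntimeError as e:
    print("placeholder is unavailable eagerly:", e)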

@ -23,6 +23,7 @@ from tensorflow.python.client import session
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradient_checker
@ -44,6 +45,7 @@ class MatrixBandPartTest(test_lib.TestCase):
def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_): def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):
@test_util.run_v1_only("b/120545219")
def Test(self): def Test(self):
mat = np.ones(shape_).astype(dtype_) mat = np.ones(shape_).astype(dtype_)
batch_mat = np.tile(mat, batch_shape_ + (1, 1)) batch_mat = np.tile(mat, batch_shape_ + (1, 1))
@ -73,6 +75,7 @@ class MatrixBandPartGradTest(test_lib.TestCase):
def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_): def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):
@test_util.run_v1_only("b/120545219")
def Test(self): def Test(self):
shape = batch_shape_ + shape_ shape = batch_shape_ + shape_
x = constant_op.constant(np.random.rand(*shape), dtype=dtype_) x = constant_op.constant(np.random.rand(*shape), dtype=dtype_)


@ -84,6 +84,7 @@ class LogarithmOpTest(test.TestCase):
# Complex batch # Complex batch
self._verifyLogarithmComplex(self._makeBatch(matrix1, matrix2)) self._verifyLogarithmComplex(self._makeBatch(matrix1, matrix2))
@test_util.run_v1_only("b/120545219")
def testNonSquareMatrix(self): def testNonSquareMatrix(self):
# When the logarithm of a non-square matrix is attempted we should return # When the logarithm of a non-square matrix is attempted we should return
# an error # an error
@ -91,6 +92,7 @@ class LogarithmOpTest(test.TestCase):
gen_linalg_ops.matrix_logarithm( gen_linalg_ops.matrix_logarithm(
np.array([[1., 2., 3.], [3., 4., 5.]], dtype=np.complex64)) np.array([[1., 2., 3.], [3., 4., 5.]], dtype=np.complex64))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self): def testWrongDimensions(self):
# The input to the logarithm should be at least a 2-dimensional tensor. # The input to the logarithm should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.], dtype=dtypes.complex64) tensor3 = constant_op.constant([1., 2.], dtype=dtypes.complex64)
@ -121,6 +123,7 @@ class LogarithmOpTest(test.TestCase):
size=np.prod(shape)).reshape(shape).astype(np.complex128) size=np.prod(shape)).reshape(shape).astype(np.complex128)
self._verifyLogarithmComplex(matrix) self._verifyLogarithmComplex(matrix)
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self): def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess: with self.session(use_gpu=True) as sess:
matrix1 = math_ops.cast( matrix1 = math_ops.cast(


@ -135,7 +135,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
self.assertEqual(np_ans.shape, tf_ans_val.shape) self.assertEqual(np_ans.shape, tf_ans_val.shape)
self.assertAllClose(np_ans, tf_ans_val, atol=2 * tol, rtol=2 * tol) self.assertAllClose(np_ans, tf_ans_val, atol=2 * tol, rtol=2 * tol)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testWrongDimensions(self): def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows. # The matrix and right-hand sides should have the same number of rows.
with self.session(use_gpu=True): with self.session(use_gpu=True):
@ -144,7 +144,6 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
linalg_ops.matrix_solve_ls(matrix, rhs) linalg_ops.matrix_solve_ls(matrix, rhs)
@test_util.run_deprecated_v1
def testEmpty(self): def testEmpty(self):
full = np.array([[1., 2.], [3., 4.], [5., 6.]]) full = np.array([[1., 2.], [3., 4.], [5., 6.]])
empty0 = np.empty([3, 0]) empty0 = np.empty([3, 0])
@ -164,7 +163,7 @@ class MatrixSolveLsOpTest(test_lib.TestCase):
linalg_ops.matrix_solve_ls(empty1, empty1, fast=fast)) linalg_ops.matrix_solve_ls(empty1, empty1, fast=fast))
self.assertEqual(tf_ans.shape, (2, 2)) self.assertEqual(tf_ans.shape, (2, 2))
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testBatchResultSize(self): def testBatchResultSize(self):
# 3x3x3 matrices, 3x3x1 right-hand sides. # 3x3x3 matrices, 3x3x1 right-hand sides.
matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3) matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3)


@ -90,17 +90,20 @@ class SquareRootOpTest(test.TestCase):
self._verifySquareRootReal(np.empty([0, 2, 2])) self._verifySquareRootReal(np.empty([0, 2, 2]))
self._verifySquareRootReal(np.empty([2, 0, 0])) self._verifySquareRootReal(np.empty([2, 0, 0]))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self): def testWrongDimensions(self):
# The input to the square root should be at least a 2-dimensional tensor. # The input to the square root should be at least a 2-dimensional tensor.
tensor = constant_op.constant([1., 2.]) tensor = constant_op.constant([1., 2.])
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
gen_linalg_ops.matrix_square_root(tensor) gen_linalg_ops.matrix_square_root(tensor)
@test_util.run_v1_only("b/120545219")
def testNotSquare(self): def testNotSquare(self):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
tensor = constant_op.constant([[1., 0., -1.], [-1., 1., 0.]]) tensor = constant_op.constant([[1., 0., -1.], [-1., 1., 0.]])
self.evaluate(gen_linalg_ops.matrix_square_root(tensor)) self.evaluate(gen_linalg_ops.matrix_square_root(tensor))
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self): def testConcurrentExecutesWithoutError(self):
with test_util.use_gpu(): with test_util.use_gpu():
matrix1 = random_ops.random_normal([5, 5], seed=42) matrix1 = random_ops.random_normal([5, 5], seed=42)


@ -21,6 +21,7 @@ from __future__ import print_function
import numpy as np import numpy as np
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test as test_lib from tensorflow.python.platform import test as test_lib
@ -35,6 +36,7 @@ def _AddTest(test, test_name, fn):
class NormOpTest(test_lib.TestCase): class NormOpTest(test_lib.TestCase):
@test_util.run_v1_only("b/120545219")
def testBadOrder(self): def testBadOrder(self):
matrix = [[0., 1.], [2., 3.]] matrix = [[0., 1.], [2., 3.]]
for ord_ in "fro", -7, -1.1, 0: for ord_ in "fro", -7, -1.1, 0:
@ -52,6 +54,7 @@ class NormOpTest(test_lib.TestCase):
"'ord' must be a supported matrix norm"): "'ord' must be a supported matrix norm"):
linalg_ops.norm(matrix, ord=ord_, axis=[-2, -1]) linalg_ops.norm(matrix, ord=ord_, axis=[-2, -1])
@test_util.run_v1_only("b/120545219")
def testInvalidAxis(self): def testInvalidAxis(self):
matrix = [[0., 1.], [2., 3.]] matrix = [[0., 1.], [2., 3.]]
for axis_ in [], [1, 2, 3], [[1]], [[1], [2]], [3.1415], [1, 1]: for axis_ in [], [1, 2, 3], [[1]], [[1], [2]], [3.1415], [1, 1]:
@ -78,6 +81,7 @@ def _GetNormOpTest(dtype_, shape_, ord_, axis_, keep_dims_, use_static_shape_):
tf_norm_val = sess.run(tf_norm, feed_dict={tf_matrix: matrix}) tf_norm_val = sess.run(tf_norm, feed_dict={tf_matrix: matrix})
self.assertAllClose(np_norm, tf_norm_val, rtol=1e-5, atol=1e-5) self.assertAllClose(np_norm, tf_norm_val, rtol=1e-5, atol=1e-5)
@test_util.run_v1_only("b/120545219")
def Test(self): def Test(self):
is_matrix_norm = (isinstance(axis_, tuple) or is_matrix_norm = (isinstance(axis_, tuple) or
isinstance(axis_, list)) and len(axis_) == 2 isinstance(axis_, list)) and len(axis_) == 2


@ -64,9 +64,9 @@ class VerifyTensorAllFiniteTest(test.TestCase):
self.evaluate(t_verified) self.evaluate(t_verified)
@test_util.run_v1_only("b/120545219")
class NumericsTest(test.TestCase): class NumericsTest(test.TestCase):
@test_util.run_deprecated_v1
def testInf(self): def testInf(self):
with self.session(graph=ops.Graph()): with self.session(graph=ops.Graph()):
t1 = constant_op.constant(1.0) t1 = constant_op.constant(1.0)
@ -77,7 +77,6 @@ class NumericsTest(test.TestCase):
with self.assertRaisesOpError("Inf"): with self.assertRaisesOpError("Inf"):
self.evaluate(a) self.evaluate(a)
@test_util.run_deprecated_v1
def testNaN(self): def testNaN(self):
with self.session(graph=ops.Graph()): with self.session(graph=ops.Graph()):
t1 = constant_op.constant(0.0) t1 = constant_op.constant(0.0)
@ -88,7 +87,6 @@ class NumericsTest(test.TestCase):
with self.assertRaisesOpError("NaN"): with self.assertRaisesOpError("NaN"):
self.evaluate(a) self.evaluate(a)
@test_util.run_deprecated_v1
def testBoth(self): def testBoth(self):
with self.session(graph=ops.Graph()): with self.session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 0.0]) t1 = constant_op.constant([1.0, 0.0])
@ -107,7 +105,6 @@ class NumericsTest(test.TestCase):
self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value) self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
self.assertEqual([2, 3], checked.get_shape()) self.assertEqual([2, 3], checked.get_shape())
@test_util.run_deprecated_v1
def testControlFlowCond(self): def testControlFlowCond(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[]) predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.cond(predicate, _ = control_flow_ops.cond(predicate,
@ -120,7 +117,6 @@ class NumericsTest(test.TestCase):
r"or `tf.while_loop\(\)`\."): r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops() numerics.add_check_numerics_ops()
@test_util.run_deprecated_v1
def testControlFlowWhile(self): def testControlFlowWhile(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[]) predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.while_loop(lambda _: predicate, _ = control_flow_ops.while_loop(lambda _: predicate,


@ -29,11 +29,13 @@ from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test from tensorflow.python.platform import test
@test_util.run_v1_only("b/120545219")
class PaddingFIFOQueueTest(test.TestCase): class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self): def testConstructor(self):
@ -1393,6 +1395,7 @@ class PaddingFIFOQueueTest(test.TestCase):
with self.assertRaisesOpError("was cancelled"): with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op) self.evaluate(enqueue_many_op)
@test_util.run_deprecated_v1
def testResetOfBlockingOperation(self): def testResetOfBlockingOperation(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),)) q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))


@ -412,7 +412,7 @@ class PartitionedVariablesTestCase(test.TestCase):
def testResourceName(self): def testResourceName(self):
self._testNameHelper(use_resource=True) self._testNameHelper(use_resource=True)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testRandomInitValue(self): def testRandomInitValue(self):
with self.cached_session(): with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([200, 40])) rnd = variables.Variable(random_ops.random_uniform([200, 40]))
@ -430,7 +430,7 @@ class PartitionedVariablesTestCase(test.TestCase):
"200 40 0,200:36,4" "200 40 0,200:36,4"
]) ])
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testRandomInitUnevenPartitions(self): def testRandomInitUnevenPartitions(self):
with self.cached_session(): with self.cached_session():
rnd = variables.Variable( rnd = variables.Variable(
@ -469,7 +469,7 @@ class PartitionedVariablesTestCase(test.TestCase):
if i < len(save_specs): if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i]) self._TestSaveSpec(vs, save_specs[i])
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testDegenerate(self): def testDegenerate(self):
with self.cached_session(): with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43])) rnd = variables.Variable(random_ops.random_uniform([10, 43]))
@ -481,7 +481,7 @@ class PartitionedVariablesTestCase(test.TestCase):
self.assertAllClose(rnd, val) self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"]) self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testSliceSizeOne(self): def testSliceSizeOne(self):
with self.cached_session(): with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43])) rnd = variables.Variable(random_ops.random_uniform([10, 43]))


@ -27,6 +27,7 @@ import numpy as np
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import data_flow_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
@ -35,6 +36,7 @@ from tensorflow.python.platform import test
class PriorityQueueTest(test.TestCase): class PriorityQueueTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testRoundTripInsertReadOnceSorts(self): def testRoundTripInsertReadOnceSorts(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), ( q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
@ -112,6 +114,7 @@ class PriorityQueueTest(test.TestCase):
missed.remove((dv0, dv1)) missed.remove((dv0, dv1))
self.assertEqual(missed, set()) self.assertEqual(missed, set())
@test_util.run_v1_only("b/120545219")
def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self): def testRoundTripFillsCapacityMultiThreadedEnqueueAndDequeue(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (())) q = data_flow_ops.PriorityQueue(10, (dtypes.int64), (()))
@ -267,6 +270,7 @@ class PriorityQueueTest(test.TestCase):
missed.remove((dv0, dv1)) missed.remove((dv0, dv1))
self.assertEqual(missed, set()) self.assertEqual(missed, set())
@test_util.run_v1_only("b/120545219")
def testRoundTripInsertOnceReadOnceSorts(self): def testRoundTripInsertOnceReadOnceSorts(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), ( q = data_flow_ops.PriorityQueue(2000, (dtypes.string, dtypes.string), (
@ -288,6 +292,7 @@ class PriorityQueueTest(test.TestCase):
for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1): for e, dv0, dv1 in zip(deq_elem, deq_value_0, deq_value_1):
self.assertTrue((dv0, dv1) in allowed[e]) self.assertTrue((dv0, dv1) in allowed[e])
@test_util.run_v1_only("b/120545219")
def testRoundTripInsertOnceReadManySorts(self): def testRoundTripInsertOnceReadManySorts(self):
with self.cached_session(): with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (())) q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
@ -296,6 +301,7 @@ class PriorityQueueTest(test.TestCase):
deq_values = np.hstack((q.dequeue_many(100)[0].eval() for _ in range(10))) deq_values = np.hstack((q.dequeue_many(100)[0].eval() for _ in range(10)))
self.assertAllEqual(deq_values, sorted(elem)) self.assertAllEqual(deq_values, sorted(elem))
@test_util.run_v1_only("b/120545219")
def testRoundTripInsertOnceReadOnceLotsSorts(self): def testRoundTripInsertOnceReadOnceLotsSorts(self):
with self.cached_session(): with self.cached_session():
q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (())) q = data_flow_ops.PriorityQueue(2000, (dtypes.int64), (()))
@ -311,6 +317,7 @@ class PriorityQueueTest(test.TestCase):
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
q.enqueue_many((["a", "b", "c"], ["a", "b", "c"])).run() q.enqueue_many((["a", "b", "c"], ["a", "b", "c"])).run()
@test_util.run_v1_only("b/120545219")
def testInsertingNonScalarFails(self): def testInsertingNonScalarFails(self):
with self.cached_session() as sess: with self.cached_session() as sess:
input_priority = array_ops.placeholder(dtypes.int64) input_priority = array_ops.placeholder(dtypes.int64)


@ -102,6 +102,7 @@ class PyFuncTest(test.TestCase):
script_ops.eager_py_func(np_func, [x, y], [dtypes.float32])) script_ops.eager_py_func(np_func, [x, y], [dtypes.float32]))
self.assertEqual(z[0], np_func(1.0, 2.0).astype(np.float32)) self.assertEqual(z[0], np_func(1.0, 2.0).astype(np.float32))
@test_util.run_v1_only("b/120545219")
def testArray(self): def testArray(self):
with self.cached_session(): with self.cached_session():
x = constant_op.constant([1.0, 2.0], dtypes.float64) x = constant_op.constant([1.0, 2.0], dtypes.float64)
@ -168,6 +169,7 @@ class PyFuncTest(test.TestCase):
(dtypes.float64, dtypes.float64))) (dtypes.float64, dtypes.float64)))
self.assertAllClose(y, [0.0, 1.0]) self.assertAllClose(y, [0.0, 1.0])
@test_util.run_v1_only("b/120545219")
def testStrings(self): def testStrings(self):
def read_fixed_length_numpy_strings(): def read_fixed_length_numpy_strings():
@ -185,6 +187,7 @@ class PyFuncTest(test.TestCase):
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string)) script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"]) self.assertAllEqual(z, [b"hello there", b"hi there"])
@test_util.run_v1_only("b/120545219")
def testStringsAreConvertedToBytes(self): def testStringsAreConvertedToBytes(self):
def read_fixed_length_numpy_strings(): def read_fixed_length_numpy_strings():
@ -202,6 +205,7 @@ class PyFuncTest(test.TestCase):
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string)) script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"]) self.assertAllEqual(z, [b"hello there", b"hi there"])
@test_util.run_v1_only("b/120545219")
def testObjectArraysAreConvertedToBytes(self): def testObjectArraysAreConvertedToBytes(self):
def read_object_array(): def read_object_array():
@ -217,12 +221,14 @@ class PyFuncTest(test.TestCase):
z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string]) z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
self.assertListEqual(list(z.eval()), [b"hello there", b"hi ya"]) self.assertListEqual(list(z.eval()), [b"hello there", b"hi ya"])
@test_util.run_v1_only("b/120545219")
def testStringPadding(self): def testStringPadding(self):
correct = [b"this", b"is", b"a", b"test"] correct = [b"this", b"is", b"a", b"test"]
with self.cached_session(): with self.cached_session():
s, = script_ops.py_func(lambda: [correct], [], [dtypes.string]) s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct) self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testStringPaddingAreConvertedToBytes(self): def testStringPaddingAreConvertedToBytes(self):
inp = ["this", "is", "a", "test"] inp = ["this", "is", "a", "test"]
correct = [b"this", b"is", b"a", b"test"] correct = [b"this", b"is", b"a", b"test"]
@ -230,6 +236,7 @@ class PyFuncTest(test.TestCase):
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string]) s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct) self.assertAllEqual(s.eval(), correct)
@test_util.run_v1_only("b/120545219")
def testLarge(self): def testLarge(self):
with self.cached_session() as sess: with self.cached_session() as sess:
x = array_ops.zeros([1000000], dtype=np.float32) x = array_ops.zeros([1000000], dtype=np.float32)
@ -243,6 +250,7 @@ class PyFuncTest(test.TestCase):
x = self.evaluate(script_ops.py_func(lambda: 42.0, [], dtypes.float64)) x = self.evaluate(script_ops.py_func(lambda: 42.0, [], dtypes.float64))
self.assertAllClose(x, 42.0) self.assertAllClose(x, 42.0)
@test_util.run_v1_only("b/120545219")
def testAlias(self): def testAlias(self):
with self.cached_session(): with self.cached_session():
np_array = np.array([1.0, 2.0], dtype=np.float32) np_array = np.array([1.0, 2.0], dtype=np.float32)
@ -251,6 +259,7 @@ class PyFuncTest(test.TestCase):
value.op.run() value.op.run()
self.assertAllEqual(np_array, [1.0, 2.0]) self.assertAllEqual(np_array, [1.0, 2.0])
@test_util.run_v1_only("b/120545219")
def testReturnUnicodeString(self): def testReturnUnicodeString(self):
with self.cached_session(): with self.cached_session():
correct = u"你好 世界" correct = u"你好 世界"
@ -261,6 +270,7 @@ class PyFuncTest(test.TestCase):
z, = script_ops.py_func(unicode_string, [], [dtypes.string]) z, = script_ops.py_func(unicode_string, [], [dtypes.string])
self.assertEqual(z.eval(), correct.encode("utf8")) self.assertEqual(z.eval(), correct.encode("utf8"))
@test_util.run_v1_only("b/120545219")
def testBadNumpyReturnType(self): def testBadNumpyReturnType(self):
with self.cached_session(): with self.cached_session():
@ -274,6 +284,7 @@ class PyFuncTest(test.TestCase):
"Unsupported numpy type"): "Unsupported numpy type"):
self.evaluate(y) self.evaluate(y)
@test_util.run_v1_only("b/120545219")
def testBadReturnType(self): def testBadReturnType(self):
with self.cached_session(): with self.cached_session():
@ -287,6 +298,7 @@ class PyFuncTest(test.TestCase):
"Unsupported object type"): "Unsupported object type"):
self.evaluate(z) self.evaluate(z)
@test_util.run_v1_only("b/120545219")
def testReturnInput(self): def testReturnInput(self):
with self.cached_session(): with self.cached_session():
@ -321,6 +333,7 @@ class PyFuncTest(test.TestCase):
self.assertEqual(self.evaluate(x), 0) self.assertEqual(self.evaluate(x), 0)
self.assertEqual(self.evaluate(x), 0) self.assertEqual(self.evaluate(x), 0)
@test_util.run_v1_only("b/120545219")
def testGradientFunction(self): def testGradientFunction(self):
# Input to tf.py_func is necessary, otherwise get_gradient_function() # Input to tf.py_func is necessary, otherwise get_gradient_function()
# returns None per default. # returns None per default.
@ -330,6 +343,7 @@ class PyFuncTest(test.TestCase):
self.assertEqual(None, ops.get_gradient_function(x.op)) self.assertEqual(None, ops.get_gradient_function(x.op))
self.assertEqual(None, ops.get_gradient_function(y.op)) self.assertEqual(None, ops.get_gradient_function(y.op))
@test_util.run_v1_only("b/120545219")
def testCOrder(self): def testCOrder(self):
with self.cached_session(): with self.cached_session():
val = [[1, 2], [3, 4]] val = [[1, 2], [3, 4]]
@ -337,6 +351,7 @@ class PyFuncTest(test.TestCase):
[dtypes.int64]) [dtypes.int64])
self.assertAllEqual(val, self.evaluate(x)) self.assertAllEqual(val, self.evaluate(x))
@test_util.run_v1_only("b/120545219")
def testParallel(self): def testParallel(self):
# Tests that tf.py_func's can run in parallel if they release the GIL. # Tests that tf.py_func's can run in parallel if they release the GIL.
with self.cached_session() as session: with self.cached_session() as session:
@ -382,6 +397,7 @@ class PyFuncTest(test.TestCase):
self.assertIsNone(ret) self.assertIsNone(ret)
self.assertAllEqual([3], s.value) self.assertAllEqual([3], s.value)
@test_util.run_v1_only("b/120545219")
def testNoReturnValueStateless(self): def testNoReturnValueStateless(self):
def do_nothing(unused_x): def do_nothing(unused_x):
@ -420,6 +436,7 @@ class PyFuncTest(test.TestCase):
with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check): with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
self.evaluate(f) self.evaluate(f)
@test_util.run_v1_only("b/120545219")
def testExceptionHandling(self): def testExceptionHandling(self):
with self.cached_session(): with self.cached_session():
self._testExceptionHandling(ValueError, errors.InvalidArgumentError) self._testExceptionHandling(ValueError, errors.InvalidArgumentError)
@ -514,7 +531,7 @@ class PyFuncTest(test.TestCase):
self.assertAllEqual(ret, [[3.0], [3.0], [3.0]]) self.assertAllEqual(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes @test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testEagerExceptionHandling(self): def testEagerExceptionHandling(self):
with test_util.device(use_gpu=True): with test_util.device(use_gpu=True):
self._testExceptionHandling( self._testExceptionHandling(
@ -534,7 +551,7 @@ class PyFuncTest(test.TestCase):
self._testExceptionHandling(WeirdError, errors.UnknownError, eager=True) self._testExceptionHandling(WeirdError, errors.UnknownError, eager=True)
@test_util.run_in_graph_and_eager_modes @test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testEagerReturningVariableRaisesError(self): def testEagerReturningVariableRaisesError(self):
def return_variable(): def return_variable():
return resource_variable_ops.ResourceVariable(0.0) return resource_variable_ops.ResourceVariable(0.0)
@ -558,6 +575,7 @@ class PyFuncTest(test.TestCase):
dy_dx = tape.gradient(y, x) dy_dx = tape.gradient(y, x)
self.assertEqual(self.evaluate(dy_dx), 6.0) self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraph(self): def testEagerGradientGraph(self):
def f(x): def f(x):
@ -568,6 +586,7 @@ class PyFuncTest(test.TestCase):
dy_dx = gradients_impl.gradients(y, x)[0] dy_dx = gradients_impl.gradients(y, x)[0]
self.assertEqual(self.evaluate(dy_dx), 6.0) self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphTwoOutputs(self): def testEagerGradientGraphTwoOutputs(self):
def f(x, y): def f(x, y):
@ -597,6 +616,7 @@ class PyFuncTest(test.TestCase):
self.assertEqual(self.evaluate(dz_dx), 6.0) self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0) self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphMultipleArgs(self): def testEagerGradientGraphMultipleArgs(self):
def f(x, y): def f(x, y):
@ -610,6 +630,7 @@ class PyFuncTest(test.TestCase):
self.assertEqual(self.evaluate(dz_dx), 6.0) self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0) self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphLogHuber(self): def testEagerGradientGraphLogHuber(self):
def log_huber(x, m): def log_huber(x, m):
@ -631,6 +652,7 @@ class PyFuncTest(test.TestCase):
self.assertEqual(y, 1.0) self.assertEqual(y, 1.0)
self.assertEqual(dy_dx, 2.0) self.assertEqual(dy_dx, 2.0)
@test_util.run_v1_only("b/120545219")
def testEagerRespectsDevicePlacmentOfOp(self): def testEagerRespectsDevicePlacmentOfOp(self):
def f(x): def f(x):


@ -40,6 +40,7 @@ def _AddTest(test_class, op_name, testcase_name, fn):
class QrOpTest(test.TestCase): class QrOpTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self): def testWrongDimensions(self):
# The input to qr should be a tensor of at least rank 2. # The input to qr should be a tensor of at least rank 2.
scalar = constant_op.constant(1.) scalar = constant_op.constant(1.)
@ -115,6 +116,7 @@ def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):
tol = 1e-14 tol = 1e-14
self.assertAllClose(identity, xx, atol=tol) self.assertAllClose(identity, xx, atol=tol)
@test_util.run_v1_only("b/120545219")
def Test(self): def Test(self):
np.random.seed(1) np.random.seed(1)
x_np = np.random.uniform( x_np = np.random.uniform(
@ -163,6 +165,7 @@ class QrGradOpTest(test.TestCase):
def _GetQrGradOpTest(dtype_, shape_, full_matrices_): def _GetQrGradOpTest(dtype_, shape_, full_matrices_):
@test_util.run_v1_only("b/120545219")
def Test(self): def Test(self):
np.random.seed(42) np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_) a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)


@ -29,11 +29,13 @@ from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("b/120545219")
class RandomShuffleQueueTest(test.TestCase): class RandomShuffleQueueTest(test.TestCase):
def setUp(self): def setUp(self):
@ -1415,6 +1417,7 @@ class RandomShuffleQueueTest(test.TestCase):
self.assertItemsEqual(elem, results) self.assertItemsEqual(elem, results)
@test_util.run_v1_only("b/120545219")
def testBigDequeueMany(self): def testBigDequeueMany(self):
with self.cached_session() as sess: with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),)) q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))


@ -628,7 +628,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
variable_def=other_v_def) variable_def=other_v_def)
self.assertTrue(other_v_prime._cached_value is not None) self.assertTrue(other_v_prime._cached_value is not None)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self): def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess: with ops.Graph().as_default(), self.cached_session() as sess:
v_def = resource_variable_ops.ResourceVariable( v_def = resource_variable_ops.ResourceVariable(
@ -733,7 +733,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.assertEqual(0.0, self.evaluate(v.value())) self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes @test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testDestroyResource(self): def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0") v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer()) self.evaluate(variables.global_variables_initializer())
@ -792,7 +792,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class") _ = w.value().op.get_attr("_class")
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testSharedName(self): def testSharedName(self):
with self.cached_session(): with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4") v = resource_variable_ops.ResourceVariable(300.0, name="var4")
@ -849,7 +849,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
v.initializer.run(feed_dict={v.initial_value: 3.0}) v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval()) self.assertEqual(3.0, v.value().eval())
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self): def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope.""" """Expects an error if an initializer is in a control-flow scope."""
@ -986,7 +986,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
self.assertAllEqual(self.evaluate(v.assign_add(1)), [1, 2, 3, 4]) self.assertAllEqual(self.evaluate(v.assign_add(1)), [1, 2, 3, 4])
@test_util.run_in_graph_and_eager_modes @test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testCopyToGraphUninitialized(self): def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3]) v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph() copy_to_graph = ops.Graph()


@ -262,7 +262,7 @@ class RNNTest(test.TestCase):
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=[4]) rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=[4])
@test_util.run_in_graph_and_eager_modes @test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testTensorArrayStateIsAccepted(self): def testTensorArrayStateIsAccepted(self):
cell = TensorArrayStateRNNCell() cell = TensorArrayStateRNNCell()
in_eager_mode = context.executing_eagerly() in_eager_mode = context.executing_eagerly()


@ -217,7 +217,7 @@ class StatefulScatterNdTest(test.TestCase):
def testVariableRankAdd(self): def testVariableRankAdd(self):
self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add) self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testVariableRankSub(self): def testVariableRankSub(self):
self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub) self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
@ -235,7 +235,7 @@ class StatefulScatterNdTest(test.TestCase):
self._VariableRankTest( self._VariableRankTest(
np_scatter, tf_scatter, vtype, itype, repeat_indices=True) np_scatter, tf_scatter, vtype, itype, repeat_indices=True)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testScatterRepeatIndices(self): def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat.""" """This tests scatter_add using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add) self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
@ -257,7 +257,7 @@ class StatefulScatterNdTest(test.TestCase):
# session.run([update0, update1]) # session.run([update0, update1])
# self.assertAllEqual([False, True], self.evaluate(var)) # self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testScatterOutOfRangeCpu(self): def testScatterOutOfRangeCpu(self):
# TODO(simister): Re-enable once binary size increase due to # TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control. # scatter_nd ops is under control.
@ -294,7 +294,7 @@ class StatefulScatterNdTest(test.TestCase):
state_ops.scatter_nd_update(ref, indices, state_ops.scatter_nd_update(ref, indices,
updates).get_shape().as_list(), shape) updates).get_shape().as_list(), shape)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testResVarInvalidOutputShape(self): def testResVarInvalidOutputShape(self):
res = variables.Variable( res = variables.Variable(
initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32), initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
@ -509,7 +509,7 @@ class ScatterNdTest(test.TestCase):
ValueError, "Indices and updates specified for empty output shape"): ValueError, "Indices and updates specified for empty output shape"):
self.scatter_nd(indices, updates, shape) self.scatter_nd(indices, updates, shape)
@test_util.run_deprecated_v1 @test_util.run_v1_only("b/120545219")
def testEmptyOutputShape2(self): def testEmptyOutputShape2(self):
indices = array_ops.placeholder(dtypes.int32, shape=None) indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=None) updates = array_ops.placeholder(dtypes.int32, shape=None)
@ -717,6 +717,7 @@ class ScatterNdTensorTest(test.TestCase):
self.assertAllEqual(subbed, self.assertAllEqual(subbed,
constant_op.constant([1, -10, 1, -9, -8, 1, 1, -11])) constant_op.constant([1, -10, 1, -9, -8, 1, 1, -11]))
@test_util.run_v1_only("b/120545219")
def testUpdateAddSubGradients(self): def testUpdateAddSubGradients(self):
with self.cached_session(): with self.cached_session():


@ -22,6 +22,7 @@ import numpy as np
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2 from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import linalg_ops
@ -39,6 +40,7 @@ def _AddTest(test_class, op_name, testcase_name, fn):
class SelfAdjointEigTest(test.TestCase): class SelfAdjointEigTest(test.TestCase):
@test_util.run_deprecated_v1
def testWrongDimensions(self): def testWrongDimensions(self):
# The input to self_adjoint_eig should be a tensor of # The input to self_adjoint_eig should be a tensor of
# at least rank 2. # at least rank 2.
@ -49,6 +51,7 @@ class SelfAdjointEigTest(test.TestCase):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
linalg_ops.self_adjoint_eig(vector) linalg_ops.self_adjoint_eig(vector)
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self): def testConcurrentExecutesWithoutError(self):
all_ops = [] all_ops = []
with self.session(use_gpu=True) as sess: with self.session(use_gpu=True) as sess:


@ -20,6 +20,7 @@ from __future__ import print_function
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops from tensorflow.python.ops import math_ops
from tensorflow.python.ops import session_ops from tensorflow.python.ops import session_ops
@ -28,6 +29,7 @@ from tensorflow.python.ops import variables
from tensorflow.python.platform import test from tensorflow.python.platform import test
@test_util.run_v1_only("b/120545219")
class SessionOpsTest(test.TestCase): class SessionOpsTest(test.TestCase):
def testHandleBasic(self): def testHandleBasic(self):
@ -232,6 +234,7 @@ class SessionOpsTest(test.TestCase):
b_p: b_handle.handle}) b_p: b_handle.handle})
self.assertEqual(3.0, c_handle.eval()) self.assertEqual(3.0, c_handle.eval())
@test_util.run_v1_only("b/120545219")
def testFeedOneHandleDirectly(self): def testFeedOneHandleDirectly(self):
with self.cached_session() as sess: with self.cached_session() as sess:
a = constant_op.constant(10.0) a = constant_op.constant(10.0)
@ -243,6 +246,7 @@ class SessionOpsTest(test.TestCase):
self.assertAllClose(2500.0, sess.run(d, feed_dict={c: h_c})) self.assertAllClose(2500.0, sess.run(d, feed_dict={c: h_c}))
@test_util.run_v1_only("b/120545219")
def testDirectHandleFeedOverlappingWithFetches(self): def testDirectHandleFeedOverlappingWithFetches(self):
with self.cached_session() as sess: with self.cached_session() as sess:
a = constant_op.constant(10.0) a = constant_op.constant(10.0)
@ -283,6 +287,7 @@ class SessionOpsTest(test.TestCase):
self.assertAllClose(48.0, sess.run(e, feed_dict={c: h_c, d: h_d})) self.assertAllClose(48.0, sess.run(e, feed_dict={c: h_c, d: h_d}))
self.assertAllClose(-48.0, sess.run(e, feed_dict={c: h_d, d: h_c})) self.assertAllClose(-48.0, sess.run(e, feed_dict={c: h_d, d: h_c}))
@test_util.run_v1_only("b/120545219")
def testFeedHandleToVariableDirectly(self): def testFeedHandleToVariableDirectly(self):
with self.cached_session() as sess: with self.cached_session() as sess:
a = variables.Variable(12.0) a = variables.Variable(12.0)


@@ -66,6 +66,7 @@ class ReconstructionOpsTest(test.TestCase):
self.assertAllClose(output, expected_output)
+@test_util.run_deprecated_v1
def test_unknown_shapes(self):
# This test uses placeholders and does not work in eager mode.
if context.executing_eagerly():
@@ -85,6 +86,7 @@ class ReconstructionOpsTest(test.TestCase):
self.assertAllClose(output, expected_output)
+@test_util.run_deprecated_v1
def test_unknown_rank(self):
# This test uses placeholders and does not work in eager mode.
if context.executing_eagerly():
@@ -104,6 +106,7 @@ class ReconstructionOpsTest(test.TestCase):
self.assertAllClose(output, expected_output)
+@test_util.run_deprecated_v1
def test_fast_path(self):
# This test uses tensor names and does not work in eager mode.
if context.executing_eagerly():

View File

@@ -267,7 +267,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
self.assertAllEqual(val.values, [[5, 5], [0, 20], [30, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testParallelApplyGradMean(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
@@ -299,7 +299,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testParallelApplyGradSum(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
@@ -334,7 +334,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testParallelTakeGrad(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
@@ -374,7 +374,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
self._assertEqual_nparray(
np.array([[0, 0], [elems[i], 0]]), results[i], sess)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testAccumulatorApplyAndBlockingTake(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
@@ -410,7 +410,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(takeg_op)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testAccumulatorCancel(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
@@ -430,7 +430,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
takeg_thread.join()
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testNonVectorIndices(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
@@ -443,7 +443,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
grad_indices=[[0, 1], [1, 0]],
grad_values=np.array([1, 2]).astype(np.float32)).run()
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testZeroDimensionValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
@@ -454,7 +454,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
q.apply_grad(
grad_indices=[0], grad_values=np.array(1).astype(np.float32)).run()
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testWrongNonEmptyInputValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
@@ -466,7 +466,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
grad_indices=[0, 1],
grad_values=np.array([[0, 1, 1]]).astype(np.float32)).run()
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testDynamicNonVectorIndices(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
@@ -486,7 +486,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
x_values: np.array([1, 2]).astype(np.float32)
})
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testDynamicWrongNonEmptyInputValues(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
@@ -505,7 +505,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
x_values: np.array([[0, 1, 1]]).astype(np.float32)
})
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testEmptyShapeApply(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
@@ -531,7 +531,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
q.apply_grad(grad_indices=[0], grad_values=[1.0], grad_shape=[]).run()
q.apply_grad(grad_indices=[0], grad_values=[1.0]).run()
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testValidateShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
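
In the IndexedSlicesConditionalAccumulatorTest hunks above the existing @test_util.run_deprecated_v1 annotations are replaced rather than supplemented. As I understand the two decorators, run_deprecated_v1 still executes the test body, only wrapped in a v1-style graph, whereas run_v1_only skips the test outright whenever TF 2.x behavior is enabled. A hedged, hypothetical sketch of that difference follows; the class and method names are invented, and this is not the test_util implementation, just a usage illustration:

from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class AccumulatorAnnotationStyleTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testStillRunsUnderV2(self):
    # run_deprecated_v1 still runs the body, just inside graph mode.
    self.assertFalse(context.executing_eagerly())

  @test_util.run_v1_only("b/120545219")
  def testSkippedWhenV2IsEnabled(self):
    # run_v1_only skips this case entirely under 2.x behavior, so the body
    # only ever executes with v1 semantics.
    self.assertTrue(True)


if __name__ == "__main__":
  test.main()
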

View File

@@ -96,7 +96,7 @@ class StackOpTest(test.TestCase):
c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, self.evaluate(ry))
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)
@@ -248,7 +248,7 @@ class StackOpRefTest(test.TestCase):
c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
self.assertAllClose(np.ones(2000) * 10.0, self.evaluate(ry))
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testStackWhileSwap(self):
self._testStackWhileSwap(use_gpu=False)
self._testStackWhileSwap(use_gpu=True)

View File

@@ -22,6 +22,7 @@ import numpy as np
from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
@@ -39,6 +40,7 @@ def _AddTest(test_class, op_name, testcase_name, fn):
class SvdOpTest(test.TestCase):
+@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
@@ -50,6 +52,7 @@ class SvdOpTest(test.TestCase):
"Shape must be at least rank 2 but is rank 1"):
linalg_ops.svd(vector)
+@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
all_ops = []
@@ -126,6 +129,7 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
self.assertAllClose(identity, xx, atol=tol)
+@test_util.run_v1_only("b/120545219")
def Test(self):
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
@@ -214,6 +218,7 @@ def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
tf_v *= phase[..., :n]
return tf_s, tf_u, tf_v
+@test_util.run_v1_only("b/120545219")
def Test(self):
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)

View File

@@ -309,7 +309,7 @@ class TensorArrayTest(test.TestCase):
self._testTensorArraySplitRead(dtypes.string)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
@@ -364,7 +364,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual([2.0], session.run(g2))
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayDynamicWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
@@ -407,7 +407,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
@@ -424,7 +424,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
@@ -458,7 +458,7 @@ class TensorArrayTest(test.TestCase):
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(3, 3.0).flow)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
@@ -493,7 +493,7 @@ class TensorArrayTest(test.TestCase):
self.evaluate(ta.read(3))
@test_util.disable_control_flow_v2("v2 allows multiple writes.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("v2 allows multiple writes.")
def testSkipEagerTensorArrayWriteMultipleFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@@ -504,7 +504,7 @@ class TensorArrayTest(test.TestCase):
"it has already been written to."):
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
@@ -536,7 +536,7 @@ class TensorArrayTest(test.TestCase):
with self.assertRaisesOpError("shape"):
self.evaluate(w3.concat())
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(use_gpu=True):
in_eager_mode = context.executing_eagerly()
@@ -611,14 +611,14 @@ class TensorArrayTest(test.TestCase):
wb1_grad.flow.eval()
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
@@ -649,7 +649,7 @@ class TensorArrayTest(test.TestCase):
sess.run(read_value, feed_dict={value: fed_value}))
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
@@ -778,7 +778,7 @@ class TensorArrayTest(test.TestCase):
self._testTensorArrayGradientWritePackConcatAndRead()
@test_util.disable_control_flow_v2("v2 does not support clear_after_read.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("v2 does not support clear_after_read.")
def testTensorArrayReadTwice(self):
with self.session(use_gpu=True):
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
@@ -1351,7 +1351,7 @@ class TensorArrayTest(test.TestCase):
.ENABLE_TENSOR_ARRAY_V2 else v1_msg):
ta.stack().eval()
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
@@ -1443,7 +1443,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(expected_grad, grad_vals[0])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWrite(self):
with ops.device("/job:worker/task:0/cpu:0"):
# this initial device will be ignored.
@@ -1493,7 +1493,7 @@ class TensorArrayTest(test.TestCase):
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
@@ -1524,7 +1524,7 @@ class TensorArrayTest(test.TestCase):
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
-@test_util.run_deprecated_v1
+@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayDisabledColocateWithFirstWriteCall(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(
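
The TensorArrayTest hunks follow a slightly different convention: where a test is already guarded by @test_util.disable_control_flow_v2, the new run_v1_only annotation reuses the same human-readable reason (for example "v2 does not support TensorArray.grad.") instead of the b/120545219 tracking bug. A small hypothetical sketch of that paired-annotation pattern; the class name, method name, and test body are invented, while the decorators and reason string come from the diff:

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test


class TensorArrayAnnotationStyleTest(test.TestCase):

  # The same reason string is passed to both decorators, so a skipped run
  # explains why the case is v1-only instead of pointing at a tracking bug.
  @test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
  @test_util.run_v1_only("v2 does not support TensorArray.grad.")
  def testSkipEagerGradStyleCase(self):
    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=1)
    w = ta.write(0, 1.0)
    self.assertAllEqual(1.0, self.evaluate(w.read(0)))


if __name__ == "__main__":
  test.main()
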

View File

@@ -24,6 +24,7 @@ from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
@@ -40,6 +41,7 @@ def _add_test(test, test_name, fn):
class TensordotTest(test_lib.TestCase):
+@test_util.run_v1_only("b/120545219")
def test_invalid_shape(self):
a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4], [5, 6]]
@@ -63,6 +65,7 @@ class TensordotTest(test_lib.TestCase):
axes_ph: (a_axes, b_axes)
})
+@test_util.run_v1_only("b/120545219")
def test_invalid_axes(self):
a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4]]
@@ -105,6 +108,7 @@ class TensordotTest(test_lib.TestCase):
self.assertAllEqual(tf_ans.shape, np_ans.shape)
self.assertAllEqual(tf_ans, np_ans)
+@test_util.run_v1_only("b/120545219")
def test_partial_shape_inference(self):
for axes in ([1], [0]), 1:
a = array_ops.placeholder(dtypes.float32)
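
The TensordotTest hunks annotate placeholder-based shape tests. Since array_ops.placeholder only works while building a v1 graph, such tests are skipped under 2.x behavior rather than rerun, which is what run_v1_only provides. A hypothetical sketch along the lines of the test_partial_shape_inference case above; the class and method names are invented:

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib


class TensordotAnnotationStyleTest(test_lib.TestCase):

  # Placeholders exist only during v1 graph building, so this test cannot be
  # salvaged by a graph-mode rerun and is marked v1-only instead.
  @test_util.run_v1_only("b/120545219")
  def testUnknownRankPlaceholders(self):
    a = array_ops.placeholder(dtypes.float32)
    b = array_ops.placeholder(dtypes.float32)
    output = math_ops.tensordot(a, b, axes=1)
    # With both inputs of unknown rank, the static output shape is unknown.
    self.assertIsNone(output.get_shape().ndims)


if __name__ == "__main__":
  test_lib.main()
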

Some files were not shown because too many files have changed in this diff.