Remove run_deprecated_v1 annotations from collective ops tests.

PiperOrigin-RevId: 318344535
Change-Id: Idce034f9c0eba341047a76e74a3a57d6220c786f
Author:    Ayush Dubey  2020-06-25 14:08:29 -07:00
Committer: TensorFlower Gardener
Parent:    e4025640f7
Commit:    94f361d057

2 changed files with 21 additions and 21 deletions
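The change is mechanical: each test drops the @test_util.run_deprecated_v1 decorator and instead opens an explicit graph scope around its session, so the tests keep running in graph mode under TF2 test runners. A minimal sketch of the before/after shape (the test name and body here are illustrative, not from this commit):

    from tensorflow.python.framework import ops
    from tensorflow.python.platform import test


    class ExampleCollectiveTest(test.TestCase):

      def testExample(self):
        # Previously: @test_util.run_deprecated_v1 forced this body to run
        # in graph mode. Now the test builds its own graph explicitly,
        # since collectives must execute inside a graph or a tf.function.
        with ops.Graph().as_default(), self.session() as sess:
          pass  # build collective ops here, then sess.run(...)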


@@ -60,7 +60,6 @@ class CollectiveOpGPUTest(test.TestCase):
           len(gpus)))
     context.ensure_initialized()
 
-  @test_util.run_deprecated_v1
   def testBasicNcclAllReduce(self):
     inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
               [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
@@ -69,7 +68,9 @@ class CollectiveOpGPUTest(test.TestCase):
     instance_key = 1
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(config=self._configure()) as sess:
+    # Tests that execute collectives need to be enclosed in graph or tf.function
+    with ops.Graph().as_default(), self.session(
+        config=self._configure()) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       collectives = []
@@ -82,14 +83,15 @@ class CollectiveOpGPUTest(test.TestCase):
       for result in results:
         self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
 
-  @test_util.run_deprecated_v1
   def testInt32Error(self):
     inputs = [[0, 1], [2, 3]]
     group_key = 1
     instance_key = 50
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(config=self._configure()) as sess:
+    # Tests that execute collectives need to be enclosed in graph or tf.function
+    with ops.Graph().as_default(), self.session(
+        config=self._configure()) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       collectives = []
@@ -103,7 +105,6 @@ class CollectiveOpGPUTest(test.TestCase):
           'does not support datatype DT_INT32 on DEVICE_GPU'):
         sess.run(collectives)
 
-  @test_util.run_deprecated_v1
   def testFp16Reduce(self):
     inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
               [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
@@ -112,7 +113,8 @@ class CollectiveOpGPUTest(test.TestCase):
     instance_key = 100
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(config=self._configure()) as sess:
+    with ops.Graph().as_default(), self.session(
+        config=self._configure()) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       collectives = []
@@ -126,7 +128,6 @@ class CollectiveOpGPUTest(test.TestCase):
         logging.info('i {} result {} expected {}'.format(i, results[i], expected))
         self.assertAllClose(result, expected, rtol=1e-3, atol=1e-3)
 
-  @test_util.run_deprecated_v1
   def testNcclHintAllReduce(self):
     inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
               [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
@@ -135,7 +136,7 @@ class CollectiveOpGPUTest(test.TestCase):
     instance_key = 1
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(
+    with ops.Graph().as_default(), self.session(
         config=self._configure(set_config_proto_nccl=False)) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
@@ -150,14 +151,14 @@ class CollectiveOpGPUTest(test.TestCase):
       for result in results:
         self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
 
-  @test_util.run_deprecated_v1
   def testBasicNcclBroadcast(self):
     tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
     group_key = 1
     instance_key = 1
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(config=self._configure()) as sess:
+    with ops.Graph().as_default(), self.session(
+        config=self._configure()) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       collectives = []
@@ -173,14 +174,14 @@ class CollectiveOpGPUTest(test.TestCase):
       for result in results:
         self.assertAllClose(result, tensor_value, rtol=1e-5, atol=1e-5)
 
-  @test_util.run_deprecated_v1
   def testNcclBroadcastDoubleRecv(self):
     tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
     group_key = 1
     instance_key = 1
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(config=self._configure()) as sess:
+    with ops.Graph().as_default(), self.session(
+        config=self._configure()) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       collectives = []
@@ -192,14 +193,14 @@ class CollectiveOpGPUTest(test.TestCase):
       with self.assertRaisesRegexp(errors.InternalError, 'found no source'):
         sess.run(collectives)
 
-  @test_util.run_deprecated_v1
   def testNcclBroadcastDoubleSend(self):
     tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
     group_key = 1
     instance_key = 1
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(config=self._configure()) as sess:
+    with ops.Graph().as_default(), self.session(
+        config=self._configure()) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       collectives = []
@@ -211,7 +212,6 @@ class CollectiveOpGPUTest(test.TestCase):
       with self.assertRaisesRegexp(errors.InternalError, 'already has source'):
         sess.run(collectives)
 
-  @test_util.run_deprecated_v1
   def testBasicNcclAllGather(self):
     inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
               [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
@@ -221,7 +221,8 @@ class CollectiveOpGPUTest(test.TestCase):
     instance_key = 1
     devices = ['/GPU:{}'.format(i) for i in range(self._group_size)]
 
-    with self.session(config=self._configure()) as sess:
+    with ops.Graph().as_default(), self.session(
+        config=self._configure()) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       collectives = []
@@ -234,13 +235,13 @@ class CollectiveOpGPUTest(test.TestCase):
       for result in results:
         self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
 
-  @test_util.run_deprecated_v1
   def testCollectiveDeviceMismatch(self):
     group_key = 10
     instance_key = 20
     t0 = [1, 2, 3, 4]
     t1 = [5, 6, 7, 8]
-    with self.session(
+    with ops.Graph().as_default(), self.session(
         config=self._configure(set_config_proto_nccl=False)) as sess:
       if not test_util.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')

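The second file gets the same treatment; the decorator was its only use of test_util, so that import goes away as well (first hunk below). For context, a sketch of the ScopedAllocator rewriter configuration this test builds, reconstructed from the lines visible in the diff (the device_count value is an assumption):

    from tensorflow.core.protobuf import config_pb2
    from tensorflow.core.protobuf import rewriter_config_pb2

    cfg = config_pb2.ConfigProto(device_count={'CPU': 2})  # assumed group size
    rewrite_options = cfg.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    # These two lines appear verbatim in the diff: restrict the
    # ScopedAllocator optimizer to CollectiveReduce ops only.
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')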

@@ -23,7 +23,6 @@ from tensorflow.core.protobuf import rewriter_config_pb2
 from tensorflow.python.eager import def_function
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import collective_ops
 from tensorflow.python.platform import test
@@ -31,7 +30,6 @@ from tensorflow.python.platform import test
 
 class CollectiveOpXlaTest(test.TestCase):
 
-  @test_util.run_deprecated_v1
   def testScopedAllocatorWithXla(self):
     group_size = 2
     group_key = 1
@@ -50,7 +48,8 @@ class CollectiveOpXlaTest(test.TestCase):
     del rewrite_options.scoped_allocator_opts.enable_op[:]
     rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
 
-    with self.session(config=cfg) as sess:
+    # Tests that execute collectives need to be enclosed in graph or tf.function
+    with ops.Graph().as_default(), self.session(config=cfg) as sess:
       run_ops = []
       for i in range(group_size):
         with ops.device('CPU:%d' % i):
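For reference, the execution pattern these tests rely on, sketched outside the test harness (CPU devices and key values are illustrative; the GPU tests do the same thing with '/GPU:{}' devices and self._configure()): every participant of a collective is built in one graph and dispatched in a single Session.run call so the peers can rendezvous.

    from tensorflow.core.protobuf import config_pb2
    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import collective_ops

    with ops.Graph().as_default():
      collectives = []
      for i, tensor in enumerate([[0.1, 1.1], [0.3, 1.3]]):
        with ops.device('/CPU:%d' % i):
          collectives.append(
              collective_ops.all_reduce(
                  constant_op.constant(tensor), group_size=2, group_key=1,
                  instance_key=1, merge_op='Add', final_op='Id'))
      # Both participants must go into the same run call; running them
      # one at a time would block waiting for the missing peer.
      config = config_pb2.ConfigProto(device_count={'CPU': 2})
      with session.Session(config=config) as sess:
        print(sess.run(collectives))  # two copies of the summed tensor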