[ROCm] Re-enabling unit tests that are now passing on the ROCm platform

Deven Desai 2020-12-03 22:23:07 +00:00
parent 3980781f7f
commit 3c8f50d55f
13 changed files with 19 additions and 83 deletions
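Every file below removes or widens a guard of the same shape. On the Python side, the guard skipped a test at runtime on ROCm builds; a minimal sketch of that pattern follows (the test name and body are hypothetical, for illustration only; the real tests appear in the diffs below):

import tensorflow as tf

class ExampleTest(tf.test.TestCase):

  def test_op_on_gpu(self):
    # The guard shape this commit deletes: skip on ROCm builds until the
    # op under test is supported there.
    if tf.test.is_built_with_rocm():
      self.skipTest("Op not yet supported on the ROCm platform")
    self.assertAllClose([2.0, 3.0], [2.0, 3.0])

if __name__ == "__main__":
  tf.test.main()

The C++ tests use the compile-time equivalent: #ifdef GOOGLE_CUDA becomes #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM, so the GPU test bodies now build under both CUDA and ROCm.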

View File

@@ -631,13 +631,6 @@ TEST_F(ConstantFoldingTest, ConstShapeKnown) {
   }
 }
 
-// Disabling the following test on the ROCm platform because it relies on the
-// "topK" operator being supported on the ROCm platform (which is currently not
-// the case)
-// TODO(rocm) :
-// re-enable this test once support for "topK" operator is available on ROCm
-#ifndef TENSORFLOW_USE_ROCM
 TEST_F(ConstantFoldingTest, NoReplacePartialOutput) {
   Graph g(OpRegistry::Global());
   {
@@ -662,7 +655,6 @@ TEST_F(ConstantFoldingTest, NoReplacePartialOutput) {
                                       &g, &was_mutated));
   EXPECT_FALSE(was_mutated);
 }
-#endif  // TENSORFLOW_USE_ROCM
 
 namespace {

View File

@@ -102,7 +102,7 @@ TEST_F(DepthwiseConvOpTest, DepthwiseConvHalfCpu) {
   Run<Eigen::half>(Device::CPU);
 }
 
-#ifdef GOOGLE_CUDA
+#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 TEST_F(DepthwiseConvOpTest, DepthwiseConvFloatGpu) { Run<float>(Device::GPU); }
 
 TEST_F(DepthwiseConvOpTest, DepthwiseConvDoubleGpu) {
   Run<double>(Device::GPU);

View File

@@ -533,7 +533,7 @@ INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestCpu,
 INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestCpu,
                          ResizeBilinearOpAlignCornersTest,
                          ::testing::Values(TestDevice::CPU));
-#if GOOGLE_CUDA
+#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 // Instantiate tests for GPU.
 INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestGpu, ResizeBilinearOpTest,
                          ::testing::Values(TestDevice::GPU));
@@ -543,7 +543,7 @@ INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestGpu,
 INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestGpu,
                          ResizeBilinearOpAlignCornersTest,
                          ::testing::Values(TestDevice::GPU));
-#endif  // GOOGLE_CUDA
+#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 
 class ResizeBM : public ResizeBilinearOpTest {
  public:

View File

@@ -37,20 +37,14 @@ class JitCompileTest(test.TestCase):
       xla_func = def_function.function(fn, jit_compile=True)
       inputs = array_ops.placeholder(dtypes.float32, [5])
-      # XLA support is not yet enabled for TF ROCm
-      if not test.is_built_with_rocm():
-        x = xla_func(inputs, 1)
-        with session.Session(graph=g) as sess:
-          y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
-          self.assertTrue(x.graph.as_graph_def().library.function[0]
-                          .attr["_XlaMustCompile"].b)
-          self.assertAllClose([2, 3, 3, 4, 4], y)
+      x = xla_func(inputs, 1)
+      with session.Session(graph=g) as sess:
+        y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
+        self.assertTrue(x.graph.as_graph_def().library.function[0]
+                        .attr["_XlaMustCompile"].b)
+        self.assertAllClose([2, 3, 3, 4, 4], y)
 
   def testDerivative(self):
-    # XLA support is not yet enabled for TF ROCm
-    if test.is_built_with_rocm():
-      return
-
     def fn(x, a):
       return 2 * x + a
 
@@ -81,14 +75,12 @@ class JitCompileTest(test.TestCase):
       xla_func = def_function.function(fn, jit_compile=True)
       inputs = array_ops.placeholder(dtypes.int32, [5])
-      # XLA support is not yet enabled for TF ROCm
-      if not test.is_built_with_rocm():
-        x = xla_func(inputs, 1)
-        with session.Session(graph=g) as sess:
-          y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
-          self.assertTrue(x.graph.as_graph_def().library.function[0]
-                          .attr["_XlaMustCompile"].b)
-          self.assertAllClose([2, 3, 3, 4, 4], y)
+      x = xla_func(inputs, 1)
+      with session.Session(graph=g) as sess:
+        y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
+        self.assertTrue(x.graph.as_graph_def().library.function[0]
+                        .attr["_XlaMustCompile"].b)
+        self.assertAllClose([2, 3, 3, 4, 4], y)
 
   # Checking that we crash on an unsupported operation lets us test that the XLA
   # compiler was actually invoked.
@@ -101,12 +93,10 @@ class JitCompileTest(test.TestCase):
       xla_func = def_function.function(fn, jit_compile=True)
       inputs = array_ops.placeholder(dtypes.float32, [5])
       x = xla_func(inputs)
-      # XLA support is not yet enabled for TF ROCm
-      if not test.is_built_with_rocm():
-        with self.assertRaisesRegex(errors.InvalidArgumentError,
-                                    "not compilable"):
-          with session.Session(graph=g) as sess:
-            sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "not compilable"):
+        with session.Session(graph=g) as sess:
+          sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
 
 if __name__ == "__main__":

View File

@@ -472,12 +472,6 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
 
   def testFusedBatchNormGradsInference(self):
-    if test.is_built_with_rocm():
-      # This test was added recently and has been failing on the ROCm
-      # platform, since it was added.
-      # TODO(rocm): do root cause analysis of test failure and fix it.
-      self.skipTest("Test fails on ROCm platform, needs further analysis")
-
     x_shape = [4, 10, 10, 2]
     increment = 3. / math_ops.reduce_prod(
         constant_op.constant(x_shape, dtype=dtypes.float32))

View File

@@ -847,8 +847,6 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
       y = backend.pool2d(x, (2, 2), strides=(2, 2), pool_mode='other')
 
   def test_pool3d(self):
-    if test.is_built_with_rocm():
-      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
     val = np.random.random((10, 3, 10, 10, 10))
     x = backend.variable(val)
     y = backend.pool3d(

View File

@@ -218,12 +218,6 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
                                   ("NoFunction", lambda f: f)])
   def testVariablesHVP(self, decorator):
-    if tf.test.is_built_with_rocm():
-      # TODO(rocm)
-      # This test was recently added and has never passed on the
-      # ROCm platform. Remove this skip once the test is passing again
-      self.skipTest("NoFunction decorator test fails on the ROCm platform")
-
     class _Model(tf.Module):
 
       def __init__(self):

View File

@@ -194,8 +194,6 @@ class Pooling2DTest(test.TestCase, parameterized.TestCase):
 class Pooling3DTest(test.TestCase, parameterized.TestCase):
 
   def test_maxpooling_3d(self):
-    if test.is_built_with_rocm():
-      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
     pool_size = (3, 3, 3)
     testing_utils.layer_test(
         keras.layers.MaxPooling3D,
@@ -214,8 +212,6 @@ class Pooling3DTest(test.TestCase, parameterized.TestCase):
         input_shape=(3, 4, 11, 12, 10))
 
   def test_averagepooling_3d(self):
-    if test.is_built_with_rocm():
-      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
     pool_size = (3, 3, 3)
     testing_utils.layer_test(
         keras.layers.AveragePooling3D,

View File

@@ -228,11 +228,6 @@ class RandomCropTest(keras_parameterized.TestCase):
     self._run_test(expected_height, expected_width)
 
   def test_training_with_mock(self):
-    if test.is_built_with_rocm():
-      # TODO(rocm):
-      # re-enable this test once ROCm adds support for
-      # the StatefulUniformFullInt Op (on the GPU)
-      self.skipTest('Feature not supported on ROCm')
     np.random.seed(1337)
     height, width = 3, 4
     height_offset = np.random.randint(low=0, high=3)
@@ -253,11 +248,6 @@ class RandomCropTest(keras_parameterized.TestCase):
       ('random_crop_4_by_6', 4, 6),
       ('random_crop_3_by_2', 3, 2))
   def test_random_crop_output_shape(self, expected_height, expected_width):
-    if test.is_built_with_rocm():
-      # TODO(rocm):
-      # re-enable this test once ROCm adds support for
-      # the StatefulUniformFullInt Op (on the GPU)
-      self.skipTest('Feature not supported on ROCm')
     with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
       self._run_test(expected_height, expected_width)

View File

@@ -200,13 +200,6 @@ if __name__ == '__main__':
       for lower in True, False:
         name = '%s_low_%s' % (name, lower)
-        if (name == 'float32_10_10_adj_False_low_True') and \
-            test_lib.is_built_with_rocm():
-          # Skip this one particular subtest on the ROCm platform
-          # It will fail because of 1 element in 10,000 mismatch,
-          # and the mismatch is minor (tolerance is 0.20, mismatch is 0.22)
-          # TODO(rocm) : investigate cause of mismatch and fix
-          continue
         _AddTest(MatrixBinaryFunctorGradientTest,
                  'MatrixTriangularSolveGradient', name,
                  _GetMatrixBinaryFunctorGradientTest(

View File

@@ -274,9 +274,6 @@ class PoolingTest(test.TestCase):
           strides=[1, 2],
           dilation_rate=[1, 1],
           data_format="NCHW")
-      if test.is_built_with_rocm():
-        # Pooling with 3D tensors is not supported in ROCm
-        continue
       self._test(
           input_shape=[2, 2, 7, 5, 3],
           window_shape=[2, 2, 2],

View File

@@ -57,7 +57,6 @@ from tensorflow.python.ops import variable_scope
 from tensorflow.python.ops import variables
 import tensorflow.python.ops.tensor_array_grad  # pylint: disable=unused-import
 from tensorflow.python.platform import googletest
-from tensorflow.python.platform import test
 from tensorflow.python.training import momentum
 from tensorflow.python.util import nest
@@ -1113,9 +1112,6 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     """Verify disjoint branches across while iterations are run in parallel."""
     if control_flow_v2_toggles.control_flow_v2_enabled():
       self.skipTest("b/138870290")
-    if test.is_built_with_rocm():
-      self.skipTest(
-          "Disable subtest on ROCm due to missing Cholesky op support")
     with ops.Graph().as_default() as g:
       nbranches = 7

View File

@@ -443,10 +443,6 @@ class OrthogonalInitializerTest(InitializersTest):
 
   @test_util.run_in_graph_and_eager_modes
   def testShapesValues(self):
-    if test.is_built_with_rocm():
-      self.skipTest("Disable subtest on ROCm due to missing QR op support")
-
     for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
       init = init_ops_v2.Orthogonal()
       tol = 1e-5