Merge pull request #30500 from ROCmSoftwarePlatform:google_upstream_skip_double_dtyp_subtests

PiperOrigin-RevId: 258814196
TensorFlower Gardener 2019-07-18 12:33:05 -07:00
commit f1b70ad839
6 changed files with 70 additions and 18 deletions
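All six files apply the same fix: build the list of dtypes to exercise once, dropping dtypes.float64 on ROCm builds, where MIOpen provides no double-precision convolution or pooling kernels. A minimal runnable sketch of the pattern, assuming only the public TF 2.x API (tf.test.is_built_with_rocm) in place of the internal test modules the files below import:

import tensorflow as tf

# Drop float64 on ROCm; MIOpen has no double-precision convolution kernels,
# so those subtests would fail outright rather than test anything.
optional_float64 = [] if tf.test.is_built_with_rocm() else [tf.float64]

for dtype in [tf.float16, tf.float32] + optional_float64:
  x = tf.constant([1, 2, 3, 4], dtype=dtype)
  x = tf.expand_dims(x, 0)  # add batch dimension
  x = tf.expand_dims(x, 2)  # add channel dimension
  filters = tf.ones([1, 1, 1], dtype=dtype)  # identity-like 1x1 filter
  y = tf.nn.conv1d(x, filters, stride=1, padding="VALID")
  print(dtype.name, y.shape)

The list is built once before the loop, so non-ROCm runs still cover all three dtypes and the subtest ordering stays stable.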

View File

@@ -31,7 +31,10 @@ class Conv1DTest(test.TestCase):

   def testBasic(self):
     """Test that argument passing to conv1d is handled properly."""
-    for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
+    # double datatype is currently not supported for convolution ops
+    # on the ROCm platform
+    optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
+    for dtype in [dtypes.float16, dtypes.float32] + optional_float64:
       x = constant_op.constant([1, 2, 3, 4], dtype=dtype)
       x = array_ops.expand_dims(x, 0)  # Add batch dimension
       x = array_ops.expand_dims(x, 2)  # And depth dimension

View File

@@ -51,13 +51,16 @@ def GetTestConfigs():
 class Conv3DTest(test.TestCase):

   def _DtypesToTest(self, use_gpu):
+    # double datatype is currently not supported for convolution ops
+    # on the ROCm platform
+    optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
     if use_gpu:
       if not test_util.GpuSupportsHalfMatMulAndConv():
-        return [dtypes.float64, dtypes.float32]
+        return optional_float64 + [dtypes.float32]
       else:
         # It is important that float32 comes before float16 here,
         # as we will be using its gradients as reference for fp16 gradients.
-        return [dtypes.float64, dtypes.float32, dtypes.float16]
+        return optional_float64 + [dtypes.float32, dtypes.float16]
     else:
       return [dtypes.float64, dtypes.float32, dtypes.float16]
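The Conv2D/Conv3D tests fold the same list into the _DtypesToTest helper shown above. A standalone sketch of how the merged helper behaves; dtypes_to_test and the gpu_supports_half parameter are illustrative stand-ins for the internal _DtypesToTest and test_util.GpuSupportsHalfMatMulAndConv():

import tensorflow as tf

def dtypes_to_test(use_gpu, gpu_supports_half=True):
  # double datatype is currently not supported for convolution ops on ROCm
  optional_float64 = [] if tf.test.is_built_with_rocm() else [tf.float64]
  if use_gpu:
    if not gpu_supports_half:
      return optional_float64 + [tf.float32]
    # float32 must precede float16: its gradients are used as the
    # reference when checking the fp16 gradients.
    return optional_float64 + [tf.float32, tf.float16]
  # The CPU path is unchanged and still covers double precision.
  return [tf.float64, tf.float32, tf.float16]

print([d.name for d in dtypes_to_test(use_gpu=True)])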

View File

@@ -163,12 +163,15 @@ def GetTestConfigs():
 class Conv2DTest(test.TestCase):

   def _DtypesToTest(self, use_gpu):
+    # double datatype is currently not supported for convolution ops
+    # on the ROCm platform
+    optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
     if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():
-      return [dtypes.float32, dtypes.float64]
+      return [dtypes.float32] + optional_float64
     else:
       # It is important that float32 comes before float16 here,
       # as we will be using its gradients as reference for fp16 gradients.
-      return [dtypes.float32, dtypes.float16, dtypes.float64]
+      return [dtypes.float32, dtypes.float16] + optional_float64

   def _CreateNumpyTensor(self, shape):
     total_size = 1

View File

@@ -193,7 +193,10 @@ class DepthwiseConv2DTest(test.TestCase):
       tf_logging.info(
           "Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
           "%s", index, input_size, filter_size, stride, padding)
-      for data_type in [dtypes.float32, dtypes.float64]:
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
+      for data_type in ([dtypes.float32] + optional_float64):
         tf_logging.info("Testing without grouped_conv")
         self._VerifyValues(
             input_size, filter_size, stride, padding, data_type, use_gpu=True)
@@ -231,7 +234,10 @@ class DepthwiseConv2DTest(test.TestCase):
       tf_logging.info(
           "Testing DepthwiseConv2DFormat, %dth config: %r * %r, stride: %d, "
           "padding: %s", index, input_size, filter_size, stride, padding)
-      for data_type in [dtypes.float32, dtypes.float64]:
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
+      for data_type in ([dtypes.float32] + optional_float64):
         self._VerifyValues(
             input_size,
             filter_size,
@@ -439,7 +445,10 @@ class DepthwiseConv2DTest(test.TestCase):
       tf_logging.info(
           "Testing DepthwiseConv2DInputGrad, %dth config: %r * %r, stride: %d, "
           "padding: %s", index, input_size, filter_size, stride, padding)
-      for data_type in [dtypes.float32, dtypes.float64]:
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
+      for data_type in ([dtypes.float32] + optional_float64):
         self._ConstructAndTestGradient(
             input_size,
             filter_size,
@@ -471,7 +480,10 @@ class DepthwiseConv2DTest(test.TestCase):
           "Testing DepthwiseConv2DInputGradFormat, %dth config: %r * %r, "
           "stride: %d, padding: %s", index, input_size, filter_size, stride,
           padding)
-      for data_type in [dtypes.float32, dtypes.float64]:
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
+      for data_type in ([dtypes.float32] + optional_float64):
         self._ConstructAndTestGradient(
             input_size,
             filter_size,
@@ -490,7 +502,10 @@ class DepthwiseConv2DTest(test.TestCase):
       tf_logging.info(
           "Testing DepthwiseConv2DFilterGrad, %dth config: %r * %r, stride: "
           "%d, padding: %s", index, input_size, filter_size, stride, padding)
-      for data_type in [dtypes.float32, dtypes.float64]:
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
+      for data_type in ([dtypes.float32] + optional_float64):
         self._ConstructAndTestGradient(
             input_size,
             filter_size,
@@ -512,7 +527,10 @@ class DepthwiseConv2DTest(test.TestCase):
           "Testing DepthwiseConv2DFilterGradFormat, %dth config: %r * %r, "
           "stride: %d, padding: %s", index, input_size, filter_size, stride,
           padding)
-      for data_type in [dtypes.float32, dtypes.float64]:
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
+      for data_type in ([dtypes.float32] + optional_float64):
         self._ConstructAndTestGradient(
             input_size,
             filter_size,
@@ -573,6 +591,10 @@ class DepthwiseConv2DTest(test.TestCase):
                       padding)
       self._CompareBackpropInputFloat(input_size, filter_size, output_size,
                                       stride, padding)
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      if test.is_built_with_rocm():
+        continue
       self._CompareBackpropInputDouble(input_size, filter_size, output_size,
                                        stride, padding)
@@ -625,6 +647,10 @@ class DepthwiseConv2DTest(test.TestCase):
                       padding)
       self._CompareBackpropFilterFloat(input_size, filter_size, output_size,
                                        stride, padding)
+      # double datatype is currently not supported for convolution ops
+      # on the ROCm platform
+      if test.is_built_with_rocm():
+        continue
       self._CompareBackpropFilterDouble(input_size, filter_size, output_size,
                                         stride, padding)
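Where the float64 case is a dedicated comparison call rather than a loop dtype, the two hunks above bail out of the iteration with an early continue. A reduced sketch of that control flow; compare_backprop is a hypothetical stand-in for the test's _CompareBackprop*Float/Double helpers:

import tensorflow as tf

def compare_backprop(input_size, filter_size, dtype):
  # Hypothetical stand-in: run a depthwise conv and take its input gradient.
  x = tf.ones(input_size, dtype=dtype)
  f = tf.ones(filter_size, dtype=dtype)
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.nn.depthwise_conv2d(x, f, strides=[1, 1, 1, 1], padding="SAME")
  return tape.gradient(y, x)

for input_size, filter_size in [([1, 4, 4, 2], [3, 3, 2, 1])]:
  compare_backprop(input_size, filter_size, tf.float32)
  # double datatype is not supported for convolution ops on ROCm,
  # so skip the float64 leg of the comparison there.
  if tf.test.is_built_with_rocm():
    continue
  compare_backprop(input_size, filter_size, tf.float64)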

View File

@@ -206,8 +206,10 @@ class PoolingTest(test.TestCase):
     self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                         data_format, dtypes.float32, expected, use_gpu, v2)
-    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
-                        data_format, dtypes.float64, expected, use_gpu, v2)
+    if not test.is_built_with_rocm():
+      # double datatype is not supported for pooling ops on the ROCm platform
+      self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
+                          data_format, dtypes.float64, expected, use_gpu, v2)

     if not use_gpu or test_util.GpuSupportsHalfMatMulAndConv():
       self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
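The pooling change uses the guard form of the same idea: wrap the single float64 verification in an if instead of skipping a loop iteration. A minimal sketch, with verify_one_type standing in for _VerifyOneType and tf.nn.avg_pool2d for the test's parameterized pool_func:

import tensorflow as tf

def verify_one_type(dtype):
  # Hypothetical stand-in for _VerifyOneType above.
  x = tf.ones([1, 4, 4, 1], dtype=dtype)
  return tf.nn.avg_pool2d(x, ksize=2, strides=2, padding="VALID")

verify_one_type(tf.float32)
# double datatype is not supported for pooling ops on the ROCm platform
if not tf.test.is_built_with_rocm():
  verify_one_type(tf.float64)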

View File

@@ -1264,7 +1264,10 @@ class AvgPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

   def test1DNumpy(self):
-    x = np.ones([3, 6, 5])
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
+    # np.ones defaults to using float64 when dtype is not explicitly specified
+    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
+    x = np.ones([3, 6, 5], dtype=dtype)
     ksize = 2
     strides = 2
@@ -1284,7 +1287,10 @@ class AvgPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

   def test2DNumpy(self):
-    x = np.ones([3, 6, 6, 5])
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
+    # np.ones defaults to using float64 when dtype is not explicitly specified
+    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
+    x = np.ones([3, 6, 6, 5], dtype=dtype)
     ksize = 2
     strides = 2
@@ -1332,7 +1338,10 @@ class MaxPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

   def test1DNumpy(self):
-    x = np.ones([3, 6, 5])
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
+    # np.ones defaults to using float64 when dtype is not explicitly specified
+    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
+    x = np.ones([3, 6, 5], dtype=dtype)
     ksize = 2
     strides = 2
@@ -1352,7 +1361,10 @@ class MaxPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))

   def test2DNumpy(self):
-    x = np.ones([3, 6, 6, 5])
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
+    # np.ones defaults to using float64 when dtype is not explicitly specified
+    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
+    x = np.ones([3, 6, 6, 5], dtype=dtype)
     ksize = 2
     strides = 2
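These nn_test hunks pin the dtype at tensor-creation time instead, since np.ones defaults to float64. A sketch of that pattern on the 1-D average-pooling path these tests exercise, using the public tf.nn.avg_pool1d in place of the tests' internal wrappers:

import numpy as np
import tensorflow as tf

# np.ones defaults to float64; pick float32 on ROCm, where MIOpen has no
# double-precision pooling kernels.
dtype = np.float32 if tf.test.is_built_with_rocm() else np.float64
x = np.ones([3, 6, 5], dtype=dtype)

y = tf.nn.avg_pool1d(x, ksize=2, strides=2, padding="SAME")
print(y.shape, y.dtype)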
@@ -1402,8 +1414,11 @@ class MaxPoolTest(test_lib.TestCase):
 class ConvolutionTest(test_lib.TestCase):

   def testUnknownSize(self):
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
+    # np.ones defaults to using float64 when dtype is not explicitly specified
+    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
     x = tensor_spec.TensorSpec(None, dtypes.float32, name="x")
-    k = np.ones([3, 6, 6, 5])
+    k = np.ones([3, 6, 6, 5], dtype=dtype)

     @def_function.function
     def F(value):