Use subTest to improve error reporting on test failures in cwise_ops_test.py.

PiperOrigin-RevId: 310422205
Change-Id: I67ba3406fbf7df3ceef59ed3bd084d406cef23bd
Andrew Selle 2020-05-07 13:09:51 -07:00 committed by TensorFlower Gardener
parent a65ece1e46
commit 3d0dac26f5
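
For context: unittest's subTest context manager tags each loop iteration with its parameters, so a failing combination is reported individually (with the keyword arguments shown) and the remaining iterations still run, instead of the whole test method stopping at the first failure with no hint of which inputs were being checked. A minimal sketch of the pattern this change applies, using a hypothetical standalone test case rather than code from cwise_ops_test.py:

import unittest


class SubTestSketchTest(unittest.TestCase):
  """Hypothetical example of the subTest pattern used in this change."""

  def test_squares(self):
    # Each (value, expected) pair becomes its own reported sub-test.
    # Without subTest, the first failing pair would abort the loop and the
    # failure message would not say which pair was being checked.
    for value, expected in [(2, 4), (3, 9), (4, 16)]:
      with self.subTest(value=value, expected=expected):
        self.assertEqual(value * value, expected)


if __name__ == "__main__":
  unittest.main()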

@@ -97,6 +97,7 @@ class ComparisonOpTest(test.TestCase):
for t in dtypes:
for x in data:
for y in data:
with self.subTest(t=t, x=x, y=y):
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
@@ -104,14 +105,17 @@ class ComparisonOpTest(test.TestCase):
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(
self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
with self.subTest(t=t, x=x, y=y):
self.assertEqual(
self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
@@ -126,6 +130,7 @@ class ComparisonOpTest(test.TestCase):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
@@ -136,6 +141,7 @@ class ComparisonOpTest(test.TestCase):
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
with self.subTest(t=t):
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
@@ -178,6 +184,7 @@ class ComparisonOpTest(test.TestCase):
for (xs, ys) in shapes:
for dtype in dtypes:
with self.subTest(xs=xs, ys=ys, dtype=dtype):
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
@@ -209,6 +216,7 @@ class ComparisonOpTest(test.TestCase):
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.subTest(t=t, f=f):
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
"Incompatible shapes|Dimensions must be equal"):
@@ -241,12 +249,15 @@ class LogicalOpTest(test.TestCase):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
with self.subTest(use_gpu=use_gpu, x=x):
self._not(x, use_gpu)
for x in data:
for y in data:
with self.subTest(use_gpu=use_gpu, x=x, y=y):
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or,
use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
@@ -254,6 +265,7 @@ class LogicalOpTest(test.TestCase):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
with self.subTest(use_gpu=use_gpu):
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
@@ -277,15 +289,19 @@ class LogicalOpTest(test.TestCase):
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
with self.subTest(xs=xs, ys=ys, use_gpu=use_gpu):
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
with self.subTest(f=f):
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
@@ -389,6 +405,7 @@ class SelectOpTest(test.TestCase):
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
@@ -404,6 +421,7 @@ class SelectOpTest(test.TestCase):
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
@@ -450,6 +468,7 @@ class SelectOpTest(test.TestCase):
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
@@ -465,6 +484,7 @@ class SelectOpTest(test.TestCase):
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
@@ -478,6 +498,7 @@ class SelectOpTest(test.TestCase):
c3 = np.random.randint(0, 2, 1).astype(np.bool).reshape(1, 1, 1)
for c in [c0, c1, c2, c3]:
# where_v2 only
with self.subTest(c=c):
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
@@ -512,6 +533,7 @@ class SelectOpTest(test.TestCase):
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
@@ -536,6 +558,7 @@ class SelectOpTest(test.TestCase):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for t in [np.float32, np.float64]:
# where_v2 only
with self.subTest(t=t):
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
@@ -566,6 +589,7 @@ class SelectOpTest(test.TestCase):
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
@@ -597,6 +621,7 @@ class SelectOpTest(test.TestCase):
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
with self.subTest(c=c, a=a, b=b):
x = fn(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
@@ -677,6 +702,7 @@ class BatchSelectOpTest(test.TestCase):
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
@@ -689,6 +715,7 @@ class BatchSelectOpTest(test.TestCase):
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
@@ -712,6 +739,7 @@ class BatchSelectOpTest(test.TestCase):
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
with self.subTest(t=t):
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
@@ -735,6 +763,7 @@ class MinMaxOpTest(test.TestCase):
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.uint8, np.int16, np.int32,
np.int64]:
with self.subTest(t=t):
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
@@ -742,6 +771,7 @@ class MinMaxOpTest(test.TestCase):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
with self.subTest(t=t):
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
@@ -750,6 +780,7 @@ class MinMaxOpTest(test.TestCase):
y = np.random.rand(1).item() * 100. # should broadcast
# dropped np.float64, int64 because TF automatically converts to 32 bit
for t in [np.float32, np.int32]:
with self.subTest(t=t):
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
@@ -841,12 +872,14 @@ class MathOpsOverloadTest(test.TestCase):
]
for dtype in dtypes:
for np_func, tf_func in funcs:
with self.subTest(dtype=dtype, np_func=np_func, tf_func=tf_func):
if dtype in (dtypes_lib.complex64,
dtypes_lib.complex128) and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
for dtype in [dtypes_lib.int32, dtypes_lib.int64]:
with self.subTest(dtype=dtype):
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
@@ -865,18 +898,20 @@ class MathOpsOverloadTest(test.TestCase):
]
for dtype in dtypes:
for np_func, tf_func in funcs:
with self.subTest(dtype=dtype, np_func=np_func, tf_func=tf_func):
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
(np.logical_xor, _XOR), (np.equal, math_ops.equal),
(np.not_equal, math_ops.not_equal)]
for np_func, tf_func in logical_funcs:
with self.subTest(np_func=np_func, tf_func=tf_func):
self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False], dtypes_lib.bool,
np_func, tf_func)
self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)
@@ -924,6 +959,7 @@ class IsFiniteInfNanTest(test.TestCase):
# It is not accurate for very large arguments, so we test for
# fi.max/100 instead of fi.max here.
for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
with self.subTest(dtype=dtype, size=size, value=value):
x = np.full((size,), value, dtype=dtype)
np_y = np.sqrt(x)
np_nan = np.isnan(np_y)
@@ -978,6 +1014,7 @@ class RoundingTest(test.TestCase):
def testTypes(self):
self.skipTest("b/131162241")
for dtype in [np.float16, np.float32, np.float64]:
with self.subTest(dtype=dtype):
self._testDtype(dtype)
@@ -999,6 +1036,7 @@ class ComplexMakeRealImagTest(test.TestCase):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
for use_gpu in [False, True]:
with self.subTest(use_gpu=use_gpu):
self._compareMake(real, imag, use_gpu)
self._compareMake(real, 12.0, use_gpu)
self._compareMake(23.0, imag, use_gpu)
@@ -1006,6 +1044,7 @@ class ComplexMakeRealImagTest(test.TestCase):
def testRealImagNumericType(self):
for use_gpu in [True, False]:
for value in [1., 1j, 1. + 1j]:
with self.subTest(use_gpu=use_gpu, value=value):
np_real, np_imag = np.real(value), np.imag(value)
with test_util.device(use_gpu=use_gpu):
tf_real = math_ops.real(value)
@@ -1079,6 +1118,7 @@ class ComplexMakeRealImagTest(test.TestCase):
def testRealReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
dtypes_lib.float64):
with self.subTest(dtype=dtype):
x = array_ops.placeholder(dtype)
y = math_ops.real(x)
self.assertEqual(x, y)
@@ -1110,6 +1150,7 @@ class ComplexMakeRealImagTest(test.TestCase):
def testConjReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,
dtypes_lib.float32, dtypes_lib.float64):
with self.subTest(dtype=dtype):
x = array_ops.placeholder(dtype)
y = math_ops.conj(x)
self.assertEqual(x, y)
@@ -1146,6 +1187,7 @@ class ComplexMakeRealImagTest(test.TestCase):
epsilon = 1e-3
with self.cached_session():
for args in [(x_, 0.), (0., x_)]:
with self.subTest(args=args):
z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
jacob_t, jacob_n = gradient_checker.compute_gradient(
x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
@@ -1208,6 +1250,7 @@ class PolyvalTest(test.TestCase):
np.int32, np.float32, np.float64, np.complex64, np.complex128
]:
for degree in range(5):
with self.subTest(dtype=dtype, degree=degree):
self._runtest(dtype, degree)
def testBroadcast(self):
@@ -1216,6 +1259,7 @@ class PolyvalTest(test.TestCase):
shapes = [(1,), (2, 1), (1, 2), (2, 2)]
for x_shape in shapes:
for coeff_shape in shapes:
with self.subTest(x_shape=x_shape, coeff_shape=coeff_shape):
x = np.random.rand(*x_shape).astype(dtype)
coeffs = [
np.random.rand(*coeff_shape).astype(dtype)