Use subTest to improve error reporting on linear algebra ops.

e.g. cholesky_op_test.py, linalg_ops_test.py, lu_ops_test.py

PiperOrigin-RevId: 310654829
Change-Id: I82ba700cf02fb8122a43eabda530c6f60574e372
commit ebd34b3dc1
parent 8efd27dceb
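The pattern applied throughout the diff below: each loop body is wrapped in self.subTest(...), so a failure is reported together with the loop parameters and the remaining iterations still run instead of the test aborting at the first error. A minimal, self-contained sketch of that pattern (the test class, method name, and assertion here are illustrative only, not taken from the TensorFlow tests):

import unittest

import numpy as np


class SubTestPatternExample(unittest.TestCase):
  """Illustrates the subTest pattern used in the linear algebra tests."""

  def testPerDtype(self):
    data = np.array([[4., -1., 2.], [-1., 6., 0.], [2., 0., 5.]])
    for dtype in (np.float32, np.float64):
      # On failure, unittest reports the subtest parameters, e.g.
      # "(dtype=<class 'numpy.float32'>)", and then continues with the
      # next dtype rather than stopping the whole test method.
      with self.subTest(dtype=dtype):
        converted = data.astype(dtype)
        self.assertEqual(converted.dtype, dtype)


if __name__ == "__main__":
  unittest.main()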
cholesky_op_test.py

@@ -114,12 +114,14 @@ class CholeskyOpTest(test.TestCase):
   def testBasic(self):
     data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
     for dtype in (np.float32, np.float64):
-      self._verifyCholesky(data.astype(dtype))
+      with self.subTest(dtype=dtype):
+        self._verifyCholesky(data.astype(dtype))
     for dtype in (np.complex64, np.complex128):
-      complex_data = np.tril(1j * data, -1).astype(dtype)
-      complex_data += np.triu(-1j * data, 1).astype(dtype)
-      complex_data += data
-      self._verifyCholesky(complex_data)
+      with self.subTest(dtype=dtype):
+        complex_data = np.tril(1j * data, -1).astype(dtype)
+        complex_data += np.triu(-1j * data, 1).astype(dtype)
+        complex_data += data
+        self._verifyCholesky(complex_data)
 
   def testBatch(self):
     simple_array = np.array([[[1., 0.], [0., 5.]]])  # shape (1, 2, 2)
@@ -131,13 +133,15 @@ class CholeskyOpTest(test.TestCase):
     # Generate random positive-definite matrices.
     matrices = np.random.rand(10, 5, 5)
     for i in xrange(10):
-      matrices[i] = np.dot(matrices[i].T, matrices[i])
+      with self.subTest(i=i):
+        matrices[i] = np.dot(matrices[i].T, matrices[i])
     self._verifyCholesky(matrices)
 
     # Generate random complex valued positive-definite matrices.
     matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
     for i in xrange(10):
-      matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
+      with self.subTest(i=i):
+        matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
     self._verifyCholesky(matrices)
 
   @test_util.run_deprecated_v1
linalg_ops_test.py

@@ -66,10 +66,11 @@ class CholeskySolveTest(test.TestCase):
                _RandomPDMatrix(n, self.rng)]).astype(np_type)
           chol = linalg_ops.cholesky(array)
           for k in range(1, 3):
-            rhs = self.rng.randn(2, n, k).astype(np_type)
-            x = linalg_ops.cholesky_solve(chol, rhs)
-            self.assertAllClose(
-                rhs, math_ops.matmul(array, x).eval(), atol=atol)
+            with self.subTest(n=n, np_type=np_type, atol=atol, k=k):
+              rhs = self.rng.randn(2, n, k).astype(np_type)
+              x = linalg_ops.cholesky_solve(chol, rhs)
+              self.assertAllClose(
+                  rhs, math_ops.matmul(array, x).eval(), atol=atol)
 
 
 class LogdetTest(test.TestCase):
@@ -82,24 +83,26 @@ class LogdetTest(test.TestCase):
     for n in range(1, 6):
       for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
                              (np.complex64, 0.05), (np.complex128, 1e-5)]:
-        matrix = _RandomPDMatrix(n, self.rng, np_dtype)
-        _, logdet_np = np.linalg.slogdet(matrix)
-        with self.session(use_gpu=True):
-          # Create 2 x n x n matrix
-          # matrix = np.array(
-          #     [_RandomPDMatrix(n, self.rng, np_dtype),
-          #      _RandomPDMatrix(n, self.rng, np_dtype)]).astype(np_dtype)
-          logdet_tf = linalg.logdet(matrix)
-          self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
+        with self.subTest(n=n, np_dtype=np_dtype, atol=atol):
+          matrix = _RandomPDMatrix(n, self.rng, np_dtype)
+          _, logdet_np = np.linalg.slogdet(matrix)
+          with self.session(use_gpu=True):
+            # Create 2 x n x n matrix
+            # matrix = np.array(
+            #     [_RandomPDMatrix(n, self.rng, np_dtype),
+            #      _RandomPDMatrix(n, self.rng, np_dtype)]).astype(np_dtype)
+            logdet_tf = linalg.logdet(matrix)
+            self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
 
   def test_works_with_underflow_case(self):
     for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
                            (np.complex64, 0.05), (np.complex128, 1e-5)]:
-      matrix = (np.eye(20) * 1e-6).astype(np_dtype)
-      _, logdet_np = np.linalg.slogdet(matrix)
-      with self.session(use_gpu=True):
-        logdet_tf = linalg.logdet(matrix)
-        self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
+      with self.subTest(np_dtype=np_dtype, atol=atol):
+        matrix = (np.eye(20) * 1e-6).astype(np_dtype)
+        _, logdet_np = np.linalg.slogdet(matrix)
+        with self.session(use_gpu=True):
+          logdet_tf = linalg.logdet(matrix)
+          self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
 
 
 class SlogdetTest(test.TestCase):
@@ -112,7 +115,20 @@ class SlogdetTest(test.TestCase):
     for n in range(1, 6):
       for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
                              (np.complex64, 0.05), (np.complex128, 1e-5)]:
-        matrix = _RandomPDMatrix(n, self.rng, np_dtype)
+        with self.subTest(n=n, np_dtype=np_dtype, atol=atol):
+          matrix = _RandomPDMatrix(n, self.rng, np_dtype)
+          sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
+          with self.session(use_gpu=True):
+            sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
+            self.assertAllClose(
+                log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
+            self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
+
+  def test_works_with_underflow_case(self):
+    for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
+                           (np.complex64, 0.05), (np.complex128, 1e-5)]:
+      with self.subTest(np_dtype=np_dtype, atol=atol):
+        matrix = (np.eye(20) * 1e-6).astype(np_dtype)
         sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
         with self.session(use_gpu=True):
           sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
@@ -120,30 +136,20 @@ class SlogdetTest(test.TestCase):
               log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
           self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
 
-  def test_works_with_underflow_case(self):
-    for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
-                           (np.complex64, 0.05), (np.complex128, 1e-5)]:
-      matrix = (np.eye(20) * 1e-6).astype(np_dtype)
-      sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
-      with self.session(use_gpu=True):
-        sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
-        self.assertAllClose(
-            log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
-        self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
 
 
 class AdjointTest(test.TestCase):
 
   def test_compare_to_numpy(self):
     for dtype in np.float64, np.float64, np.complex64, np.complex128:
-      matrix_np = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j,
-                            6 + 6j]]).astype(dtype)
-      expected_transposed = np.conj(matrix_np.T)
-      with self.session():
-        matrix = ops.convert_to_tensor(matrix_np)
-        transposed = linalg.adjoint(matrix)
-        self.assertEqual((3, 2), transposed.get_shape())
-        self.assertAllEqual(expected_transposed, self.evaluate(transposed))
+      with self.subTest(dtype=dtype):
+        matrix_np = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j,
+                              6 + 6j]]).astype(dtype)
+        expected_transposed = np.conj(matrix_np.T)
+        with self.session():
+          matrix = ops.convert_to_tensor(matrix_np)
+          transposed = linalg.adjoint(matrix)
+          self.assertEqual((3, 2), transposed.get_shape())
+          self.assertAllEqual(expected_transposed, self.evaluate(transposed))
 
 
 class EyeTest(parameterized.TestCase, test.TestCase):
lu_ops_test.py

@@ -128,14 +128,16 @@ class LuOpTest(test.TestCase):
 
     for dtype in (np.float32, np.float64):
       for output_idx_type in (dtypes.int32, dtypes.int64):
-        self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
+        with self.subTest(dtype=dtype, output_idx_type=output_idx_type):
+          self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
 
     for dtype in (np.complex64, np.complex128):
       for output_idx_type in (dtypes.int32, dtypes.int64):
-        complex_data = np.tril(1j * data, -1).astype(dtype)
-        complex_data += np.triu(-1j * data, 1).astype(dtype)
-        complex_data += data
-        self._verifyLu(complex_data, output_idx_type=output_idx_type)
+        with self.subTest(dtype=dtype, output_idx_type=output_idx_type):
+          complex_data = np.tril(1j * data, -1).astype(dtype)
+          complex_data += np.triu(-1j * data, 1).astype(dtype)
+          complex_data += data
+          self._verifyLu(complex_data, output_idx_type=output_idx_type)
 
   def testPivoting(self):
     # This matrix triggers partial pivoting because the first diagonal entry
@@ -144,38 +146,41 @@ class LuOpTest(test.TestCase):
     self._verifyLu(data.astype(np.float32))
 
     for dtype in (np.float32, np.float64):
-      self._verifyLu(data.astype(dtype))
-      _, p = linalg_ops.lu(data)
-      p_val = self.evaluate([p])
-      # Make sure p_val is not the identity permutation.
-      self.assertNotAllClose(np.arange(3), p_val)
+      with self.subTest(dtype=dtype):
+        self._verifyLu(data.astype(dtype))
+        _, p = linalg_ops.lu(data)
+        p_val = self.evaluate([p])
+        # Make sure p_val is not the identity permutation.
+        self.assertNotAllClose(np.arange(3), p_val)
 
     for dtype in (np.complex64, np.complex128):
-      complex_data = np.tril(1j * data, -1).astype(dtype)
-      complex_data += np.triu(-1j * data, 1).astype(dtype)
-      complex_data += data
-      self._verifyLu(complex_data)
-      _, p = linalg_ops.lu(data)
-      p_val = self.evaluate([p])
-      # Make sure p_val is not the identity permutation.
-      self.assertNotAllClose(np.arange(3), p_val)
+      with self.subTest(dtype=dtype):
+        complex_data = np.tril(1j * data, -1).astype(dtype)
+        complex_data += np.triu(-1j * data, 1).astype(dtype)
+        complex_data += data
+        self._verifyLu(complex_data)
+        _, p = linalg_ops.lu(data)
+        p_val = self.evaluate([p])
+        # Make sure p_val is not the identity permutation.
+        self.assertNotAllClose(np.arange(3), p_val)
 
   def testInvalidMatrix(self):
     # LU factorization gives an error when the input is singular.
     # Note: A singular matrix may return without error but it won't be a valid
     # factorization.
     for dtype in self.float_types:
-      with self.assertRaises(errors.InvalidArgumentError):
-        self.evaluate(
-            linalg_ops.lu(
-                np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
-                         dtype=dtype)))
-      with self.assertRaises(errors.InvalidArgumentError):
-        self.evaluate(
-            linalg_ops.lu(
-                np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
-                          [[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
-                         dtype=dtype)))
+      with self.subTest(dtype=dtype):
+        with self.assertRaises(errors.InvalidArgumentError):
+          self.evaluate(
+              linalg_ops.lu(
+                  np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
+                           dtype=dtype)))
+        with self.assertRaises(errors.InvalidArgumentError):
+          self.evaluate(
+              linalg_ops.lu(
+                  np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
+                            [[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
+                           dtype=dtype)))
 
   def testBatch(self):
     simple_array = np.array([[[1., -1.], [2., 5.]]])  # shape (1, 2, 2)