Improve testing of the stack operation.

- Add axis to the V1 simple test.
- Use proper random numbers for Boolean tensors.
- Test more than 1-D tensors against NumPy.

PiperOrigin-RevId: 308368279
Change-Id: I4cec2a05e970dda15aead4ae0d00d46a981e523e
commit 4440c68d02 (parent 5712d2cac6)
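Why the Boolean fix matters: casting Gaussian samples to a Boolean dtype maps
every nonzero float to True, and np.random.randn essentially never returns
exactly 0.0, so the old test data was all-True. Thresholding at zero instead
makes each element True with probability ~0.5, which is what the new randn
helper in the diff below does. A standalone NumPy sketch of the difference
(not part of the diff; it uses the builtin bool rather than the np.bool alias
that appears in the test file):

    import numpy as np

    np.random.seed(7)
    data = np.random.randn(1000)

    # Naive cast: every nonzero sample becomes True, so the "random"
    # Boolean tensor is effectively constant.
    naive = data.astype(bool)
    print(naive.mean())  # ~1.0

    # Thresholding at zero yields a fair coin per element instead.
    fair = data < 0
    print(fair.mean())   # ~0.5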
@@ -42,18 +42,26 @@ def np_split_squeeze(array, axis):
 
 class StackOpTest(test.TestCase):
 
+  def randn(self, shape, dtype):
+    data = np.random.randn(*shape)
+    if dtype == np.bool:
+      return data < 0  # Naive casting yields True with P(1)!
+    else:
+      return data.astype(dtype)
+
   @test_util.run_deprecated_v1
   def testSimple(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+      for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
+        rank = len(shape)
+        for axis in range(-rank, rank):
           for dtype in [np.bool, np.float32, np.int32, np.int64]:
-          data = np.random.randn(*shape).astype(dtype)
-          # Convert [data[0], data[1], ...] separately to tensorflow
-          # TODO(irving): Remove list() once we handle maps correctly
-          xs = list(map(constant_op.constant, data))
+            data = self.randn(shape, dtype)
+            xs = np_split_squeeze(data, axis)
             # Stack back into a single tensorflow tensor
-          c = array_ops.stack(xs)
+            with self.subTest(shape=shape, axis=axis, dtype=dtype):
+              c = array_ops.stack(xs, axis=axis)
               self.assertAllEqual(c.eval(), data)
 
   @test_util.run_deprecated_v1
@@ -61,7 +69,8 @@ class StackOpTest(test.TestCase):
     np.random.seed(7)
     with self.session(use_gpu=False):
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
-        data = np.random.randn(*shape).astype(np.float32)
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
           xs = list(map(constant_op.constant, data))
           c = array_ops.parallel_stack(xs)
           self.assertAllEqual(c.eval(), data)
@@ -71,7 +80,8 @@ class StackOpTest(test.TestCase):
     np.random.seed(7)
     with self.session(use_gpu=True):
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
-        data = np.random.randn(*shape).astype(np.float32)
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
           xs = list(map(constant_op.constant, data))
           c = array_ops.parallel_stack(xs)
           self.assertAllEqual(c.eval(), data)
@@ -80,9 +90,16 @@ class StackOpTest(test.TestCase):
   def testConst(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+      # Verify that shape induction works with shapes produced via const stack
+      a = constant_op.constant([1, 2, 3, 4, 5, 6])
+      b = array_ops.reshape(a, array_ops.stack([2, 3]))
+      self.assertAllEqual(b.get_shape(), [2, 3])
+
+      # Check on a variety of shapes and types
+      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
         for dtype in [np.bool, np.float32, np.int16, np.int32, np.int64]:
-          data = np.random.randn(*shape).astype(dtype)
+          with self.subTest(shape=shape, dtype=dtype):
+            data = self.randn(shape, dtype)
             # Stack back into a single tensorflow tensor directly using np array
             c = array_ops.stack(data)
             # This is implemented via a Const:
@@ -96,23 +113,19 @@ class StackOpTest(test.TestCase):
       self.assertEqual(cl.op.type, "Const")
       self.assertAllEqual(cl.eval(), data)
 
-      # Verify that shape induction works with shapes produced via const stack
-      a = constant_op.constant([1, 2, 3, 4, 5, 6])
-      b = array_ops.reshape(a, array_ops.stack([2, 3]))
-      self.assertAllEqual(b.get_shape(), [2, 3])
-
   @test_util.run_deprecated_v1
   def testConstParallelCPU(self):
     np.random.seed(7)
     with self.session(use_gpu=False):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
-        data = np.random.randn(*shape).astype(np.float32)
+      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
           if len(shape) == 1:
             data_list = list(data)
             cl = array_ops.parallel_stack(data_list)
             self.assertAllEqual(cl.eval(), data)
 
-        data = np.random.randn(*shape).astype(np.float32)
+          data = self.randn(shape, np.float32)
           c = array_ops.parallel_stack(data)
           self.assertAllEqual(c.eval(), data)
 
@@ -121,22 +134,24 @@ class StackOpTest(test.TestCase):
     np.random.seed(7)
     with self.session(use_gpu=True):
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
-        data = np.random.randn(*shape).astype(np.float32)
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
           if len(shape) == 1:
             data_list = list(data)
             cl = array_ops.parallel_stack(data_list)
             self.assertAllEqual(cl.eval(), data)
 
-        data = np.random.randn(*shape).astype(np.float32)
+          data = self.randn(shape, np.float32)
           c = array_ops.parallel_stack(data)
           self.assertAllEqual(c.eval(), data)
 
   @test_util.run_deprecated_v1
   def testGradientsAxis0(self):
     np.random.seed(7)
-    for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+    for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
       data = np.random.randn(*shape)
       shapes = [shape[1:]] * shape[0]
+      with self.subTest(shape=shape):
         with self.cached_session(use_gpu=True):
           # TODO(irving): Remove list() once we handle maps correctly
           xs = list(map(constant_op.constant, data))
@@ -147,16 +162,18 @@ class StackOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testGradientsAxis1(self):
     np.random.seed(7)
-    for shape in (2, 3), (3, 2), (4, 3, 2):
+    for shape in (2, 3), (3, 2), (8, 2, 10):
       data = np.random.randn(*shape)
       shapes = [shape[1:]] * shape[0]
       out_shape = list(shape[1:])
       out_shape.insert(1, shape[0])
+      with self.subTest(shape=shape):
         with self.cached_session(use_gpu=True):
           # TODO(irving): Remove list() once we handle maps correctly
           xs = list(map(constant_op.constant, data))
           c = array_ops.stack(xs, axis=1)
-        err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape)
+          err = gradient_checker.compute_gradient_error(xs, shapes, c,
+                                                        out_shape)
           self.assertLess(err, 1e-6)
 
   @test_util.run_deprecated_v1
@@ -164,6 +181,7 @@ class StackOpTest(test.TestCase):
     # Verify that stack doesn't crash for zero size inputs
     with self.session(use_gpu=False):
       for shape in (0,), (3, 0), (0, 3):
+        with self.subTest(shape=shape):
           x = np.zeros((2,) + shape).astype(np.int32)
           p = array_ops.stack(list(x)).eval()
           self.assertAllEqual(p, x)
@@ -176,6 +194,7 @@ class StackOpTest(test.TestCase):
    # Verify that stack doesn't crash for zero size inputs
    with self.session(use_gpu=True):
      for shape in (0,), (3, 0), (0, 3):
+        with self.subTest(shape=shape):
          x = np.zeros((2,) + shape).astype(np.int32)
          p = array_ops.stack(list(x)).eval()
          self.assertAllEqual(p, x)
@@ -207,19 +226,21 @@ class StackOpTest(test.TestCase):
 
   def testAgainstNumpy(self):
     # For 1 to 5 dimensions.
-    for i in range(1, 6):
-      expected = np.random.random(np.random.permutation(i) + 1)
-
+    for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
+      rank = len(shape)
+      expected = self.randn(shape, np.float32)
+      for dtype in [np.bool, np.float32, np.int32, np.int64]:
         # For all the possible axis to split it, including negative indices.
-      for j in range(-i, i):
-        test_arrays = np_split_squeeze(expected, j)
+        for axis in range(-rank, rank):
+          test_arrays = np_split_squeeze(expected, axis)
 
           with self.cached_session(use_gpu=True):
-          actual_pack = array_ops.stack(test_arrays, axis=j)
+            with self.subTest(shape=shape, dtype=dtype, axis=axis):
+              actual_pack = array_ops.stack(test_arrays, axis=axis)
               self.assertEqual(expected.shape, actual_pack.get_shape())
               actual_pack = self.evaluate(actual_pack)
 
-          actual_stack = array_ops.stack(test_arrays, axis=j)
+              actual_stack = array_ops.stack(test_arrays, axis=axis)
               self.assertEqual(expected.shape, actual_stack.get_shape())
               actual_stack = self.evaluate(actual_stack)
 
@@ -238,9 +259,10 @@ class StackOpTest(test.TestCase):
   def testComplex(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+      for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
         for dtype in [np.complex64, np.complex128]:
-          data = np.random.randn(*shape).astype(dtype)
+          with self.subTest(shape=shape, dtype=dtype):
+            data = self.randn(shape, dtype)
             xs = list(map(constant_op.constant, data))
             c = array_ops.stack(xs)
             self.assertAllEqual(self.evaluate(c), data)
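A note on the self.subTest wrappers added throughout: without them, the first
failing (shape, axis, dtype) combination aborts the whole loop, and the failure
message does not say which parameters broke. With them, every combination still
runs and each failure is reported labeled with its parameters. A minimal
standalone sketch of the pattern (hypothetical test, not from this commit):

    import unittest


    class ParameterSweepTest(unittest.TestCase):

      def testShapes(self):
        for shape in (2,), (2, 3), (8, 2, 10):
          # Each iteration is reported separately, tagged with `shape`,
          # and a failure here does not stop the remaining iterations.
          with self.subTest(shape=shape):
            self.assertGreater(len(shape), 0)


    if __name__ == "__main__":
      unittest.main()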