Improve testing of the stack operation.

-  add axis coverage to the V1 simple test.
-  use proper random numbers for Boolean dtypes (see the sketch below).
-  test tensors of more than one dimension against numpy.
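
Why the Boolean change matters: casting standard-normal samples to a Boolean
dtype marks every nonzero sample as True, so the old Boolean test data was
effectively all True. Thresholding at zero instead gives a roughly even
True/False mix, which is what the new randn() helper in the diff does. A
minimal NumPy-only sketch of the difference (variable names here are
illustrative, not from the test file):

    import numpy as np

    np.random.seed(7)
    data = np.random.randn(1000)

    # Naive cast: True for every nonzero sample, i.e. essentially always.
    naive = data.astype(bool)
    print(naive.mean())   # ~1.0

    # Thresholding at zero, as the new randn() helper does:
    proper = data < 0
    print(proper.mean())  # ~0.5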

PiperOrigin-RevId: 308368279
Change-Id: I4cec2a05e970dda15aead4ae0d00d46a981e523e
commit 4440c68d02 (parent 5712d2cac6)
Author: Andrew Selle
Committed-by: TensorFlower Gardener
Date: 2020-04-24 19:23:33 -07:00

@@ -42,146 +42,165 @@ def np_split_squeeze(array, axis):
 class StackOpTest(test.TestCase):
 
+  def randn(self, shape, dtype):
+    data = np.random.randn(*shape)
+    if dtype == np.bool:
+      return data < 0  # Naive casting yields True with P(1)!
+    else:
+      return data.astype(dtype)
+
   @test_util.run_deprecated_v1
   def testSimple(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
-        for dtype in [np.bool, np.float32, np.int32, np.int64]:
-          data = np.random.randn(*shape).astype(dtype)
-          # Convert [data[0], data[1], ...] separately to tensorflow
-          # TODO(irving): Remove list() once we handle maps correctly
-          xs = list(map(constant_op.constant, data))
-          # Stack back into a single tensorflow tensor
-          c = array_ops.stack(xs)
-          self.assertAllEqual(c.eval(), data)
+      for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
+        rank = len(shape)
+        for axis in range(-rank, rank):
+          for dtype in [np.bool, np.float32, np.int32, np.int64]:
+            data = self.randn(shape, dtype)
+            xs = np_split_squeeze(data, axis)
+            # Stack back into a single tensorflow tensor
+            with self.subTest(shape=shape, axis=axis, dtype=dtype):
+              c = array_ops.stack(xs, axis=axis)
+              self.assertAllEqual(c.eval(), data)
 
   @test_util.run_deprecated_v1
   def testSimpleParallelCPU(self):
     np.random.seed(7)
     with self.session(use_gpu=False):
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
-        data = np.random.randn(*shape).astype(np.float32)
-        xs = list(map(constant_op.constant, data))
-        c = array_ops.parallel_stack(xs)
-        self.assertAllEqual(c.eval(), data)
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
+          xs = list(map(constant_op.constant, data))
+          c = array_ops.parallel_stack(xs)
+          self.assertAllEqual(c.eval(), data)
 
   @test_util.run_deprecated_v1
   def testSimpleParallelGPU(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
-        data = np.random.randn(*shape).astype(np.float32)
-        xs = list(map(constant_op.constant, data))
-        c = array_ops.parallel_stack(xs)
-        self.assertAllEqual(c.eval(), data)
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
+          xs = list(map(constant_op.constant, data))
+          c = array_ops.parallel_stack(xs)
+          self.assertAllEqual(c.eval(), data)
 
   @test_util.run_deprecated_v1
   def testConst(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+      # Verify that shape induction works with shapes produced via const stack
+      a = constant_op.constant([1, 2, 3, 4, 5, 6])
+      b = array_ops.reshape(a, array_ops.stack([2, 3]))
+      self.assertAllEqual(b.get_shape(), [2, 3])
+
+      # Check on a variety of shapes and types
+      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
         for dtype in [np.bool, np.float32, np.int16, np.int32, np.int64]:
-          data = np.random.randn(*shape).astype(dtype)
-          # Stack back into a single tensorflow tensor directly using np array
-          c = array_ops.stack(data)
-          # This is implemented via a Const:
-          self.assertEqual(c.op.type, "Const")
-          self.assertAllEqual(c.eval(), data)
-
-          # Python lists also work for 1-D case:
-          if len(shape) == 1:
-            data_list = list(data)
-            cl = array_ops.stack(data_list)
-            self.assertEqual(cl.op.type, "Const")
-            self.assertAllEqual(cl.eval(), data)
-
-      # Verify that shape induction works with shapes produced via const stack
-      a = constant_op.constant([1, 2, 3, 4, 5, 6])
-      b = array_ops.reshape(a, array_ops.stack([2, 3]))
-      self.assertAllEqual(b.get_shape(), [2, 3])
+          with self.subTest(shape=shape, dtype=dtype):
+            data = self.randn(shape, dtype)
+            # Stack back into a single tensorflow tensor directly using np array
+            c = array_ops.stack(data)
+            # This is implemented via a Const:
+            self.assertEqual(c.op.type, "Const")
+            self.assertAllEqual(c.eval(), data)
+
+            # Python lists also work for 1-D case:
+            if len(shape) == 1:
+              data_list = list(data)
+              cl = array_ops.stack(data_list)
+              self.assertEqual(cl.op.type, "Const")
+              self.assertAllEqual(cl.eval(), data)
 
   @test_util.run_deprecated_v1
   def testConstParallelCPU(self):
     np.random.seed(7)
     with self.session(use_gpu=False):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
-        data = np.random.randn(*shape).astype(np.float32)
-        if len(shape) == 1:
-          data_list = list(data)
-          cl = array_ops.parallel_stack(data_list)
-          self.assertAllEqual(cl.eval(), data)
-
-        data = np.random.randn(*shape).astype(np.float32)
-        c = array_ops.parallel_stack(data)
-        self.assertAllEqual(c.eval(), data)
+      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
+          if len(shape) == 1:
+            data_list = list(data)
+            cl = array_ops.parallel_stack(data_list)
+            self.assertAllEqual(cl.eval(), data)
+
+          data = self.randn(shape, np.float32)
+          c = array_ops.parallel_stack(data)
+          self.assertAllEqual(c.eval(), data)
 
   @test_util.run_deprecated_v1
   def testConstParallelGPU(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
-        data = np.random.randn(*shape).astype(np.float32)
-        if len(shape) == 1:
-          data_list = list(data)
-          cl = array_ops.parallel_stack(data_list)
-          self.assertAllEqual(cl.eval(), data)
-
-        data = np.random.randn(*shape).astype(np.float32)
-        c = array_ops.parallel_stack(data)
-        self.assertAllEqual(c.eval(), data)
+        with self.subTest(shape=shape):
+          data = self.randn(shape, np.float32)
+          if len(shape) == 1:
+            data_list = list(data)
+            cl = array_ops.parallel_stack(data_list)
+            self.assertAllEqual(cl.eval(), data)
+
+          data = self.randn(shape, np.float32)
+          c = array_ops.parallel_stack(data)
+          self.assertAllEqual(c.eval(), data)
 
   @test_util.run_deprecated_v1
   def testGradientsAxis0(self):
     np.random.seed(7)
-    for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+    for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
       data = np.random.randn(*shape)
       shapes = [shape[1:]] * shape[0]
-      with self.cached_session(use_gpu=True):
-        # TODO(irving): Remove list() once we handle maps correctly
-        xs = list(map(constant_op.constant, data))
-        c = array_ops.stack(xs)
-        err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
-        self.assertLess(err, 1e-6)
+      with self.subTest(shape=shape):
+        with self.cached_session(use_gpu=True):
+          # TODO(irving): Remove list() once we handle maps correctly
+          xs = list(map(constant_op.constant, data))
+          c = array_ops.stack(xs)
+          err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
+          self.assertLess(err, 1e-6)
 
   @test_util.run_deprecated_v1
   def testGradientsAxis1(self):
     np.random.seed(7)
-    for shape in (2, 3), (3, 2), (4, 3, 2):
+    for shape in (2, 3), (3, 2), (8, 2, 10):
       data = np.random.randn(*shape)
       shapes = [shape[1:]] * shape[0]
       out_shape = list(shape[1:])
       out_shape.insert(1, shape[0])
-      with self.cached_session(use_gpu=True):
-        # TODO(irving): Remove list() once we handle maps correctly
-        xs = list(map(constant_op.constant, data))
-        c = array_ops.stack(xs, axis=1)
-        err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape)
-        self.assertLess(err, 1e-6)
+      with self.subTest(shape=shape):
+        with self.cached_session(use_gpu=True):
+          # TODO(irving): Remove list() once we handle maps correctly
+          xs = list(map(constant_op.constant, data))
+          c = array_ops.stack(xs, axis=1)
+          err = gradient_checker.compute_gradient_error(xs, shapes, c,
+                                                        out_shape)
+          self.assertLess(err, 1e-6)
 
   @test_util.run_deprecated_v1
   def testZeroSizeCPU(self):
     # Verify that stack doesn't crash for zero size inputs
     with self.session(use_gpu=False):
       for shape in (0,), (3, 0), (0, 3):
-        x = np.zeros((2,) + shape).astype(np.int32)
-        p = array_ops.stack(list(x)).eval()
-        self.assertAllEqual(p, x)
+        with self.subTest(shape=shape):
+          x = np.zeros((2,) + shape).astype(np.int32)
+          p = array_ops.stack(list(x)).eval()
+          self.assertAllEqual(p, x)
 
-        p = array_ops.parallel_stack(list(x)).eval()
-        self.assertAllEqual(p, x)
+          p = array_ops.parallel_stack(list(x)).eval()
+          self.assertAllEqual(p, x)
 
   @test_util.run_deprecated_v1
   def testZeroSizeGPU(self):
     # Verify that stack doesn't crash for zero size inputs
     with self.session(use_gpu=True):
       for shape in (0,), (3, 0), (0, 3):
-        x = np.zeros((2,) + shape).astype(np.int32)
-        p = array_ops.stack(list(x)).eval()
-        self.assertAllEqual(p, x)
+        with self.subTest(shape=shape):
+          x = np.zeros((2,) + shape).astype(np.int32)
+          p = array_ops.stack(list(x)).eval()
+          self.assertAllEqual(p, x)
 
-        p = array_ops.parallel_stack(list(x)).eval()
-        self.assertAllEqual(p, x)
+          p = array_ops.parallel_stack(list(x)).eval()
+          self.assertAllEqual(p, x)
 
   @test_util.run_deprecated_v1
   def testAxis0DefaultCPU(self):
@@ -207,23 +226,25 @@ class StackOpTest(test.TestCase):
   def testAgainstNumpy(self):
     # For 1 to 5 dimensions.
-    for i in range(1, 6):
-      expected = np.random.random(np.random.permutation(i) + 1)
-
-      # For all the possible axis to split it, including negative indices.
-      for j in range(-i, i):
-        test_arrays = np_split_squeeze(expected, j)
-
-        with self.cached_session(use_gpu=True):
-          actual_pack = array_ops.stack(test_arrays, axis=j)
-          self.assertEqual(expected.shape, actual_pack.get_shape())
-          actual_pack = self.evaluate(actual_pack)
-
-          actual_stack = array_ops.stack(test_arrays, axis=j)
-          self.assertEqual(expected.shape, actual_stack.get_shape())
-          actual_stack = self.evaluate(actual_stack)
-
-          self.assertNDArrayNear(expected, actual_stack, 1e-6)
+    for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
+      rank = len(shape)
+      expected = self.randn(shape, np.float32)
+      for dtype in [np.bool, np.float32, np.int32, np.int64]:
+        # For all the possible axis to split it, including negative indices.
+        for axis in range(-rank, rank):
+          test_arrays = np_split_squeeze(expected, axis)
+
+          with self.cached_session(use_gpu=True):
+            with self.subTest(shape=shape, dtype=dtype, axis=axis):
+              actual_pack = array_ops.stack(test_arrays, axis=axis)
+              self.assertEqual(expected.shape, actual_pack.get_shape())
+              actual_pack = self.evaluate(actual_pack)
+
+              actual_stack = array_ops.stack(test_arrays, axis=axis)
+              self.assertEqual(expected.shape, actual_stack.get_shape())
+              actual_stack = self.evaluate(actual_stack)
+
+              self.assertNDArrayNear(expected, actual_stack, 1e-6)
 
   def testDimOutOfRange(self):
     t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
@@ -238,12 +259,13 @@ class StackOpTest(test.TestCase):
   def testComplex(self):
     np.random.seed(7)
     with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+      for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
         for dtype in [np.complex64, np.complex128]:
-          data = np.random.randn(*shape).astype(dtype)
-          xs = list(map(constant_op.constant, data))
-          c = array_ops.stack(xs)
-          self.assertAllEqual(self.evaluate(c), data)
+          with self.subTest(shape=shape, dtype=dtype):
+            data = self.randn(shape, dtype)
+            xs = list(map(constant_op.constant, data))
+            c = array_ops.stack(xs)
+            self.assertAllEqual(self.evaluate(c), data)
 
 
 class AutomaticStackingTest(test.TestCase):