Supports negative axes for sparse_reduce_sum().

Useful for cases where the rank of the sparse input is dynamic/unknown,
and the desired axes are static constants (e.g., -1).
Change: 123262728
This commit is contained in:
Zongheng Yang 2016-05-25 14:51:46 -08:00 committed by TensorFlower Gardener
parent 1db1272f7d
commit 46d7c44571
4 changed files with 54 additions and 23 deletions

View File

@ -74,6 +74,14 @@ class SparseReduceSumOp : public OpKernel {
std::vector<int32> axes(num_reduction_axes);
std::copy_n(reduction_axes_t->flat<int32>().data(), num_reduction_axes,
axes.begin());
for (int i = 0; i < num_reduction_axes; ++i) {
int32 axis = axes[i];
OP_REQUIRES(
ctx, axis >= -ndims && axis < ndims,
errors::InvalidArgument("Invalid reduction dimension ", axis,
", for input with ", ndims, " dimensions."));
axes[i] = (axes[i] + ndims) % ndims;
}
std::sort(axes.begin(), axes.end());
std::vector<int64> group_by_dims;

View File

@ -430,7 +430,8 @@ Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned.
with a single element is returned. Additionally, the axes can be negative,
in which case they are interpreted according to Python's indexing rules.
input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.

View File

@ -417,16 +417,27 @@ class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
class SparseReduceSumTest(test_util.TensorFlowTestCase):
def _compare(self, sp_t, reduction_axes, keep_dims):
# [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
vals = np.array([1, 1, 1]).astype(np.int32)
shape = np.array([2, 3]).astype(np.int64)
def _compare(self, sp_t, reduction_axes, ndims, keep_dims):
densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
np_ans = densified
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
if isinstance(reduction_axes, list):
reduction_axes = sorted(reduction_axes) # loop below depends on sorted
if not isinstance(reduction_axes, list): # Single scalar.
reduction_axes = [reduction_axes]
reduction_axes = np.array(reduction_axes).astype(np.int32)
# Handles negative axes.
reduction_axes = (reduction_axes + ndims) % ndims
# The loop below relies on the axes being sorted (it reduces from the
# highest axis down so earlier axis indices stay valid).
reduction_axes.sort()
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
@ -436,25 +447,21 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
self.assertAllClose(np_ans, out)
def _compare_all(self, sp_t, reduction_axes):
self._compare(sp_t, reduction_axes, False)
self._compare(sp_t, reduction_axes, True)
def _compare_all(self, sp_t, reduction_axes, ndims):
  """Runs _compare with both keep_dims=False and keep_dims=True.

  Args:
    sp_t: a SparseTensor to reduce.
    reduction_axes: None, a scalar, or a list of axes (may be negative).
    ndims: rank of `sp_t`, used by _compare to normalize negative axes.
  """
  self._compare(sp_t, reduction_axes, ndims, False)
  self._compare(sp_t, reduction_axes, ndims, True)
def testSimpleAndRandomInputs(self):
# [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
vals = np.array([1, 1, 1]).astype(np.int32)
shape = np.array([2, 3]).astype(np.int64)
sp_t = ops.SparseTensor(ind, vals, shape)
sp_t = ops.SparseTensor(self.ind, self.vals, self.shape)
with self.test_session(use_gpu=False):
self._compare_all(sp_t, None)
self._compare_all(sp_t, 0)
self._compare_all(sp_t, [1])
self._compare_all(sp_t, [0, 1])
self._compare_all(sp_t, [1, 0])
self._compare_all(sp_t, None, ndims=2)
self._compare_all(sp_t, 0, ndims=2)
self._compare_all(sp_t, [1], ndims=2)
self._compare_all(sp_t, [0, 1], ndims=2)
self._compare_all(sp_t, [1, 0], ndims=2)
self._compare_all(sp_t, [-1], ndims=2)
self._compare_all(sp_t, [1, -2], ndims=2)
np.random.seed(1618)
test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
@ -462,11 +469,19 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
for dims in test_dims:
sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
# reduce all using None
self._compare_all(sp_t, None)
self._compare_all(sp_t, None, ndims=len(dims))
# reduce random axes from 1D to N-D
for d in range(1, len(dims) + 1):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
self._compare_all(sp_t, axes)
self._compare_all(sp_t, axes, ndims=len(dims))
def testInvalidAxes(self):
  """Out-of-range reduction axes must fail with an InvalidArgument error."""
  # self.ind/self.vals/self.shape presumably describe the 2x3 sparse input
  # set up elsewhere in this class -- confirm against setUp.
  sp_t = ops.SparseTensor(self.ind, self.vals, self.shape)
  with self.test_session(use_gpu=False):
    # For a rank-2 input, valid axes lie in [-2, 2); both -3 and 2 are out
    # of range and the kernel should reject them by name.
    with self.assertRaisesOpError("Invalid reduction dimension -3"):
      sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
    with self.assertRaisesOpError("Invalid reduction dimension 2"):
      sparse_ops.sparse_reduce_sum(sp_t, 2).eval()
def testGradient(self):
np.random.seed(8161)
@ -483,6 +498,12 @@ class SparseReduceSumTest(test_util.TensorFlowTestCase):
reduced.eval().shape)
self.assertLess(err, 1e-3)
# Tests for negative axes.
reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
err = tf.test.compute_gradient_error(sp_t.values, (nnz,), reduced,
reduced.eval().shape)
self.assertLess(err, 1e-3)
class SparseMathOpsTest(test_util.TensorFlowTestCase):

View File

@ -548,7 +548,8 @@ def sparse_reduce_sum(sp_input, reduction_axes=None, keep_dims=False):
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned.
with a single element is returned. Additionally, the axes can be negative,
in which case they are interpreted according to Python's indexing rules.
For example:
@ -558,7 +559,7 @@ def sparse_reduce_sum(sp_input, reduction_axes=None, keep_dims=False):
# where ? is implicitly-zero.
tf.sparse_reduce_sum(x) ==> 3
tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse_reduce_sum(x, 1) ==> [2, 1]
tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
tf.sparse_reduce_sum(x, [0, 1]) ==> 3
```