Update batch_scatter to v2.

PiperOrigin-RevId: 224073996
Alexandre Passos 2018-12-04 17:17:32 -08:00 committed by TensorFlower Gardener
parent 8ed9863adb
commit a74f0484b5
7 changed files with 137 additions and 28 deletions

View File

@@ -22,6 +22,7 @@ import numpy as np
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
 from tensorflow.python.ops import state_ops
 from tensorflow.python.ops import variables
@@ -50,7 +51,8 @@ class ScatterTest(test.TestCase):
                         vtype,
                         itype,
                         repeat_indices=False,
-                        updates_are_scalar=False):
+                        updates_are_scalar=False,
+                        method=False):
     np.random.seed(8)
     with self.cached_session(use_gpu=False):
       for indices_shape in (2,), (3, 7), (3, 4, 7):
@@ -71,7 +73,10 @@ class ScatterTest(test.TestCase):
         # Scatter via tensorflow
         ref = variables.Variable(old)
         ref.initializer.run()
-        tf_scatter(ref, indices, updates).eval()
+        if method:
+          ref.batch_scatter_update(ops.IndexedSlices(updates, indices)).eval()
+        else:
+          tf_scatter(ref, indices, updates).eval()
         self.assertAllClose(ref.eval(), new)

+  @test_util.run_deprecated_v1

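For context: when `method=True`, the helper bypasses the free function passed as `tf_scatter` and exercises the new `Variable.batch_scatter_update` method instead. A minimal standalone sketch of the equivalence being checked (TF 1.x graph mode; the variable name and values here are illustrative, not taken from the test):

```python
import numpy as np
import tensorflow as tf

# With 1-D indices, batch_scatter_update degenerates to an ordinary
# scatter_update; this is the equivalence the method=True branch checks.
old = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
indices = np.array([0, 2], dtype=np.int32)
updates = np.array([10.0, 30.0], dtype=np.float32)

ref = tf.Variable(old)
# Note that IndexedSlices takes (values, indices), in that order.
delta = tf.IndexedSlices(tf.constant(updates), tf.constant(indices))
with tf.Session() as sess:
  sess.run(ref.initializer)
  sess.run(ref.batch_scatter_update(delta))

  expected = old.copy()
  expected[indices] = updates  # NumPy reference implementation
  np.testing.assert_allclose(sess.run(ref), expected)
```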
View File

@@ -32,6 +32,7 @@ from tensorflow.python.ops import gen_state_ops
 # pylint: disable=wildcard-import
 from tensorflow.python.ops.gen_state_ops import *
 # pylint: enable=wildcard-import
+from tensorflow.python.util import deprecation
 from tensorflow.python.util.tf_export import tf_export
@@ -595,7 +596,9 @@ def scatter_nd_sub(ref, indices, updates, use_locking=False, name=None):
           name=name))


-@tf_export("batch_scatter_update")
+@tf_export(v1=["batch_scatter_update"])
+@deprecation.deprecated(
+    "2018-11-29", "Use the batch_scatter_update method of Variable instead.")
 def batch_scatter_update(ref, indices, updates, use_locking=True, name=None):
   """Generalization of `tf.scatter_update` to an axis other than 0.

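The deprecation above steers users from the free function to the new `Variable` method. A migration sketch (shapes and values illustrative; note that `tf.IndexedSlices` takes `(values, indices)`, so the argument order differs from the free function's `(ref, indices, updates)`):

```python
import tensorflow as tf

v = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
indices = tf.constant([[1], [0]])
updates = tf.constant([[20.0], [30.0]])

# Deprecated as of this change (still reachable as
# tf.compat.v1.batch_scatter_update):
old_style = tf.batch_scatter_update(v, indices, updates)

# Replacement: the method form, taking a single IndexedSlices.
new_style = v.batch_scatter_update(tf.IndexedSlices(updates, indices))
```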
View File

@@ -637,37 +637,84 @@ class Variable(six.with_metaclass(VariableMetaclass,
"""
raise NotImplementedError
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `IndexedSlices` to this variable batch-wise.
Analogous to `batch_gather`. This assumes that this variable and the
sparse_delta IndexedSlices have a series of leading dimensions that are the
same for all of them, and the updates are performed on the last dimension of
indices. In other words, the dimensions should be the following:
`num_prefix_dims = sparse_delta.indices.ndims - 1`
`batch_dim = num_prefix_dims + 1`
`sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
batch_dim:]`
where
`sparse_delta.updates.shape[:num_prefix_dims]`
`== sparse_delta.indices.shape[:num_prefix_dims]`
`== var.shape[:num_prefix_dims]`
And the operation performed can be expressed as:
`var[i_1, ..., i_n,
sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
i_1, ..., i_n, j]`
When sparse_delta.indices is a 1D tensor, this operation is equivalent to
`scatter_update`.
To avoid this operation one can looping over the first `ndims` of the
variable and using `scatter_update` on the subtensors that result of slicing
the first dimension. This is a valid option for `ndims = 1`, but less
efficient than this implementation.
Args:
sparse_delta: `IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
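The shape bookkeeping above is easier to see concretely. A worked NumPy sketch of the documented update rule `var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]` for a single prefix dimension (this mimics the documented semantics only; it is not the TensorFlow implementation, and the values are illustrative):

```python
import numpy as np

# var has one prefix (batch) dimension of size 2; indices address the
# last dimension of var, row by row.
var = np.array([[1.0, 2.0, 3.0],
                [4.0, 5.0, 6.0]])
indices = np.array([[0, 2],
                    [1, 1]])
updates = np.array([[10.0, 30.0],
                    [50.0, 55.0]])

# num_prefix_dims = indices.ndim - 1 = 1, so i ranges over the batch
# dimension and j over the last dimension of indices.
for i in range(var.shape[0]):
  for j in range(indices.shape[1]):
    var[i, indices[i, j]] = updates[i, j]

print(var)
# [[10.  2. 30.]
#  [ 4. 55.  6.]]
```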
   def scatter_nd_sub(self, indices, updates, name=None):
     """Applies sparse subtraction to individual values or slices in a Variable.

-    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+    Assume the variable has rank `P` and `indices` is a `Tensor` of rank `Q`.

-    `indices` must be integer tensor, containing indices into `ref`.
+    `indices` must be an integer tensor, containing indices into self.
     It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

     The innermost dimension of `indices` (with length `K`) corresponds to
     indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
-    dimension of `ref`.
+    dimension of self.

     `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

     ```
-    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+    [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
     ```

     For example, say we want to subtract 4 scattered elements from a rank-1
     tensor with 8 elements. In Python, that update would look like this:

     ```python
-        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+        v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
         indices = tf.constant([[4], [3], [1], [7]])
         updates = tf.constant([9, 10, 11, 12])
-        op = ref.scatter_nd_sub(indices, updates)
+        op = v.scatter_nd_sub(indices, updates)
         with tf.Session() as sess:
           print(sess.run(op))
     ```

-    The resulting update to ref would look like this:
+    The resulting update to v would look like this:

         [1, -9, 3, -6, -4, 6, 7, -4]
@@ -691,34 +738,34 @@ class Variable(six.with_metaclass(VariableMetaclass,
   def scatter_nd_add(self, indices, updates, name=None):
     """Applies sparse addition to individual values or slices in a Variable.

-    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+    The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.

-    `indices` must be integer tensor, containing indices into `ref`.
+    `indices` must be an integer tensor, containing indices into self.
     It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

     The innermost dimension of `indices` (with length `K`) corresponds to
     indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
-    dimension of `ref`.
+    dimension of self.

     `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

     ```
-    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+    [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
     ```

     For example, say we want to add 4 scattered elements to a rank-1 tensor
     with 8 elements. In Python, that update would look like this:

     ```python
-        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+        v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
         indices = tf.constant([[4], [3], [1], [7]])
         updates = tf.constant([9, 10, 11, 12])
-        add = ref.scatter_nd_add(indices, updates)
+        add = v.scatter_nd_add(indices, updates)
         with tf.Session() as sess:
           print(sess.run(add))
     ```

-    The resulting update to ref would look like this:
+    The resulting update to v would look like this:

         [1, 13, 3, 14, 14, 6, 7, 20]
@@ -742,34 +789,34 @@ class Variable(six.with_metaclass(VariableMetaclass,
   def scatter_nd_update(self, indices, updates, name=None):
     """Applies sparse assignment to individual values or slices in a Variable.

-    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+    The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.

-    `indices` must be integer tensor, containing indices into `ref`.
+    `indices` must be an integer tensor, containing indices into self.
     It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

     The innermost dimension of `indices` (with length `K`) corresponds to
     indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
-    dimension of `ref`.
+    dimension of self.

     `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

     ```
-    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+    [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
     ```

     For example, say we want to assign 4 scattered elements of a rank-1 tensor
     with 8 elements. In Python, that update would look like this:

     ```python
-        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+        v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
         indices = tf.constant([[4], [3], [1], [7]])
         updates = tf.constant([9, 10, 11, 12])
-        op = ref.scatter_nd_assign(indices, updates)
+        op = v.scatter_nd_update(indices, updates)
         with tf.Session() as sess:
           print(sess.run(op))
     ```

-    The resulting update to ref would look like this:
+    The resulting update to v would look like this:

         [1, 11, 3, 10, 9, 6, 7, 12]
@@ -1842,6 +1889,55 @@ class RefVariable(VariableV1):
         use_locking=use_locking,
         name=name)

+  def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
+    """Assigns `IndexedSlices` to this variable batch-wise.
+
+    Analogous to `batch_gather`. This assumes that this variable and the
+    `sparse_delta` `IndexedSlices` have a series of leading dimensions that
+    are the same for all of them, and the updates are performed on the last
+    dimension of indices. In other words, the dimensions should be the
+    following:
+
+    `num_prefix_dims = sparse_delta.indices.ndims - 1`
+    `batch_dim = num_prefix_dims + 1`
+    `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
+         batch_dim:]`
+
+    where
+
+    `sparse_delta.updates.shape[:num_prefix_dims]`
+    `== sparse_delta.indices.shape[:num_prefix_dims]`
+    `== var.shape[:num_prefix_dims]`
+
+    And the operation performed can be expressed as:
+
+    `var[i_1, ..., i_n,
+         sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
+            i_1, ..., i_n, j]`
+
+    When `sparse_delta.indices` is a 1-D tensor, this operation is equivalent
+    to `scatter_update`.
+
+    To avoid this operation one can loop over the first `ndims` dimensions of
+    the variable and use `scatter_update` on the subtensors that result from
+    slicing the first dimension. This is a valid option for `ndims = 1`, but
+    less efficient than this implementation.
+
+    Args:
+      sparse_delta: `IndexedSlices` to be assigned to this variable.
+      use_locking: If `True`, use locking during the operation.
+      name: the name of the operation.
+
+    Returns:
+      A `Tensor` that will hold the new value of this variable after
+      the scattered assignment has completed.
+
+    Raises:
+      ValueError: if `sparse_delta` is not an `IndexedSlices`.
+    """
+    return state_ops.batch_scatter_update(
+        self, sparse_delta.indices, sparse_delta.values,
+        use_locking=use_locking, name=name)

   def scatter_nd_sub(self, indices, updates, name=None):
     """Applies sparse subtraction to individual values or slices in a Variable.

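As the body shows, the `RefVariable` override simply unpacks the `IndexedSlices` and forwards to `state_ops.batch_scatter_update`. A graph-mode usage sketch with a batched (2-D) index tensor (values illustrative):

```python
import tensorflow as tf

v = tf.Variable([[1, 2, 3], [4, 5, 6]])
indices = tf.constant([[0, 2], [1, 1]])
updates = tf.constant([[10, 30], [50, 55]])

with tf.Session() as sess:
  sess.run(v.initializer)
  # Equivalent to state_ops.batch_scatter_update(v, indices, updates).
  sess.run(v.batch_scatter_update(tf.IndexedSlices(updates, indices)))
  print(sess.run(v))  # [[10  2 30] [ 4 55  6]]
```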
View File

@@ -64,6 +64,10 @@ tf_class {
name: "assign_sub"
argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
}
member_method {
name: "batch_scatter_update"
argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
}
member_method {
name: "count_up_to"
argspec: "args=[\'self\', \'limit\'], varargs=None, keywords=None, defaults=None"

View File

@@ -63,6 +63,10 @@ tf_class {
name: "assign_sub"
argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
}
member_method {
name: "batch_scatter_update"
argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
}
member_method {
name: "count_up_to"
argspec: "args=[\'self\', \'limit\'], varargs=None, keywords=None, defaults=None"

View File

@@ -492,10 +492,6 @@ tf_module {
name: "batch_gather"
argspec: "args=[\'params\', \'indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "batch_scatter_update"
argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
member_method {
name: "batch_to_space"
argspec: "args=[\'input\', \'block_shape\', \'crops\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "

View File

@@ -90,6 +90,7 @@ renames = {
     'tf.assign': 'tf.compat.v1.assign',
     'tf.assign_add': 'tf.compat.v1.assign_add',
     'tf.assign_sub': 'tf.compat.v1.assign_sub',
+    'tf.batch_scatter_update': 'tf.compat.v1.batch_scatter_update',
     'tf.betainc': 'tf.math.betainc',
     'tf.ceil': 'tf.math.ceil',
     'tf.check_numerics': 'tf.debugging.check_numerics',
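This rename table drives the `tf_upgrade_v2` converter; its effect on user code is a purely textual rewrite (schematic, with hypothetical user code):

```python
# Before conversion (TF 1.x user code):
#   op = tf.batch_scatter_update(var, indices, updates)
# After the upgrade script applies the rename above:
#   op = tf.compat.v1.batch_scatter_update(var, indices, updates)
```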