Setting the tf2 namespace to prefer variable.assign and friends over tf.assign
PiperOrigin-RevId: 209627830
parent a3f3949d82
commit 138bc155d2
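The change below moves the module-level `tf.assign*` / `tf.scatter*` endpoints to the v1-only namespace and fills in the corresponding `Variable` methods. As a rough illustration of the intent (a minimal sketch, not taken from this diff; the values are made up), the v2-preferred style calls methods on the variable object itself:

```python
import tensorflow as tf

v = tf.Variable([1.0, 2.0, 3.0])

# TF1-style module-level op, kept only under the v1 namespace after this change:
# tf.assign_sub(v, [0.5, 0.5, 0.5])

# TF2-preferred style: call the method on the variable itself.
v.assign_sub([0.5, 0.5, 0.5])
v.scatter_nd_sub(indices=[[0], [2]], updates=[1.0, 1.0])
```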
@ -0,0 +1,4 @@
op {
  graph_op_name: "ScatterNdSub"
  visibility: HIDDEN
}
@ -958,6 +958,228 @@ class ResourceVariable(variables.RefVariable):
      return self._lazy_read(assign_op)
    return assign_op

  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    """Subtracts `IndexedSlices` from this variable.

    Args:
      sparse_delta: `IndexedSlices` to be subtracted from this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub(
        self.handle, sparse_delta.indices,
        ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))

  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    """Adds `IndexedSlices` to this variable.

    Args:
      sparse_delta: `IndexedSlices` to be added to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return self._lazy_read(gen_resource_variable_ops.resource_scatter_add(
        self.handle, sparse_delta.indices,
        ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))

  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `IndexedSlices` to this variable.

    Args:
      sparse_delta: `IndexedSlices` to be assigned to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return self._lazy_read(gen_resource_variable_ops.resource_scatter_update(
        self.handle, sparse_delta.indices,
        ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))

  def scatter_nd_sub(self, indices, updates, name=None):
    """Applies sparse subtraction to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to subtract 4 scattered elements from a rank-1
    tensor with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    op = ref.scatter_nd_sub(indices, updates)
    with tf.Session() as sess:
      print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, -9, 3, -6, -4, 6, 7, -4]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    return self._lazy_read(gen_state_ops.resource_scatter_nd_sub(
        self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
        name=name))

  def scatter_nd_add(self, indices, updates, name=None):
    """Applies sparse addition to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to add 4 scattered elements to a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    add = ref.scatter_nd_add(indices, updates)
    with tf.Session() as sess:
      print(sess.run(add))
    ```

    The resulting update to ref would look like this:

        [1, 13, 3, 14, 14, 6, 7, 20]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    return self._lazy_read(gen_state_ops.resource_scatter_nd_add(
        self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
        name=name))

  def scatter_nd_update(self, indices, updates, name=None):
    """Applies sparse assignment to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to update 4 scattered elements of a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    op = ref.scatter_nd_update(indices, updates)
    with tf.Session() as sess:
      print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, 11, 3, 10, 9, 6, 7, 12]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    return self._lazy_read(gen_state_ops.resource_scatter_nd_update(
        self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
        name=name))

  def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
                            end_mask, ellipsis_mask, new_axis_mask,
                            shrink_axis_mask):
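The `ResourceVariable` methods added above wrap the `resource_scatter_*` kernels behind a lazy read. A minimal usage sketch (assuming TF 2.x eager execution; the tensors are illustrative only, not from this diff):

```python
import tensorflow as tf

v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])

# Dense-index form: scatter_nd_* takes explicit indices and updates.
v.scatter_nd_sub(indices=[[4], [3], [1], [7]], updates=[9, 10, 11, 12])

# IndexedSlices form: scatter_sub/scatter_add/scatter_update take a sparse delta.
delta = tf.IndexedSlices(values=tf.constant([1, 1]), indices=tf.constant([0, 2]))
v.scatter_add(delta)

print(v.numpy())  # [ 2 -9  4 -6 -4  6  7 -4] after the two updates above
```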
@ -31,8 +31,8 @@ from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
from tensorflow.python.util.tf_export import tf_export


# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
@ -131,7 +131,7 @@ def is_variable_initialized(ref, name=None):
  return ref.is_initialized(name=name)


@tf_export("assign_sub")
@tf_export(v1=["assign_sub"])
def assign_sub(ref, value, use_locking=None, name=None):
  """Update 'ref' by subtracting 'value' from it.

@ -160,7 +160,7 @@ def assign_sub(ref, value, use_locking=None, name=None):
  return ref.assign_sub(value)


@tf_export("assign_add")
@tf_export(v1=["assign_add"])
def assign_add(ref, value, use_locking=None, name=None):
  """Update 'ref' by adding 'value' to it.

@ -189,7 +189,7 @@ def assign_add(ref, value, use_locking=None, name=None):
  return ref.assign_add(value)


@tf_export("assign")
@tf_export(v1=["assign"])
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
  """Update 'ref' by assigning 'value' to it.

@ -222,7 +222,7 @@ def assign(ref, value, validate_shape=None, use_locking=None, name=None):
  return ref.assign(value, name=name)


@tf_export("count_up_to")
@tf_export(v1=["count_up_to"])
def count_up_to(ref, limit, name=None):
  r"""Increments 'ref' until it reaches 'limit'.

@ -245,7 +245,7 @@ def count_up_to(ref, limit, name=None):
      ref.handle, limit, T=ref.dtype, name=name)


@tf_export("scatter_update")
@tf_export(v1=["scatter_update"])
def scatter_update(ref, indices, updates, use_locking=True, name=None):
  # pylint: disable=line-too-long
  r"""Applies sparse updates to a variable reference.
@ -299,7 +299,7 @@ def scatter_update(ref, indices, updates, use_locking=True, name=None):
      name=name))


@tf_export("scatter_nd_update")
@tf_export(v1=["scatter_nd_update"])
def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
  r"""Applies sparse `updates` to individual values or slices in a Variable.

@ -361,7 +361,7 @@ def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
      name=name))


@tf_export("scatter_add")
@tf_export(v1=["scatter_add"])
def scatter_add(ref, indices, updates, use_locking=False, name=None):
  # pylint: disable=line-too-long
  r"""Adds sparse updates to the variable referenced by `resource`.
@ -413,7 +413,7 @@ def scatter_add(ref, indices, updates, use_locking=False, name=None):
      name=name))


@tf_export("scatter_nd_add")
@tf_export(v1=["scatter_nd_add"])
def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
  r"""Applies sparse addition to individual values or slices in a Variable.

@ -477,7 +477,7 @@ def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
      name=name))


@tf_export("scatter_sub")
@tf_export(v1=["scatter_sub"])
def scatter_sub(ref, indices, updates, use_locking=False, name=None):
  r"""Subtracts sparse updates to a variable reference.

@ -531,6 +531,70 @@ def scatter_sub(ref, indices, updates, use_locking=False, name=None):
      name=name))


@tf_export(v1=["scatter_nd_sub"])
def scatter_nd_sub(ref, indices, updates, use_locking=False, name=None):
  r"""Applies sparse subtraction to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be an integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to subtract 4 scattered elements from a rank-1
  tensor with 8 elements. In Python, that update would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  op = tf.scatter_nd_sub(ref, indices, updates)
  with tf.Session() as sess:
    print(sess.run(op))
  ```

  The resulting update to ref would look like this:

      [1, -9, 3, -6, -4, 6, 7, -4]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
      `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to subtract from ref.
    use_locking: An optional `bool`. Defaults to `False`. If `True`, the
      subtraction will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  if ref.dtype._is_ref_dtype:
    return gen_state_ops.scatter_nd_sub(
        ref, indices, updates, use_locking, name)
  return ref._lazy_read(gen_state_ops.resource_scatter_nd_sub(  # pylint: disable=protected-access
      ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
      name=name))


@tf_export("batch_scatter_update")
def batch_scatter_update(ref, indices, updates, use_locking=True, name=None):
  """Generalization of `tf.scatter_update` to an axis other than 0.
@ -626,4 +690,3 @@ def batch_scatter_update(ref, indices, updates, use_locking=True, name=None):
    final_indices = array_ops.concat(nd_indices_list, axis=-1)
    return scatter_nd_update(
        ref, final_indices, updates, use_locking=use_locking)
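The new module-level `scatter_nd_sub` above dispatches on the variable kind: ref-dtype variables go through `gen_state_ops.scatter_nd_sub`, while resource variables go through `resource_scatter_nd_sub` wrapped in a lazy read. A rough usage sketch through the v1 endpoint (assumes a TF 2.x install driving the `tf.compat.v1` namespace; names and values here are illustrative, not from this diff):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

var = tf.get_variable("v", initializer=[1, 2, 3, 4])
# Same call works whether "v" was created as a ref or a resource variable;
# the function picks the matching kernel internally.
sub = tf.scatter_nd_sub(var, indices=[[0], [2]], updates=[10, 10])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(sub))  # [-9, 2, -7, 4]
```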
@ -30,6 +30,7 @@ from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
@ -503,15 +504,200 @@ class Variable(six.with_metaclass(VariableMetaclass,
    """
    raise NotImplementedError

  def scatter_sub(self, sparse_delta, use_locking=False):
  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    """Subtracts `IndexedSlices` from this variable.

    This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
    sparse_delta.values)`.

    Args:
      sparse_delta: `IndexedSlices` to be subtracted from this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    raise NotImplementedError

  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    """Adds `IndexedSlices` to this variable.

    Args:
      sparse_delta: `IndexedSlices` to be added to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    raise NotImplementedError

  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `IndexedSlices` to this variable.

    Args:
      sparse_delta: `IndexedSlices` to be assigned to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    raise NotImplementedError

  def scatter_nd_sub(self, indices, updates, name=None):
    """Applies sparse subtraction to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to subtract 4 scattered elements from a rank-1
    tensor with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    op = ref.scatter_nd_sub(indices, updates)
    with tf.Session() as sess:
      print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, -9, 3, -6, -4, 6, 7, -4]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    raise NotImplementedError

  def scatter_nd_add(self, indices, updates, name=None):
    """Applies sparse addition to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to add 4 scattered elements to a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    add = ref.scatter_nd_add(indices, updates)
    with tf.Session() as sess:
      print(sess.run(add))
    ```

    The resulting update to ref would look like this:

        [1, 13, 3, 14, 14, 6, 7, 20]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    raise NotImplementedError

  def scatter_nd_update(self, indices, updates, name=None):
    """Applies sparse assignment to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to update 4 scattered elements of a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    op = ref.scatter_nd_update(indices, updates)
    with tf.Session() as sess:
      print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, 11, 3, 10, 9, 6, 7, 12]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
@ -1309,15 +1495,13 @@ class RefVariable(Variable):
    """
    return state_ops.assign_sub(self._variable, delta, use_locking=use_locking)

  def scatter_sub(self, sparse_delta, use_locking=False):
  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    """Subtracts `IndexedSlices` from this variable.

    This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
    sparse_delta.values)`.

    Args:
      sparse_delta: `IndexedSlices` to be subtracted from this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
@ -1328,11 +1512,216 @@ class RefVariable(Variable):
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return state_ops.scatter_sub(
    return gen_state_ops.scatter_sub(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking)
        use_locking=use_locking,
        name=name)

  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    """Adds `IndexedSlices` to this variable.

    Args:
      sparse_delta: `IndexedSlices` to be added to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return gen_state_ops.scatter_add(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking,
        name=name)

  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `IndexedSlices` to this variable.

    Args:
      sparse_delta: `IndexedSlices` to be assigned to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return gen_state_ops.scatter_update(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking,
        name=name)

  def scatter_nd_sub(self, indices, updates, name=None):
    """Applies sparse subtraction to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to subtract 4 scattered elements from a rank-1
    tensor with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    op = ref.scatter_nd_sub(indices, updates)
    with tf.Session() as sess:
      print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, -9, 3, -6, -4, 6, 7, -4]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    return gen_state_ops.scatter_nd_sub(
        self._variable, indices, updates, use_locking=True, name=name)

  def scatter_nd_add(self, indices, updates, name=None):
    """Applies sparse addition to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to add 4 scattered elements to a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    add = ref.scatter_nd_add(indices, updates)
    with tf.Session() as sess:
      print(sess.run(add))
    ```

    The resulting update to ref would look like this:

        [1, 13, 3, 14, 14, 6, 7, 20]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    return gen_state_ops.scatter_nd_add(
        self._variable, indices, updates, use_locking=True, name=name)

  def scatter_nd_update(self, indices, updates, name=None):
    """Applies sparse assignment to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to update 4 scattered elements of a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    op = ref.scatter_nd_update(indices, updates)
    with tf.Session() as sess:
      print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, 11, 3, 10, 9, 6, 7, 12]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    return gen_state_ops.scatter_nd_update(
        self._variable, indices, updates, use_locking=True, name=name)

  def _strided_slice_assign(self,
                            begin,
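For `RefVariable`, the same method names route to the classic `gen_state_ops` kernels rather than the resource variants. A small graph-mode sketch of the `IndexedSlices`-based methods (illustrative values; assumes the `tf.compat.v1` graph-and-session workflow on a TF 2.x install):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

v = tf.Variable([10, 20, 30, 40])
delta = tf.IndexedSlices(values=tf.constant([1, 2]), indices=tf.constant([0, 3]))

# The name argument added by this change is forwarded to the scatter kernel.
updated = v.scatter_add(delta, name="sparse_add")

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(updated))  # [11, 20, 30, 42]
```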
@ -91,9 +91,29 @@ tf_class {
    name: "read_value"
    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
  }
  member_method {
    name: "scatter_add"
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_nd_add"
    argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "scatter_nd_sub"
    argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "scatter_nd_update"
    argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "scatter_sub"
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\'], varargs=None, keywords=None, defaults=[\'False\'], "
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_update"
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "set_shape"
@ -91,9 +91,29 @@ tf_class {
    name: "read_value"
    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
  }
  member_method {
    name: "scatter_add"
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_nd_add"
    argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "scatter_nd_sub"
    argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "scatter_nd_update"
    argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "scatter_sub"
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\'], varargs=None, keywords=None, defaults=[\'False\'], "
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_update"
    argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "set_shape"
@ -760,18 +760,6 @@ tf_module {
    name: "assert_variables_initialized"
    argspec: "args=[\'var_list\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "assign"
    argspec: "args=[\'ref\', \'value\', \'validate_shape\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
  }
  member_method {
    name: "assign_add"
    argspec: "args=[\'ref\', \'value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
  }
  member_method {
    name: "assign_sub"
    argspec: "args=[\'ref\', \'value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
  }
  member_method {
    name: "atan"
    argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
@ -928,10 +916,6 @@ tf_module {
    name: "count_nonzero"
    argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'dtype\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'int64\'>\", \'None\', \'None\', \'None\'], "
  }
  member_method {
    name: "count_up_to"
    argspec: "args=[\'ref\', \'limit\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "create_partitioned_variables"
    argspec: "args=[\'shape\', \'slicing\', \'initializer\', \'dtype\', \'trainable\', \'collections\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'True\', \'None\', \'None\', \'None\'], "
@ -1736,10 +1720,6 @@ tf_module {
    name: "scan"
    argspec: "args=[\'fn\', \'elems\', \'initializer\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'infer_shape\', \'reverse\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'True\', \'False\', \'None\'], "
  }
  member_method {
    name: "scatter_add"
    argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_div"
    argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
@ -1760,26 +1740,6 @@ tf_module {
    name: "scatter_nd"
    argspec: "args=[\'indices\', \'updates\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
  }
  member_method {
    name: "scatter_nd_add"
    argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_nd_sub"
    argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_nd_update"
    argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
  }
  member_method {
    name: "scatter_sub"
    argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
  }
  member_method {
    name: "scatter_update"
    argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
  }
  member_method {
    name: "segment_max"
    argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "