Added cross-links between elementwise and reduction ops for math_ops, sparse_ops, and string_ops. This required updating the docstrings of the affected ops and moving the documentation of an op or two from the api_def protobuf files to the Python files.

PiperOrigin-RevId: 344020924
Change-Id: I65e62370686b420e76cbb257ae2d1cb5bea2ddbe
A. Unique TensorFlower 2020-11-24 02:55:55 -08:00 committed by TensorFlower Gardener
parent 0f61f051d3
commit d2b2f9728b
7 changed files with 174 additions and 430 deletions
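
For orientation (not part of the diff), here is a minimal sketch of the elementwise/reduction pairing that the docstrings below now cross-reference, assuming TF 2.x eager execution: folding a tensor along an axis with an elementwise op gives the same result as the corresponding `reduce_*` op.

```python
import functools
import tensorflow as tf

x = tf.constant([[1, 1, 1],
                 [1, 1, 1]])

# Folding the rows with the elementwise op...
functools.reduce(tf.math.add, tf.unstack(x, axis=0))  # [2, 2, 2]
# ...matches the dedicated reduction op.
tf.math.reduce_sum(x, axis=0)                          # [2, 2, 2]

# The same pairing holds for the other ops touched in this change, e.g.
# tf.math.multiply    <-> tf.math.reduce_prod
# tf.math.maximum     <-> tf.math.reduce_max
# tf.math.logical_and <-> tf.math.reduce_all
```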


@@ -6,22 +6,4 @@ op {
endpoint {
name: "add"
}
description: <<END
Given two input tensors, the `tf.math.add` operation computes the element-wise sum of the two tensors.
Both input and output have a range `(-inf, inf)`.
Example:
>>> x = tf.constant([0., 0., 0., 0.])
>>> y = tf.constant([-2., 0., 2., 5.])
>>> tf.math.add(x, y)
<tf.Tensor: shape=(4,), dtype=float32, numpy=array([-2., 0., 2., 5.], dtype=float32)>
The reduction version of this elementwise operation is `tf.math.reduce_sum`.
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
END
}
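
As a hedged illustration of the broadcasting note above (not part of the diff; TF 2.x eager assumed), `tf.math.add` broadcasts its arguments while `tf.math.add_n` requires identical shapes:

```python
import tensorflow as tf

x = tf.constant([[1., 2.],
                 [3., 4.]])    # shape (2, 2)
y = tf.constant([10., 20.])    # shape (2,), broadcast across the rows of x

tf.math.add(x, y)              # [[11., 22.], [13., 24.]]
tf.math.add_n([x, x])          # fine: every input has the same shape
# tf.math.add_n([x, y])        # raises an error: AddN does not broadcast
```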


@@ -1,4 +1,9 @@
op {
graph_op_name: "LogicalOr"
visibility: HIDDEN
endpoint {
name: "math.logical_or"
}
endpoint {
name: "logical_or"
}
}


@@ -14,7 +14,5 @@ Example:
>>> tf.math.maximum(x, y)
<tf.Tensor: shape=(4,), dtype=float32, numpy=array([0., 0., 2., 5.], dtype=float32)>
The reduction version of this elementwise operation is `tf.math.reduce_max`.
END
}


@@ -31,7 +31,5 @@ If inputs are not tensors, they will be converted to tensors. See
>>> tf.math.minimum([-5], x)
<tf.Tensor: shape=(1,), dtype=float32, numpy=array([-5.], dtype=float32)>
The reduction version of this elementwise operation is `tf.math.reduce_min`.
END
}


@@ -500,8 +500,6 @@ def multiply(x, y, name=None):
array([[1., 1.],
[1., 1.]], dtype=float32)>
The reduction version of this elementwise operation is `tf.math.reduce_prod`.
Args:
x: A Tensor. Must be one of the following types: `bfloat16`,
`half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
@@ -1582,32 +1580,22 @@ def logical_and(x, y, name=None):
- Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
the result will be the element-wise logical AND of the two input tensors.
You can also use the `&` operator instead.
Usage:
>>> a = tf.constant([True])
>>> b = tf.constant([False])
>>> tf.math.logical_and(a, b)
<tf.Tensor: shape=(1,), dtype=bool, numpy=array([False])>
>>> a & b
<tf.Tensor: shape=(1,), dtype=bool, numpy=array([False])>
>>> c = tf.constant([True])
>>> x = tf.constant([False, True, True, False])
>>> tf.math.logical_and(c, x)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>
>>> c & x
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>
>>> y = tf.constant([False, False, True, True])
>>> z = tf.constant([False, True, False, True])
>>> tf.math.logical_and(y, z)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, False, False, True])>
>>> y & z
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, False, False, True])>
The reduction version of this elementwise operation is `tf.math.reduce_all`.
Args:
x: A `tf.Tensor` of type bool.
@@ -1620,58 +1608,6 @@ def logical_and(x, y, name=None):
return gen_math_ops.logical_and(x, y, name)
@tf_export("math.logical_or", "logical_or")
@dispatch.add_dispatch_support
def logical_or(x, y, name=None):
"""Logical OR function.
The operation works for the following input types:
- Two single elements of type `bool`
- One `tf.Tensor` of type `bool` and one single `bool`, where the result will
be calculated by applying logical OR with the single element to each
element in the larger Tensor.
- Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
the result will be the element-wise logical OR of the two input tensors.
You can also use the `|` operator instead.
Usage:
>>> a = tf.constant([True])
>>> b = tf.constant([False])
>>> tf.math.logical_or(a, b)
<tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>
>>> a | b
<tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>
>>> c = tf.constant([False])
>>> x = tf.constant([False, True, True, False])
>>> tf.math.logical_or(c, x)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>
>>> c | x
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>
>>> y = tf.constant([False, False, True, True])
>>> z = tf.constant([False, True, False, True])
>>> tf.math.logical_or(y, z)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, True])>
>>> y | z
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, True])>
The reduction version of this elementwise operation is `tf.math.reduce_any`.
Args:
x: A `tf.Tensor` of type bool.
y: A `tf.Tensor` of type bool.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of type bool with the same size as that of x or y.
"""
return gen_math_ops.logical_or(x, y, name)
def and_(x, y, name=None):
if x.dtype == dtypes.bool:
return gen_math_ops.logical_and(x, y, name)
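
The truncated helper above dispatches on dtype. A rough public-API sketch of the same idea (the bitwise branch is an assumption about how the `&` overload behaves for non-bool integer tensors):

```python
import tensorflow as tf

a = tf.constant([True, False])
b = tf.constant([True, True])
tf.math.logical_and(a, b)      # [True, False], the bool branch that `a & b` uses

i = tf.constant([12])          # 0b1100
j = tf.constant([10])          # 0b1010
tf.bitwise.bitwise_and(i, j)   # [8], the presumed non-bool branch
```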
@@ -1998,8 +1934,6 @@ def reduce_sum_v1(input_tensor,
keep_dims=None):
"""Computes the sum of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.add` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
@@ -2010,34 +1944,14 @@ def reduce_sum_v1(input_tensor,
For example:
>>> # x has a shape of (2, 3) (two rows and three columns):
>>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
>>> x.numpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
>>> # sum all the elements
>>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> tf.reduce_sum(x).numpy()
6
>>> # reduce along the first dimension
>>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> tf.reduce_sum(x, 0).numpy()
array([2, 2, 2], dtype=int32)
>>> # reduce along the second dimension
>>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
>>> tf.reduce_sum(x, 1).numpy()
array([3, 3], dtype=int32)
>>> # keep the original dimensions
>>> tf.reduce_sum(x, 1, keepdims=True).numpy()
array([[3],
[3]], dtype=int32)
>>> # reduce along both dimensions
>>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> # or, equivalently, reduce along rows, then reduce the resultant array
>>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> # 2 + 2 + 2 = 6
>>> tf.reduce_sum(x, [0, 1]).numpy()
6
```python
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x) # 6
tf.reduce_sum(x, 0) # [2, 2, 2]
tf.reduce_sum(x, 1) # [3, 3]
tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]
tf.reduce_sum(x, [0, 1]) # 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
@@ -2070,8 +1984,6 @@ def reduce_sum_v1(input_tensor,
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the sum of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.add` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
@@ -2082,34 +1994,35 @@ def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
For example:
>>> # x has a shape of (2, 3) (two rows and three columns):
>>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
>>> x.numpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
>>> # sum all the elements
>>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> tf.reduce_sum(x).numpy()
6
>>> # reduce along the first dimension
>>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> tf.reduce_sum(x, 0).numpy()
array([2, 2, 2], dtype=int32)
>>> # reduce along the second dimension
>>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
>>> tf.reduce_sum(x, 1).numpy()
array([3, 3], dtype=int32)
>>> # keep the original dimensions
>>> tf.reduce_sum(x, 1, keepdims=True).numpy()
array([[3],
[3]], dtype=int32)
>>> # reduce along both dimensions
>>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> # or, equivalently, reduce along rows, then reduce the resultant array
>>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> # 2 + 2 + 2 = 6
>>> tf.reduce_sum(x, [0, 1]).numpy()
6
Args:
input_tensor: The tensor to reduce. Should have numeric type.
@@ -2574,9 +2487,7 @@ def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.multiply` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.multiply` op.
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2586,17 +2497,6 @@ def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_prod(x)
<tf.Tensor: shape=(), dtype=float32, numpy=24.>
>>> tf.math.reduce_prod(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
>>> tf.math.reduce_prod(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
@@ -2631,9 +2531,7 @@ def reduce_prod_v1(input_tensor,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.multiply` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.multiply` op.
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2643,17 +2541,6 @@ def reduce_prod_v1(input_tensor,
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_prod(x)
<tf.Tensor: shape=(), dtype=float32, numpy=24.>
>>> tf.math.reduce_prod(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
>>> tf.math.reduce_prod(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
@@ -2690,9 +2577,7 @@ def reduce_min_v1(input_tensor,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the `tf.math.minimum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.minimum` op.
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2702,26 +2587,6 @@ def reduce_min_v1(input_tensor,
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=1>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-5>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=-inf>
See the numpy docs for `np.amin` and `np.nanmin` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
@@ -2734,6 +2599,10 @@ def reduce_min_v1(input_tensor,
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
@@ -2746,9 +2615,7 @@ def reduce_min_v1(input_tensor,
@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the `tf.math.minimum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.minimum` op.
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2758,26 +2625,6 @@ def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=1>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-5>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=-inf>
See the numpy docs for `np.amin` and `np.nanmin` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
@@ -2788,6 +2635,15 @@ def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
Returns:
The reduced tensor.
For example:
>>> a = tf.constant([[1, 2], [3, 4]])
>>> tf.reduce_min(a)
<tf.Tensor: shape=(), dtype=int32, numpy=1>
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
@@ -2808,9 +2664,7 @@ def reduce_max_v1(input_tensor,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.maximum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.maximum` op.
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2820,26 +2674,6 @@ def reduce_max_v1(input_tensor,
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=5>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-1>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=inf>
See the numpy docs for `np.amax` and `np.nanmax` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
@@ -2852,6 +2686,10 @@ def reduce_max_v1(input_tensor,
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.max
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
@@ -2864,9 +2702,7 @@ def reduce_max_v1(input_tensor,
@tf_export("math.reduce_max", "reduce_max", v1=[])
@dispatch.add_dispatch_support
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.maximum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.maximum` op.
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2878,21 +2714,21 @@ def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=5>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-1>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=inf>
>>> x = tf.constant([5, 1, 2, 4])
>>> print(tf.reduce_max(x))
tf.Tensor(5, shape=(), dtype=int32)
>>> x = tf.constant([-5, -1, -2, -4])
>>> print(tf.reduce_max(x))
tf.Tensor(-1, shape=(), dtype=int32)
>>> x = tf.constant([4, float('nan')])
>>> print(tf.reduce_max(x))
tf.Tensor(nan, shape=(), dtype=float32)
>>> x = tf.constant([float('nan'), float('nan')])
>>> print(tf.reduce_max(x))
tf.Tensor(nan, shape=(), dtype=float32)
>>> x = tf.constant([float('-inf'), float('inf')])
>>> print(tf.reduce_max(x))
tf.Tensor(inf, shape=(), dtype=float32)
See the numpy docs for `np.amax` and `np.nanmax` behavior.
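
Since `tf.reduce_max` and `tf.reduce_min` propagate NaNs like `np.amax`/`np.amin`, here is a hedged sketch (not from the diff) of getting `np.nanmax`-style behavior by masking NaNs first:

```python
import tensorflow as tf

x = tf.constant([4.0, float('nan')])
tf.reduce_max(x)                                       # nan, like np.amax
tf.reduce_max(tf.boolean_mask(x, ~tf.math.is_nan(x)))  # 4.0, like np.nanmax
```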
@@ -2933,9 +2769,7 @@ def reduce_all_v1(input_tensor,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.logical_and` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_and` op.
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2947,13 +2781,12 @@ def reduce_all_v1(input_tensor,
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.math.reduce_all(x)
<tf.Tensor: shape=(), dtype=bool, numpy=False>
>>> tf.math.reduce_all(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
>>> tf.math.reduce_all(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_all(x) # False
tf.reduce_all(x, 0) # [False, False]
tf.reduce_all(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
@@ -2983,9 +2816,7 @@ def reduce_all_v1(input_tensor,
@tf_export("math.reduce_all", "reduce_all", v1=[])
@dispatch.add_dispatch_support
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.logical_and` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_and` op.
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -2997,13 +2828,12 @@ def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.math.reduce_all(x)
<tf.Tensor: shape=(), dtype=bool, numpy=False>
>>> tf.math.reduce_all(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
>>> tf.math.reduce_all(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_all(x) # False
tf.reduce_all(x, 0) # [False, False]
tf.reduce_all(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
@@ -3039,9 +2869,7 @@ def reduce_any_v1(input_tensor,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.logical_or` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_or` op.
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -3053,13 +2881,12 @@ def reduce_any_v1(input_tensor,
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.reduce_any(x)
<tf.Tensor: shape=(), dtype=bool, numpy=True>
>>> tf.reduce_any(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
>>> tf.reduce_any(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x) # True
tf.reduce_any(x, 0) # [True, True]
tf.reduce_any(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
@@ -3089,9 +2916,7 @@ def reduce_any_v1(input_tensor,
@tf_export("math.reduce_any", "reduce_any", v1=[])
@dispatch.add_dispatch_support
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.logical_or` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_or` op.
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
@@ -3103,13 +2928,12 @@ def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.reduce_any(x)
<tf.Tensor: shape=(), dtype=bool, numpy=True>
>>> tf.reduce_any(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
>>> tf.reduce_any(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x) # True
tf.reduce_any(x, 0) # [True, True]
tf.reduce_any(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.


@@ -1215,9 +1215,7 @@ def sparse_to_dense(sparse_indices,
@tf_export("sparse.reduce_max", v1=[])
def sparse_reduce_max_v2(
sp_input, axis=None, keepdims=None, output_is_sparse=False, name=None):
"""Computes `tf.sparse.maximum` of elements across dimensions of a SparseTensor.
This is the reduction operation for the elementwise `tf.sparse.maximum` op.
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
@@ -1243,32 +1241,21 @@ def sparse_reduce_max_v2(
For example:
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
```python
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
tf.sparse.reduce_max(x) ==> 3
tf.sparse.reduce_max(x, 0) ==> [1, 3, 2]
tf.sparse.reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
tf.sparse.reduce_max(x, 1, keepdims=True) ==> [[2], [3]]
tf.sparse.reduce_max(x, [0, 1]) ==> 3
>>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 2, 3], [2, 3])
>>> tf.sparse.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=3>
>>> tf.sparse.reduce_max(x, 0)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 3, 2], dtype=int32)>
>>> tf.sparse.reduce_max(x, 1)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 3], dtype=int32)>
>>> tf.sparse.reduce_max(x, 1, keepdims=True)
<tf.Tensor: shape=(2, 1), dtype=int32, numpy=
array([[2],
[3]], dtype=int32)>
>>> tf.sparse.reduce_max(x, [0, 1])
<tf.Tensor: shape=(), dtype=int32, numpy=3>
# 'y' represents [[-7, ?]
# [ 4, 3]
# [ ?, ?]
>>> y = tf.sparse.SparseTensor([[0, 0,], [1, 0], [1, 1]], [-7, 4, 3],
... [3, 2])
>>> tf.sparse.reduce_max(y, 1)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([-7, 4, 0], dtype=int32)>
# 'y' represents [[-7, ?]
# [ 4, 3]
# [ ?, ?]
tf.sparse.reduce_max(y, 1) ==> [-7, 4, 0]
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
@@ -1316,9 +1303,7 @@ def sparse_reduce_max_v2(
"reduction_axes")
def sparse_reduce_max(sp_input, axis=None, keepdims=None,
reduction_axes=None, keep_dims=None):
"""Computes `tf.sparse.maximum` of elements across dimensions of a SparseTensor.
This is the reduction operation for the elementwise `tf.sparse.maximum` op.
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
@@ -1343,32 +1328,21 @@ def sparse_reduce_max(sp_input, axis=None, keepdims=None,
For example:
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
```python
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
tf.sparse.reduce_max(x) ==> 3
tf.sparse.reduce_max(x, 0) ==> [1, 3, 2]
tf.sparse.reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
tf.sparse.reduce_max(x, 1, keepdims=True) ==> [[2], [3]]
tf.sparse.reduce_max(x, [0, 1]) ==> 3
>>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 2, 3], [2, 3])
>>> tf.sparse.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=3>
>>> tf.sparse.reduce_max(x, 0)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 3, 2], dtype=int32)>
>>> tf.sparse.reduce_max(x, 1)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 3], dtype=int32)>
>>> tf.sparse.reduce_max(x, 1, keepdims=True)
<tf.Tensor: shape=(2, 1), dtype=int32, numpy=
array([[2],
[3]], dtype=int32)>
>>> tf.sparse.reduce_max(x, [0, 1])
<tf.Tensor: shape=(), dtype=int32, numpy=3>
# 'y' represents [[-7, ?]
# [ 4, 3]
# [ ?, ?]
>>> y = tf.sparse.SparseTensor([[0, 0,], [1, 0], [1, 1]], [-7, 4, 3],
... [3, 2])
>>> tf.sparse.reduce_max(y, 1)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([-7, 4, 0], dtype=int32)>
# 'y' represents [[-7, ?]
# [ 4, 3]
# [ ?, ?]
tf.sparse.reduce_max(y, 1) ==> [-7, 4, 0]
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
@@ -1449,9 +1423,7 @@ def sparse_reduce_max_sparse(sp_input,
@tf_export("sparse.reduce_sum", v1=[])
def sparse_reduce_sum_v2(
sp_input, axis=None, keepdims=None, output_is_sparse=False, name=None):
"""Computes `tf.sparse.add` of elements across dimensions of a SparseTensor.
This is the reduction operation for the elementwise `tf.sparse.add` op.
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
@@ -1471,23 +1443,16 @@ def sparse_reduce_sum_v2(
For example:
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
>>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 1, 1], [2, 3])
>>> tf.sparse.reduce_sum(x)
<tf.Tensor: shape=(), dtype=int32, numpy=3>
>>> tf.sparse.reduce_sum(x, 0)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 1, 1], dtype=int32)>
>>> tf.sparse.reduce_sum(x, 1) # Can also use -1 as the axis
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 1], dtype=int32)>
>>> tf.sparse.reduce_sum(x, 1, keepdims=True)
<tf.Tensor: shape=(2, 1), dtype=int32, numpy=
array([[2],
[1]], dtype=int32)>
>>> tf.sparse.reduce_sum(x, [0, 1])
<tf.Tensor: shape=(), dtype=int32, numpy=3>
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse.reduce_sum(x) ==> 3
tf.sparse.reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse.reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse.reduce_sum(x, 1, keepdims=True) ==> [[2], [1]]
tf.sparse.reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
@@ -1534,9 +1499,7 @@ def sparse_reduce_sum_v2(
"reduction_axes")
def sparse_reduce_sum(sp_input, axis=None, keepdims=None,
reduction_axes=None, keep_dims=None):
"""Computes `tf.sparse.add` of elements across dimensions of a SparseTensor.
This is the reduction operation for the elementwise `tf.sparse.add` op.
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
@@ -1553,23 +1516,16 @@ def sparse_reduce_sum(sp_input, axis=None, keepdims=None,
For example:
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
>>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 1, 1], [2, 3])
>>> tf.sparse.reduce_sum(x)
<tf.Tensor: shape=(), dtype=int32, numpy=3>
>>> tf.sparse.reduce_sum(x, 0)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 1, 1], dtype=int32)>
>>> tf.sparse.reduce_sum(x, 1) # Can also use -1 as the axis
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 1], dtype=int32)>
>>> tf.sparse.reduce_sum(x, 1, keepdims=True)
<tf.Tensor: shape=(2, 1), dtype=int32, numpy=
array([[2],
[1]], dtype=int32)>
>>> tf.sparse.reduce_sum(x, [0, 1])
<tf.Tensor: shape=(), dtype=int32, numpy=3>
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse.reduce_sum(x) ==> 3
tf.sparse.reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse.reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse.reduce_sum(x, 1, keepdims=True) ==> [[2], [1]]
tf.sparse.reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
@@ -2695,22 +2651,14 @@ def sparse_maximum(sp_a, sp_b, name=None):
"""Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
>>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7])
>>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7])
>>> res = tf.sparse.maximum(sp_zero, sp_one)
>>> res.indices
<tf.Tensor: shape=(2, 1), dtype=int64, numpy=
array([[0],
[1]])>
>>> res.values
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 1], dtype=int32)>
>>> res.dense_shape
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([7])>
The reduction version of this elementwise operation is `tf.sparse.reduce_max`.
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse.maximum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
@@ -2741,20 +2689,14 @@ def sparse_minimum(sp_a, sp_b, name=None):
"""Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
>>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7])
>>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7])
>>> res = tf.sparse.minimum(sp_zero, sp_one)
>>> res.indices
<tf.Tensor: shape=(2, 1), dtype=int64, numpy=
array([[0],
[1]])>
>>> res.values
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 0], dtype=int32)>
>>> res.dense_shape
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([7])>
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse.minimum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices


@@ -338,8 +338,6 @@ def reduce_join_v2( # pylint: disable=missing-docstring
name=None):
"""Joins all strings into a single string, or joins along an axis.
This is the reduction operation for the elementwise `tf.strings.join` op.
>>> tf.strings.reduce_join([['abc','123'],
... ['def','456']]).numpy()
b'abc123def456'
@@ -561,9 +559,6 @@ def string_join(inputs, separator="", name=None):
... separator=" ").numpy()
array([b'abc def', b'123 456'], dtype=object)
The reduction version of this elementwise operation is
`tf.strings.reduce_join`.
Args:
inputs: A list of `tf.Tensor` objects of same size and `tf.string` dtype.
separator: A string added between each string being joined.
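
Finally, a brief hedged sketch (illustrative only, TF 2.x eager assumed) contrasting the elementwise `tf.strings.join` with its reduction counterpart `tf.strings.reduce_join`:

```python
import tensorflow as tf

words = tf.constant([['abc', '123'],
                     ['def', '456']])

# Elementwise: join corresponding elements of several same-shaped tensors.
tf.strings.join([words[:, 0], words[:, 1]], separator=' ')  # [b'abc 123', b'def 456']

# Reduction: join the elements of one tensor, optionally along an axis.
tf.strings.reduce_join(words, axis=1, separator=' ')        # [b'abc 123', b'def 456']
tf.strings.reduce_join(words)                               # b'abc123def456'
```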