Merge pull request #34864 from nikochiko:update-array-ops-docstrings
PiperOrigin-RevId: 318082779
Change-Id: Iab29629d0197ea71b4b51c60767599666bf614dd
commit fb9291d5a5
@@ -34,13 +34,13 @@ END
 description: <<END
 The type of padding algorithm to use.
 
-We specify the size-related attributes as:
+The size-related attributes are specified as follows:
 
 ```python
 ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
 strides = [1, stride_planes, strides_rows, strides_cols, 1]
 ```
 END
 }
-summary: "Extract `patches` from `input` and put them in the \"depth\" output dimension. 3D extension of `extract_image_patches`."
+summary: "Extract `patches` from `input` and put them in the `\"depth\"` output dimension. 3D extension of `extract_image_patches`."
 }
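
For orientation, a minimal usage sketch of `tf.extract_volume_patches` with the `ksizes`/`strides` layout described in this attr; the input values and patch sizes here are illustrative only, not taken from the change.

```python
import tensorflow as tf

# One 5-D input volume: [batch, in_planes, in_rows, in_cols, depth].
x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1])

# 2x2x2 patches, stepping 2 along each spatial dimension, no padding.
patches = tf.extract_volume_patches(
    x,
    ksizes=[1, 2, 2, 2, 1],
    strides=[1, 2, 2, 2, 1],
    padding="VALID")

# Each patch is flattened into the last ("depth") dimension: 2*2*2*1 = 8.
print(patches.shape)  # (1, 2, 2, 2, 8)
```
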
@@ -1322,13 +1322,17 @@ def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
 
 This function converts Python objects of various types to `Tensor`
 objects. It accepts `Tensor` objects, numpy arrays, Python lists,
-and Python scalars. For example:
+and Python scalars.
+
+For example:
 
+>>> import numpy as np
 >>> def my_func(arg):
 ... arg = tf.convert_to_tensor(arg, dtype=tf.float32)
 ... return arg
 
 >>> # The following calls are equivalent.
+...
 >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
 >>> print(value_1)
 tf.Tensor(
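
As a quick sketch of the conversion behaviour described above (the inputs are illustrative, not part of the diff):

```python
import numpy as np
import tensorflow as tf

# Tensors, NumPy arrays, Python lists, and scalars all go through the same path.
t1 = tf.convert_to_tensor(np.array([[1.0, 2.0], [3.0, 4.0]]))  # dtype inferred from the array
t2 = tf.convert_to_tensor([1, 2, 3], dtype=tf.float32)          # dtype forced
t3 = tf.convert_to_tensor(4.0)                                  # Python scalar

# dtype_hint is a soft preference, used only when the value carries no dtype of its own.
t4 = tf.convert_to_tensor([1, 2, 3], dtype_hint=tf.float32)

print(t1.dtype, t2.dtype, t3.dtype, t4.dtype)
```
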
@@ -505,23 +505,31 @@ setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
 def broadcast_dynamic_shape(shape_x, shape_y):
 """Computes the shape of a broadcast given symbolic shapes.
 
-When shape_x and shape_y are Tensors representing shapes (i.e. the result of
-calling tf.shape on another Tensor) this computes a Tensor which is the shape
-of the result of a broadcasting op applied in tensors of shapes shape_x and
-shape_y.
+When `shape_x` and `shape_y` are Tensors representing shapes (i.e. the result
+of calling tf.shape on another Tensor) this computes a Tensor which is the
+shape of the result of a broadcasting op applied in tensors of shapes
+`shape_x` and `shape_y`.
 
-For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
-Tensor whose value is [5, 2, 3].
-
 This is useful when validating the result of a broadcasting operation when the
 tensors do not have statically known shapes.
 
+Example:
+
+>>> shape_x = (1, 2, 3)
+>>> shape_y = (5, 1, 3)
+>>> tf.broadcast_dynamic_shape(shape_x, shape_y)
+<tf.Tensor: shape=(3,), dtype=int32, numpy=array([5, 2, 3], ...>
+
 Args:
 shape_x: A rank 1 integer `Tensor`, representing the shape of x.
 shape_y: A rank 1 integer `Tensor`, representing the shape of y.
 
 Returns:
 A rank 1 integer `Tensor` representing the broadcasted shape.
 
+Raises:
+InvalidArgumentError: If the two shapes are incompatible for
+broadcasting.
 """
 return gen_array_ops.broadcast_args(shape_x, shape_y)
 
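
A small sketch of the intended use with runtime shapes obtained from `tf.shape` (the tensor values are illustrative):

```python
import tensorflow as tf

a = tf.zeros([1, 2, 3])
b = tf.zeros([5, 1, 3])

# Shapes computed at runtime, e.g. when static shapes are unknown.
out_shape = tf.broadcast_dynamic_shape(tf.shape(a), tf.shape(b))
print(out_shape)  # tf.Tensor([5 2 3], shape=(3,), dtype=int32)

# The result can drive an explicit broadcast.
c = tf.broadcast_to(a, out_shape)
print(c.shape)  # (5, 2, 3)
```
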
@@ -531,16 +539,24 @@ def broadcast_dynamic_shape(shape_x, shape_y):
 def broadcast_static_shape(shape_x, shape_y):
 """Computes the shape of a broadcast given known shapes.
 
-When shape_x and shape_y are fully known TensorShapes this computes a
-TensorShape which is the shape of the result of a broadcasting op applied in
-tensors of shapes shape_x and shape_y.
+When `shape_x` and `shape_y` are fully known `TensorShape`s this computes a
+`TensorShape` which is the shape of the result of a broadcasting op applied in
+tensors of shapes `shape_x` and `shape_y`.
 
-For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
-TensorShape whose value is [5, 2, 3].
+For example, if shape_x is `TensorShape([1, 2, 3])` and shape_y is
+`TensorShape([5, 1, 3])`, the result is a TensorShape whose value is
+`TensorShape([5, 2, 3])`.
 
 This is useful when validating the result of a broadcasting operation when the
 tensors have statically known shapes.
 
+Example:
+
+>>> shape_x = tf.TensorShape([1, 2, 3])
+>>> shape_y = tf.TensorShape([5, 1 ,3])
+>>> tf.broadcast_static_shape(shape_x, shape_y)
+TensorShape([5, 2, 3])
+
 Args:
 shape_x: A `TensorShape`
 shape_y: A `TensorShape`
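
For contrast with the dynamic variant, a sketch showing that the static version fails before anything runs when shapes cannot broadcast (shapes chosen for illustration):

```python
import tensorflow as tf

x = tf.TensorShape([1, 2, 3])
y = tf.TensorShape([5, 1, 3])
print(tf.broadcast_static_shape(x, y))  # TensorShape([5, 2, 3])

# Incompatible static shapes raise immediately, at graph-construction time.
try:
    tf.broadcast_static_shape(tf.TensorShape([2, 3]), tf.TensorShape([4, 3]))
except ValueError as e:
    print("not broadcastable:", e)
```
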
@@ -1661,13 +1677,6 @@ def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
 
 Numpy equivalent is `tensor[mask]`.
 
-```python
-# 1-D example
-tensor = [0, 1, 2, 3]
-mask = np.array([True, False, True, False])
-boolean_mask(tensor, mask) # [0, 2]
-```
-
 In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
 the first K dimensions of `tensor`'s shape. We then have:
 `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
@@ -1680,9 +1689,23 @@ def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
 ragged tensors, and can be used if you need to preserve the masked dimensions
 of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
 
+Examples:
+
+```python
+# 1-D example
+tensor = [0, 1, 2, 3]
+mask = np.array([True, False, True, False])
+tf.boolean_mask(tensor, mask) # [0, 2]
+
+# 2-D example
+tensor = [[1, 2], [3, 4], [5, 6]]
+mask = np.array([True, False, True])
+tf.boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
+```
+
 Args:
-tensor: N-D tensor.
-mask: K-D boolean tensor, K <= N and K must be known statically.
+tensor: N-D Tensor.
+mask: K-D boolean Tensor, K <= N and K must be known statically.
 name: A name for this operation (optional).
 axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
 default, axis is 0 which will mask from the first dimension. Otherwise K +
@@ -1694,15 +1717,6 @@ def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
 
 Raises:
 ValueError: If shapes do not conform.
-
-Examples:
-
-```python
-# 2-D example
-tensor = [[1, 2], [3, 4], [5, 6]]
-mask = np.array([True, False, True])
-boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
-```
 """
 
 def _apply_mask_1d(reshaped_tensor, mask, axis=None):
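
The `axis` argument described in the Args above is not covered by the docstring examples; a small sketch with illustrative shapes:

```python
import numpy as np
import tensorflow as tf

tensor = tf.reshape(tf.range(12), [2, 3, 2])
mask = np.array([True, False, True])

# With axis=1 the mask is applied along the second dimension, so entries 0 and 2
# of that dimension are kept and the result has shape (2, 2, 2).
out = tf.boolean_mask(tensor, mask, axis=1)
print(out.shape)  # (2, 2, 2)
```
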
@@ -1757,13 +1771,6 @@ def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
 
 Numpy equivalent is `tensor[mask]`.
 
-```python
-# 1-D example
-tensor = [0, 1, 2, 3]
-mask = np.array([True, False, True, False])
-boolean_mask(tensor, mask) # [0, 2]
-```
-
 In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
 the first K dimensions of `tensor`'s shape. We then have:
 `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
@@ -1776,9 +1783,23 @@ def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
 ragged tensors, and can be used if you need to preserve the masked dimensions
 of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
 
+Examples:
+
+>>> tensor = [0, 1, 2, 3] # 1-D example
+>>> mask = np.array([True, False, True, False])
+>>> tf.boolean_mask(tensor, mask)
+<tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 2], dtype=int32)>
+
+>>> tensor = [[1, 2], [3, 4], [5, 6]] # 2-D example
+>>> mask = np.array([True, False, True])
+>>> tf.boolean_mask(tensor, mask)
+<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
+array([[1, 2],
+[5, 6]], dtype=int32)>
+
 Args:
-tensor: N-D tensor.
-mask: K-D boolean tensor, K <= N and K must be known statically.
+tensor: N-D Tensor.
+mask: K-D boolean Tensor, K <= N and K must be known statically.
 axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
 default, axis is 0 which will mask from the first dimension. Otherwise K +
 axis <= N.
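
To illustrate the `tf.ragged.boolean_mask` alternative that this docstring points to (the inputs are illustrative):

```python
import numpy as np
import tensorflow as tf

tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([[True, False], [False, False], [True, True]])

# tf.boolean_mask flattens the masked dimensions into one...
print(tf.boolean_mask(tensor, mask))         # tf.Tensor([1 5 6], shape=(3,), dtype=int32)

# ...while tf.ragged.boolean_mask preserves them, one (possibly empty) row per input row.
print(tf.ragged.boolean_mask(tensor, mask))  # <tf.RaggedTensor [[1], [], [5, 6]]>
```
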
@@ -3548,7 +3569,37 @@ def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
 You can normalize the edit distance by length of `truth` by setting
 `normalize` to true.
 
-For example, given the following input:
+For example:
 
+Given the following input,
+* `hypothesis` is a `tf.SparseTensor` of shape `[2, 1, 1]`
+* `truth` is a `tf.SparseTensor` of shape `[2, 2, 2]`
+
+>>> hypothesis = tf.SparseTensor(
+... [[0, 0, 0],
+... [1, 0, 0]],
+... ["a", "b"],
+... (2, 1, 1))
+>>> truth = tf.SparseTensor(
+... [[0, 1, 0],
+... [1, 0, 0],
+... [1, 0, 1],
+... [1, 1, 0]],
+... ["a", "b", "c", "a"],
+... (2, 2, 2))
+>>> tf.edit_distance(hypothesis, truth, normalize=True)
+<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
+array([[inf, 1. ],
+[0.5, 1. ]], dtype=float32)>
+
+The operaton returns a dense Tensor of shape `[2, 2]` with
+edit distances normalized by `truth` lengths.
+
+**Note**: It is possible to calculate edit distance between two
+sparse tensors with variable-length values. However, attempting to create
+them while eager execution is enabled will result in a `ValueError`.
+
+For the following inputs,
+
 ```python
 # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
@@ -3574,15 +3625,10 @@ def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
 (2, 2, 2))
 
 normalize = True
-```
 
-This operation would return the following:
-
-```python
-# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
-# by 'truth' lengths.
-output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
-[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
+# The output would be a dense Tensor of shape `(2,)`, with edit distances
+noramlized by 'truth' lengths.
+# output => array([0., 0.5], dtype=float32)
 ```
 
 Args:
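
A sketch of one common way to reach the `SparseTensor` layout that `tf.edit_distance` expects, starting from dense token tensors; the use of `tf.sparse.from_dense` and the token values here are illustrative assumptions, not part of the change:

```python
import tensorflow as tf

# Two token sequences; tf.edit_distance compares along the last axis.
hypothesis = tf.sparse.from_dense([["a", "b"]])   # dense shape [1, 2]
truth = tf.sparse.from_dense([["a", "c", "b"]])   # dense shape [1, 3]

# One insertion ("c") is needed, so the unnormalized distance is 1.
print(tf.edit_distance(hypothesis, truth, normalize=False))  # tf.Tensor([1.], shape=(1,), dtype=float32)

# Normalizing divides by the length of `truth` (3 tokens here).
print(tf.edit_distance(hypothesis, truth, normalize=True))   # tf.Tensor([0.33333334], ...)
```
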
@@ -2217,6 +2217,25 @@ def assert_scalar(tensor, name=None, message=None):
 def ensure_shape(x, shape, name=None):
 """Updates the shape of a tensor and checks at runtime that the shape holds.
 
+For example:
+
+>>> @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
+... def f(tensor):
+... return tf.ensure_shape(tensor, [3, 3])
+>>>
+>>> f(tf.zeros([3, 3])) # Passes
+<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
+array([[0., 0., 0.],
+[0., 0., 0.],
+[0., 0., 0.]], dtype=float32)>
+>>> f([1, 2, 3]) # fails
+Traceback (most recent call last):
+...
+InvalidArgumentError: Shape of tensor x [3] is not compatible with expected shape [3,3].
+
+The above example raises `tf.errors.InvalidArgumentError`,
+because the shape (3,) is not compatible with the shape (None, 3, 3)
+
 With eager execution this is a shape assertion, that returns the input:
 
 >>> x = tf.constant([1,2,3])
@@ -2303,8 +2322,10 @@ def ensure_shape(x, shape, name=None):
 name: A name for this operation (optional). Defaults to "EnsureShape".
 
 Returns:
-A `Tensor`. Has the same type and contents as `x`. At runtime, raises a
-`tf.errors.InvalidArgumentError` if `shape` is incompatible with the shape
+A `Tensor`. Has the same type and contents as `x`.
+
+Raises:
+tf.errors.InvalidArgumentError: If `shape` is incompatible with the shape
 of `x`.
 """
 if not isinstance(shape, tensor_shape.TensorShape):
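
A short sketch of `tf.ensure_shape` with a partially known shape, which the docstring examples above do not show; the values are illustrative:

```python
import tensorflow as tf

x = tf.zeros([5, 3])

# None entries act as wildcards: only the concrete dimensions are checked.
y = tf.ensure_shape(x, [None, 3])
print(y.shape)  # (5, 3)

# A concrete mismatch raises tf.errors.InvalidArgumentError (eagerly, right here).
try:
    tf.ensure_shape(x, [None, 4])
except tf.errors.InvalidArgumentError:
    print("shape check failed")
```
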