Fix doctests in RaggedTensor docstrings.

PiperOrigin-RevId: 267499142
Edward Loper 2019-09-05 18:06:35 -07:00 committed by TensorFlower Gardener
parent d8f6cd24f2
commit 484e8acedc
14 changed files with 382 additions and 443 deletions
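Context for the change: the touched docstrings are rewritten as executable doctests (TF 2.x eager style, `print(...)`, `.to_list()`, `.numpy()` outputs). As a minimal illustrative sketch — not part of this commit, with an example string written here purely for demonstration — docstring examples in this style can be exercised with Python's built-in `doctest` module:

```python
# Minimal sketch: checking RaggedTensor-style doctest examples with Python's
# doctest module. Assumes TensorFlow 2.x with eager execution enabled; the
# example text below mirrors the fixed docstrings but is illustrative only.
import doctest

import tensorflow as tf

EXAMPLE = '''
>>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
<tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
>>> tf.ragged.constant([[3, 1, 4], [1, 5]]).to_list()
[[3, 1, 4], [1, 5]]
'''

# run_docstring_examples executes the `>>>` lines and compares the printed or
# repr'd results against the expected output; ELLIPSIS lets an expected value
# use "..." to skip variable details (tensor ids, addresses), matching how some
# of the updated docstrings elide parts of tensor reprs.
doctest.run_docstring_examples(
    EXAMPLE, {'tf': tf}, name='ragged_examples',
    optionflags=doctest.ELLIPSIS, verbose=True)
```

TensorFlow's public docstrings are checked with a similar doctest-based harness, which is presumably why the updated expected outputs are written against eager-mode reprs rather than `.eval()` calls.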

View File

@@ -70,25 +70,24 @@ def boolean_mask(data, mask, name=None):
not a prefix of `data.shape`.
#### Examples:
>>> # Aliases for True & False so data and mask line up.
>>> T, F = (True, False)
>>> tf.ragged.boolean_mask(  # Mask a 2D Tensor.
...     data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
...     mask=[[T, F, T], [F, F, F], [T, F, F]]).to_list()
[[1, 3], [], [7]]
>>> tf.ragged.boolean_mask(  # Mask a 2D RaggedTensor.
...     tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
...     tf.ragged.constant([[F, F, T], [F], [T, T]])).to_list()
[[3], [], [5, 6]]
>>> tf.ragged.boolean_mask(  # Mask rows of a 2D RaggedTensor.
...     tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
...     tf.ragged.constant([True, False, True])).to_list()
[[1, 2, 3], [5, 6]]
"""
with ops.name_scope(name, 'RaggedMask', [data, mask]):
# Convert inputs to tensors.
@@ -223,11 +222,10 @@ def tile(input, multiples, name=None):  # pylint: disable=redefined-builtin
A `RaggedTensor` with the same type, rank, and ragged_rank as `input`.
#### Example:
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> tf.tile(rt, [3, 2]).to_list()
[[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]]
"""
with ops.name_scope(name, 'RaggedTile', [input, multiples]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
@@ -267,11 +265,10 @@ def _tile_ragged_values(rt_input, multiples, const_multiples=None):
A `Tensor` with the same type and rank as `rt_input.flat_values`.
#### Example:
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> _tile_ragged_values(rt, tf.constant([3, 2])).numpy()
array([1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3], dtype=int32)
"""
ragged_rank = rt_input.ragged_rank
nested_splits = rt_input.nested_row_splits
@@ -326,11 +323,10 @@ def _tile_ragged_splits(rt_input, multiples, const_multiples=None):
`rt_input`).
#### Example:
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> _tile_ragged_splits(rt, [3, 2])
[<tf.Tensor: ..., numpy=array([ 0, 4, 6, 10, 12, 16, 18])>]
"""
ragged_rank = rt_input.ragged_rank
nested_splits = rt_input.nested_row_splits
@@ -423,23 +419,22 @@ def expand_dims(input, axis, name=None):  # pylint: disable=redefined-builtin
size 1 at `axis`.
#### Examples:
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> print(rt.shape)
(2, None)
>>> expanded = tf.expand_dims(rt, axis=0)
>>> print(expanded.shape, expanded)
(1, None, None) <tf.RaggedTensor [[[1, 2], [3]]]>
>>> expanded = tf.expand_dims(rt, axis=1)
>>> print(expanded.shape, expanded)
(2, None, None) <tf.RaggedTensor [[[1, 2]], [[3]]]>
>>> expanded = tf.expand_dims(rt, axis=2)
>>> print(expanded.shape, expanded)
(2, None, 1) <tf.RaggedTensor [[[1], [2]], [[3]]]>
"""
with ops.name_scope(name, 'RaggedExpandDims', [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
@@ -474,6 +469,11 @@ def size(input, out_type=dtypes.int32, name=None):  # pylint: disable=redefined-
The size of a ragged tensor is the size of its inner values.
#### Example:
>>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy()
3
Args:
input: A potentially ragged `Tensor`.
out_type: The numeric output type for the operation.
@@ -481,12 +481,6 @@ def size(input, out_type=dtypes.int32, name=None):  # pylint: disable=redefined-
Returns:
A Tensor of type `out_type`.
"""
if ragged_tensor.is_ragged(input):
return array_ops.size(input.flat_values, out_type=out_type, name=name)
@@ -502,13 +496,12 @@ def rank(input, name=None):  # pylint: disable=redefined-builtin
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
#### Example:
>>> # shape of tensor 't' is [2, None, None]
>>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]])
>>> tf.rank(t).numpy()
3
Args:
input: A `RaggedTensor`
@@ -562,14 +555,13 @@ def stack_dynamic_partitions(data, partitions, num_partitions, name=None):
If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to
`tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`.
#### Example:
>>> data = ['a', 'b', 'c', 'd', 'e']
>>> partitions = [ 3, 0, 2, 2, 3]
>>> num_partitions = 5
>>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions)
<tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>
Args:
data: A `Tensor` or `RaggedTensor` containing the values to stack.

View File

@@ -55,12 +55,11 @@ def batch_gather(params, indices, name=None):
`result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.
#### Example:
>>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
>>> indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]])
>>> tf.compat.v1.batch_gather(params, indices)
<tf.RaggedTensor [[b'b', b'c', b'a'], [], [], [b'e', b'e']]>
"""
if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)):
return array_ops.batch_gather(params, indices, name)

View File

@@ -56,14 +56,13 @@ def concat(values, axis, name=None):
the input tensors have different ranks.
#### Example:
>>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
>>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
>>> tf.concat([t1, t2], axis=0)
<tf.RaggedTensor [[1, 2], [3, 4, 5], [6], [7, 8, 9]]>
>>> tf.concat([t1, t2], axis=1)
<tf.RaggedTensor [[1, 2, 6], [3, 4, 5, 7, 8, 9]]>
"""
if not isinstance(values, (list, tuple)):
values = [values]
@@ -79,15 +78,21 @@ def stack(values, axis=0, name=None):
(`R >= axis`), returns a rank-`R+1` `RaggedTensor` `result` such that
`result[i0...iaxis]` is `[value[i0...iaxis] for value in values]`.
#### Examples:
>>> # Stacking two ragged tensors.
>>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
>>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
>>> tf.ragged.stack([t1, t2], axis=0)
<tf.RaggedTensor [[[1, 2], [3, 4, 5]], [[6], [7, 8, 9]]]>
>>> tf.ragged.stack([t1, t2], axis=1)
<tf.RaggedTensor [[[1, 2], [6]], [[3, 4, 5], [7, 8, 9]]]>
>>> # Stacking two dense tensors with different sizes.
>>> t3 = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> t4 = tf.constant([[5], [6], [7]])
>>> tf.ragged.stack([t3, t4], axis=0)
<tf.RaggedTensor [[[1, 2, 3], [4, 5, 6]], [[5], [6], [7]]]>
Args:
values: A list of `tf.Tensor` or `tf.RaggedTensor`. May not be empty. All

View File

@@ -40,10 +40,8 @@ def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None,
Example:
>>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
<tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
All scalar values in `pylist` must have the same nesting depth `K`, and the
returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar
@@ -98,10 +96,9 @@ def constant_value(pylist, dtype=None, ragged_rank=None, inner_shape=None,
Example:
>>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]])
tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]),
                     row_splits=array([0, 2, 3, 6]))
All scalar values in `pylist` must have the same nesting depth `K`, and the
returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no

View File

@@ -41,15 +41,13 @@ def map_flat_values(op, *args, **kwargs):
Examples:
>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
>>> map_flat_values(tf.ones_like, rt).to_list()
[[1, 1, 1], [], [1, 1], [1]]
>>> map_flat_values(tf.multiply, rt, rt).to_list()
[[1, 4, 9], [], [16, 25], [36]]
>>> map_flat_values(tf.add, rt, 5).to_list()
[[6, 7, 8], [], [9, 10], [11]]
Args:
op: The operation that should be applied to the RaggedTensor `flat_values`.

View File

@@ -52,21 +52,19 @@ def gather(params, indices, validate_indices=None, axis=0, batch_dims=0,
Examples:
>>> params = tf.constant(['a', 'b', 'c', 'd', 'e'])
>>> indices = tf.constant([3, 1, 2, 1, 0])
>>> ragged_params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
>>> ragged_indices = tf.ragged.constant([[3, 1, 2], [1], [], [0]])
>>> tf.gather(params, ragged_indices)
<tf.RaggedTensor [[b'd', b'b', b'c'], [b'b'], [], [b'a']]>
>>> tf.gather(ragged_params, indices)
<tf.RaggedTensor [[b'e'], [b'd'], [], [b'd'], [b'a', b'b', b'c']]>
>>> tf.gather(ragged_params, ragged_indices)
<tf.RaggedTensor [[[b'e'], [b'd'], []], [[b'd']], [], [[b'a', b'b', b'c']]]>
Args:
params: The potentially ragged tensor from which to gather values. Must be
@@ -148,25 +146,23 @@ def gather_nd(params, indices, batch_dims=0, name=None):
A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`.
#### Examples:
>>> params = tf.ragged.constant(
...     [ [ ['000', '001'], ['010'              ] ],
...       [ ['100'        ], ['110', '111', '112'], ['120'] ],
...       [ [              ], ['210'              ] ] ])
>>> # Gather 2D slices from a 3D tensor
>>> tf.gather_nd(params, [[2], [0]])
<tf.RaggedTensor [[[], [b'210']], [[b'000', b'001'], [b'010']]]>
>>> # Gather 1D slices from a 3D tensor
>>> tf.gather_nd(params, [[2, 1], [0, 0]])
<tf.RaggedTensor [[b'210'], [b'000', b'001']]>
>>> # Gather scalars from a 3D tensor
>>> tf.gather_nd(params, [[0, 0, 1], [1, 1, 2]]).numpy()
array([b'001', b'112'], dtype=object)
"""
if not isinstance(batch_dims, int) or batch_dims != 0:
raise ValueError('batch_dims != 0 is not supported for ragged gather yet.')

View File

@@ -70,30 +70,28 @@ def ragged_tensor_getitem(self, key):
Examples:
>>> # A 2-D ragged tensor with 1 ragged dimension.
>>> rt = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g']])
>>> rt[0].numpy()        # First row (1-D `Tensor`)
array([b'a', b'b', b'c'], dtype=object)
>>> rt[:3].to_list()     # First three rows (2-D RaggedTensor)
[[b'a', b'b', b'c'], [b'd', b'e'], [b'f']]
>>> rt[3, 0].numpy()     # 1st element of 4th row (scalar)
b'g'
>>> # A 3-D ragged tensor with 2 ragged dimensions.
>>> rt = tf.ragged.constant([[[1, 2, 3], [4]],
...                          [[5], [], [6]],
...                          [[7]],
...                          [[8, 9], [10]]])
>>> rt[1].to_list()      # Second row (2-D RaggedTensor)
[[5], [], [6]]
>>> rt[3, 0].numpy()     # First element of fourth row (1-D Tensor)
array([8, 9], dtype=int32)
>>> rt[:, 1:3].to_list() # Items 1-3 of each row (3-D RaggedTensor)
[[[4]], [[], [6]], [], [[10]]]
>>> rt[:, -1:].to_list() # Last item of each row (3-D RaggedTensor)
[[[4]], [[6]], [[7]], [[10]]]
"""
scope_tensors = [self] + list(_tensors_in_key_list(key))
if isinstance(key, (list, tuple)):

View File

@@ -163,7 +163,7 @@ def map_fn(fn,
elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]], dtype=tf.int64)
out = map_fn(fn=lambda x: x+1, elems,
             dtype=ragged.RaggedTensorType(type=tf.int64, ragged_rank=0))
# out = tf.ragged.constant([[2, 3, 4], [5, 6], [7, 8]])
```
"""
if not callable(fn):

View File

@@ -58,14 +58,12 @@ def range(starts, limits=None, deltas=1, dtype=None,
Examples:
>>> tf.ragged.range([3, 5, 2]).to_list()
[[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list()
[[0, 1, 2], [], [8, 9, 10, 11]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list()
[[0, 2], [], [8, 10]]
The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
The vector inputs must all have the same size. Scalar inputs are broadcast
@@ -371,56 +369,56 @@ Computes the %(combination)s of elements across dimensions of a `RaggedTensor`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant.
####Example:
%(example)s
"""
_RAGGED_REDUCE_SUM_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_sum(rt, axis=0).numpy()  # = [3+1+9+2, 1+5+6, 4]
array([15, 12, 4], dtype=int32)
>>> tf.reduce_sum(rt, axis=1).numpy()  # = [3+1+4, 1+5, 9, 2+6]
array([8, 6, 9, 8], dtype=int32)
"""
_RAGGED_REDUCE_PROD_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_prod(rt, axis=0).numpy()  # = [3*1*9*2, 1*5*6, 4]
array([54, 30, 4], dtype=int32)
>>> tf.reduce_prod(rt, axis=1).numpy()  # = [3*1*4, 1*5, 9, 2*6]
array([12, 5, 9, 12], dtype=int32)
"""
_RAGGED_REDUCE_MIN_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_min(rt, axis=0).numpy()
array([1, 1, 4], dtype=int32)
>>> tf.reduce_min(rt, axis=1).numpy()
array([1, 1, 9, 2], dtype=int32)
"""
_RAGGED_REDUCE_MAX_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_max(rt, axis=0).numpy()
array([9, 6, 4], dtype=int32)
>>> tf.reduce_max(rt, axis=1).numpy()
array([4, 5, 9, 6], dtype=int32)
"""
_RAGGED_REDUCE_MEAN_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_mean(rt, axis=0).numpy()
array([3.75, 4. , 4. ])
>>> tf.reduce_mean(rt, axis=1).numpy()
array([2.6666..., 3. , 9. , 4. ])
"""
_RAGGED_REDUCE_ALL_EXAMPLE = """
>>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> tf.reduce_all(rt, axis=0).numpy()
array([False, True, False, True])
>>> tf.reduce_all(rt, axis=1).numpy()
array([ True, False, False])
"""
_RAGGED_REDUCE_ANY_EXAMPLE = """
>>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> tf.reduce_any(rt, axis=0).numpy()
array([ True, True, False, True])
>>> tf.reduce_any(rt, axis=1).numpy()
array([ True, True, True])
"""

View File

@@ -37,12 +37,10 @@ def string_bytes_split(input, name=None):  # pylint: disable=redefined-builtin
Examples:
>>> tf.strings.bytes_split('hello').numpy()
array([b'h', b'e', b'l', b'l', b'o'], dtype=object)
>>> tf.strings.bytes_split(['hello', '123'])
<tf.RaggedTensor [[b'h', b'e', b'l', b'l', b'o'], [b'1', b'2', b'3']]>
Note that this op splits strings into bytes, not unicode characters. To
split strings into unicode characters, use `tf.strings.unicode_split`.
@@ -113,10 +111,11 @@ def unicode_encode(input,
#### Example:
>>> input = tf.ragged.constant(
...     [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
>>> print(unicode_encode(input, 'UTF-8'))
tf.Tensor([b'G\xc3\xb6\xc3\xb6dnight' b'\xf0\x9f\x98\x8a'],
shape=(2,), dtype=string)
"""
with ops.name_scope(name, "UnicodeEncode", [input]):
input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)
@@ -211,11 +210,10 @@ def unicode_decode(input,
`tf.RaggedTensor` otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_decode(input, 'UTF-8').to_list()
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
"""
with ops.name_scope(name, "UnicodeDecode", [input]):
return _unicode_decode(input, input_encoding, errors, replacement_char,
@@ -272,9 +270,9 @@ def unicode_decode_with_offsets(input,
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8')
>>> result[0].to_list()  # codepoints
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
>>> result[1].to_list()  # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
"""
@@ -314,12 +312,11 @@ def unicode_split(input,
`tf.RaggedTensor` otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_split(input, 'UTF-8').to_list()
[[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'],
 [b'\xf0\x9f\x98\x8a']]
"""
with ops.name_scope(name, "UnicodeSplit", [input]):
codepoints = _unicode_decode(input, input_encoding, errors,
@@ -377,10 +374,10 @@ def unicode_split_with_offsets(input,
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8')
>>> result[0].to_list()  # character substrings
[[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'],
 [b'\xf0\x9f\x98\x8a']]
>>> result[1].to_list()  # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
"""
@@ -464,12 +461,10 @@ def string_split_v2(input, sep=None, maxsplit=-1, name=None):  # pylint: disable
Example:
>>> tf.strings.split('hello world').numpy()
array([b'hello', b'world'], dtype=object)
>>> tf.strings.split(['hello world', 'a b c'])
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and
@@ -536,15 +531,14 @@ def string_split(source, sep=None, skip_empty=True, delimiter=None,
Examples:
>>> print(tf.compat.v1.string_split(['hello world', 'a b c']))
SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...),
values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...),
dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64))
>>> print(tf.compat.v1.string_split(['hello world', 'a b c'],
...                                 result_type="RaggedTensor"))
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
Args:
source: `1-D` string `Tensor`, the strings to split.
@@ -593,15 +587,14 @@ def strings_split_v1(input=None, sep=None, maxsplit=-1,  # pylint: disable=redef
Examples:
>>> print(tf.compat.v1.strings.split(['hello world', 'a b c']))
SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...),
values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...),
dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64))
>>> print(tf.compat.v1.strings.split(['hello world', 'a b c'],
...                                  result_type="RaggedTensor"))
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and

View File

@@ -155,9 +155,9 @@ class RaggedTensor(composite_tensor.CompositeTensor):
...     values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
...     values=inner_rt, row_splits=[0, 3, 3, 5])
>>> print(outer_rt.to_list())
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
>>> print(outer_rt.ragged_rank)
2
The factory function `RaggedTensor.from_nested_row_splits` may be used to
@@ -174,13 +174,13 @@ class RaggedTensor(composite_tensor.CompositeTensor):
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),
...                                   row_splits=[0, 2, 5])
>>> print(rt.to_list())
[[[1, 1, 1], [1, 1, 1]],
 [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
>>> print(rt.shape)
(2, None, 3)
### Uniform Outer Dimensions
@@ -191,20 +191,20 @@ class RaggedTensor(composite_tensor.CompositeTensor):
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt6)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt6.shape)
(2, 2, None)
Note that `rt6` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt7.shape)
(2, None, None)
Uniform and ragged outer dimensions may be interleaved, meaning that a
@@ -212,7 +212,7 @@ class RaggedTensor(composite_tensor.CompositeTensor):
For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could
be constructed as follows:
```python
t0 = tf.zeros([1000, 2])                           # Shape: [1000, 2]
t1 = RaggedTensor.from_row_lengths(t0, [...])      # [160, None, 2]
t2 = RaggedTensor.from_uniform_row_length(t1, 8)   # [20, 8, None, 2]
@@ -349,13 +349,12 @@ class RaggedTensor(composite_tensor.CompositeTensor):
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
>>> print(tf.RaggedTensor.from_value_rowids(
...     values=[3, 1, 4, 1, 5, 9, 2, 6],
...     value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
...     nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
@@ -458,12 +457,11 @@ class RaggedTensor(composite_tensor.CompositeTensor):
ValueError: If `row_splits` is an empty list.
#### Example:
>>> print(tf.RaggedTensor.from_row_splits(
...     values=[3, 1, 4, 1, 5, 9, 2, 6],
...     row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
@@ -516,12 +514,11 @@ class RaggedTensor(composite_tensor.CompositeTensor):
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_lengths(
...     values=[3, 1, 4, 1, 5, 9, 2, 6],
...     row_lengths=[4, 0, 3, 1, 0]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
@@ -571,12 +568,11 @@ class RaggedTensor(composite_tensor.CompositeTensor):
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_starts(
...     values=[3, 1, 4, 1, 5, 9, 2, 6],
...     row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
@@ -620,12 +616,11 @@ class RaggedTensor(composite_tensor.CompositeTensor):
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_limits(
...     values=[3, 1, 4, 1, 5, 9, 2, 6],
...     row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
@@ -665,26 +660,22 @@ class RaggedTensor(composite_tensor.CompositeTensor):
can be constructed with this method from a `RaggedTensor` values with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt1)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt1.shape)
(2, 2, None)
Note that `rt1` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt2 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt2.shape)
(2, None, None)
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
@@ -994,13 +985,11 @@ class RaggedTensor(composite_tensor.CompositeTensor):
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).shape
TensorShape([2, None])
>>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
TensorShape([2, None, 2])
"""
nrows = tensor_shape.dimension_at_index(self._row_splits.shape, 0) - 1
@@ -1041,11 +1030,10 @@ class RaggedTensor(composite_tensor.CompositeTensor):
A potentially ragged tensor.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
return self._values
@@ -1064,11 +1052,10 @@ class RaggedTensor(composite_tensor.CompositeTensor):
`self.values.shape[0]`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.row_splits)  # indices of row splits in rt.values
tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)
"""
return self._row_splits
@@ -1091,11 +1078,9 @@ class RaggedTensor(composite_tensor.CompositeTensor):
#### Example:
>>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
>>> print(rt.flat_values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
@@ -1118,15 +1103,13 @@ class RaggedTensor(composite_tensor.CompositeTensor):
#### Example:
>>> rt = tf.ragged.constant(
...     [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, splits in enumerate(rt.nested_row_splits):
...   print('Splits for dimension %d: %s' % (i+1, splits.numpy()))
Splits for dimension 1: [0 3]
Splits for dimension 2: [0 3 3 5]
Splits for dimension 3: [0 4 4 7 8 8]
"""
rt_nested_splits = [self.row_splits]
rt_values = self.values
@@ -1151,13 +1134,12 @@ class RaggedTensor(composite_tensor.CompositeTensor):
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.value_rowids())  # corresponds 1:1 with rt.values
tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)
"""
if self._cached_value_rowids is not None:
return self._cached_value_rowids
@@ -1185,15 +1167,13 @@ class RaggedTensor(composite_tensor.CompositeTensor):
#### Example:
>>> rt = tf.ragged.constant(
...     [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, ids in enumerate(rt.nested_value_rowids()):
...   print('row ids for dimension %d: %s' % (i+1, ids.numpy()))
row ids for dimension 1: [0 0 0]
row ids for dimension 2: [0 0 0 2 2]
row ids for dimension 3: [0 0 0 0 2 2 2 3]
"""
with ops.name_scope(name, "RaggedNestedValueRowIds", [self]):
rt_nested_ids = [self.value_rowids()]
@@ -1217,11 +1197,10 @@ class RaggedTensor(composite_tensor.CompositeTensor):
A scalar `Tensor` with dtype `out_type`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.nrows())  # rt has 5 rows.
tf.Tensor(5, shape=(), dtype=int64)
"""
if out_type is None:
out_type = self._row_splits.dtype
@@ -1246,13 +1225,12 @@ class RaggedTensor(composite_tensor.CompositeTensor):
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_starts())  # indices of row starts in rt.values
tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowStarts", [self]):
return self.row_splits[:-1]
@@ -1271,13 +1249,12 @@ class RaggedTensor(composite_tensor.CompositeTensor):
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_limits())  # indices of row limits in rt.values
tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowLimits", [self]):
return self.row_splits[1:]
@@ -1300,13 +1277,13 @@ class RaggedTensor(composite_tensor.CompositeTensor):
ValueError: If `axis` is out of bounds.
#### Example:
>>> rt = tf.ragged.constant(
...     [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
>>> print(rt.row_lengths())  # lengths of rows in rt
tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64)
>>> print(rt.row_lengths(axis=2))  # lengths of axis=2 rows.
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
"""
if self._cached_row_lengths is not None:
return self._cached_row_lengths
@@ -1366,11 +1343,10 @@ class RaggedTensor(composite_tensor.CompositeTensor):
where `output[i]` is the bounding size for dimension `axis[i]`.
#### Example:
>>> rt = tf.ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
>>> rt.bounding_shape().numpy()
array([5, 4])
"""
if out_type is None:
out_type = self._row_splits.dtype
@@ -1507,15 +1483,13 @@ class RaggedTensor(composite_tensor.CompositeTensor):
#### Examples:
>>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]])
>>> print(rt.merge_dims(0, 1))
<tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
>>> print(rt.merge_dims(1, 2))
<tf.RaggedTensor [[1, 2, 3], [4, 5, 6]]>
>>> print(rt.merge_dims(0, 2))
tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32)
To mimic the behavior of `np.flatten` (which flattens all dimensions), use
`rt.merge_dims(0, -1). To mimic the behavior of `tf.layers.Flatten` (which
@@ -1567,7 +1541,6 @@ class RaggedTensor(composite_tensor.CompositeTensor):
Examples:
>>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
>>> tf.RaggedTensor.from_tensor(dt)
<tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
@@ -1578,11 +1551,10 @@ class RaggedTensor(composite_tensor.CompositeTensor):
<tf.RaggedTensor [[5, 7], [0, 3], [6]]>
>>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
...                   [[0, 0], [3, 0], [0, 0]],
...                   [[6, 0], [0, 0], [0, 0]]])
>>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
<tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>
Args:
tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or
@@ -1760,14 +1732,13 @@ class RaggedTensor(composite_tensor.CompositeTensor):
Example:
>>> rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])
>>> print(rt.to_tensor())
tf.Tensor(
[[9 8 7]
 [0 0 0]
 [6 5 0]
 [4 0 0]], shape=(4, 3), dtype=int32)
Args:
default_value: Value to set for indices not specified in `self`. Defaults
@@ -1851,13 +1822,11 @@ class RaggedTensor(composite_tensor.CompositeTensor):
Example:
>>> st = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
...                      values=[1, 2, 3, 4, 5],
...                      dense_shape=[4, 3])
>>> tf.RaggedTensor.from_sparse(st).to_list()
[[1, 2, 3], [4], [], [5]]
Currently, only two-dimensional `SparseTensors` are supported.
@@ -1910,13 +1879,13 @@ class RaggedTensor(composite_tensor.CompositeTensor):
Example:
>>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]])
>>> print(rt.to_sparse())
SparseTensor(indices=tf.Tensor(
[[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]],
shape=(6, 2), dtype=int64),
values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32),
dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64))
Args:
name: A name prefix for the returned tensors (optional).
@@ -1952,17 +1921,15 @@ class RaggedTensor(composite_tensor.CompositeTensor):
Example:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> et = rt._to_variant()
>>> stacked_et = tf.stack([et, et])
>>> tf.RaggedTensor._from_variant(  # scalar input.
...     et, dtype=tf.int32, output_ragged_rank=1).to_list()
[[0], [1, 2]]
>>> tf.RaggedTensor._from_variant(  # batched input.
...     stacked_et, dtype=tf.int32, output_ragged_rank=2).to_list()
[[[0], [1, 2]], [[0], [1, 2]]]
Args:
variant: A `variant` Tensor representing an encoded (possibly
@@ -2012,7 +1979,7 @@ class RaggedTensor(composite_tensor.CompositeTensor):
a scalar `variant` Tensor is returned.
Example:
>>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]])
>>> rt._to_variant().shape.as_list()
[]
>>> rt._to_variant(batched_input=True).shape.as_list()

View File

@@ -86,10 +86,13 @@ def repeat_ranges(params, splits, repeats):
A `Tensor` with the same rank and type as `params`.
#### Example:
>>> print(repeat_ranges(
...     params=tf.constant(['a', 'b', 'c']),
...     splits=tf.constant([0, 2, 3]),
...     repeats=tf.constant(3)))
tf.Tensor([b'a' b'b' b'a' b'b' b'a' b'b' b'c' b'c' b'c'],
shape=(9,), dtype=string)
"""
# Divide `splits` into starts and limits, and repeat them `repeats` times.
if repeats.shape.ndims != 0:

View File

@@ -74,28 +74,25 @@ def where(condition, x=None, y=None, name=None):
`condition`, `x`, and `y` have incompatible shapes.
#### Examples:
>>> # Coordinates where condition is true.
>>> condition = tf.ragged.constant([[True, False, True], [False, True]])
>>> print(where(condition))
tf.Tensor( [[0 0] [0 2] [1 1]], shape=(3, 2), dtype=int64)
>>> # Elementwise selection between x and y, based on condition.
>>> condition = tf.ragged.constant([[True, False, True], [False, True]])
>>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])
>>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])
>>> print(where(condition, x, y))
<tf.RaggedTensor [[b'A', b'b', b'C'], [b'd', b'E']]>
>>> # Row selection between x and y, based on condition.
>>> condition = [True, False]
>>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])
>>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])
>>> print(where(condition, x, y))
<tf.RaggedTensor [[b'A', b'B', b'C'], [b'd', b'e']]>
"""
if (x is None) != (y is None):
raise ValueError('x and y must be either both None or both non-None')

View File

@@ -37,10 +37,8 @@ def row_splits_to_segment_ids(splits, name=None, out_type=None):
Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if
`splits[j] <= i < splits[j+1]`. Example:
>>> print(tf.ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9]))
tf.Tensor([0 0 0 2 2 3 4 4 4], shape=(9,), dtype=int64)
Args:
splits: A sorted 1-D integer Tensor. `splits[0]` must be zero.
@@ -83,10 +81,8 @@ def segment_ids_to_row_splits(segment_ids, num_segments=None,
Returns an integer vector `splits`, where `splits[0] = 0` and
`splits[i] = splits[i-1] + count(segment_ids==i)`. Example:
>>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]))
tf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64)
Args:
segment_ids: A 1-D integer Tensor.