Remove usage of magic-api-link syntax from source files.

Back-ticks are now converted to links by the api_docs generator. With the new docs repo, we're moving to simplify the docs pipeline and make everything more readable.

By doing this, we no longer get test failures for symbols that don't exist (`tf.does_not_exist` will not get a link).

There is also no longer any way to set custom link text. That's okay.

This is the result of the following regex replacement (plus a couple of manual edits):

re: @\{([^$].*?)(\$.+?)?}
sub: `\1`

This makes the following replacements:

"@{tf.symbol}" --> "`tf.symbol`"
"@{tf.symbol$link_text}" --> "`tf.symbol`"

PiperOrigin-RevId: 208042358
Mark Daoust, 2018-08-09 07:03:39 -07:00 (committed by TensorFlower Gardener)
parent fd9fc4b4b6
commit f40a875355
119 changed files with 530 additions and 528 deletions

View File

@ -32,10 +32,10 @@ def _flatten_tensors(tensors):
"""Check tensors for isomorphism and flatten.
Args:
tensors: list of T @{tf.Tensor} which must all have the same shape.
tensors: list of T `tf.Tensor` which must all have the same shape.
Returns:
tensors: a list of T @{tf.Tensor} which are flattened (1D) views of tensors
tensors: a list of T `tf.Tensor` which are flattened (1D) views of tensors
shape: the original shape of each element of input tensors
Raises:
@ -61,12 +61,12 @@ def _reshape_tensors(tensors, shape):
"""Reshape tensors flattened by _flatten_tensors.
Args:
tensors: list of T @{tf.Tensor} of identical length 1D tensors.
tensors: list of T `tf.Tensor` of identical length 1D tensors.
shape: list of integers describing the desired shape. Product of
the elements must equal the length of each tensor.
Returns:
list of T @{tf.Tensor} which are the reshaped inputs.
list of T `tf.Tensor` which are the reshaped inputs.
"""
reshaped = []
for t in tensors:
@ -79,12 +79,12 @@ def _padded_split(tensor, pieces):
"""Like split for 1D tensors but pads-out case where len % pieces != 0.
Args:
tensor: T @{tf.Tensor} that must be 1D.
tensor: T `tf.Tensor` that must be 1D.
pieces: a positive integer specifying the number of pieces into which
tensor should be split.
Returns:
list of T @{tf.Tensor} of length pieces, which hold the values of
list of T `tf.Tensor` of length pieces, which hold the values of
thin input tensor, in order. The final tensor may
be zero-padded on the end to make its size equal to those of all
of the other tensors.
@ -132,11 +132,11 @@ def _strip_padding(tensors, pad_len):
"""Strip the suffix padding added by _padded_split.
Args:
tensors: list of T @{tf.Tensor} of identical length 1D tensors.
tensors: list of T `tf.Tensor` of identical length 1D tensors.
pad_len: number of elements to be stripped from the end of each tensor.
Returns:
list of T @{tf.Tensor} which are the stripped inputs.
list of T `tf.Tensor` which are the stripped inputs.
Raises:
ValueError: tensors must be a non-empty list of 1D tensors, and
@ -161,12 +161,12 @@ def _ragged_split(tensor, pieces):
"""Like split for 1D tensors but allows case where len % pieces != 0.
Args:
tensor: T @{tf.Tensor} that must be 1D.
tensor: T `tf.Tensor` that must be 1D.
pieces: a positive integer specifying the number of pieces into which
tensor should be split.
Returns:
list of T @{tf.Tensor} of length pieces, which hold the values of
list of T `tf.Tensor` of length pieces, which hold the values of
the input tensor, in order. The final tensor may be shorter
than the others, which will all be of equal length.
@ -256,7 +256,7 @@ def build_ring_all_reduce(input_tensors, num_workers, num_subchunks,
"""Construct a subgraph performing a ring-style all-reduce of input_tensors.
Args:
input_tensors: a list of T @{tf.Tensor} objects, which must all
input_tensors: a list of T `tf.Tensor` objects, which must all
have the same shape and type.
num_workers: number of worker tasks spanned by input_tensors.
num_subchunks: number of subchunks each device should process in one tick.
@ -272,7 +272,7 @@ def build_ring_all_reduce(input_tensors, num_workers, num_subchunks,
size.
Returns:
a list of T @{tf.Tensor} identical sum-reductions of input_tensors.
a list of T `tf.Tensor` identical sum-reductions of input_tensors.
"""
if len(input_tensors) < 2:
raise ValueError("input_tensors must be length 2 or longer")
@ -299,7 +299,7 @@ def _build_ring_gather(input_tensors, devices, num_subchunks,
"""Construct a subgraph for the first (reduction) pass of ring all-reduce.
Args:
input_tensors: a list of T @{tf.Tensor} 1D input tensors of same
input_tensors: a list of T `tf.Tensor` 1D input tensors of same
shape and type.
devices: array of device name strings
num_subchunks: number of subchunks each device should process in one tick.
@ -311,7 +311,7 @@ def _build_ring_gather(input_tensors, devices, num_subchunks,
ValueError: tensors must all be one dimensional.
Returns:
list of list of T @{tf.Tensor} of (partially) reduced values where
list of list of T `tf.Tensor` of (partially) reduced values where
exactly num_subchunks chunks at each device are fully reduced.
"""
num_devices = len(input_tensors)
@ -360,11 +360,11 @@ def _apply_unary_to_chunks(f, chunks_by_dev):
"""Apply a unary op to each tensor in chunks_by_dev, on same device.
Args:
f: a unary function over T @{tf.Tensor}.
chunks_by_dev: list of lists of T @{tf.Tensor}.
f: a unary function over T `tf.Tensor`.
chunks_by_dev: list of lists of T `tf.Tensor`.
Returns:
new list of lists of T @{tf.Tensor} with the same structure as
new list of lists of T `tf.Tensor` with the same structure as
chunks_by_dev containing the derived tensors.
"""
output = []
@ -381,14 +381,14 @@ def _build_ring_scatter(pred_by_s_d, rank_by_s_d,
Args:
pred_by_s_d: as produced by _ring_permutations
rank_by_s_d: as produced by _ring_permutations
chunks_by_dev: list of list of T @{tf.Tensor} indexed by ints
chunks_by_dev: list of list of T `tf.Tensor` indexed by ints
(device, chunk)
Raises:
ValueError: chunks_by_dev is not well-formed
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors, one
list of T `tf.Tensor` which are the fully reduced tensors, one
at each device corresponding to the outer dimension of chunks_by_dev.
"""
num_devices = len(chunks_by_dev)
@ -448,12 +448,12 @@ def build_recursive_hd_all_reduce(input_tensors, red_op, un_op=None):
the future with edge-case specific logic.
Args:
input_tensors: list of T @{tf.Tensor} to be elementwise reduced.
input_tensors: list of T `tf.Tensor` to be elementwise reduced.
red_op: a binary elementwise reduction Op.
un_op: an optional unary elementwise Op to apply to reduced values.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors, one
list of T `tf.Tensor` which are the fully reduced tensors, one
at each device of input_tensors.
Raises:
@ -475,13 +475,13 @@ def _build_recursive_hd_gather(input_tensors, devices, red_op):
"""Construct the gather phase of recursive halving-doubling all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} to be elementwise reduced.
input_tensors: list of T `tf.Tensor` to be elementwise reduced.
devices: a list of strings naming the devices hosting input_tensors,
which will also be used to host the (partial) reduction values.
red_op: a binary elementwise reduction Op.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensor shards.
list of T `tf.Tensor` which are the fully reduced tensor shards.
Raises:
ValueError: num_devices not a power of 2, or tensor len not divisible
@ -516,12 +516,12 @@ def _build_recursive_hd_scatter(input_tensors, devices):
"""Construct the scatter phase of recursive halving-doublng all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} that are fully-reduced shards.
input_tensors: list of T `tf.Tensor` that are fully-reduced shards.
devices: a list of strings naming the devices on which the reconstituted
full tensors should be placed.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors.
list of T `tf.Tensor` which are the fully reduced tensors.
"""
num_devices = len(devices)
num_hops = int(math.log(num_devices, 2))
@ -571,7 +571,7 @@ def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None):
un_op: optional elementwise unary Op to be applied to fully-reduced values.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors.
list of T `tf.Tensor` which are the fully reduced tensors.
"""
input_tensors, shape = _flatten_tensors(input_tensors)
dst_devices = [t.device for t in input_tensors]
@ -594,7 +594,7 @@ def _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op=None):
un_op: optional elementwise unary Op to be applied to fully-reduced values.
Returns:
list of T @{tf.Tensor} which are the fully reduced shards.
list of T `tf.Tensor` which are the fully reduced shards.
Raises:
ValueError: inputs not well-formed.
@ -629,7 +629,7 @@ def _build_shuffle_scatter(reduced_shards, dst_devices):
should be reconstituted.
Returns:
list of T @{tf.Tensor} scattered tensors.
list of T `tf.Tensor` scattered tensors.
"""
num_devices = len(dst_devices)
out_tensors = []
@ -644,7 +644,7 @@ def _split_by_task(devices, values):
Args:
devices: list of device name strings
values: list of T @{tf.tensor} of same length as devices.
values: list of T `tf.tensor` of same length as devices.
Returns:
(per_task_devices, per_task_values) where both values are
@ -680,14 +680,14 @@ def build_nccl_all_reduce(input_tensors, red_op, un_op=None):
"""Build a subgraph that does one full all-reduce, using NCCL.
Args:
input_tensors: list of T @{tf.Tensor} of same-shape and type values to
input_tensors: list of T `tf.Tensor` of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator. Must be one of
{tf.add}
un_op: optional unary elementwise Op to apply to fully-reduce values.
Returns:
list of T @{tf.Tensor} of reduced values.
list of T `tf.Tensor` of reduced values.
Raises:
ValueError: red_op not supported.
@ -709,14 +709,14 @@ def _build_nccl_hybrid(input_tensors, red_op, upper_level_f):
"""Construct a subgraph for NCCL hybrid all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} of same-shape and type values to
input_tensors: list of T `tf.Tensor` of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator.
upper_level_f: function for reducing one value per worker, across
workers.
Returns:
list of T @{tf.Tensor} of reduced values.
list of T `tf.Tensor` of reduced values.
Raises:
ValueError: inputs not well-formed.
@ -797,7 +797,7 @@ def _build_shuffle_hybrid(input_tensors, gather_devices, red_op, upper_level_f):
"""Construct a subgraph for Shuffle hybrid all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} of same-shape and type values to
input_tensors: list of T `tf.Tensor` of same-shape and type values to
be reduced.
gather_devices: list of device names on which to host gather shards.
red_op: binary elementwise reduction operator.
@ -805,7 +805,7 @@ def _build_shuffle_hybrid(input_tensors, gather_devices, red_op, upper_level_f):
workers.
Returns:
list of T @{tf.Tensor} of reduced values.
list of T `tf.Tensor` of reduced values.
Raises:
ValueError: inputs not well-formed.

View File

@ -17,8 +17,8 @@
TensorFlow has support for reading from and writing to Cloud Bigtable. To use
TensorFlow + Cloud Bigtable integration, first create a BigtableClient to
configure your connection to Cloud Bigtable, and then create a BigtableTable
object to allow you to create numerous @{tf.data.Dataset}s to read data, or
write a @{tf.data.Dataset} object to the underlying Cloud Bigtable table.
object to allow you to create numerous `tf.data.Dataset`s to read data, or
write a `tf.data.Dataset` object to the underlying Cloud Bigtable table.
For background on Cloud Bigtable, see: https://cloud.google.com/bigtable .
"""
@ -203,7 +203,7 @@ class BigtableTable(object):
be retrieved. If end is None, all subsequent row keys will be retrieved.
Returns:
A @{tf.data.Dataset} containing `tf.string` Tensors corresponding to all
A `tf.data.Dataset` containing `tf.string` Tensors corresponding to all
of the row keys between `start` and `end`.
"""
# TODO(saeta): Make inclusive / exclusive configurable?
@ -219,7 +219,7 @@ class BigtableTable(object):
retrieved.
Returns:
A @{tf.data.Dataset}. containing `tf.string` Tensors corresponding to all
A `tf.data.Dataset`. containing `tf.string` Tensors corresponding to all
of the row keys matching that prefix.
"""
return _BigtablePrefixKeyDataset(self, prefix)
@ -228,11 +228,11 @@ class BigtableTable(object):
"""Retrieves a sampling of row keys from the Bigtable table.
This dataset is most often used in conjunction with
@{tf.contrib.data.parallel_interleave} to construct a set of ranges for
`tf.contrib.data.parallel_interleave` to construct a set of ranges for
scanning in parallel.
Returns:
A @{tf.data.Dataset} returning string row keys.
A `tf.data.Dataset` returning string row keys.
"""
return _BigtableSampleKeysDataset(self)
@ -272,7 +272,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
A @{tf.data.Dataset} returning the row keys and the cell contents.
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@ -317,7 +317,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
A @{tf.data.Dataset} returning the row keys and the cell contents.
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@ -373,7 +373,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
A @{tf.data.Dataset} returning the row keys and the cell contents.
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@ -435,7 +435,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
A @{tf.data.Dataset} returning the row keys and the cell contents.
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@ -450,12 +450,12 @@ class BigtableTable(object):
"""Writes a dataset to the table.
Args:
dataset: A @{tf.data.Dataset} to be written to this table. It must produce
dataset: A `tf.data.Dataset` to be written to this table. It must produce
a list of number-of-columns+1 elements, all of which must be strings.
The first value will be used as the row key, and subsequent values will
be used as cell values for the corresponding columns from the
corresponding column_families and columns entries.
column_families: A @{tf.Tensor} of `tf.string`s corresponding to the
column_families: A `tf.Tensor` of `tf.string`s corresponding to the
column names to store the dataset's elements into.
columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
to store the dataset's elements into.
@ -463,7 +463,7 @@ class BigtableTable(object):
Leave as None to use server-provided timestamps.
Returns:
A @{tf.Operation} that can be run to perform the write.
A `tf.Operation` that can be run to perform the write.
Raises:
ValueError: If there are unexpected or incompatible types, or if the
@ -502,7 +502,7 @@ class BigtableTable(object):
normalized_columns: The column families and column qualifiers to retrieve.
Returns:
A @{tf.data.Dataset} representing the result of the parallel scan.
A `tf.data.Dataset` representing the result of the parallel scan.
"""
if num_parallel_scans is None:
num_parallel_scans = 50

View File

@ -56,7 +56,7 @@ class _CudnnRNN(base_layer.Layer):
Cudnn RNNs have two major differences from other platform-independent RNNs tf
provides:
* Cudnn LSTM and GRU are mathematically different from their tf counterparts.
(e.g. @{tf.contrib.rnn.LSTMBlockCell} and @{tf.nn.rnn_cell.GRUCell}.
(e.g. `tf.contrib.rnn.LSTMBlockCell` and `tf.nn.rnn_cell.GRUCell`.
* Cudnn-trained checkpoints are not directly compatible with tf RNNs:
* They use a single opaque parameter buffer for the entire (possibly)
multi-layer multi-directional RNN; Whereas tf RNN weights are per-cell and
@ -182,7 +182,7 @@ class _CudnnRNN(base_layer.Layer):
dropout: dropout rate, a number between [0, 1]. Dropout is applied between
each layer (no dropout is applied for a model with a single layer).
When set to 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
dtype: tf.float16, tf.float32 or tf.float64
kernel_initializer: starting value to initialize the weight.

View File

@ -61,8 +61,8 @@ _WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME
class CudnnCompatibleLSTMCell(lstm_ops.LSTMBlockCell):
"""Cudnn Compatible LSTMCell.
A simple wrapper around @{tf.contrib.rnn.LSTMBlockCell} to use along with
@{tf.contrib.cudnn_rnn.CudnnLSTM}. The latter's params can be used by
A simple wrapper around `tf.contrib.rnn.LSTMBlockCell` to use along with
`tf.contrib.cudnn_rnn.CudnnLSTM`. The latter's params can be used by
this cell seamlessly.
"""
@ -76,8 +76,8 @@ class CudnnCompatibleLSTMCell(lstm_ops.LSTMBlockCell):
class CudnnCompatibleGRUCell(rnn_cell_impl.GRUCell):
"""Cudnn Compatible GRUCell.
A GRU impl akin to @{tf.nn.rnn_cell.GRUCell} to use along with
@{tf.contrib.cudnn_rnn.CudnnGRU}. The latter's params can be used by
A GRU impl akin to `tf.nn.rnn_cell.GRUCell` to use along with
`tf.contrib.cudnn_rnn.CudnnGRU`. The latter's params can be used by
it seamlessly.
It differs from platform-independent GRUs in how the new memory gate is
@ -97,7 +97,7 @@ class CudnnCompatibleGRUCell(rnn_cell_impl.GRUCell):
$$h_t = (1 - u_t) .* h'_t + u_t .* h_t-1$$
```
Other GRU (see @{tf.nn.rnn_cell.GRUCell} and @{tf.contrib.rnn.GRUBlockCell}):
Other GRU (see `tf.nn.rnn_cell.GRUCell` and `tf.contrib.rnn.GRUBlockCell`):
```python
# new memory gate
\\(h'_t = tanh(x_t * W_h + (r_t .* h_t-1) * R_h + b_{Wh})\\)
@ -891,7 +891,7 @@ def _cudnn_rnn(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -957,7 +957,7 @@ def cudnn_lstm(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -998,7 +998,7 @@ def _cudnn_rnn_no_input_c(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -1040,7 +1040,7 @@ def cudnn_gru(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -1079,7 +1079,7 @@ def cudnn_rnn_relu(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -1119,7 +1119,7 @@ def cudnn_rnn_tanh(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -1161,7 +1161,7 @@ def cudnn_rnn_opaque_params_to_canonical(rnn_mode,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -1224,7 +1224,7 @@ def cudnn_rnn_canonical_to_opaque_params(rnn_mode,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -1282,7 +1282,7 @@ def cudnn_rnn_opaque_params_size(rnn_mode,
'unidirectional' or 'bidirectional'
dtype: one of tf.float32 or tf.float64.
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@ -1349,7 +1349,7 @@ class _CudnnRNN(object):
'unidirectional' or 'bidirectional'
dtype: dtype of params, tf.float32 or tf.float64.
dropout: whether to enable dropout. With it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
Raises:
ValueError: if direction is invalid.

View File

@ -15,7 +15,7 @@
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the @{tf.data.Dataset} API. Note that the
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.contrib.data` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.

View File

@ -185,7 +185,7 @@ def dense_to_sparse_batch(batch_size, row_shape):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@ -401,7 +401,7 @@ def unbatch():
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@ -443,7 +443,7 @@ def unbatch():
def batch_and_drop_remainder(batch_size):
"""A batching transformation that omits the final small batch (if present).
Like @{tf.data.Dataset.batch}, this transformation combines
Like `tf.data.Dataset.batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
drop the final smaller element.
@ -467,7 +467,7 @@ def batch_and_drop_remainder(batch_size):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}
`tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
@ -484,25 +484,25 @@ def padded_batch_and_drop_remainder(batch_size,
padding_values=None):
"""A batching and padding transformation that omits the final small batch.
Like @{tf.data.Dataset.padded_batch}, this transformation combines
Like `tf.data.Dataset.padded_batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
drop the final smaller element.
See `@{tf.contrib.data.batch_and_drop_remainder}` for more details.
See `tf.contrib.data.batch_and_drop_remainder` for more details.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or
`tf.int64` vector tensor-like objects. See
@{tf.data.Dataset.padded_batch} for details.
`tf.data.Dataset.padded_batch` for details.
padding_values: (Optional.) A nested structure of scalar-shaped
`tf.Tensor`. See @{tf.data.Dataset.padded_batch} for details.
`tf.Tensor`. See `tf.data.Dataset.padded_batch` for details.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}
`tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
@ -661,7 +661,7 @@ def assert_element_shape(expected_shapes):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}
`tf.data.Dataset.apply`
"""
def _check_shape(*elements):
@ -760,7 +760,7 @@ def map_and_batch(map_func,
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are

View File

@ -47,7 +47,7 @@ def enumerate_dataset(start=0):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):

View File

@ -42,7 +42,7 @@ def ignore_errors():
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):

View File

@ -29,8 +29,8 @@ from tensorflow.python.ops import gen_dataset_ops
def get_single_element(dataset):
"""Returns the single element in `dataset` as a nested structure of tensors.
This function enables you to use a @{tf.data.Dataset} in a stateless
"tensor-in tensor-out" expression, without creating a @{tf.data.Iterator}.
This function enables you to use a `tf.data.Dataset` in a stateless
"tensor-in tensor-out" expression, without creating a `tf.data.Iterator`.
This can be useful when your preprocessing transformations are expressed
as a `Dataset`, and you want to use the transformation at serving time.
For example:
@ -50,10 +50,10 @@ def get_single_element(dataset):
```
Args:
dataset: A @{tf.data.Dataset} object containing a single element.
dataset: A `tf.data.Dataset` object containing a single element.
Returns:
A nested structure of @{tf.Tensor} objects, corresponding to the single
A nested structure of `tf.Tensor` objects, corresponding to the single
element of `dataset`.
Raises:
@ -77,11 +77,11 @@ def reduce_dataset(dataset, reducer):
"""Returns the result of reducing the `dataset` using `reducer`.
Args:
dataset: A @{tf.data.Dataset} object.
reducer: A @{tf.contrib.data.Reducer} object representing the reduce logic.
dataset: A `tf.data.Dataset` object.
reducer: A `tf.contrib.data.Reducer` object representing the reduce logic.
Returns:
A nested structure of @{tf.Tensor} objects, corresponding to the result
A nested structure of `tf.Tensor` objects, corresponding to the result
of reducing `dataset` using `reducer`.
Raises:

View File

@ -50,7 +50,7 @@ def group_by_reducer(key_func, reducer):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@ -92,7 +92,7 @@ def group_by_window(key_func,
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
@ -142,11 +142,11 @@ def bucket_by_sequence_length(element_length_func,
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
@{tf.data.Dataset.padded_batch}. If not provided, will use
`tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
@{tf.data.Dataset.padded_batch}. Defaults to padding with 0.
`tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
@ -155,7 +155,7 @@ def bucket_by_sequence_length(element_length_func,
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.

View File

@ -42,7 +42,7 @@ def parallel_interleave(map_func,
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
@{tf.data.Dataset.interleave}, it gets elements from `cycle_length` nested
`tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
@ -79,7 +79,7 @@ def parallel_interleave(map_func,
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
@ -138,7 +138,7 @@ def sloppy_interleave(map_func, cycle_length, block_length=1):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
@ -196,15 +196,15 @@ def sample_from_datasets(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
datasets: A list of @{tf.data.Dataset} objects with compatible structure.
datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
sampled from `datasets[i]`, or a @{tf.data.Dataset} object where each
sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
@ -262,8 +262,8 @@ def choose_from_datasets(datasets, choice_dataset):
```
Args:
datasets: A list of @{tf.data.Dataset} objects with compatible structure.
choice_dataset: A @{tf.data.Dataset} of scalar `tf.int64` tensors between
datasets: A list of `tf.data.Dataset` objects with compatible structure.
choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
Returns:

View File

@ -118,7 +118,7 @@ class CheckpointInputPipelineHook(session_run_hook.SessionRunHook):
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
@{tf.contrib.data.make_saveable_from_iterator} directly to create a
`tf.contrib.data.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS

View File

@ -36,7 +36,7 @@ def assert_next(transformations):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@ -56,7 +56,7 @@ def optimize(optimizations=None):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):

View File

@ -92,7 +92,7 @@ def function_buffering_resource_reset(function_buffer_resource, name=None):
# pylint: disable=protected-access
class _PrefetchToDeviceIterator(object):
"""A replacement for @{tf.data.Iterator} that prefetches to another device.
"""A replacement for `tf.data.Iterator` that prefetches to another device.
Args:
input_dataset: The input dataset
@ -158,7 +158,7 @@ class _PrefetchToDeviceIterator(object):
self._input_dataset)
def get_next(self, name=None):
"""See @{tf.data.Iterator.get_next}."""
"""See `tf.data.Iterator.get_next`."""
self._get_next_call_count += 1
if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)
@ -199,7 +199,7 @@ class _PrefetchToDeviceIterator(object):
class _PrefetchToDeviceEagerIterator(iterator_ops.EagerIterator):
"""A replacement for @{tf.data.Iterator} that prefetches to another device.
"""A replacement for `tf.data.Iterator` that prefetches to another device.
Args:
input_dataset: The input dataset
@ -334,7 +334,7 @@ class _PrefetchToDeviceDataset(dataset_ops.Dataset):
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a @{tf.data.Dataset}, the
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
@ -344,7 +344,7 @@ def prefetch_to_device(device, buffer_size=None):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _PrefetchToDeviceDataset(dataset, device, buffer_size)
@ -361,7 +361,7 @@ def copy_to_device(target_device, source_device="/cpu:0"):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):

View File

@ -234,7 +234,7 @@ def make_tf_record_dataset(
Args:
file_pattern: List of files or patterns of TFRecord file paths.
See @{tf.gfile.Glob} for pattern rules.
See `tf.gfile.Glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
parser_fn: (Optional.) A function accepting string input to parse
@ -340,7 +340,7 @@ def make_csv_dataset(
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See @{tf.gfile.Glob} for pattern rules.
records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV

View File

@ -50,7 +50,7 @@ def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""

View File

@ -151,7 +151,7 @@ class _ScanDataset(dataset_ops.Dataset):
def scan(initial_state, scan_func):
"""A transformation that scans a function across an input dataset.
This transformation is a stateful relative of @{tf.data.Dataset.map}.
This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
@ -166,7 +166,7 @@ def scan(initial_state, scan_func):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _ScanDataset(dataset, initial_state, scan_func)

View File

@ -92,11 +92,11 @@ def shuffle_and_repeat(buffer_size, count=None, seed=None):
indefinitely.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset): # pylint: disable=missing-docstring

View File

@ -109,7 +109,7 @@ def sliding_window_batch(window_size,
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
Raises:
ValueError: if invalid arguments are provided.

View File

@ -29,7 +29,7 @@ class StatsAggregator(object):
"""A stateful resource that aggregates statistics from one or more iterators.
To record statistics, use one of the custom transformation functions defined
in this module when defining your @{tf.data.Dataset}. All statistics will be
in this module when defining your `tf.data.Dataset`. All statistics will be
aggregated by the `StatsAggregator` that is associated with a particular
iterator (see below). For example, to record the total number of bytes
produced by iterating over a dataset:
@ -39,7 +39,7 @@ class StatsAggregator(object):
dataset = dataset.apply(stats_ops.bytes_produced_stats("total_bytes"))
```
To associate a `StatsAggregator` with a @{tf.data.Iterator} object, use
To associate a `StatsAggregator` with a `tf.data.Iterator` object, use
the following pattern:
```python
@ -55,7 +55,7 @@ class StatsAggregator(object):
To get a protocol buffer summary of the currently aggregated statistics,
use the `StatsAggregator.get_summary()` tensor. The easiest way to do this
is to add the returned tensor to the @{tf.GraphKeys.SUMMARIES} collection,
is to add the returned tensor to the `tf.GraphKeys.SUMMARIES` collection,
so that the summaries will be included with any existing summaries.
```python
@ -74,13 +74,13 @@ class StatsAggregator(object):
self._resource = gen_dataset_ops.stats_aggregator_handle()
def get_summary(self):
"""Returns a string @{tf.Tensor} that summarizes the aggregated statistics.
"""Returns a string `tf.Tensor` that summarizes the aggregated statistics.
The returned tensor will contain a serialized @{tf.summary.Summary} protocol
The returned tensor will contain a serialized `tf.summary.Summary` protocol
buffer, which can be used with the standard TensorBoard logging facilities.
Returns:
A scalar string @{tf.Tensor} that summarizes the aggregated statistics.
A scalar string `tf.Tensor` that summarizes the aggregated statistics.
"""
return gen_dataset_ops.stats_aggregator_summary(self._resource)
@ -122,7 +122,7 @@ def set_stats_aggregator(stats_aggregator):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@ -145,7 +145,7 @@ def bytes_produced_stats(tag):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@ -169,7 +169,7 @@ def latency_stats(tag):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@ -192,7 +192,7 @@ def feature_stats(tag):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):

View File

@ -100,6 +100,6 @@ def override_threadpool(dataset, thread_pool):
Returns:
A dataset containing the same values as `dataset`, but which uses
`thread_pool` to compute any of its parallel operations (such as
@{tf.data.Dataset.map}).
`tf.data.Dataset.map`).
"""
return _ThreadPoolDataset(dataset, thread_pool)

View File

@ -38,7 +38,7 @@ def unique():
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):

View File

@ -38,13 +38,13 @@ class TFRecordWriter(object):
argument_dtype=dtypes.string)
def write(self, dataset):
"""Returns a @{tf.Operation} to write a dataset to a file.
"""Returns a `tf.Operation` to write a dataset to a file.
Args:
dataset: a @{tf.data.Dataset} whose elements are to be written to a file
dataset: a `tf.data.Dataset` whose elements are to be written to a file
Returns:
A @{tf.Operation} that, when run, writes contents of `dataset` to a file.
A `tf.Operation` that, when run, writes contents of `dataset` to a file.
"""
if not isinstance(dataset, dataset_ops.Dataset):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")

View File

@ -157,7 +157,7 @@ class CrossTowerOps(object):
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
per_device_value: a PerDevice object.
destinations: the reduction destinations.
@ -181,7 +181,7 @@ class CrossTowerOps(object):
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
value_destination_pairs: a list or a tuple of tuples of PerDevice objects
and destinations. If a destination is None, then the destinations
are set to match the devices of the input PerDevice object.
@ -305,7 +305,7 @@ def _ungroup_and_make_mirrored(grouped_reduced,
cross_tower_utils.aggregate_gradients_using*.
destinations: a list of device strings for returned Mirrored objects.
aggregation: Indicates how a variable will be aggregated. Accepted values
are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
num_between_graph_workers: number of workers in the between-graph
replication.

View File

@ -77,16 +77,16 @@ class ParameterServerStrategy(distribute_lib.DistributionStrategy):
GPUs) even if there is only CPU or one GPU. When defining the `fn`, extra
caution needs to be taken:
1) Always use @{tf.get_variable} instead of @{tf.Variable} which is not able
1) Always use `tf.get_variable` instead of `tf.Variable` which is not able
to refer to the same variable on different towers.
2) It is generally not recommended to open a device scope under the strategy's
scope. A device scope (i.e. calling @{tf.device}) will be merged with or
scope. A device scope (i.e. calling `tf.device`) will be merged with or
override the device for operations but will not change the device for
variables.
3) It is also not recommended to open a colocation scope (i.e. calling
@{tf.colocate_with}) under the strategy's scope. For colocating variables,
`tf.colocate_with`) under the strategy's scope. For colocating variables,
use `distribution.colocate_vars_with` instead. Colocation of ops will possibly
create conflicts of device assignement.
"""

View File

@ -35,7 +35,7 @@ from tensorflow.python.util import nest
# pylint: disable=protected-access
class _PrefetchToDeviceIterator(object):
"""A replacement for @{tf.data.Iterator} that prefetches to another device.
"""A replacement for `tf.data.Iterator` that prefetches to another device.
Args:
input_dataset: The input dataset.
@ -108,7 +108,7 @@ class _PrefetchToDeviceIterator(object):
self._input_dataset)
def get_next(self, name=None):
"""See @{tf.data.Iterator.get_next}."""
"""See `tf.data.Iterator.get_next`."""
self._get_next_call_count += 1
if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)
@ -209,7 +209,7 @@ class _PrefetchToDeviceDataset(dataset_ops.Dataset):
def prefetch_to_devices(devices, buffer_size=None):
"""A transformation that prefetches dataset values to the given `devices`.
NOTE: Although the transformation creates a @{tf.data.Dataset}, the
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
@ -220,7 +220,7 @@ def prefetch_to_devices(devices, buffer_size=None):
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _PrefetchToDeviceDataset(dataset, devices, buffer_size)

View File

@ -28,7 +28,7 @@ class Iterator(iterator_ops.EagerIterator):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset.
NOTE: Unlike the iterator created by the
@{tf.data.Dataset.make_one_shot_iterator} method, this class enables
`tf.data.Dataset.make_one_shot_iterator` method, this class enables
additional experimental functionality, such as prefetching to the GPU.
"""

View File

@ -161,7 +161,7 @@ class Saver(object):
Args:
file_prefix: Path prefix where parameters were previously saved.
Typically obtained from a previous `save()` call, or from
@{tf.train.latest_checkpoint}.
`tf.train.latest_checkpoint`.
"""
with ops.device("/device:CPU:0"):
self._saver.restore(None, file_prefix)

View File

@ -147,7 +147,7 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
details, see @{tf.feature_column.linear_model$linear_model}.
details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are

View File

@ -34,7 +34,7 @@ _VALID_METRIC_FN_ARGS = set(['features', 'labels', 'predictions', 'config'])
def add_metrics(estimator, metric_fn):
"""Creates a new @{tf.estimator.Estimator} which has given metrics.
"""Creates a new `tf.estimator.Estimator` which has given metrics.
Example:
@ -61,7 +61,7 @@ def add_metrics(estimator, metric_fn):
```
Args:
estimator: A @{tf.estimator.Estimator} object.
estimator: A `tf.estimator.Estimator` object.
metric_fn: A function which should obey the following signature:
- Args: can only have following four arguments in any order:
* predictions: Predictions `Tensor` or dict of `Tensor` created by given
@ -79,7 +79,7 @@ def add_metrics(estimator, metric_fn):
function, namely a `(metric_tensor, update_op)` tuple.
Returns:
A new @{tf.estimator.Estimator} which has a union of original metrics with
A new `tf.estimator.Estimator` which has a union of original metrics with
given ones.
"""
_verify_metric_fn_args(metric_fn)
@ -165,14 +165,14 @@ def forward_features(estimator, keys=None):
```
Args:
estimator: A @{tf.estimator.Estimator} object.
estimator: A `tf.estimator.Estimator` object.
keys: a `string` or a `list` of `string`. If it is `None`, all of the
`features` in `dict` is forwarded to the `predictions`. If it is a
`string`, only given key is forwarded. If it is a `list` of strings, all
the given `keys` are forwarded.
Returns:
A new @{tf.estimator.Estimator} which forwards features to predictions.
A new `tf.estimator.Estimator` which forwards features to predictions.
Raises:
ValueError:

View File

@ -121,7 +121,7 @@ class LinearEstimator(estimator.Estimator):
is multivalent. One of "mean", "sqrtn", and "sum" -- these are
effectively different ways to do example-level normalization, which can
be useful for bag-of-words features. for more details, see
@{tf.feature_column.linear_model$linear_model}.
`tf.feature_column.linear_model`.
"""
def _model_fn(features, labels, mode, config):
return linear_lib._linear_model_fn( # pylint: disable=protected-access

View File

@ -158,12 +158,12 @@ class _ModelFn(object):
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See @{tf.estimator.Estimator}.
mode: See @{tf.estimator.Estimator}.
config: See @{tf.estimator.Estimator}.
features: The input points. See `tf.estimator.Estimator`.
mode: See `tf.estimator.Estimator`.
config: See `tf.estimator.Estimator`.
Returns:
A @{tf.estimator.EstimatorSpec} (see @{tf.estimator.Estimator}) specifying
A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
@ -394,7 +394,7 @@ class KMeansClustering(estimator.Estimator):
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See @{tf.estimator.Estimator}.
config: See `tf.estimator.Estimator`.
feature_columns: An optionable iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to `tf.feature_column.input_layer`. If this
@ -431,7 +431,7 @@ class KMeansClustering(estimator.Estimator):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The index of the closest cluster center for each input point.
@ -447,7 +447,7 @@ class KMeansClustering(estimator.Estimator):
which returns the negative sum.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.evaluate}. Only one
input_fn: Input points. See `tf.estimator.Estimator.evaluate`. Only one
batch is retrieved.
Returns:
@ -465,7 +465,7 @@ class KMeansClustering(estimator.Estimator):
sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The distances from each input point to each cluster center.

View File

@ -241,13 +241,13 @@ def variable(name,
use_resource: If `True` use a ResourceVariable instead of a Variable.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
@{tf.VariableSynchronization}. By default the synchronization is set to
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
@{tf.VariableAggregation}.
`tf.VariableAggregation`.
Returns:
The created or existing variable.
@ -320,13 +320,13 @@ def model_variable(name,
use_resource: If `True` use a ResourceVariable instead of a Variable.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
@{tf.VariableSynchronization}. By default the synchronization is set to
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
@{tf.VariableAggregation}.
`tf.VariableAggregation`.
Returns:
The created or existing variable.

View File

@ -112,10 +112,10 @@ def sparse_image_warp(image,
Apply a non-linear warp to the image, where the warp is specified by
the source and destination locations of a (potentially small) number of
control points. First, we use a polyharmonic spline
(@{tf.contrib.image.interpolate_spline}) to interpolate the displacements
(`tf.contrib.image.interpolate_spline`) to interpolate the displacements
between the corresponding control points to a dense flow field.
Then, we warp the image using this dense flow field
(@{tf.contrib.image.dense_image_warp}).
(`tf.contrib.image.dense_image_warp`).
Let t index our control points. For regularization_weight=0, we have:
warped_image[b, dest_control_point_locations[b, t, 0],
@ -126,7 +126,7 @@ def sparse_image_warp(image,
For regularization_weight > 0, this condition is met approximately, since
regularized interpolation trades off smoothness of the interpolant vs.
reconstruction of the interpolant at the control points.
See @{tf.contrib.image.interpolate_spline} for further documentation of the
See `tf.contrib.image.interpolate_spline` for further documentation of the
interpolation_order and regularization_weight arguments.

View File

@ -15,7 +15,7 @@
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
This module an alias for @{tf.keras}, for backwards compatibility.
This module an alias for `tf.keras`, for backwards compatibility.
Detailed documentation and user guides are also available at
[keras.io](https://keras.io).

View File

@ -21,13 +21,15 @@ Currently, there is a [RandomFourierFeatureMapper](https://www.tensorflow.org/co
output. More mappers are on the way.
## Kernel-based Estimators
These are estimators inheriting from the @{tf.contrib.learn.Estimator} class and
use kernel mappers internally to discover non-linearities in the data. These
canned estimators map their input features using kernel mapper Ops and then
apply linear models to the mapped features. Combining kernel mappers with linear
models and different loss functions leads to a variety of models: linear and
non-linear SVMs, linear regression (with and without kernels) and (multinomial)
logistic regression (with and without kernels).
These estimators inherit from the
[`tf.contrib.learn.Estimator`](https://www.tensorflow.org/code/tensorflow/contrib/learn/python/learn/estimators/estimator.py)
class and use kernel mappers internally to discover non-linearities in the
data. These canned estimators map their input features using kernel mapper
Ops and then apply linear models to the mapped features. Combining kernel
mappers with linear models and different loss functions leads to a variety of
models: linear and non-linear SVMs, linear regression (with and without
kernels) and (multinomial) logistic regression (with and without kernels).
Currently there is a [KernelLinearClassifier](https://www.tensorflow.org/code/tensorflow/contrib/kernel_methods/python/kernel_estimators.py) implemented but more pre-packaged estimators
are on the way.

View File

@ -47,7 +47,7 @@ def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
Args:
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
@ -98,7 +98,7 @@ def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'.
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:

View File

@ -1584,7 +1584,7 @@ def dropout(inputs,
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
Returns:
A tensor representing the output of the operation.

View File

@ -15,9 +15,9 @@
"""Implementation of k-means clustering on top of `Estimator` API (deprecated).
This module is deprecated. Please use
@{tf.contrib.factorization.KMeansClustering} instead of
@{tf.contrib.learn.KMeansClustering}. It has a similar interface, but uses the
@{tf.estimator.Estimator} API instead of @{tf.contrib.learn.Estimator}.
`tf.contrib.factorization.KMeansClustering` instead of
`tf.contrib.learn.KMeansClustering`. It has a similar interface, but uses the
`tf.estimator.Estimator` API instead of `tf.contrib.learn.Estimator`.
"""
from __future__ import absolute_import

View File

@ -221,7 +221,7 @@ class ClusterConfig(object):
class RunConfig(ClusterConfig, core_run_config.RunConfig):
"""This class specifies the configurations for an `Estimator` run.
This class is a deprecated implementation of @{tf.estimator.RunConfig}
This class is a deprecated implementation of `tf.estimator.RunConfig`
interface.
"""
_USE_DEFAULT = 0

View File

@ -162,16 +162,16 @@ class Experiment(object):
Args:
estimator: Object implementing Estimator interface, which could be a
combination of @{tf.contrib.learn.Trainable} and
@{tf.contrib.learn.Evaluable} (deprecated), or
@{tf.estimator.Estimator}.
combination of `tf.contrib.learn.Trainable` and
`tf.contrib.learn.Evaluable` (deprecated), or
`tf.estimator.Estimator`.
train_input_fn: function, returns features and labels for training.
eval_input_fn: function, returns features and labels for evaluation. If
`eval_steps` is `None`, this should be configured only to produce for a
finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used. This should be `None` if the `estimator` is
@{tf.estimator.Estimator}. If metrics are provided they will be
`tf.estimator.Estimator`. If metrics are provided they will be
*appended* to the default set.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.

View File

@ -415,7 +415,7 @@ def make_export_strategy(serving_input_fn,
`InputFnOps`.
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
Must be `None` if the estimator inherits from @{tf.estimator.Estimator}
Must be `None` if the estimator inherits from `tf.estimator.Estimator`
or for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
@ -453,7 +453,7 @@ def make_export_strategy(serving_input_fn,
The string path to the exported directory.
Raises:
ValueError: If `estimator` is a @{tf.estimator.Estimator} instance
ValueError: If `estimator` is a `tf.estimator.Estimator` instance
and `default_output_alternative_key` was specified.
"""
if isinstance(estimator, core_estimator.Estimator):
@ -504,7 +504,7 @@ def make_parsing_export_strategy(feature_columns,
that must be provided at serving time (excluding labels!).
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
Must be `None` if the estimator inherits from @{tf.estimator.Estimator}
Must be `None` if the estimator inherits from `tf.estimator.Estimator`
or for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
@ -767,7 +767,7 @@ def extend_export_strategy(base_export_strategy,
The string path to the SavedModel indicated by post_export_fn.
Raises:
ValueError: If `estimator` is a @{tf.estimator.Estimator} instance
ValueError: If `estimator` is a `tf.estimator.Estimator` instance
and `default_output_alternative_key` was specified or if post_export_fn
does not return a valid directory.
RuntimeError: If unable to create temporary or final export directory.

View File

@ -54,7 +54,7 @@ def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
"""Convert `input_data_str` according to model and toco parameters.
Unless you know what you are doing consider using
the more friendly @{tf.contrib.lite.toco_convert}}.
the more friendly `tf.contrib.lite.toco_convert`.
Args:
model_flags_str: Serialized proto describing model properties, see

View File

@ -41,12 +41,12 @@ class LossScaleManager(object):
applied on variables.
This class is used together with
@{tf.contrib.mixed_precision.LossScaleOptimizer} for mixed precision training
`tf.contrib.mixed_precision.LossScaleOptimizer` for mixed precision training
(float32 variables and float16 ops) on Nvidia GPUs in order to achieve the
same model quality as single precision training, with the benefits of
potential higher throughput.
See @{tf.contrib.mixed_precision.LossScaleOptimizer} for more details.
See `tf.contrib.mixed_precision.LossScaleOptimizer` for more details.
"""
@abc.abstractmethod
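
A rough sketch of how a loss-scale manager and the wrapping optimizer fit together. This assumes `FixedLossScaleManager` is one of the managers shipped in this contrib package; the fixed scale of 128 and the toy loss are arbitrary illustrations.

```python
import tensorflow as tf

# A toy loss, just so the sketch builds end to end.
x = tf.random_normal([32, 10])
w = tf.get_variable('w', [10, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

# A constant loss scale; an exponential-update manager adjusts the scale
# automatically when gradient overflows are detected.
loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(128)
opt = tf.contrib.mixed_precision.LossScaleOptimizer(
    tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9),
    loss_scale_manager)

# compute_gradients scales the loss up before differentiation and scales the
# gradients back down; apply_gradients skips the update on non-finite grads.
train_op = opt.minimize(loss)
```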

View File

@ -103,7 +103,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be an implementation of the @{tf.train.Optimizer}
gradients. Must be an implementation of the `tf.train.Optimizer`
interface.
loss_scale_manager: A LossScaleManager object.
"""
@ -117,7 +117,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients. See base class @{tf.train.Optimizer}."""
"""Compute gradients. See base class `tf.train.Optimizer`."""
loss_scale = self._loss_scale_manager.get_loss_scale()
if context.executing_eagerly():
@ -141,7 +141,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
return self._down_scale(grads_and_vars, loss_scale)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients. See base class @{tf.train.Optimizer}."""
"""Apply gradients. See base class `tf.train.Optimizer`."""
grads = [g for (g, _) in grads_and_vars]
is_finite_grad = []

View File

@ -48,7 +48,7 @@ class MaskedBasicLSTMCell(tf_rnn.BasicLSTMCell):
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`
that follows.
"""

View File

@ -43,7 +43,7 @@ def alpha_dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylin
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
name: A name for this operation (optional).
Returns:

View File

@ -123,15 +123,15 @@ def rank_sampled_softmax_loss(weights,
"""Computes softmax loss using rank-based adaptive resampling.
This has been shown to improve rank loss after training compared to
@{tf.nn.sampled_softmax_loss}. For a description of the algorithm and some
`tf.nn.sampled_softmax_loss`. For a description of the algorithm and some
experimental results, please see: [TAPAS: Two-pass Approximate Adaptive
Sampling for Softmax](https://arxiv.org/abs/1707.03073).
Sampling follows two phases:
* In the first phase, `num_sampled` classes are selected using
@{tf.nn.learned_unigram_candidate_sampler} or supplied `sampled_values`.
`tf.nn.learned_unigram_candidate_sampler` or supplied `sampled_values`.
The logits are calculated on those sampled classes. This phases is
similar to @{tf.nn.sampled_softmax_loss}.
similar to `tf.nn.sampled_softmax_loss`.
* In the second phase, the `num_resampled` classes with highest predicted
probability are kept. Probabilities are
`LogSumExp(logits / resampling_temperature)`, where the sum is over
@ -142,7 +142,7 @@ def rank_sampled_softmax_loss(weights,
picks more candidates close to the predicted classes. A common strategy is
to decrease the temperature as training proceeds.
See @{tf.nn.sampled_softmax_loss} for more documentation on sampling and
See `tf.nn.sampled_softmax_loss` for more documentation on sampling and
for typical default values for some of the parameters.
This operation is for training only. It is generally an underestimate of
@ -197,7 +197,7 @@ def rank_sampled_softmax_loss(weights,
where a sampled class equals one of the target classes.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
See @{tf.nn.embedding_lookup} for more details.
See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
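
A hedged sketch of a call, assuming the argument names visible in the hunks above and a layout analogous to `tf.nn.sampled_softmax_loss`; shapes and hyperparameters are illustrative only.

```python
import tensorflow as tf

num_classes, dim, batch = 10000, 128, 32
weights = tf.get_variable('out_w', [num_classes, dim])
biases = tf.get_variable('out_b', [num_classes])
inputs = tf.random_normal([batch, dim])
labels = tf.random_uniform([batch, 1], maxval=num_classes, dtype=tf.int64)

loss = tf.contrib.nn.rank_sampled_softmax_loss(
    weights=weights, biases=biases, labels=labels, inputs=inputs,
    num_sampled=1000,       # classes drawn in the first (sampling) phase
    num_resampled=100,      # classes kept after adaptive resampling
    num_classes=num_classes, num_true=1,
    sampled_values=None, resampling_temperature=1.0,
    remove_accidental_hits=True, partition_strategy='div')
```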

View File

@ -3276,7 +3276,7 @@ class IndyLSTMCell(rnn_cell_impl.LayerRNNCell):
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`
that follows.
TODO(gonnet): Write a paper describing this and add a reference here.

View File

@ -382,8 +382,8 @@ class LuongAttention(_BaseAttentionMechanism):
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
probabilities. The default is `tf.nn.softmax`. Other options include
`tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
@ -529,8 +529,8 @@ class BahdanauAttention(_BaseAttentionMechanism):
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
probabilities. The default is `tf.nn.softmax`. Other options include
`tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
@ -1091,7 +1091,7 @@ class AttentionWrapper(rnn_cell_impl.RNNCell):
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a

View File

@ -234,7 +234,7 @@ class BeamSearchDecoder(decoder.Decoder):
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a

View File

@ -27,15 +27,15 @@ def grappler_optimize(graph, fetches=None, rewriter_config=None):
"""Tries to optimize the provided graph using grappler.
Args:
graph: A @{tf.Graph} instance containing the graph to optimize.
graph: A `tf.Graph` instance containing the graph to optimize.
fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
Grappler uses the 'train_op' collection to look for fetches, so if not
provided this collection should be non-empty.
rewriter_config: An optional @{tf.RewriterConfig} to use when rewriting the
rewriter_config: An optional `tf.RewriterConfig` to use when rewriting the
graph.
Returns:
A @{tf.GraphDef} containing the rewritten graph.
A `tf.GraphDef` containing the rewritten graph.
"""
if rewriter_config is None:
rewriter_config = rewriter_config_pb2.RewriterConfig()

View File

@ -108,7 +108,7 @@ def linear_to_mel_weight_matrix(num_mel_bins=20,
# `M` has shape [frames, num_mel_bins]
M = tf.matmul(S, A)
The matrix can be used with @{tf.tensordot} to convert an arbitrary rank
The matrix can be used with `tf.tensordot` to convert an arbitrary rank
`Tensor` of linear-scale spectral bins into the mel scale.
# S has shape [..., num_spectrogram_bins].

View File

@ -17,7 +17,7 @@
The operations in this package are safe to use with eager execution turned on or
off. It has a more flexible API that allows summaries to be written directly
from ops to places other than event log files, rather than propagating protos
from @{tf.summary.merge_all} to @{tf.summary.FileWriter}.
from `tf.summary.merge_all` to `tf.summary.FileWriter`.
To use with eager execution enabled, write your code as follows:
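
The diff cuts the docstring's snippet off at this point. A minimal sketch of the usual pattern, assuming the `tf.contrib.summary` API of this release and an arbitrary log directory:

```python
import tensorflow as tf
tf.enable_eager_execution()

writer = tf.contrib.summary.create_file_writer('/tmp/summaries')
global_step = tf.train.get_or_create_global_step()

with writer.as_default(), tf.contrib.summary.always_record_summaries():
  for step in range(10):
    global_step.assign_add(1)
    # Written straight to the event file; no merge_all or FileWriter needed.
    tf.contrib.summary.scalar('loss', 0.1 * step)
```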

View File

@ -224,7 +224,7 @@ class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=prote
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
@ -247,7 +247,7 @@ class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=prote
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with @{tf.contrib.summary.create_file_writer}.
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,

View File

@ -156,7 +156,7 @@ def prepend_from_queue_and_padded_batch_dataset(batch_size,
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):

View File

@ -27,7 +27,7 @@ slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in @{tf.gather} `indices` defines slices into the first
Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
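
A small worked example of the indexing behaviour described here (values are illustrative):

```python
import tensorflow as tf

params = tf.constant([[1, 2], [3, 4], [5, 6]])

# tf.gather selects along the first dimension only: rows 2 and 0.
rows = tf.gather(params, [2, 0])                # [[5, 6], [1, 2]]

# tf.gather_nd indexes the first N dimensions, N = indices.shape[-1].
elems = tf.gather_nd(params, [[2, 1], [0, 0]])  # [6, 1]

with tf.Session() as sess:
  print(sess.run([rows, elems]))
```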

View File

@ -63,7 +63,7 @@ The resulting update to ref would look like this:
[1, 12, 3, 14, 14, 6, 7, 20]
See @{tf.scatter_nd} for more details about how to make updates to
See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}

View File

@ -63,7 +63,7 @@ The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See @{tf.scatter_nd} for more details about how to make updates to
See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}

View File

@ -30,7 +30,7 @@ END
Creates a new tensor by applying sparse `updates` to individual values or
slices within a tensor (initially zero for numeric, empty for string) of
the given `shape` according to indices. This operator is the inverse of the
@{tf.gather_nd} operator which extracts values or slices from a given tensor.
`tf.gather_nd` operator which extracts values or slices from a given tensor.
If `indices` contains duplicates, then their updates are accumulated (summed).
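
For illustration, a minimal example of building a dense tensor from sparse updates, the inverse of the gather above; duplicate indices would be summed:

```python
import tensorflow as tf

indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
shape = tf.constant([8])

# Produces [0, 11, 0, 10, 9, 0, 0, 12].
scattered = tf.scatter_nd(indices, updates, shape)

with tf.Session() as sess:
  print(sess.run(scattered))
```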

View File

@ -66,7 +66,7 @@ The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See @{tf.scatter_nd} for more details about how to make updates to
See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}

View File

@ -61,6 +61,6 @@ The resulting value `output` would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See @{tf.scatter_nd} for more details about how to make updates to slices.
See `tf.scatter_nd` for more details about how to make updates to slices.
END
}

View File

@ -66,7 +66,7 @@ The resulting update to ref would look like this:
[1, -9, 3, -6, -4, 6, 7, -4]
See @{tf.scatter_nd} for more details about how to make updates to
See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}

View File

@ -68,7 +68,7 @@ The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See @{tf.scatter_nd} for more details about how to make updates to
See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}

View File

@ -22,12 +22,12 @@ When authoring docs, note that we have some new syntax for references --
at least for docs coming from Python docstrings or
tensorflow/docs_src/. Use:
* @{tf.symbol} to make a link to the reference page for a Python
* `tf.symbol` to make a link to the reference page for a Python
symbol. Note that class members don't get their own page, but the
syntax still works, since @{tf.MyClass.method} links to the right
syntax still works, since `tf.MyClass.method` links to the right
part of the tf.MyClass page.
* @{tensorflow::symbol} to make a link to the reference page for a C++
* `tensorflow::symbol` to make a link to the reference page for a C++
symbol. (This only works for a few symbols but will work for more soon.)
* @{$doc_page} to make a link to another (not an API reference) doc

View File

@ -724,7 +724,7 @@ class BaseSession(SessionInterface):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
@{tf.Operation.run} or @{tf.Tensor.eval} should be executed in
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
@ -736,7 +736,7 @@ class BaseSession(SessionInterface):
print(c.eval())
```
To get the current default session, use @{tf.get_default_session}.
To get the current default session, use `tf.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
@ -765,7 +765,7 @@ class BaseSession(SessionInterface):
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of @{tf.get_default_graph},
`sess.graph` is different from the value of `tf.get_default_graph`,
you must explicitly enter a `with sess.graph.as_default():` block
to make `sess.graph` the default graph.
@ -786,14 +786,14 @@ class BaseSession(SessionInterface):
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* An @{tf.Operation}.
* An `tf.Operation`.
The corresponding fetched value will be `None`.
* A @{tf.Tensor}.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A @{tf.SparseTensor}.
* A `tf.SparseTensor`.
The corresponding fetched value will be a
@{tf.SparseTensorValue}
`tf.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
@ -829,16 +829,16 @@ class BaseSession(SessionInterface):
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a @{tf.Tensor}, the
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
@{tf.placeholder}, the shape of
`tf.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
@{tf.SparseTensor},
`tf.SparseTensor`,
the value should be a
@{tf.SparseTensorValue}.
`tf.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
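
A short sketch combining the fetch and feed types listed above (values are illustrative):

```python
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2])
w = tf.Variable([2.0, 3.0])
y = x * w
init = tf.global_variables_initializer()

with tf.Session() as sess:
  sess.run(init)                      # fetching an Operation returns None
  # Fetches may be nested structures; feed keys may be placeholders.
  out = sess.run({'y': y, 'w': w}, feed_dict={x: np.array([1.0, 4.0])})
  print(out['y'])                     # [ 2. 12.]
```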
@ -1120,7 +1120,7 @@ class BaseSession(SessionInterface):
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
@{tf.Session.run} for details of the allowable feed key and value types.
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
@ -1128,14 +1128,14 @@ class BaseSession(SessionInterface):
it will return `None`.
Args:
fetches: A value or list of values to fetch. See @{tf.Session.run}
fetches: A value or list of values to fetch. See `tf.Session.run`
for details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See
@{tf.Session.run} for details of the allowable feed key types.
`tf.Session.run` for details of the allowable feed key types.
accept_options: (Optional.) Iff `True`, the returned `Callable` will be
able to accept @{tf.RunOptions} and @{tf.RunMetadata} as optional
able to accept `tf.RunOptions` and `tf.RunMetadata` as optional
keyword arguments `options` and `run_metadata`, respectively, with
the same syntax and semantics as @{tf.Session.run}, which is useful
the same syntax and semantics as `tf.Session.run`, which is useful
for certain use cases (profiling and debugging) but will result in
measurable slowdown of the `Callable`'s performance. Default: `False`.
@ -1145,7 +1145,7 @@ class BaseSession(SessionInterface):
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to @{tf.Session.run}.
as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
@ -1453,10 +1453,10 @@ class Session(BaseSession):
```
A session may own resources, such as
@{tf.Variable}, @{tf.QueueBase},
and @{tf.ReaderBase}. It is important to release
`tf.Variable`, `tf.QueueBase`,
and `tf.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the @{tf.Session.close} method on the session, or use
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
@ -1592,8 +1592,8 @@ class InteractiveSession(BaseSession):
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods @{tf.Tensor.eval}
and @{tf.Operation.run}
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython

View File

@ -222,7 +222,7 @@ class Dataset(object):
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
@{tf.constant} operations. For large datasets (> 1 GB), this can waste
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If tensors contains
one or more large NumPy arrays, consider the alternative described in
@{$guide/datasets#consuming_numpy_arrays$this guide}.
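
A sketch of the placeholder-based alternative referenced above: the array is fed when the iterator is initialized instead of being baked into the graph as `tf.constant`s (array sizes are illustrative):

```python
import numpy as np
import tensorflow as tf

features = np.random.rand(10000, 100).astype(np.float32)

features_placeholder = tf.placeholder(features.dtype, features.shape)
dataset = tf.data.Dataset.from_tensor_slices(features_placeholder).batch(32)
iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()

with tf.Session() as sess:
  sess.run(iterator.initializer, feed_dict={features_placeholder: features})
  print(sess.run(next_batch).shape)  # (32, 100)
```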
@ -241,7 +241,7 @@ class Dataset(object):
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
@{tf.constant} operations. For large datasets (> 1 GB), this can waste
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If tensors contains
one or more large NumPy arrays, consider the alternative described in
@{$guide/datasets#consuming_numpy_arrays$this guide}.
@ -331,7 +331,7 @@ class Dataset(object):
```
NOTE: The current implementation of `Dataset.from_generator()` uses
@{tf.py_func} and inherits the same constraints. In particular, it
`tf.py_func` and inherits the same constraints. In particular, it
requires the `Dataset`- and `Iterator`-related operations to be placed
on a device in the same process as the Python program that called
`Dataset.from_generator()`. The body of `generator` will not be
@ -641,7 +641,7 @@ class Dataset(object):
Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
@ -706,7 +706,7 @@ class Dataset(object):
dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
@ -863,7 +863,7 @@ class Dataset(object):
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like @{tf.data.Dataset.batch}, the tensors in the resulting element will
Like `tf.data.Dataset.batch`, the tensors in the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
@ -871,7 +871,7 @@ class Dataset(object):
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike @{tf.data.Dataset.batch}, the input elements to be batched may have
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padding_shapes`. The `padding_shapes` argument
determines the resulting shape for each dimension of each component in an
@ -883,8 +883,8 @@ class Dataset(object):
will be padded out to the maximum length of all elements in that
dimension.
See also @{tf.contrib.data.dense_to_sparse_batch}, which combines elements
that may have different shapes into a @{tf.SparseTensor}.
See also `tf.contrib.data.dense_to_sparse_batch`, which combines elements
that may have different shapes into a `tf.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
@ -1039,7 +1039,7 @@ class Dataset(object):
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
identical results = to @{tf.data.Dataset.flat_map}. In general,
identical results = to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them
producing `block_length` consecutive elements from each iterator, and
@ -1306,7 +1306,7 @@ class _NestedDatasetComponent(object):
class _VariantDataset(Dataset):
"""A Dataset wrapper around a @{tf.variant}-typed function argument."""
"""A Dataset wrapper around a `tf.variant`-typed function argument."""
def __init__(self, dataset_variant, structure):
super(_VariantDataset, self).__init__()
@ -1342,20 +1342,20 @@ class StructuredFunctionWrapper(object):
func: A function from a nested structure to another nested structure.
transformation_name: Human-readable name of the transformation in which
this function is being instantiated, for error messages.
dataset: (Optional.) A @{tf.data.Dataset}. If given, the structure of this
dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
dataset will be assumed as the structure for `func` arguments; otherwise
`input_classes`, `input_shapes`, and `input_types` must be defined.
input_classes: (Optional.) A nested structure of `type`. If given, this
argument defines the Python types for `func` arguments.
input_shapes: (Optional.) A nested structure of @{tf.TensorShape}. If
input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
given, this argument defines the shapes and structure for `func`
arguments.
input_types: (Optional.) A nested structure of @{tf.DType}. If given, this
input_types: (Optional.) A nested structure of `tf.DType`. If given, this
argument defines the element types and structure for `func` arguments.
add_to_graph: (Optional.) If `True`, the function will be added to the
default graph.
experimental_nested_dataset_support: (Optional.) If `True`, the function
will support @{tf.data.Dataset} objects as arguments and return values.
will support `tf.data.Dataset` objects as arguments and return values.
Raises:
ValueError: If an invalid combination of `dataset`, `input_classes`,
@ -1478,7 +1478,7 @@ class StructuredFunctionWrapper(object):
self._function._create_definition_if_needed() # pylint: disable=protected-access
def _defun_args(self):
"""Returns a flat list of @{tf.DType} for the input element structure."""
"""Returns a flat list of `tf.DType` for the input element structure."""
ret = []
for input_type, input_class in zip(nest.flatten(self._input_types),
nest.flatten(self._input_classes)):
@ -1523,7 +1523,7 @@ def flat_structure(dataset):
`**flat_structure(self)` to the op constructor.
Args:
dataset: A @{tf.data.Dataset}.
dataset: A `tf.data.Dataset`.
Returns:
A dictionary of keyword arguments that can be passed to many Dataset op
@ -1846,7 +1846,7 @@ class ShuffleDataset(Dataset):
dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
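
To tie the shuffle and padded-batch behaviour described above together, a small self-contained sketch (sequences, seed, and batch size are illustrative):

```python
import tensorflow as tf

# Variable-length integer sequences.
sequences = [[1, 2], [3, 4, 5], [6]]
dataset = tf.data.Dataset.from_generator(
    lambda: iter(sequences), output_types=tf.int32, output_shapes=[None])

dataset = dataset.shuffle(buffer_size=3, seed=42)   # reshuffled each epoch
# Each component is padded to the longest element in its batch.
dataset = dataset.padded_batch(batch_size=2, padded_shapes=[None])

next_batch = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
  print(sess.run(next_batch))  # e.g. [[3 4 5] [1 2 0]]
```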

View File

@ -220,9 +220,9 @@ class Iterator(checkpointable.CheckpointableBase):
"""Creates a new, uninitialized `Iterator` based on the given handle.
This method allows you to define a "feedable" iterator where you can choose
between concrete iterators by feeding a value in a @{tf.Session.run} call.
In that case, `string_handle` would be a @{tf.placeholder}, and you would
feed it with the value of @{tf.data.Iterator.string_handle} in each step.
between concrete iterators by feeding a value in a `tf.Session.run` call.
In that case, `string_handle` would be a `tf.placeholder`, and you would
feed it with the value of `tf.data.Iterator.string_handle` in each step.
For example, if you had two iterators that marked the current position in
a training dataset and a test dataset, you could choose which to use in
@ -362,9 +362,9 @@ class Iterator(checkpointable.CheckpointableBase):
In graph mode, you should typically call this method *once* and use its
result as the input to another computation. A typical loop will then call
@{tf.Session.run} on the result of that computation. The loop will terminate
`tf.Session.run` on the result of that computation. The loop will terminate
when the `Iterator.get_next()` operation raises
@{tf.errors.OutOfRangeError}. The following skeleton shows how to use
`tf.errors.OutOfRangeError`. The following skeleton shows how to use
this method when building a training loop:
```python
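# (The diff truncates the docstring's skeleton here; a minimal, self-contained
# sketch of the loop described above.)
dataset = tf.data.Dataset.range(5)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()   # call get_next() once, outside the loop

with tf.Session() as sess:
  while True:
    try:
      print(sess.run(next_element))
    except tf.errors.OutOfRangeError:
      break
```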

View File

@ -33,8 +33,8 @@ class Optional(object):
An `Optional` can represent the result of an operation that may fail as a
value, rather than raising an exception and halting execution. For example,
@{tf.contrib.data.get_next_as_optional} returns an `Optional` that either
contains the next value from a @{tf.data.Iterator} if one exists, or a "none"
`tf.contrib.data.get_next_as_optional` returns an `Optional` that either
contains the next value from a `tf.data.Iterator` if one exists, or a "none"
value that indicates the end of the sequence has been reached.
"""
@ -55,7 +55,7 @@ class Optional(object):
"""Returns a nested structure of values wrapped by this optional.
If this optional does not have a value (i.e. `self.has_value()` evaluates
to `False`), this operation will raise @{tf.errors.InvalidArgumentError}
to `False`), this operation will raise `tf.errors.InvalidArgumentError`
at runtime.
Args:

View File

@ -36,11 +36,11 @@ def optional_param_to_tensor(argument_name,
def partial_shape_to_tensor(shape_like):
"""Returns a @{tf.Tensor} that represents the given shape.
"""Returns a `tf.Tensor` that represents the given shape.
Args:
shape_like: A value that can be converted to a @{tf.TensorShape} or a
@{tf.Tensor}.
shape_like: A value that can be converted to a `tf.TensorShape` or a
`tf.Tensor`.
Returns:
A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where

View File

@ -29,14 +29,14 @@ from tensorflow.python.ops import math_ops
def get_seed(seed):
"""Returns the local seeds an operation should use given an op-specific seed.
See @{tf.get_seed} for more details. This wrapper adds support for the case
See `tf.get_seed` for more details. This wrapper adds support for the case
where `seed` may be a tensor.
Args:
seed: An integer or a @{tf.int64} scalar tensor.
seed: An integer or a `tf.int64` scalar tensor.
Returns:
A tuple of two @{tf.int64} scalar tensors that should be used for the local
A tuple of two `tf.int64` scalar tensors that should be used for the local
seed of the calling dataset.
"""
seed, seed2 = random_seed.get_seed(seed)

View File

@ -69,7 +69,7 @@ class GradientsDebugger(object):
"""Gradients Debugger.
Allows retrieval of gradient tensors created by TensorFlow's automatic
differentiation algorithm, i.e., @{tf.gradients} and optimizer classes that
differentiation algorithm, i.e., `tf.gradients` and optimizer classes that
use it.
"""
# TODO(cais): Add examples code in the doc string?
@ -142,8 +142,8 @@ class GradientsDebugger(object):
Args:
input_tensor: the input `tf.Tensor` object whose related gradient tensors
are to be reigstered with this `GradientsDebugger` instance when they
are created, e.g., during @{tf.gradients} calls or the construction
of optimization (training) op that uses @{tf.gradients}.
are created, e.g., during `tf.gradients` calls or the construction
of optimization (training) op that uses `tf.gradients`.
Returns:
A forwarded identity of `input_tensor`, as a `tf.Tensor`.

View File

@ -45,7 +45,7 @@ class DumpingDebugWrapperSession(framework.NonInteractiveDebugWrapperSession):
session_root: (`str`) Path to the session root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
@{tf.Session.run}
`tf.Session.run`
calls.
As the `run()` calls occur, subdirectories will be added to
`session_root`. The subdirectories' names has the following pattern:

View File

@ -646,7 +646,7 @@ class GradientTape(object):
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Trainable variables (created by `tf.Variable` or @{tf.get_variable},
Trainable variables (created by `tf.Variable` or `tf.get_variable`,
trainable=True is default in both cases) are automatically watched. Tensors
can be manually watched by invoking the `watch` method on this context
manager.
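
A small example of the recording behaviour described above; constants must be watched explicitly, while trainable variables are watched automatically:

```python
import tensorflow as tf
tf.enable_eager_execution()

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)               # needed because x is a constant, not a variable
  y = x * x
dy_dx = tape.gradient(y, x)
print(dy_dx)                  # 6.0
```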

View File

@ -663,7 +663,7 @@ def internal_operation_seed():
def executing_eagerly():
"""Returns True if the current thread has eager execution enabled.
Eager execution is typically enabled via @{tf.enable_eager_execution},
Eager execution is typically enabled via `tf.enable_eager_execution`,
but may also be enabled within the context of a Python function via
tf.contrib.eager.py_func.
"""

View File

@ -1221,7 +1221,7 @@ def defun(func=None, input_signature=None, compiled=False):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") trace-compiles a Python function
composed of TensorFlow operations into a callable that executes a @{tf.Graph}
composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
@ -1244,9 +1244,9 @@ def defun(func=None, input_signature=None, compiled=False):
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
zero or more @{tf.Tensor} objects. If the Python function returns
a @{tf.Variable}, its compiled version will return the value of that variable
as a @{tf.Tensor}.
zero or more `tf.Tensor` objects. If the Python function returns
a `tf.Variable`, its compiled version will return the value of that variable
as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
@ -1315,7 +1315,7 @@ def defun(func=None, input_signature=None, compiled=False):
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
`f` be a Python function that returns zero or more @{tf.Tensor} objects and
`f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
@ -1398,10 +1398,10 @@ def defun(func=None, input_signature=None, compiled=False):
On the other hand, because `defun` generates graphs by tracing and not by
source code analysis, it fully unrolls Python `for` and `while` loops,
potentially creating large graphs. If your Python function has native loops
that run for many iterations, consider replacing them with @{tf.while_loop}
that run for many iterations, consider replacing them with `tf.while_loop`
operations.
When constructing graphs, @{tf.Tensor} objects cannot be used as Python
When constructing graphs, `tf.Tensor` objects cannot be used as Python
`bool` objects. This means, for example, that you should replace code in `f`
resembling
@ -1420,7 +1420,7 @@ def defun(func=None, input_signature=None, compiled=False):
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
TensorFlow APIs, like @{tf.keras.layers.Layer} objects, create variables the
TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
@ -1459,7 +1459,7 @@ def defun(func=None, input_signature=None, compiled=False):
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
@{tf.keras.layers.Layer} objects do, so we recommend using them to represent
`tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
Args:
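
A hedged sketch of the basic usage, assuming the `tf.contrib.eager.defun` export of this release; the function body is an arbitrary illustration:

```python
import tensorflow as tf
tf.enable_eager_execution()

def matmul_relu(x, y):
  return tf.nn.relu(tf.matmul(x, y))

# Trace-compile the Python function into a callable that runs a tf.Graph.
graph_matmul_relu = tf.contrib.eager.defun(matmul_relu)

x = tf.random_normal([2, 3])
y = tf.random_normal([3, 4])
print(graph_matmul_relu(x, y).shape)  # (2, 4)
```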

View File

@ -388,7 +388,7 @@ class DNNLinearCombinedClassifier(estimator.Estimator):
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
details, see @{tf.feature_column.linear_model$linear_model}.
details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
@ -586,7 +586,7 @@ class DNNLinearCombinedRegressor(estimator.Estimator):
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
details, see @{tf.feature_column.linear_model$linear_model}.
details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are

View File

@ -306,7 +306,7 @@ class LinearClassifier(estimator.Estimator):
is multivalent. One of "mean", "sqrtn", and "sum" -- these are
effectively different ways to do example-level normalization, which can
be useful for bag-of-words features. for more details, see
@{tf.feature_column.linear_model$linear_model}.
`tf.feature_column.linear_model`.
Returns:
A `LinearClassifier` estimator.
@ -472,7 +472,7 @@ class LinearRegressor(estimator.Estimator):
is multivalent. One of "mean", "sqrtn", and "sum" -- these are
effectively different ways to do example-level normalization, which can
be useful for bag-of-words features. for more details, see
@{tf.feature_column.linear_model$linear_model}.
`tf.feature_column.linear_model`.
"""
head = head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension, weight_column=weight_column,

View File

@ -128,7 +128,7 @@ class Estimator(object):
```
For more details on warm-start configuration, see
@{tf.estimator.WarmStartSettings$WarmStartSettings}.
`tf.estimator.WarmStartSettings`.
Args:
model_fn: Model function. Follows the signature:
@ -1027,7 +1027,7 @@ class Estimator(object):
"""Creates the global step tensor in graph.
The global step tensor must be an integer type with name 'global_step' and
be added to the collection @{tf.GraphKeys.GLOBAL_STEP}.
be added to the collection `tf.GraphKeys.GLOBAL_STEP`.
Args:
graph: The graph in which to create the global step tensor.

View File

@ -16,7 +16,7 @@
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned @{tf.estimator.Estimator}s.
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
@ -1936,7 +1936,7 @@ class _FeatureColumn(object):
It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
supported objects. Please check documentation of @{tf.parse_example} for all
supported objects. Please check documentation of `tf.parse_example` for all
supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
@ -1995,7 +1995,7 @@ class _DenseColumn(_FeatureColumn):
weight_collections: List of graph collections to which Variables (if any
will be created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see @{tf.Variable}).
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
Returns:
`Tensor` of shape [batch_size] + `_variable_shape`.
@ -2062,7 +2062,7 @@ class _CategoricalColumn(_FeatureColumn):
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
A categorical feature typically handled with a @{tf.SparseTensor} of IDs.
A categorical feature typically handled with a `tf.SparseTensor` of IDs.
"""
__metaclass__ = abc.ABCMeta
@ -2097,7 +2097,7 @@ class _CategoricalColumn(_FeatureColumn):
weight_collections: List of graph collections to which variables (if any
will be created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see @{tf.get_variable}).
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.get_variable`).
"""
pass
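
A small sketch of the pattern described here, choosing columns by feature type and feeding them to a canned estimator; column names, vocabulary, and data are illustrative:

```python
import tensorflow as tf

price = tf.feature_column.numeric_column('price')
city = tf.feature_column.categorical_column_with_vocabulary_list(
    'city', ['SF', 'NYC', 'LA'])

# Linear models consume categorical columns directly; DNN models need them
# wrapped in indicator_column or embedding_column first.
estimator = tf.estimator.LinearRegressor(feature_columns=[price, city])

def input_fn():
  features = {'price': tf.constant([[1.0], [2.0]]),
              'city': tf.constant([['SF'], ['NYC']])}
  labels = tf.constant([[10.0], [20.0]])
  return features, labels

estimator.train(input_fn=input_fn, steps=5)
```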

View File

@ -16,7 +16,7 @@
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned @{tf.estimator.Estimator}s.
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
@ -1904,7 +1904,7 @@ class FeatureColumn(object):
It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
supported objects. Please check documentation of @{tf.parse_example} for all
supported objects. Please check documentation of `tf.parse_example` for all
supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
@ -2025,7 +2025,7 @@ def _create_dense_column_weighted_sum(column,
class CategoricalColumn(FeatureColumn):
"""Represents a categorical feature.
A categorical feature typically handled with a @{tf.SparseTensor} of IDs.
A categorical feature typically handled with a `tf.SparseTensor` of IDs.
"""
__metaclass__ = abc.ABCMeta

View File

@ -63,9 +63,9 @@ class OpError(Exception):
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
@{tf.Operation}
`tf.Operation`
object. In that case, this will return `None`, and you should
instead use the @{tf.OpError.node_def} to
instead use the `tf.OpError.node_def` to
discover information about the op.
Returns:
@ -181,10 +181,10 @@ class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
For example, a long-running operation (e.g.
@{tf.QueueBase.enqueue} may be
`tf.QueueBase.enqueue` may be
cancelled by running another operation (e.g.
@{tf.QueueBase.close},
or by @{tf.Session.close}.
`tf.QueueBase.close`,
or by `tf.Session.close`.
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@ -221,9 +221,9 @@ class InvalidArgumentError(OpError):
This may occur, for example, if an operation is receives an input
tensor that has an invalid value or shape. For example, the
@{tf.matmul} op will raise this
`tf.matmul` op will raise this
error if it receives an input that is not a matrix, and the
@{tf.reshape} op will raise
`tf.reshape` op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@ -256,7 +256,7 @@ class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
@{tf.WholeFileReader.read}
`tf.WholeFileReader.read`
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@ -273,7 +273,7 @@ class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. @{tf.train.Saver.save})
(e.g. `tf.train.Saver.save`)
could potentially raise this exception if an explicit filename for an
existing file was passed.
@ -291,7 +291,7 @@ class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
@{tf.WholeFileReader.read}
`tf.WholeFileReader.read`
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@ -340,7 +340,7 @@ class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a @{tf.Variable}
that reads a `tf.Variable`
before it has been initialized.
@@__init__
@ -357,9 +357,9 @@ class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
@{tf.QueueBase.enqueue}
`tf.QueueBase.enqueue`
operation may raise `AbortedError` if a
@{tf.QueueBase.close} operation
`tf.QueueBase.close` operation
previously ran.
@@__init__
@ -375,9 +375,9 @@ class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
@{tf.QueueBase.dequeue}
`tf.QueueBase.dequeue`
operation is blocked on an empty queue, and a
@{tf.QueueBase.close}
`tf.QueueBase.close`
operation executes.
@@__init__
@ -395,7 +395,7 @@ class UnimplementedError(OpError):
Some operations may raise this error when passed otherwise-valid
arguments that it does not currently support. For example, running
the @{tf.nn.max_pool} operation
the `tf.nn.max_pool` operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@ -443,7 +443,7 @@ class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
@{tf.WholeFileReader.read}
`tf.WholeFileReader.read`
operation, if the file is truncated while it is being read.
@@__init__

View File

@ -665,7 +665,7 @@ class _FuncGraph(ops.Graph):
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Overridden from @{tf.Graph} to update both the init_scope container
Overridden from `tf.Graph` to update both the init_scope container
and the present inner container. This is necessary to make sure setting
containers applies correctly both to created variables and to stateful
ops.

View File

@ -344,9 +344,9 @@ def import_graph_def(graph_def,
This function provides a way to import a serialized TensorFlow
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and extract individual objects in the `GraphDef` as
@{tf.Tensor} and @{tf.Operation} objects. Once extracted,
`tf.Tensor` and `tf.Operation` objects. Once extracted,
these objects are placed into the current default `Graph`. See
@{tf.Graph.as_graph_def} for a way to create a `GraphDef`
`tf.Graph.as_graph_def` for a way to create a `GraphDef`
proto.
Args:
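
A sketch of the round trip described above: exporting a `GraphDef` with `tf.Graph.as_graph_def` and re-importing selected elements by name (names are illustrative):

```python
import tensorflow as tf

# Build and serialize a small graph.
with tf.Graph().as_default() as g1:
  a = tf.constant(1.0, name='a')
  b = tf.add(a, 2.0, name='b')
graph_def = g1.as_graph_def()

# Import it into a fresh graph and pull out a Tensor by name.
with tf.Graph().as_default() as g2:
  (b_imported,) = tf.import_graph_def(
      graph_def, return_elements=['b:0'], name='imported')
  with tf.Session() as sess:
    print(sess.run(b_imported))  # 3.0
```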

View File

@ -229,7 +229,7 @@ class Tensor(_TensorLike):
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow @{tf.Session}.
TensorFlow `tf.Session`.
This class has two primary purposes:
@ -240,7 +240,7 @@ class Tensor(_TensorLike):
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
@{tf.Session.run}.
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
@ -365,7 +365,7 @@ class Tensor(_TensorLike):
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
@{tf.TensorShape}
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
@ -695,7 +695,7 @@ class Tensor(_TensorLike):
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run} for a
See `tf.Session.run` for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
@ -1455,10 +1455,10 @@ class IndexedSlices(_TensorLike):
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. @{tf.gather}).
(e.g. `tf.gather`).
Contrast this representation with
@{tf.SparseTensor},
`tf.SparseTensor`,
which uses multi-dimensional indices and scalar values.
"""
@ -1619,8 +1619,8 @@ class Operation(object):
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
@{tf.matmul})
or @{tf.Graph.create_op}.
`tf.matmul`)
or `tf.Graph.create_op`.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
@ -1628,7 +1628,7 @@ class Operation(object):
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
@{tf.Session.run}.
`tf.Session.run`.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
"""
@ -2338,7 +2338,7 @@ class Operation(object):
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run}
See `tf.Session.run`
for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
@ -2727,13 +2727,13 @@ class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
@{tf.Operation} objects,
`tf.Operation` objects,
which represent units of computation; and
@{tf.Tensor} objects, which represent
`tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@{tf.get_default_graph}.
`tf.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
@ -2743,7 +2743,7 @@ class Graph(object):
```
Another typical usage involves the
@{tf.Graph.as_default}
`tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
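
The original docstring's snippet is truncated by the diff here; the typical pattern is, roughly:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  # Operations constructed in this block are added to `g`, not to the
  # process-wide default graph.
  c = tf.constant(30.0)
assert c.graph is g
```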
@ -2764,7 +2764,7 @@ class Graph(object):
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
@{tf.GraphKeys.GLOBAL_VARIABLES}) for
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
@ -2941,7 +2941,7 @@ class Graph(object):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
@{tf.Graph.graph_def_versions}.
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
@ -2991,7 +2991,7 @@ class Graph(object):
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a @{tf.train.QueueRunner}.
when using a `tf.train.QueueRunner`.
"""
self._finalized = True
@ -3040,7 +3040,7 @@ class Graph(object):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
@ -3086,7 +3086,7 @@ class Graph(object):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using @{tf.import_graph_def}) or used with the
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
@ -4884,7 +4884,7 @@ def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See
@{tf.Graph.device}
`tf.Graph.device`
for more details.
Args:
@ -4950,7 +4950,7 @@ def colocate_with(op, ignore_existing=False):
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See @{tf.Graph.control_dependencies}
See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
@ -5316,7 +5316,7 @@ def enable_eager_execution(config=None,
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a @{tf.Session}) and
opposed to adding to a graph to be executed later in a `tf.Session`) and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
@ -5336,9 +5336,9 @@ def enable_eager_execution(config=None,
both with and without eager execution).
Args:
config: (Optional.) A @{tf.ConfigProto} to use to configure the environment
in which operations are executed. Note that @{tf.ConfigProto} is also
used to configure graph execution (via @{tf.Session}) and many options
config: (Optional.) A `tf.ConfigProto` to use to configure the environment
in which operations are executed. Note that `tf.ConfigProto` is also
used to configure graph execution (via `tf.Session`) and many options
within `tf.ConfigProto` are not implemented (or are irrelevant) when
eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
@ -5638,7 +5638,7 @@ class GraphKeys(object):
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
@{tf.global_variables}
`tf.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
@ -5650,19 +5650,19 @@ class GraphKeys(object):
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
@{tf.trainable_variables}
`tf.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
@{tf.summary.merge_all}
`tf.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
@{tf.train.start_queue_runners}
`tf.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
@{tf.moving_average_variables}
`tf.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
@ -5776,7 +5776,7 @@ class GraphKeys(object):
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See @{tf.Graph.add_to_collection}
See `tf.Graph.add_to_collection`
for more details.
Args:
@ -5795,7 +5795,7 @@ def add_to_collection(name, value):
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See @{tf.Graph.add_to_collections}
See `tf.Graph.add_to_collections`
for more details.
Args:
@ -5815,7 +5815,7 @@ def add_to_collections(names, value):
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See @{tf.Graph.get_collection_ref}
See `tf.Graph.get_collection_ref`
for more details.
Args:
@ -5839,7 +5839,7 @@ def get_collection_ref(key):
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See @{tf.Graph.get_collection}
See `tf.Graph.get_collection`
for more details.
Args:
@ -5882,7 +5882,7 @@ class name_scope(object): # pylint: disable=invalid-name
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
@{tf.Graph.name_scope}
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
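The hunk ends before the docstring's example; a minimal sketch of such an op (the name `my_op` and its arguments are illustrative) might look like:

```python
import tensorflow as tf

def my_op(a, b, name=None):
  # tf.name_scope(name, default_name, values) validates that `a` and `b` come
  # from the same graph and pushes a name scope for the ops created inside it.
  with tf.name_scope(name, "MyOp", [a, b]) as scope:
    a = tf.convert_to_tensor(a, name="a")
    b = tf.convert_to_tensor(b, name="b")
    return tf.add(a, b, name=scope)
```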

View File

@ -43,7 +43,7 @@ def get_seed(op_seed):
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
@{tf.set_random_seed}.
`tf.set_random_seed`.
Args:
op_seed: integer.
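For reference, a short sketch of how the graph-level and op-level seeds described above combine (the seed values are arbitrary):

```python
import tensorflow as tf

tf.set_random_seed(1234)            # graph-level seed
a = tf.random_uniform([1])          # op-level seed derived from the graph seed
b = tf.random_uniform([1], seed=7)  # explicit op-level seed, used together with the graph seed
```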

View File

@ -205,7 +205,7 @@ class SparseTensor(_TensorLike):
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See @{tf.Session.run} for a
See `tf.Session.run` for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
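As a hedged sketch of the `session` argument described above:

```python
import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
with tf.Session() as sess:
  # Equivalent to sess.run(sp); returns a SparseTensorValue.
  print(sp.eval(session=sess))
```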

View File

@ -500,7 +500,7 @@ class TensorShape(object):
may be inferred if there is a registered shape function for
`"Foo"`. See @{$adding_an_op#shape-functions-in-c$`Shape functions in C++`}
for details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using @{tf.Tensor.set_shape}.
the shape may be set explicitly using `tf.Tensor.set_shape`.
"""
def __init__(self, dims):
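A small sketch of the `tf.Tensor.set_shape` alternative mentioned in the docstring above:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32)   # static shape is initially unknown
x.set_shape([None, 28, 28, 1])   # refine the static shape in place
print(x.get_shape())             # (?, 28, 28, 1)
```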

View File

@ -659,10 +659,10 @@ def run_in_graph_and_eager_modes(func=None,
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a @{tf.test.TestCase} class. Doing so will cause the contents of the test
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see @{tf.enable_eager_execution}).
and graph execution (see `tf.enable_eager_execution`).
For example, consider the following unittest:
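The example itself falls outside this hunk; a hedged sketch of such a test (class and method names are illustrative, and the decorator is assumed to live in `tensorflow.python.framework.test_util`) could be:

```python
import tensorflow as tf
from tensorflow.python.framework import test_util

class AddTest(tf.test.TestCase):

  @test_util.run_in_graph_and_eager_modes()
  def testAdd(self):
    total = tf.add(1, 2)
    # self.evaluate() works both when building a graph and under eager execution.
    self.assertAllEqual(3, self.evaluate(total))

if __name__ == "__main__":
  tf.test.main()
```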

View File

@ -500,13 +500,13 @@ class Layer(checkpointable.CheckpointableBase):
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
@{tf.VariableSynchronization}. By default the synchronization is set to
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
@{tf.VariableAggregation}.
`tf.VariableAggregation`.
getter: Variable getter argument to be passed to the `Checkpointable` API.
Returns:
@ -1921,13 +1921,13 @@ def make_variable(name,
use_resource: Whether to use a `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
@{tf.VariableSynchronization}. By default the synchronization is set to
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
@{tf.VariableAggregation}.
`tf.VariableAggregation`.
partitioner: Not handled at this time.
Returns:
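As a hedged illustration of the `synchronization` and `aggregation` arguments documented above (the layer, its shape, and the variable name are made up):

```python
import tensorflow as tf

class MyDense(tf.keras.layers.Layer):
  def build(self, input_shape):
    # Fixed [in_dim, units] = [16, 8] for brevity; inputs are assumed to have 16 features.
    self.kernel = self.add_weight(
        name="kernel",
        shape=[16, 8],
        synchronization=tf.VariableSynchronization.AUTO,  # let the strategy decide
        aggregation=tf.VariableAggregation.MEAN)          # average across replicas
    super(MyDense, self).build(input_shape)

  def call(self, inputs):
    return tf.matmul(inputs, self.kernel)
```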

View File

@ -183,13 +183,13 @@ class Layer(base_layer.Layer):
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
@{tf.VariableSynchronization}. By default the synchronization is set to
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
@{tf.VariableAggregation}.
`tf.VariableAggregation`.
partitioner: (optional) partitioner instance (callable). If
provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,

View File

@ -203,7 +203,7 @@ class Dropout(keras_layers.Dropout, base.Layer):
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}.
`tf.set_random_seed`
for behavior.
name: The name of the layer (string).
"""
@ -248,7 +248,7 @@ def dropout(inputs,
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
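A short sketch of the `noise_shape` and `seed` arguments described above (the shapes are illustrative):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [32, 20, 64])  # [batch_size, timesteps, features]
y = tf.layers.dropout(
    x, rate=0.5,
    noise_shape=[32, 1, 64],  # one mask per feature, shared across timesteps
    seed=1234,
    training=True)
```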

View File

@ -538,7 +538,7 @@ def slice(input_, begin, size, name=None):
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
@ -594,7 +594,7 @@ def strided_slice(input_,
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via @{tf.Tensor.__getitem__} and @{tf.Variable.__getitem__}.**
is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
@ -723,7 +723,7 @@ def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See @{tf.Tensor.__getitem__} for detailed examples
of a variable. See `tf.Tensor.__getitem__` for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
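For reference, a sketch contrasting `tf.slice` with the `__getitem__` syntax the docstrings above recommend:

```python
import tensorflow as tf

foo = tf.reshape(tf.range(100), [10, 10])
a = tf.slice(foo, [3, 0], [4, 8])  # begin=[3, 0], size=[4, num_cols - 2]
b = foo[3:7, :-2]                  # the more pythonic equivalent of the line above
```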

View File

@ -3069,7 +3069,7 @@ def while_loop(cond,
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
@{tf.Tensor.set_shape}
`tf.Tensor.set_shape`
function may also be used in the `body` function to indicate that
the output loop variable has a particular shape. The shape invariant for
SparseTensor and IndexedSlices are treated specially as follows:
@ -3320,7 +3320,7 @@ def with_dependencies(dependencies, output_tensor, name=None):
no guarantee that `output_tensor` will be evaluated after any `dependencies`
have run.
See also @{tf.tuple$tuple} and @{tf.group$group}.
See also `tf.tuple` and `tf.group`.
Args:
dependencies: Iterable of operations to run before this op finishes.
@ -3365,8 +3365,8 @@ def group(*inputs, **kwargs):
When this op finishes, all ops in `inputs` have finished. This op has no
output.
See also @{tf.tuple$tuple} and
@{tf.control_dependencies$control_dependencies}.
See also `tf.tuple` and
`tf.control_dependencies`.
Args:
*inputs: Zero or more tensors to group.
@ -3435,8 +3435,8 @@ def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined
returned by `tuple` are only available after all the parallel computations
are done.
See also @{tf.group$group} and
@{tf.control_dependencies$control_dependencies}.
See also `tf.group` and
`tf.control_dependencies`.
Args:
tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
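A hedged sketch of the `shape_invariants` behaviour described in the `while_loop` hunk above (the second loop variable grows along its first axis, so it needs a relaxed invariant):

```python
import tensorflow as tf

i0 = tf.constant(0)
m0 = tf.ones([2, 2])
cond = lambda i, m: i < 4
body = lambda i, m: [i + 1, tf.concat([m, m], axis=0)]  # m doubles in rows each step
i_final, m_final = tf.while_loop(
    cond, body, loop_vars=[i0, m0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])
```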

View File

@ -73,7 +73,7 @@ def custom_gradient(f):
With this definition, the gradient at x=100 will be correctly evaluated as
1.0.
See also @{tf.RegisterGradient} which registers a gradient function for a
See also `tf.RegisterGradient` which registers a gradient function for a
primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows
for fine grained control over the gradient computation of a sequence of
operations.
@ -100,7 +100,7 @@ def custom_gradient(f):
Returns:
A function `h(x)` which returns the same value as `f(x)[0]` and whose
gradient (as calculated by @{tf.gradients}) is determined by `f(x)[1]`.
gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.
"""
def decorated(*args, **kwargs):
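The log1pexp example the docstring above alludes to (gradient correctly evaluated as 1.0 at x=100) looks roughly like:

```python
import tensorflow as tf

@tf.custom_gradient
def log1pexp(x):
  e = tf.exp(x)
  def grad(dy):
    # Algebraically simplified gradient; numerically stable even for large x.
    return dy * (1 - 1 / (1 + e))
  return tf.log(1 + e), grad
```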

View File

@ -126,8 +126,8 @@ class QueueBase(object):
handle single elements, versions that support enqueuing and
dequeuing a batch of elements at once.
See @{tf.FIFOQueue} and
@{tf.RandomShuffleQueue} for concrete
See `tf.FIFOQueue` and
`tf.RandomShuffleQueue` for concrete
implementations of this class, and instructions on how to create
them.
"""
@ -309,12 +309,12 @@ class QueueBase(object):
until the element has been enqueued.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
`tf.QueueBase.close` before or during its execution. If the
queue is closed before this operation runs,
`tf.errors.CancelledError` will be raised. If this operation is
blocked, and either (i) the queue is closed by a close operation
with `cancel_pending_enqueues=True`, or (ii) the session is
@{tf.Session.close},
`tf.Session.close`,
`tf.errors.CancelledError` will be raised.
Args:
@ -352,12 +352,12 @@ class QueueBase(object):
until all of the elements have been enqueued.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
`tf.QueueBase.close` before or during its execution. If the
queue is closed before this operation runs,
`tf.errors.CancelledError` will be raised. If this operation is
blocked, and either (i) the queue is closed by a close operation
with `cancel_pending_enqueues=True`, or (ii) the session is
@{tf.Session.close},
`tf.Session.close`,
`tf.errors.CancelledError` will be raised.
Args:
@ -413,11 +413,11 @@ class QueueBase(object):
until there is an element to dequeue.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
`tf.QueueBase.close` before or during its execution. If the
queue is closed, the queue is empty, and there are no pending
enqueue operations that can fulfill this request,
`tf.errors.OutOfRangeError` will be raised. If the session is
@{tf.Session.close},
`tf.Session.close`,
`tf.errors.CancelledError` will be raised.
Args:
@ -455,11 +455,11 @@ class QueueBase(object):
`OutOfRange` exception is raised.
At runtime, this operation may raise an error if the queue is
@{tf.QueueBase.close} before or during its execution. If the
`tf.QueueBase.close` before or during its execution. If the
queue is closed, the queue contains fewer than `n` elements, and
there are no pending enqueue operations that can fulfill this
request, `tf.errors.OutOfRangeError` will be raised. If the
session is @{tf.Session.close},
session is `tf.Session.close`,
`tf.errors.CancelledError` will be raised.
Args:
@ -500,7 +500,7 @@ class QueueBase(object):
If the queue is closed and there are more than `0` but fewer than
`n` elements remaining, then instead of raising a
`tf.errors.OutOfRangeError` like @{tf.QueueBase.dequeue_many},
`tf.errors.OutOfRangeError` like `tf.QueueBase.dequeue_many`,
less than `n` elements are returned immediately. If the queue is
closed and there are `0` elements left in the queue, then a
`tf.errors.OutOfRangeError` is raised just like in `dequeue_many`.
@ -608,7 +608,7 @@ def _shared_name(shared_name):
class RandomShuffleQueue(QueueBase):
"""A queue implementation that dequeues elements in a random order.
See @{tf.QueueBase} for a description of the methods on
See `tf.QueueBase` for a description of the methods on
this class.
"""
@ -657,7 +657,7 @@ class RandomShuffleQueue(QueueBase):
with the same length as `dtypes`, or `None`. If specified the dequeue
methods return a dictionary with the names as keys.
seed: A Python integer. Used to create a random seed. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
@ -693,7 +693,7 @@ class RandomShuffleQueue(QueueBase):
class FIFOQueue(QueueBase):
"""A queue implementation that dequeues elements in first-in first-out order.
See @{tf.QueueBase} for a description of the methods on
See `tf.QueueBase` for a description of the methods on
this class.
"""
@ -753,7 +753,7 @@ class PaddingFIFOQueue(QueueBase):
A `PaddingFIFOQueue` may contain components with dynamic shape, while also
supporting `dequeue_many`. See the constructor for more details.
See @{tf.QueueBase} for a description of the methods on
See `tf.QueueBase` for a description of the methods on
this class.
"""
@ -824,7 +824,7 @@ class PaddingFIFOQueue(QueueBase):
class PriorityQueue(QueueBase):
"""A queue implementation that dequeues elements in prioritized order.
See @{tf.QueueBase} for a description of the methods on
See `tf.QueueBase` for a description of the methods on
this class.
"""

View File

@ -253,7 +253,7 @@ def embedding_lookup(
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
@{tf.gather}, where `params` is
`tf.gather`, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
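As a hedged illustration of the partitioned lookup described above, with two shards under the default `"mod"` partition strategy:

```python
import tensorflow as tf

# With the "mod" strategy, shard p holds the ids with id % num_shards == p.
shard0 = tf.constant([[0., 0.], [2., 2.]])  # embeddings for ids 0 and 2
shard1 = tf.constant([[1., 1.]])            # embedding for id 1
ids = tf.constant([2, 1, 0])
emb = tf.nn.embedding_lookup([shard0, shard1], ids)  # shape [3, 2]
```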

View File

@ -265,7 +265,7 @@ def random_flip_up_down(image, seed=None):
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
seed: A Python integer. Used to create a random seed. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
Returns:
@ -287,7 +287,7 @@ def random_flip_left_right(image, seed=None):
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
seed: A Python integer. Used to create a random seed. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
Returns:
@ -307,7 +307,7 @@ def _random_flip(image, flip_index, seed, scope_name):
flip_index: The dimension along which to flip the image.
Vertical: 0, Horizontal: 1
seed: A Python integer. Used to create a random seed. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
scope_name: Name of the scope in which the ops are added.
@ -948,7 +948,7 @@ def resize_images(images,
Resized images will be distorted if their original aspect ratio is not
the same as `size`. To avoid distortions see
@{tf.image.resize_image_with_pad}.
`tf.image.resize_image_with_pad`.
`method` can be one of:
@ -1227,7 +1227,7 @@ def random_brightness(image, max_delta, seed=None):
image: An image.
max_delta: float, must be non-negative.
seed: A Python integer. Used to create a random seed. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
Returns:
@ -1255,7 +1255,7 @@ def random_contrast(image, lower, upper, seed=None):
lower: float. Lower bound for the random contrast factor.
upper: float. Upper bound for the random contrast factor.
seed: A Python integer. Used to create a random seed. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
Returns:
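A short sketch of the seeded image ops documented above (the input is a dummy image):

```python
import tensorflow as tf

image = tf.zeros([64, 64, 3])  # [height, width, channels]
flipped = tf.image.random_flip_left_right(image, seed=1)
brighter = tf.image.random_brightness(image, max_delta=0.2, seed=1)
contrast = tf.image.random_contrast(image, lower=0.5, upper=1.5, seed=1)
resized = tf.image.resize_images(image, size=[32, 32])
```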

View File

@ -238,7 +238,7 @@ class RandomUniform(Initializer):
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type.
"""
@ -276,7 +276,7 @@ class RandomNormal(Initializer):
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
"""
@ -319,7 +319,7 @@ class TruncatedNormal(Initializer):
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
"""
@ -369,7 +369,7 @@ class UniformUnitScaling(Initializer):
Args:
factor: Float. A multiplicative factor by which the values will be scaled.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
"""
@ -427,7 +427,7 @@ class VarianceScaling(Initializer):
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
@ -517,7 +517,7 @@ class Orthogonal(Initializer):
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type.
"""
@ -572,7 +572,7 @@ class ConvolutionDeltaOrthogonal(Initializer):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
dtype: The data type.
"""
@ -628,7 +628,7 @@ class ConvolutionOrthogonal(Initializer):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
dtype: The data type.
"""
@ -693,7 +693,7 @@ class ConvolutionOrthogonal2D(ConvolutionOrthogonal):
This has the effect of scaling the output 2-norm by a factor of
`sqrt(gain)`.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
dtype: The data type.
"""
@ -829,7 +829,7 @@ class ConvolutionOrthogonal1D(ConvolutionOrthogonal):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type.
"""
@ -946,7 +946,7 @@ class ConvolutionOrthogonal3D(ConvolutionOrthogonal):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed} for behavior.
`tf.set_random_seed` for behavior.
dtype: The data type.
"""
@ -1150,7 +1150,7 @@ def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
Args:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
@ -1175,7 +1175,7 @@ def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
Args:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
`tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
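A minimal sketch of using one of the seeded initializers described above:

```python
import tensorflow as tf

init = tf.glorot_uniform_initializer(seed=1234)
w = tf.get_variable("w", shape=[784, 10], initializer=init)
```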

View File

@ -190,7 +190,7 @@ def compute_weighted_loss(
When calculating the gradient of a weighted loss, contributions from
both `losses` and `weights` are considered. If your `weights` depend
on some model parameters but you do not want this to affect the loss
gradient, you need to apply @{tf.stop_gradient} to `weights` before
gradient, you need to apply `tf.stop_gradient` to `weights` before
passing them to `compute_weighted_loss`.
@compatibility(eager)
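To round off, a hedged sketch of the `tf.stop_gradient` recommendation above (the weights here depend on a model parameter):

```python
import tensorflow as tf

predictions = tf.get_variable("predictions", shape=[4, 1])
labels = tf.ones([4, 1])
# The weights are derived from a trainable parameter, so stop the gradient
# to keep the weighting itself from being optimized.
weights = tf.stop_gradient(tf.sigmoid(predictions))
loss = tf.losses.compute_weighted_loss(
    losses=tf.squared_difference(predictions, labels),
    weights=weights)
```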

Some files were not shown because too many files have changed in this diff.