[*.py,tensorflow/cc/framework/cc_op_gen.cc] Rename "Arguments:" to "Args:"

Samuel Marks 2020-12-22 09:24:04 +11:00
parent 40f207fdb9
commit 17c5b631b9
141 changed files with 824 additions and 824 deletions
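The change itself is mechanical: every docstring section header reading `Arguments:` becomes `Args:`, the spelling recommended by the Google Python Style Guide and understood by TensorFlow's API-doc tooling. The commit does not include the script used for the sweep; a minimal sketch of an equivalent rename, assuming plain textual replacement is safe in the affected files, might look like:

```python
# Hypothetical reproduction of the sweep; not the script actually used.
import pathlib

for path in pathlib.Path("tensorflow").rglob("*.py"):
    text = path.read_text()
    if "Arguments:" in text:
        # Plain substitution: assumes "Arguments:" only appears as a
        # docstring section header in these files.
        path.write_text(text.replace("Arguments:", "Args:"))
```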


@@ -586,7 +586,7 @@ OpInfo::OpInfo(const OpDef& graph_op_def, const ApiDef& api_def,
   if (!api_def.description().empty()) {
     strings::StrAppend(&comment, "\n", api_def.description(), "\n");
   }
-  strings::StrAppend(&comment, "\nArguments:\n* scope: A Scope object\n");
+  strings::StrAppend(&comment, "\nArgs:\n* scope: A Scope object\n");
   // Process inputs
   for (int i = 0; i < api_def.arg_order_size(); ++i) {


@@ -352,7 +352,7 @@ def execute_with_python_values(executable, arguments, backend):
 def execute_with_python_values_replicated(executable, arguments, backend):
   """Execute on many replicas with Python values as arguments and output.

-  Arguments:
+  Args:
     executable: the program to run.
     arguments: a list of lists of Python values indexed by `[replica][arg_num]`
       to pass as inputs.


@@ -115,7 +115,7 @@ def _create_pseudo_names(tensors, prefix):
   `[x, y]` becomes:
   `['output_1', 'output_2']`

-  Arguments:
+  Args:
     tensors: `Model`'s outputs or inputs.
     prefix: 'output_' for outputs, 'input_' for inputs.


@@ -387,7 +387,7 @@ def create_cluster_spec(has_chief=False,
   This util is useful when creating the `cluster_spec` arg for
   `tf.__internal__.distribute.multi_process_runner.run`.

-  Arguments:
+  Args:
     has_chief: Whether the generated cluster spec should contain "chief" task
       type.
     num_workers: Number of workers to use in the cluster spec.
@@ -699,7 +699,7 @@ class IndependentWorkerTestBase(test.TestCase):
     from `cluster_spec`, `task_type`, and `task_id`, and provide it to the new
     thread to be set as `TF_CONFIG` environment.

-    Arguments:
+    Args:
       task_fn: The function to run in the new thread.
       cluster_spec: The cluster spec.
       task_type: The task type.
@@ -810,7 +810,7 @@ class MultiWorkerMultiProcessTest(test.TestCase):
     In that case, this function only prints stderr from the first process of
     each type.

-    Arguments:
+    Args:
      processes: A dictionary from process type string -> list of processes.
      print_only_first: If true, only print output from first process of each
        type.


@@ -484,7 +484,7 @@ class MonitoredTimer(object):
 def monitored_timer(cell):
   """A function decorator for adding MonitoredTimer support.

-  Arguments:
+  Args:
     cell: the cell associated with the time metric that will be inremented.

   Returns:
     A decorator that measure the function runtime and increment the specified


@@ -1201,7 +1201,7 @@ class _EagerTensorBase(Tensor):
   def gpu(self, gpu_index=0):
     """A copy of this Tensor with contents backed by memory on the GPU.

-    Arguments:
+    Args:
       gpu_index: Identifies which GPU to place the contents on the returned
         Tensor in.
@@ -2335,7 +2335,7 @@ class Operation(object):
     Note: this is generally unsafe to use. This is used in certain situations in
     conjunction with _set_type_list_attr.

-    Arguments:
+    Args:
       types: list of DTypes
       shapes: list of TensorShapes
     """


@@ -30,7 +30,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None):
   If `pred` is a bool or has a constant value, we return either `true_fn()`
   or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.

-  Arguments:
+  Args:
     pred: A scalar determining whether to return the result of `true_fn` or
       `false_fn`.
     true_fn: The callable to be performed if pred is true.
@@ -62,7 +62,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None):
 def smart_constant_value(pred):
   """Return the bool value for `pred`, or None if `pred` had a dynamic value.

-  Arguments:
+  Args:
     pred: A scalar, either a Python bool or tensor.

   Returns:
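For context, the two functions touched above fold constant predicates and defer dynamic ones. A short usage sketch, assuming the internal module path `tensorflow.python.framework.smart_cond` (these are not public API):

```python
import tensorflow as tf
from tensorflow.python.framework import smart_cond as sc

# A Python bool is resolved immediately; no tf.cond op is built.
y = sc.smart_cond(True, lambda: tf.constant(1), lambda: tf.constant(2))

# A tensor whose value is unknown until runtime cannot be folded:
# smart_constant_value returns None, and smart_cond falls back to tf.cond.
pred = tf.greater(tf.random.uniform(()), 0.5)
print(sc.smart_constant_value(pred))  # None
```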


@@ -118,7 +118,7 @@ def dimension_value(dimension):
   value = tensor_shape[i]  # Warning: this will return the dim value in V2!
   ```

-  Arguments:
+  Args:
     dimension: Either a `Dimension` instance, an integer, or None.

   Returns:
@@ -164,7 +164,7 @@ def dimension_at_index(shape, index):
   # instantiated on the fly.
   ```

-  Arguments:
+  Args:
     shape: A TensorShape instance.
     index: An integer index.
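Both helpers exist to smooth over the V1 `Dimension` versus V2 plain-int difference their docstrings describe. A small sketch using their public `tf.compat` exports:

```python
import tensorflow as tf

shape = tf.TensorShape([8, None, 32])
print(tf.compat.dimension_value(shape[0]))   # 8, a plain int under V2 behavior
dim = tf.compat.dimension_at_index(shape, 1) # a Dimension object
print(dim.value)                             # None: the dimension is unknown
```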


@@ -81,7 +81,7 @@ class TestCombination(object):
     If the environment doesn't satisfy the dependencies of the test
     combination, then it can be skipped.

-    Arguments:
+    Args:
      kwargs: Arguments that are passed to the test combination.

     Returns:
@@ -103,7 +103,7 @@ class TestCombination(object):
     The test combination will run under all context managers that all
     `TestCombination` instances return.

-    Arguments:
+    Args:
       kwargs: Arguments and their values that are passed to the test
         combination.
@@ -141,7 +141,7 @@ class ParameterModifier(object):
   def __init__(self, parameter_name=None):
     """Construct a parameter modifier that may be specific to a parameter.

-    Arguments:
+    Args:
       parameter_name: A `ParameterModifier` instance may operate on a class of
         parameters or on a parameter with a particular name. Only
         `ParameterModifier` instances that are of a unique type or were
@@ -157,7 +157,7 @@ class ParameterModifier(object):
     This makes it possible to adjust user-provided arguments before passing
     them to the test method.

-    Arguments:
+    Args:
       kwargs: The combined arguments for the test.
       requested_parameters: The set of parameters that are defined in the
         signature of the test method.


@@ -61,7 +61,7 @@ def softmax(x, axis=-1):
   The input values in are the log-odds of the resulting probability.

-  Arguments:
+  Args:
     x : Input tensor.
     axis: Integer, axis along which the softmax normalization is applied.
@@ -121,7 +121,7 @@ def elu(x, alpha=1.0):
   <tensorflow.python.keras.engine.sequential.Sequential object ...>

-  Arguments:
+  Args:
     x: Input tensor.
     alpha: A scalar, slope of negative section. `alpha` controls the value to
       which an ELU saturates for negative net inputs.
@@ -174,7 +174,7 @@ def selu(x):
   ...                 activation='selu'))
   >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))

-  Arguments:
+  Args:
     x: A tensor or variable to compute the activation function for.

   Returns:
@@ -205,7 +205,7 @@ def softplus(x):
   array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,
          2.0000000e+01], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -226,7 +226,7 @@ def softsign(x):
   >>> b.numpy()
   array([-0.5,  0. ,  0.5], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -254,7 +254,7 @@ def swish(x):
   array([-4.1223075e-08, -2.6894143e-01,  0.0000000e+00,  7.3105860e-01,
          2.0000000e+01], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -290,7 +290,7 @@ def relu(x, alpha=0., max_value=None, threshold=0):
   >>> tf.keras.activations.relu(foo, threshold=5).numpy()
   array([-0., -0.,  0.,  0., 10.], dtype=float32)

-  Arguments:
+  Args:
     x: Input `tensor` or `variable`.
     alpha: A `float` that governs the slope for values lower than the
       threshold.
@@ -329,7 +329,7 @@ def gelu(x, approximate=False):
   array([-0.00363752, -0.15880796,  0.        ,  0.841192  ,  2.9963627 ],
         dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.
     approximate: A `bool`, whether to enable approximation.
@@ -359,7 +359,7 @@ def tanh(x):
   >>> b.numpy()
   array([-0.9950547, -0.7615942,  0.,  0.7615942,  0.9950547], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -390,7 +390,7 @@ def sigmoid(x):
   array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,
          1.0000000e+00], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -414,7 +414,7 @@ def exponential(x):
   >>> b.numpy()
   array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -437,7 +437,7 @@ def hard_sigmoid(x):
   >>> b.numpy()
   array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -462,7 +462,7 @@ def linear(x):
   >>> b.numpy()
   array([-3., -1.,  0.,  1.,  3.], dtype=float32)

-  Arguments:
+  Args:
     x: Input tensor.

   Returns:
@@ -476,7 +476,7 @@ def linear(x):
 def serialize(activation):
   """Returns the string identifier of an activation function.

-  Arguments:
+  Args:
     activation : Function object.

   Returns:
@@ -550,7 +550,7 @@ def deserialize(name, custom_objects=None):
 def get(identifier):
   """Returns function.

-  Arguments:
+  Args:
     identifier: Function or string

   Returns:
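The last two hunks cover the string/function round trip used throughout Keras config serialization. A quick illustration with the public API:

```python
import tensorflow as tf

fn = tf.keras.activations.get('relu')        # string identifier -> function
print(tf.keras.activations.serialize(fn))    # 'relu'
print(fn(tf.constant([-1.0, 2.0])).numpy())  # [0. 2.]
```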


@@ -57,7 +57,7 @@ layers = VersionAwareLayers()
 def dense_block(x, blocks, name):
   """A dense block.

-  Arguments:
+  Args:
     x: input tensor.
     blocks: integer, the number of building blocks.
     name: string, block label.
@@ -73,7 +73,7 @@ def dense_block(x, blocks, name):
 def transition_block(x, reduction, name):
   """A transition block.

-  Arguments:
+  Args:
     x: input tensor.
     reduction: float, compression rate at transition layers.
     name: string, block label.
@@ -99,7 +99,7 @@ def transition_block(x, reduction, name):
 def conv_block(x, growth_rate, name):
   """A building block for a dense block.

-  Arguments:
+  Args:
     x: input tensor.
     growth_rate: float, growth rate at dense layers.
     name: string, block label.
@@ -149,7 +149,7 @@ def DenseNet(
   For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     blocks: numbers of building blocks for the four dense layers.
     include_top: whether to include the fully-connected
       layer at the top of the network.
@@ -388,7 +388,7 @@ DOC = """
   For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: whether to include the fully-connected
       layer at the top of the network.
     weights: one of `None` (random initialization),


@@ -154,7 +154,7 @@ BASE_DOCSTRING = """Instantiates the {name} architecture.
   the one specified in your Keras config at `~/.keras/keras.json`.
   If you have never configured it, it defaults to `"channels_last"`.

-  Arguments:
+  Args:
     include_top: Whether to include the fully-connected
       layer at the top of the network. Defaults to True.
     weights: One of `None` (random initialization),
@@ -218,7 +218,7 @@ def EfficientNet(
   Note that the data format convention used by the model is
   the one specified in your Keras config at `~/.keras/keras.json`.

-  Arguments:
+  Args:
     width_coefficient: float, scaling coefficient for network width.
     depth_coefficient: float, scaling coefficient for network depth.
     default_size: integer, default input image size.
@@ -423,7 +423,7 @@ def block(inputs,
           id_skip=True):
   """An inverted residual block.

-  Arguments:
+  Args:
     inputs: input tensor.
     activation: activation function.
     drop_rate: float between 0 and 1, fraction of the input units to drop.


@@ -50,7 +50,7 @@ PREPROCESS_INPUT_DOC = """
   result = model(image)
   ```

-  Arguments:
+  Args:
     x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color
       channels, with values in the range [0, 255].
       The preprocessed data are written over the input data
@@ -129,7 +129,7 @@ preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
 def decode_predictions(preds, top=5):
   """Decodes the prediction of an ImageNet model.

-  Arguments:
+  Args:
     preds: Numpy array encoding a batch of predictions.
     top: Integer, how many top-guesses to return. Defaults to 5.
@@ -169,7 +169,7 @@ def decode_predictions(preds, top=5):
 def _preprocess_numpy_input(x, data_format, mode):
   """Preprocesses a Numpy array encoding a batch of images.

-  Arguments:
+  Args:
     x: Input array, 3D or 4D.
     data_format: Data format of the image array.
     mode: One of "caffe", "tf" or "torch".
@@ -242,7 +242,7 @@ def _preprocess_numpy_input(x, data_format, mode):
 def _preprocess_symbolic_input(x, data_format, mode):
   """Preprocesses a tensor encoding a batch of images.

-  Arguments:
+  Args:
     x: Input tensor, 3D or 4D.
     data_format: Data format of the image tensor.
     mode: One of "caffe", "tf" or "torch".
@@ -301,7 +301,7 @@ def obtain_input_shape(input_shape,
                        weights=None):
   """Internal utility to compute/validate a model's input shape.

-  Arguments:
+  Args:
     input_shape: Either None (will return the default network input shape),
       or a user-provided shape to be validated.
     default_size: Default input width/height for the model.
@@ -388,7 +388,7 @@ def obtain_input_shape(input_shape,
 def correct_pad(inputs, kernel_size):
   """Returns a tuple for zero-padding for 2D convolution with downsampling.

-  Arguments:
+  Args:
     inputs: Input tensor.
     kernel_size: An integer or tuple/list of 2 integers.
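These utilities back the per-model `preprocess_input`/`decode_predictions` pairs referenced throughout the application files below. A typical call sequence, using ResNet50 as an arbitrary example (`weights=None` avoids a weight download but makes the decoded labels meaningless):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.applications.ResNet50(weights=None)
x = np.random.uniform(0, 255, (1, 224, 224, 3)).astype('float32')
x = tf.keras.applications.resnet.preprocess_input(x)  # per-model scaling
preds = model.predict(x)
# Maps class probabilities to (class_id, description, score) tuples.
print(tf.keras.applications.resnet.decode_predictions(preds, top=3))
```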


@@ -66,7 +66,7 @@ def InceptionResNetV2(include_top=True,
   `tf.keras.applications.inception_resnet_v2.preprocess_input`
   on your inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: whether to include the fully-connected
       layer at the top of the network.
     weights: one of `None` (random initialization),
@@ -260,7 +260,7 @@ def conv2d_bn(x,
               name=None):
   """Utility function to apply conv + BN.

-  Arguments:
+  Args:
     x: input tensor.
     filters: filters in `Conv2D`.
     kernel_size: kernel size as in `Conv2D`.
@@ -302,7 +302,7 @@ def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
   - Inception-ResNet-B: `block_type='block17'`
   - Inception-ResNet-C: `block_type='block8'`

-  Arguments:
+  Args:
     x: input tensor.
     scale: scaling factor to scale the residuals (i.e., the output of passing
       `x` through an inception module) before adding them to the shortcut


@@ -67,7 +67,7 @@ def InceptionV3(
   For InceptionV3, call `tf.keras.applications.inception_v3.preprocess_input`
   on your inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: Boolean, whether to include the fully-connected
       layer at the top, as the last layer of the network. Default to `True`.
     weights: One of `None` (random initialization),
@@ -369,7 +369,7 @@ def conv2d_bn(x,
               name=None):
   """Utility function to apply conv + BN.

-  Arguments:
+  Args:
     x: input tensor.
     filters: filters in `Conv2D`.
     num_row: height of the convolution kernel.


@@ -108,7 +108,7 @@ def MobileNet(input_shape=None,
   For MobileNet, call `tf.keras.applications.mobilenet.preprocess_input`
   on your inputs before passing them to the model.

-  Arguments:
+  Args:
     input_shape: Optional shape tuple, only to be specified if `include_top`
       is False (otherwise the input shape has to be `(224, 224, 3)` (with
       `channels_last` data format) or (3, 224, 224) (with `channels_first`
@@ -315,7 +315,7 @@ def MobileNet(input_shape=None,
 def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
   """Adds an initial convolution layer (with batch normalization and relu6).

-  Arguments:
+  Args:
     inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
       data format) or (3, rows, cols) (with `channels_first` data format).
       It should have exactly 3 inputs channels, and width and height should
@@ -373,7 +373,7 @@ def _depthwise_conv_block(inputs,
   batch normalization, relu6, pointwise convolution,
   batch normalization and relu6 activation.

-  Arguments:
+  Args:
     inputs: Input tensor of shape `(rows, cols, channels)` (with
       `channels_last` data format) or (channels, rows, cols) (with
       `channels_first` data format).


@@ -115,7 +115,7 @@ def MobileNetV2(input_shape=None,
   For MobileNetV2, call `tf.keras.applications.mobilenet_v2.preprocess_input`
   on your inputs before passing them to the model.

-  Arguments:
+  Args:
     input_shape: Optional shape tuple, to be specified if you would
       like to use a model with an input image resolution that is not
       (224, 224, 3).


@@ -77,7 +77,7 @@ BASE_DOCSTRING = """Instantiates the {name} architecture.
   Optionally loads weights pre-trained on ImageNet.

-  Arguments:
+  Args:
     input_shape: Optional shape tuple, to be specified if you would
       like to use a model with an input image resolution that is not
       (224, 224, 3).


@@ -85,7 +85,7 @@ def NASNet(input_shape=None,
   Note that the data format convention used by the model is
   the one specified in your Keras config at `~/.keras/keras.json`.

-  Arguments:
+  Args:
     input_shape: Optional shape tuple, the input shape
       is by default `(331, 331, 3)` for NASNetLarge and
       `(224, 224, 3)` for NASNetMobile.
@@ -340,7 +340,7 @@ def NASNetMobile(input_shape=None,
   For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     input_shape: Optional shape tuple, only to be specified
       if `include_top` is False (otherwise the input shape
       has to be `(224, 224, 3)` for NASNetMobile
@@ -417,7 +417,7 @@ def NASNetLarge(input_shape=None,
   For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     input_shape: Optional shape tuple, only to be specified
       if `include_top` is False (otherwise the input shape
       has to be `(331, 331, 3)` for NASNetLarge.
@@ -479,7 +479,7 @@ def _separable_conv_block(ip,
                           block_id=None):
   """Adds 2 blocks of [relu-separable conv-batchnorm].

-  Arguments:
+  Args:
     ip: Input tensor
     filters: Number of output filters per layer
     kernel_size: Kernel size of separable convolutions
@@ -538,7 +538,7 @@ def _adjust_block(p, ip, filters, block_id=None):
   Used in situations where the output number of filters needs to be changed.

-  Arguments:
+  Args:
     p: Input tensor which needs to be modified
     ip: Input tensor whose shape needs to be matched
     filters: Number of output filters to be matched
@@ -621,7 +621,7 @@ def _adjust_block(p, ip, filters, block_id=None):
 def _normal_a_cell(ip, p, filters, block_id=None):
   """Adds a Normal cell for NASNet-A (Fig. 4 in the paper).

-  Arguments:
+  Args:
     ip: Input tensor `x`
     p: Input tensor `p`
     filters: Number of output filters
@@ -700,7 +700,7 @@ def _normal_a_cell(ip, p, filters, block_id=None):
 def _reduction_a_cell(ip, p, filters, block_id=None):
   """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).

-  Arguments:
+  Args:
     ip: Input tensor `x`
     p: Input tensor `p`
     filters: Number of output filters


@@ -79,7 +79,7 @@ def ResNet(stack_fn,
   Note that the data format convention used by the model is
   the one specified in your Keras config at `~/.keras/keras.json`.

-  Arguments:
+  Args:
     stack_fn: a function that returns output tensor for the
       stacked residual blocks.
     preact: whether to use pre-activation or not
@@ -226,7 +226,7 @@ def ResNet(stack_fn,
 def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
   """A residual block.

-  Arguments:
+  Args:
     x: input tensor.
     filters: integer, filters of the bottleneck layer.
     kernel_size: default 3, kernel size of the bottleneck layer.
@@ -271,7 +271,7 @@ def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
 def stack1(x, filters, blocks, stride1=2, name=None):
   """A set of stacked residual blocks.

-  Arguments:
+  Args:
     x: input tensor.
     filters: integer, filters of the bottleneck layer in a block.
     blocks: integer, blocks in the stacked blocks.
@@ -290,7 +290,7 @@ def stack1(x, filters, blocks, stride1=2, name=None):
 def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None):
   """A residual block.

-  Arguments:
+  Args:
     x: input tensor.
     filters: integer, filters of the bottleneck layer.
     kernel_size: default 3, kernel size of the bottleneck layer.
@@ -339,7 +339,7 @@ def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None):
 def stack2(x, filters, blocks, stride1=2, name=None):
   """A set of stacked residual blocks.

-  Arguments:
+  Args:
     x: input tensor.
     filters: integer, filters of the bottleneck layer in a block.
     blocks: integer, blocks in the stacked blocks.
@@ -365,7 +365,7 @@ def block3(x,
            name=None):
   """A residual block.

-  Arguments:
+  Args:
     x: input tensor.
     filters: integer, filters of the bottleneck layer.
     kernel_size: default 3, kernel size of the bottleneck layer.
@@ -428,7 +428,7 @@ def block3(x,
 def stack3(x, filters, blocks, stride1=2, groups=32, name=None):
   """A set of stacked residual blocks.

-  Arguments:
+  Args:
     x: input tensor.
     filters: integer, filters of the bottleneck layer in a block.
     blocks: integer, blocks in the stacked blocks.
@@ -547,7 +547,7 @@ DOC = """
   For ResNet, call `tf.keras.applications.resnet.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: whether to include the fully-connected
       layer at the top of the network.
     weights: one of `None` (random initialization),


@@ -152,7 +152,7 @@ DOC = """
   For ResNetV2, call `tf.keras.applications.resnet_v2.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: whether to include the fully-connected
       layer at the top of the network.
     weights: one of `None` (random initialization),


@@ -70,7 +70,7 @@ def VGG16(
   For VGG16, call `tf.keras.applications.vgg16.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: whether to include the 3 fully-connected
       layers at the top of the network.
     weights: one of `None` (random initialization),


@@ -70,7 +70,7 @@ def VGG19(
   For VGG19, call `tf.keras.applications.vgg19.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: whether to include the 3 fully-connected
       layers at the top of the network.
     weights: one of `None` (random initialization),


@@ -72,7 +72,7 @@ def Xception(
   For Xception, call `tf.keras.applications.xception.preprocess_input` on your
   inputs before passing them to the model.

-  Arguments:
+  Args:
     include_top: whether to include the fully-connected
       layer at the top of the network.
     weights: one of `None` (random initialization),

File diff suppressed because it is too large


@@ -49,7 +49,7 @@ def epsilon():
 def set_epsilon(value):
   """Sets the value of the fuzz factor used in numeric expressions.

-  Arguments:
+  Args:
     value: float. New value of epsilon.

   Example:
@@ -91,7 +91,7 @@ def set_floatx(value):
   [mixed precision guide](
   https://www.tensorflow.org/guide/keras/mixed_precision) for details.

-  Arguments:
+  Args:
     value: String; `'float16'`, `'float32'`, or `'float64'`.

   Example:
@@ -130,7 +130,7 @@ def image_data_format():
 def set_image_data_format(data_format):
   """Sets the value of the image data format convention.

-  Arguments:
+  Args:
     data_format: string. `'channels_first'` or `'channels_last'`.

   Example:
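The three setters above mutate global Keras configuration and have matching getters. A brief sketch:

```python
import tensorflow as tf

tf.keras.backend.set_epsilon(1e-5)
tf.keras.backend.set_floatx('float64')
tf.keras.backend.set_image_data_format('channels_first')
print(tf.keras.backend.epsilon(),
      tf.keras.backend.floatx(),
      tf.keras.backend.image_data_format())
```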


@@ -33,7 +33,7 @@ def get_benchmark_name(name):
   This is to generate the metadata of the benchmark test.

-  Arguments:
+  Args:
     name: A string, the benchmark name.

   Returns:
@@ -47,7 +47,7 @@ def get_benchmark_name(name):
 def generate_benchmark_params_cpu_gpu(*params_list):
   """Extend the benchmark names with CPU and GPU suffix.

-  Arguments:
+  Args:
     *params_list: A list of tuples represents the benchmark parameters.

   Returns:
@@ -99,7 +99,7 @@ def measure_performance(model_fn,
                         distribution_strategy='off'):
   """Run models and measure the performance.

-  Arguments:
+  Args:
     model_fn: Model function to be benchmarked.
     x: Input data. See `x` in the `fit()` method of `keras.Model`.
     y: Target data. See `y` in the `fit()` method of `keras.Model`.


@@ -66,7 +66,7 @@ class CustomMnistBenchmark(tf.test.Benchmark):
   def train_step(self, inputs, model, loss_fn, optimizer, batch_size):
     """Compute loss and optimize model by optimizer.

-    Arguments:
+    Args:
       inputs: `tf.data`.
       model: See `model` in `train_function()` method.
       loss_fn: See `loss_fn` in `train_function()` method.
@@ -89,7 +89,7 @@ class CustomMnistBenchmark(tf.test.Benchmark):
                        batch_size, distribution_strategy):
     """Train step in distribution strategy setting.

-    Arguments:
+    Args:
       batch_dataset: `tf.data`.
       model: See `model` in `train_function()` method.
       loss_fn: See `loss_fn` in `train_function()` method.
@@ -125,7 +125,7 @@ class CustomMnistBenchmark(tf.test.Benchmark):
       train_step_time.

-    Arguments:
+    Args:
       model: Model function to be benchmarked.
       train_dataset: `tf.data` dataset. Should return a tuple of either (inputs,
         targets) or (inputs, targets, sample_weights).
@@ -180,7 +180,7 @@ class CustomMnistBenchmark(tf.test.Benchmark):
                           distribution_strategy=None):
     """Run models and measure the performance.

-    Arguments:
+    Args:
       model_fn: Model function to be benchmarked.
       dataset: `tf.data` dataset. Should return a tuple of either (inputs,
         targets) or (inputs, targets, sample_weights).


@@ -58,7 +58,7 @@ class KerasOptimizerBenchmark(
   def benchmark_optimizer(self, optimizer, num_iters):
     """Optimizer benchmark with Bidirectional LSTM model on IMDB data.

-    Arguments:
+    Args:
       optimizer: The optimizer instance to be benchmarked.
       num_iters: The number of iterations to run for performance measurement.
     """


@ -84,7 +84,7 @@ def configure_callbacks(callbacks,
mode=ModeKeys.TRAIN): mode=ModeKeys.TRAIN):
"""Configures callbacks for use in various training loops. """Configures callbacks for use in various training loops.
Arguments: Args:
callbacks: List of Callbacks. callbacks: List of Callbacks.
model: Model being trained. model: Model being trained.
do_validation: Whether or not validation loop will be run. do_validation: Whether or not validation loop will be run.
@ -145,7 +145,7 @@ def set_callback_parameters(callback_list,
mode=ModeKeys.TRAIN): mode=ModeKeys.TRAIN):
"""Sets callback parameters. """Sets callback parameters.
Arguments: Args:
callback_list: CallbackList instance. callback_list: CallbackList instance.
model: Model being trained. model: Model being trained.
do_validation: Whether or not validation loop will be run. do_validation: Whether or not validation loop will be run.
@ -215,7 +215,7 @@ class CallbackList(object):
to call them all at once via a single endpoint to call them all at once via a single endpoint
(e.g. `callback_list.on_epoch_end(...)`). (e.g. `callback_list.on_epoch_end(...)`).
Arguments: Args:
callbacks: List of `Callback` instances. callbacks: List of `Callback` instances.
add_history: Whether a `History` callback should be added, if one does not add_history: Whether a `History` callback should be added, if one does not
already exist in the `callbacks` list. already exist in the `callbacks` list.
@ -396,7 +396,7 @@ class CallbackList(object):
This function should only be called during TRAIN mode. This function should only be called during TRAIN mode.
Arguments: Args:
epoch: Integer, index of epoch. epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
@ -416,7 +416,7 @@ class CallbackList(object):
This function should only be called during TRAIN mode. This function should only be called during TRAIN mode.
Arguments: Args:
epoch: Integer, index of epoch. epoch: Integer, index of epoch.
logs: Dict, metric results for this training epoch, and for the logs: Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys validation epoch if validation is performed. Validation result keys
@ -435,7 +435,7 @@ class CallbackList(object):
def on_train_batch_begin(self, batch, logs=None): def on_train_batch_begin(self, batch, logs=None):
"""Calls the `on_train_batch_begin` methods of its callbacks. """Calls the `on_train_batch_begin` methods of its callbacks.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.train_step`. Typically, logs: Dict, contains the return value of `model.train_step`. Typically,
the values of the `Model`'s metrics are returned. Example: the values of the `Model`'s metrics are returned. Example:
@ -447,7 +447,7 @@ class CallbackList(object):
def on_train_batch_end(self, batch, logs=None): def on_train_batch_end(self, batch, logs=None):
"""Calls the `on_train_batch_end` methods of its callbacks. """Calls the `on_train_batch_end` methods of its callbacks.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch. logs: Dict. Aggregated metric results up until this batch.
""" """
@ -457,7 +457,7 @@ class CallbackList(object):
def on_test_batch_begin(self, batch, logs=None): def on_test_batch_begin(self, batch, logs=None):
"""Calls the `on_test_batch_begin` methods of its callbacks. """Calls the `on_test_batch_begin` methods of its callbacks.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.test_step`. Typically, logs: Dict, contains the return value of `model.test_step`. Typically,
the values of the `Model`'s metrics are returned. Example: the values of the `Model`'s metrics are returned. Example:
@ -469,7 +469,7 @@ class CallbackList(object):
def on_test_batch_end(self, batch, logs=None): def on_test_batch_end(self, batch, logs=None):
"""Calls the `on_test_batch_end` methods of its callbacks. """Calls the `on_test_batch_end` methods of its callbacks.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch. logs: Dict. Aggregated metric results up until this batch.
""" """
@ -479,7 +479,7 @@ class CallbackList(object):
def on_predict_batch_begin(self, batch, logs=None): def on_predict_batch_begin(self, batch, logs=None):
"""Calls the `on_predict_batch_begin` methods of its callbacks. """Calls the `on_predict_batch_begin` methods of its callbacks.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.predict_step`, logs: Dict, contains the return value of `model.predict_step`,
it typically returns a dict with a key 'outputs' containing it typically returns a dict with a key 'outputs' containing
@ -491,7 +491,7 @@ class CallbackList(object):
def on_predict_batch_end(self, batch, logs=None): def on_predict_batch_end(self, batch, logs=None):
"""Calls the `on_predict_batch_end` methods of its callbacks. """Calls the `on_predict_batch_end` methods of its callbacks.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch. logs: Dict. Aggregated metric results up until this batch.
""" """
@ -501,7 +501,7 @@ class CallbackList(object):
def on_train_begin(self, logs=None): def on_train_begin(self, logs=None):
"""Calls the `on_train_begin` methods of its callbacks. """Calls the `on_train_begin` methods of its callbacks.
Arguments: Args:
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
""" """
@ -518,7 +518,7 @@ class CallbackList(object):
def on_train_end(self, logs=None): def on_train_end(self, logs=None):
"""Calls the `on_train_end` methods of its callbacks. """Calls the `on_train_end` methods of its callbacks.
Arguments: Args:
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
""" """
@ -535,7 +535,7 @@ class CallbackList(object):
def on_test_begin(self, logs=None): def on_test_begin(self, logs=None):
"""Calls the `on_test_begin` methods of its callbacks. """Calls the `on_test_begin` methods of its callbacks.
Arguments: Args:
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
""" """
@ -552,7 +552,7 @@ class CallbackList(object):
def on_test_end(self, logs=None): def on_test_end(self, logs=None):
"""Calls the `on_test_end` methods of its callbacks. """Calls the `on_test_end` methods of its callbacks.
Arguments: Args:
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
""" """
@ -569,7 +569,7 @@ class CallbackList(object):
def on_predict_begin(self, logs=None): def on_predict_begin(self, logs=None):
"""Calls the 'on_predict_begin` methods of its callbacks. """Calls the 'on_predict_begin` methods of its callbacks.
Arguments: Args:
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
""" """
@ -586,7 +586,7 @@ class CallbackList(object):
def on_predict_end(self, logs=None): def on_predict_end(self, logs=None):
"""Calls the `on_predict_end` methods of its callbacks. """Calls the `on_predict_end` methods of its callbacks.
Arguments: Args:
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
""" """
@ -651,7 +651,7 @@ class Callback(object):
Subclasses should override for any actions to run. This function should only Subclasses should override for any actions to run. This function should only
be called during TRAIN mode. be called during TRAIN mode.
Arguments: Args:
epoch: Integer, index of epoch. epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this method logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future. but that may change in the future.
@ -664,7 +664,7 @@ class Callback(object):
Subclasses should override for any actions to run. This function should only Subclasses should override for any actions to run. This function should only
be called during TRAIN mode. be called during TRAIN mode.
Arguments: Args:
epoch: Integer, index of epoch. epoch: Integer, index of epoch.
logs: Dict, metric results for this training epoch, and for the logs: Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys validation epoch if validation is performed. Validation result keys
@ -683,7 +683,7 @@ class Callback(object):
`tf.keras.Model` is set to `N`, this method will only be called every `N` `tf.keras.Model` is set to `N`, this method will only be called every `N`
batches. batches.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.train_step`. Typically, logs: Dict, contains the return value of `model.train_step`. Typically,
the values of the `Model`'s metrics are returned. Example: the values of the `Model`'s metrics are returned. Example:
@ -703,7 +703,7 @@ class Callback(object):
`tf.keras.Model` is set to `N`, this method will only be called every `N` `tf.keras.Model` is set to `N`, this method will only be called every `N`
batches. batches.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch. logs: Dict. Aggregated metric results up until this batch.
""" """
@ -724,7 +724,7 @@ class Callback(object):
`tf.keras.Model` is set to `N`, this method will only be called every `N` `tf.keras.Model` is set to `N`, this method will only be called every `N`
batches. batches.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.test_step`. Typically, logs: Dict, contains the return value of `model.test_step`. Typically,
the values of the `Model`'s metrics are returned. Example: the values of the `Model`'s metrics are returned. Example:
@ -745,7 +745,7 @@ class Callback(object):
`tf.keras.Model` is set to `N`, this method will only be called every `N` `tf.keras.Model` is set to `N`, this method will only be called every `N`
batches. batches.
Arguments: Args:
batch: Integer, index of batch within the current epoch. batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@@ -761,7 +761,7 @@ class Callback(object):
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
-Arguments:
+Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.predict_step`,
it typically returns a dict with a key 'outputs' containing
@@ -779,7 +779,7 @@ class Callback(object):
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
-Arguments:
+Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@@ -790,7 +790,7 @@ class Callback(object):
Subclasses should override for any actions to run.
-Arguments:
+Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@@ -801,7 +801,7 @@ class Callback(object):
Subclasses should override for any actions to run.
-Arguments:
+Args:
logs: Dict. Currently the output of the last call to `on_epoch_end()`
is passed to this argument for this method but that may change in
the future.
@@ -813,7 +813,7 @@ class Callback(object):
Subclasses should override for any actions to run.
-Arguments:
+Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@@ -824,7 +824,7 @@ class Callback(object):
Subclasses should override for any actions to run.
-Arguments:
+Args:
logs: Dict. Currently the output of the last call to
`on_test_batch_end()` is passed to this argument for this method
but that may change in the future.
@@ -836,7 +836,7 @@ class Callback(object):
Subclasses should override for any actions to run.
-Arguments:
+Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@@ -847,7 +847,7 @@ class Callback(object):
Subclasses should override for any actions to run.
-Arguments:
+Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@@ -876,7 +876,7 @@ class BaseLogger(Callback):
This callback is automatically applied to every Keras model.
-Arguments:
+Args:
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is in `on_epoch_end`.
@@ -942,7 +942,7 @@ class TerminateOnNaN(Callback):
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
-Arguments:
+Args:
count_mode: One of `"steps"` or `"samples"`.
Whether the progress bar should
count samples seen or steps (batches) seen.
@@ -1171,7 +1171,7 @@ class ModelCheckpoint(Callback):
model.load_weights(checkpoint_filepath)
```
-Arguments:
+Args:
filepath: string or `PathLike`, path to save the model file. e.g.
filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`
can contain named formatting options, which will be filled the value of
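
For reference, a minimal sketch of the `filepath` formatting this hunk documents; the directory names and the commented-out `model.fit` call are assumed placeholders, not part of the diff:

```python
import os
import tensorflow as tf

# `{epoch:02d}` and `{val_loss:.2f}` are filled in from the epoch number
# and the keys in `logs` each time a checkpoint is written.
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
    filepath=os.path.join('working_dir', 'ckpt',
                          'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
    monitor='val_loss',
    save_best_only=True)
# model.fit(x, y, validation_data=(x_val, y_val), callbacks=[checkpoint_cb])
```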
@@ -1366,7 +1366,7 @@ class ModelCheckpoint(Callback):
def _save_model(self, epoch, logs):
"""Saves the model.
-Arguments:
+Args:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
@@ -1487,7 +1487,7 @@ class ModelCheckpoint(Callback):
file_paths[-1])
```
-Arguments:
+Args:
pattern: The file pattern that may optionally contain python placeholder
such as `{epoch:02d}`.
@@ -1591,7 +1591,7 @@ class BackupAndRestore(Callback):
>>> len(history.history['loss'])
6
-Arguments:
+Args:
backup_dir: String, path to store the checkpoint.
e.g. backup_dir = os.path.join(working_dir, 'backup')
This is the directory in which the system stores temporary files to
@@ -1674,7 +1674,7 @@ class EarlyStopping(Callback):
The quantity to be monitored needs to be available in `logs` dict.
To make it so, pass the loss or metrics at `model.compile()`.
-Arguments:
+Args:
monitor: Quantity to be monitored.
min_delta: Minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
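
A minimal usage sketch of the `EarlyStopping` options listed above (the `model` and the training data are assumed):

```python
import tensorflow as tf

# Stop once `val_loss` has failed to improve by at least `min_delta`
# for `patience` consecutive epochs.
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',
    min_delta=1e-3,
    patience=3,
    restore_best_weights=True)
# model.fit(x, y, validation_data=(x_val, y_val), callbacks=[early_stopping])
```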
@@ -1807,7 +1807,7 @@ class RemoteMonitor(Callback):
`"application/json"`.
Otherwise the serialized JSON will be sent within a form.
-Arguments:
+Args:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
@@ -1867,7 +1867,7 @@ class LearningRateScheduler(Callback):
and current learning rate, and applies the updated learning rate
on the optimizer.
-Arguments:
+Args:
schedule: a function that takes an epoch index (integer, indexed from 0)
and current learning rate (float) as inputs and returns a new
learning rate as output (float).
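
A short sketch of the `schedule` signature described above; the decay policy itself is an arbitrary example:

```python
import tensorflow as tf

def schedule(epoch, lr):
    # Hold the initial rate for ten epochs, then decay it exponentially.
    return lr if epoch < 10 else lr * 0.9

lr_callback = tf.keras.callbacks.LearningRateScheduler(schedule, verbose=1)
# model.fit(x, y, epochs=20, callbacks=[lr_callback])
```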
@@ -1993,7 +1993,7 @@ class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
-Arguments:
+Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir, 'logs')
This directory should not be reused by any other callbacks.
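
A minimal sketch wiring up the callback with the `log_dir` convention shown above (paths and the `fit` call are placeholders):

```python
import os
import tensorflow as tf

log_dir = os.path.join('working_dir', 'logs')
tensorboard_cb = tf.keras.callbacks.TensorBoard(
    log_dir=log_dir,
    histogram_freq=1)  # also log weight histograms once per epoch
# model.fit(x, y, callbacks=[tensorboard_cb])
# Inspect with: tensorboard --logdir working_dir/logs
```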
@@ -2299,7 +2299,7 @@ class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
def _init_profile_batch(self, profile_batch):
"""Validate profile_batch value and set the range of batches to profile.
-Arguments:
+Args:
profile_batch: The range of batches to profile. Should be a non-negative
integer or a comma separated string of pair of positive integers. A pair
of positive integers signify a range of batches to profile.
@@ -2440,7 +2440,7 @@ class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
def _log_epoch_metrics(self, epoch, logs):
"""Writes epoch metrics out as scalar summaries.
-Arguments:
+Args:
epoch: Int. The global step to use for TensorBoard.
logs: Dict. Keys are scalar summary names, values are scalars.
"""
@@ -2523,7 +2523,7 @@ class ReduceLROnPlateau(Callback):
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
-Arguments:
+Args:
monitor: quantity to be monitored.
factor: factor by which the learning rate will be reduced.
`new_lr = lr * factor`.
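
A sketch of the `monitor`/`factor` interaction (`new_lr = lr * factor` once the metric plateaus); the numbers are arbitrary:

```python
import tensorflow as tf

# Halve the learning rate after five stagnant epochs, never below 1e-5.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.5, patience=5, min_lr=1e-5)
# model.fit(X_train, Y_train, validation_split=0.1, callbacks=[reduce_lr])
```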
@@ -2644,7 +2644,7 @@ class CSVLogger(Callback):
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
-Arguments:
+Args:
filename: Filename of the CSV file, e.g. `'run/log.csv'`.
separator: String used to separate elements in the CSV file.
append: Boolean. True: append if file exists (useful for continuing
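
A sketch of the three constructor arguments documented above:

```python
import tensorflow as tf

# `append=True` keeps the existing rows when resuming an interrupted run.
csv_logger = tf.keras.callbacks.CSVLogger(
    'run/log.csv', separator=',', append=True)
# model.fit(X_train, Y_train, callbacks=[csv_logger])
```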
@@ -2736,7 +2736,7 @@ class LambdaCallback(Callback):
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
-Arguments:
+Args:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
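
A sketch showing the per-hook signatures, here the two-argument `(batch, logs)` form expected by the batch-level hooks:

```python
import tensorflow as tf

batch_print_callback = tf.keras.callbacks.LambdaCallback(
    on_batch_begin=lambda batch, logs: print('starting batch', batch))
# model.fit(x, y, callbacks=[batch_print_callback])
```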
@@ -61,7 +61,7 @@ class TensorBoard(callbacks.TensorBoard):
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
-Arguments:
+Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
@@ -318,7 +318,7 @@ class TensorBoard(callbacks.TensorBoard):
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
-Arguments:
+Args:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
@@ -51,7 +51,7 @@ class MaxNorm(Constraint):
Also available via the shortcut function `tf.keras.constraints.max_norm`.
-Arguments:
+Args:
max_value: the maximum norm value for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
@@ -100,7 +100,7 @@ class UnitNorm(Constraint):
Also available via the shortcut function `tf.keras.constraints.unit_norm`.
-Arguments:
+Args:
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
@@ -138,7 +138,7 @@ class MinMaxNorm(Constraint):
Also available via the shortcut function `tf.keras.constraints.min_max_norm`.
-Arguments:
+Args:
min_value: the minimum norm for the incoming weights.
max_value: the maximum norm for the incoming weights.
rate: rate for enforcing the constraint: weights will be
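
A sketch applying the constraints above to `Dense` kernels via `kernel_constraint` (the layer sizes are arbitrary):

```python
import tensorflow as tf

# axis=0 constrains each column of the kernel, i.e. the weight vector
# feeding each output unit.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(
        64, input_shape=(16,),
        kernel_constraint=tf.keras.constraints.MaxNorm(max_value=2, axis=0)),
    tf.keras.layers.Dense(
        8, kernel_constraint=tf.keras.constraints.UnitNorm(axis=0)),
])
```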
@@ -38,7 +38,7 @@ def load_data(path='boston_housing.npz', test_split=0.2, seed=113):
The attributes themselves are defined in the
[StatLib website](http://lib.stat.cmu.edu/datasets/boston).
-Arguments:
+Args:
path: path where to cache the dataset locally
(relative to `~/.keras/datasets`).
test_split: fraction of the data to reserve as test set.
@@ -26,7 +26,7 @@ from six.moves import cPickle
def load_batch(fpath, label_key='labels'):
"""Internal utility for parsing CIFAR data.
-Arguments:
+Args:
fpath: path the file to parse.
label_key: key for label data in the retrieve
dictionary.
@@ -37,7 +37,7 @@ def load_data(label_mode='fine'):
grouped into 20 coarse-grained classes. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
-Arguments:
+Args:
label_mode: one of "fine", "coarse". If it is "fine" the category labels
are the fine-grained labels, if it is "coarse" the output labels are the
coarse-grained superclasses.
@@ -52,7 +52,7 @@ def load_data(path='imdb.npz',
As a convention, "0" does not stand for a specific word, but instead is used
to encode any unknown word.
-Arguments:
+Args:
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: integer or None. Words are
ranked by how often they occur (in the training set) and only
@@ -166,7 +166,7 @@ def load_data(path='imdb.npz',
def get_word_index(path='imdb_word_index.json'):
"""Retrieves a dict mapping words to their index in the IMDB dataset.
-Arguments:
+Args:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
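
A sketch of the two functions in these hunks:

```python
import tensorflow as tf

# Keep only the 10,000 most frequent words; rarer words map to the OOV index.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(
    num_words=10000)
word_index = tf.keras.datasets.imdb.get_word_index()
print(len(x_train), 'training sequences;', len(word_index), 'indexed words')
```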
@@ -34,7 +34,7 @@ def load_data(path='mnist.npz'):
[MNIST homepage](http://yann.lecun.com/exdb/mnist/).
-Arguments:
+Args:
path: path where to cache the dataset locally
(relative to `~/.keras/datasets`).
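
A sketch of the loader; the normalization step is a common follow-up, not part of the API:

```python
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# 60,000 train / 10,000 test grayscale images, shape (28, 28), uint8.
x_train = x_train.astype('float32') / 255.0
```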
@@ -60,7 +60,7 @@ def load_data(path='reuters.npz',
to encode any unknown word.
-Arguments:
+Args:
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: integer or None. Words are
ranked by how often they occur (in the training set) and only
@@ -155,7 +155,7 @@ def load_data(path='reuters.npz',
def get_word_index(path='reuters_word_index.json'):
"""Retrieves a dict mapping words to their index in the Reuters dataset.
-Arguments:
+Args:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
@@ -41,7 +41,7 @@ def call_replica_local_fn(fn, *args, **kwargs):
This function correctly handles calling `fn` in a cross-replica
context.
-Arguments:
+Args:
fn: The function to call.
*args: Positional arguments to the `fn`.
**kwargs: Keyword argument to `fn`.
@@ -611,7 +611,7 @@ def _get_input_from_iterator(iterator, model):
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
-Arguments:
+Args:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
@@ -1097,7 +1097,7 @@ def is_current_worker_chief():
def filter_distributed_callbacks(callbacks_list, model):
"""Filter Callbacks based on the worker context when running multi-worker.
-Arguments:
+Args:
callbacks_list: A list of `Callback` instances.
model: Keras model instance.
@@ -415,7 +415,7 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase,
We only provide a default implementation of this method here. If you need
more customized way of providing input to your model, overwrite this method.
-Arguments:
+Args:
**kwargs: key word arguments about how to create the input dictionaries
Returns:
@@ -522,7 +522,7 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase,
We only provide a default implementation of this method here. If you need
more customized way of providing input to your model, overwrite this method.
-Arguments:
+Args:
**kwargs: key word arguments about how to create the input dictionaries
Returns:
@@ -88,7 +88,7 @@ class WorkerTrainingState(object):
def back_up(self, epoch):
"""Back up the current state of training into a checkpoint file.
-Arguments:
+Args:
epoch: The current epoch information to be saved.
"""
K.set_value(self._ckpt_saved_epoch, epoch)
@@ -125,7 +125,7 @@ class WorkerTrainingState(object):
infer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous
unfinished training from certain epoch.
-Arguments:
+Args:
initial_epoch: The original initial_epoch user passes in in `fit()`.
mode: The mode for running `model.fit()`.
@@ -122,7 +122,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
Users will just instantiate a layer and then treat it as a callable.
-Arguments:
+Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights. Can also be a
@@ -459,7 +459,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
This is typically used to create the weights of `Layer` subclasses.
-Arguments:
+Args:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
@@ -478,7 +478,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
layers as additional arguments. Whereas `tf.keras` has `compute_mask()`
method to support masking.
-Arguments:
+Args:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments. Currently unused.
@@ -491,7 +491,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
-Arguments:
+Args:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
@@ -522,7 +522,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
**kwargs):
"""Adds a new variable to the layer.
-Arguments:
+Args:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype`.
@@ -717,7 +717,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
-Arguments:
+Args:
config: A Python dictionary, typically the
output of get_config.
@@ -733,7 +733,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
layer. This assumes that the layer will later be used with inputs that
match the input shape provided here.
-Arguments:
+Args:
input_shape: Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
@@ -886,7 +886,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
-Arguments:
+Args:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
@@ -907,7 +907,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def __call__(self, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
-Arguments:
+Args:
*args: Positional arguments to be passed to `self.call`.
**kwargs: Keyword arguments to be passed to `self.call`.
@@ -1531,7 +1531,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
model.add_loss(lambda: tf.reduce_mean(d.kernel))
```
-Arguments:
+Args:
losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
**kwargs: Additional keyword arguments for backward compatibility.
@@ -1773,7 +1773,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
updates are run on the fly and thus do not need to be tracked for later
execution).
-Arguments:
+Args:
updates: Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting `trainable=False`
@@ -1828,7 +1828,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
-Arguments:
+Args:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
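
A sketch of the `set_weights` contract documented above, using a freshly built `Dense` layer:

```python
import numpy as np
import tensorflow as tf

layer = tf.keras.layers.Dense(1)
layer.build((None, 3))  # creates a (3, 1) kernel and a (1,) bias
# The arrays must match the order and shapes of `layer.get_weights()`.
layer.set_weights([np.ones((3, 1)), np.zeros((1,))])
print(layer.get_weights())
```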
@@ -1925,7 +1925,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
Retrieves updates relevant to a specific set of inputs.
-Arguments:
+Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
@@ -1942,7 +1942,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
Retrieves losses relevant to a specific set of inputs.
-Arguments:
+Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
@@ -1957,7 +1957,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -1977,7 +1977,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -2041,7 +2041,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -2061,7 +2061,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -2081,7 +2081,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -2100,7 +2100,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -2261,7 +2261,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
This is an alias of `self.__call__`.
-Arguments:
+Args:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
@@ -2650,7 +2650,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
- get_input_at
etc...
-Arguments:
+Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
@@ -2916,7 +2916,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
def _flatten_modules(self, recursive=True, include_self=True):
"""Flattens `tf.Module` instances (excluding `Metrics`).
-Arguments:
+Args:
recursive: Whether to recursively flatten through submodules.
include_self: Whether to include this `Layer` instance.
@@ -76,7 +76,7 @@ def make_variable(name,
TODO(fchollet): remove this method when no longer needed.
-Arguments:
+Args:
name: Variable name.
shape: Variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
@@ -145,7 +145,7 @@ def make_variable(name,
def collect_previous_mask(input_tensors):
"""Retrieves the output mask(s) of the previous node.
-Arguments:
+Args:
input_tensors: An arbitrary structure of Tensors.
Returns:
@@ -177,7 +177,7 @@ def create_keras_history(tensors):
Any Tensors not originating from a Keras `Input` Layer will be treated as
constants when constructing `TensorFlowOpLayer` instances.
-Arguments:
+Args:
tensors: A structure of Tensors, some of which come from raw TensorFlow
operations and need to have Keras metadata assigned to them.
@@ -205,7 +205,7 @@ _UNSAFE_GRAPH_OP_LAYER_CREATION = False
def _create_keras_history_helper(tensors, processed_ops, created_layers):
"""Helper method for `create_keras_history`.
-Arguments:
+Args:
tensors: A structure of Tensors for which to create Keras metadata.
processed_ops: Set. TensorFlow operations that have already been wrapped in
`TensorFlowOpLayer` instances.
@@ -312,7 +312,7 @@ def needs_keras_history(tensors, ignore_call_context=False):
if one or more of `tensors` originates from a `keras.Input` and
does not have `_keras_history` set.
-Arguments:
+Args:
tensors: An arbitrary nested structure of Tensors.
ignore_call_context: Whether to ignore the check of if currently
outside of a `call` context. This is `True` when creating
@@ -370,7 +370,7 @@ def uses_keras_history(tensors):
already been checked to not originate from a `keras.Input`
are marked as `_keras_history_checked`.
-Arguments:
+Args:
tensors: An arbitrary nested structure of Tensors.
Returns:
@@ -412,7 +412,7 @@ def mark_checked(tensors):
This prevents Layers from attempting to create TensorFlowOpLayers
for these Tensors.
-Arguments:
+Args:
tensors: An arbitrary structure of Tensors.
"""
@@ -469,7 +469,7 @@ class CallContext(object):
def enter(self, layer, inputs, build_graph, training, saving=None):
"""Push a Layer and its inputs and state onto the current call context.
-Arguments:
+Args:
layer: The `Layer` whose `call` is currently active.
inputs: The inputs to the currently active `Layer`.
build_graph: Whether currently inside a Graph or FuncGraph.
@@ -584,7 +584,7 @@ def check_graph_consistency(tensor=None, method='add_loss', force_raise=False):
the underlying tensor gets created in a FuncGraph managed by control_flow_v2.
We need to raise clear error messages in such cases.
-Arguments:
+Args:
tensor: Tensor to check, or `False` if it is known that an error
should be raised.
method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.
@@ -96,7 +96,7 @@ class Layer(base_layer.Layer):
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
-Arguments:
+Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights (default of
@@ -274,7 +274,7 @@ class Layer(base_layer.Layer):
This is typically used to create the weights of `Layer` subclasses.
-Arguments:
+Args:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
@@ -287,7 +287,7 @@ class Layer(base_layer.Layer):
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
-Arguments:
+Args:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments.
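
A toy subclass illustrating the `build`/`call` division of labor these hunks document (the layer itself is an assumed example):

```python
import tensorflow as tf

class Linear(tf.keras.layers.Layer):

    def __init__(self, units=32, **kwargs):
        super(Linear, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # Weight creation is deferred until the input shape is known.
        self.kernel = self.add_weight(
            'kernel', shape=(int(input_shape[-1]), self.units))

    def call(self, inputs):
        # The layer's actual logic.
        return tf.matmul(inputs, self.kernel)

y = Linear(4)(tf.ones((2, 3)))  # `build` runs on the first call
```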
@@ -300,7 +300,7 @@ class Layer(base_layer.Layer):
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
-Arguments:
+Args:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
@@ -332,7 +332,7 @@ class Layer(base_layer.Layer):
**kwargs):
"""Adds a new variable to the layer.
-Arguments:
+Args:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
@@ -524,7 +524,7 @@ class Layer(base_layer.Layer):
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
-Arguments:
+Args:
config: A Python dictionary, typically the
output of get_config.
@@ -540,7 +540,7 @@ class Layer(base_layer.Layer):
layer. This assumes that the layer will later be used with inputs that
match the input shape provided here.
-Arguments:
+Args:
input_shape: Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
@@ -619,7 +619,7 @@ class Layer(base_layer.Layer):
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
-Arguments:
+Args:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
@@ -640,7 +640,7 @@ class Layer(base_layer.Layer):
def __call__(self, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
-Arguments:
+Args:
*args: Positional arguments to be passed to `self.call`.
**kwargs: Keyword arguments to be passed to `self.call`.
@@ -1015,7 +1015,7 @@ class Layer(base_layer.Layer):
The `get_losses_for` method allows to retrieve the losses relevant to a
specific set of inputs.
-Arguments:
+Args:
losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
inputs: Ignored when executing eagerly. If anything other than None is
@@ -1168,7 +1168,7 @@ class Layer(base_layer.Layer):
updates are run on the fly and thus do not need to be tracked for later
execution).
-Arguments:
+Args:
updates: Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting `trainable=False`
@@ -1200,7 +1200,7 @@ class Layer(base_layer.Layer):
def process_update(x):
"""Standardize update ops.
-Arguments:
+Args:
x: Tensor, op, or callable.
Returns:
@@ -1256,7 +1256,7 @@ class Layer(base_layer.Layer):
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
-Arguments:
+Args:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
@@ -1350,7 +1350,7 @@ class Layer(base_layer.Layer):
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
-Arguments:
+Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
@@ -1369,7 +1369,7 @@ class Layer(base_layer.Layer):
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
-Arguments:
+Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
@@ -1388,7 +1388,7 @@ class Layer(base_layer.Layer):
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -1407,7 +1407,7 @@ class Layer(base_layer.Layer):
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -1468,7 +1468,7 @@ class Layer(base_layer.Layer):
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -1487,7 +1487,7 @@ class Layer(base_layer.Layer):
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -1506,7 +1506,7 @@ class Layer(base_layer.Layer):
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -1524,7 +1524,7 @@ class Layer(base_layer.Layer):
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
-Arguments:
+Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
@@ -1683,7 +1683,7 @@ class Layer(base_layer.Layer):
This is an alias of `self.__call__`.
-Arguments:
+Args:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
@@ -2029,7 +2029,7 @@ class Layer(base_layer.Layer):
- get_input_at
etc...
-Arguments:
+Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
@@ -55,7 +55,7 @@ class PreprocessingLayer(Layer):
# TODO(momernick): Add examples.
"""Fits the state of the preprocessing layer to the data being passed.
-Arguments:
+Args:
data: The data to train on. It can be passed either as a tf.data
Dataset, or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
@@ -137,7 +137,7 @@ class CombinerPreprocessingLayer(PreprocessingLayer):
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the data being passed.
-Arguments:
+Args:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
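
A sketch of `adapt` on one concrete preprocessing layer; the `experimental` module path reflects the API around the time of this commit and may have moved since:

```python
import numpy as np
import tensorflow as tf

# `adapt` fits the layer's mean/variance state to the data ahead of time.
norm = tf.keras.layers.experimental.preprocessing.Normalization()
norm.adapt(np.array([[1.0], [2.0], [3.0]]))
print(norm(np.array([[2.0]])))  # ~0 after normalization
```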
@@ -52,7 +52,7 @@ class Container(object):
(2) Fill missing keys in a dict w/ `None`s.
(3) Map a single item to all outputs.
-Arguments:
+Args:
outputs: Model predictions.
struct: Arbitrary nested structure (e.g. of labels, sample_weights,
losses, or metrics).
@@ -73,7 +73,7 @@ class Container(object):
NOTE: This method should only be called for Metrics / Losses, not for
y_true / sample_weight.
-Arguments:
+Args:
outputs: Model predictions.
objects: Arbitrary nested structure (e.g. of losses or metrics)
@@ -168,7 +168,7 @@ class LossesContainer(Container):
regularization_losses=None):
"""Computes the overall loss.
-Arguments:
+Args:
y_true: An arbitrary structure of Tensors representing the ground truth.
y_pred: An arbitrary structure of Tensors representing a Model's outputs.
sample_weight: An arbitrary structure of Tensors representing the
@@ -251,7 +251,7 @@ class LossesContainer(Container):
Converts the user-supplied loss to a `Loss` object. Also allows
`SUM_OVER_BATCH_SIZE` reduction to be used for this loss.
-Arguments:
+Args:
loss: A string, function, or `Loss` object.
Returns:
@@ -437,7 +437,7 @@ class MetricsContainer(Container):
def _get_metric_object(self, metric, y_t, y_p):
"""Converts user-supplied metric to a `Metric` object.
-Arguments:
+Args:
metric: A string, function, or `Metric` object.
y_t: Sample of label.
y_p: Sample of output.
@@ -534,7 +534,7 @@ def _create_pseudo_names(tensors, prefix):
`[x, y]` becomes:
`['output_1', 'output_2']`
-Arguments:
+Args:
tensors: `Model`'s outputs or inputs.
prefix: 'output_' for outputs, 'input_' for inputs.
@@ -579,7 +579,7 @@ def map_to_output_names(y_pred, output_names, struct):
This mapping preserves backwards compatibility for `compile` and
`fit`.
-Arguments:
+Args:
y_pred: Sample outputs of the Model, to determine if this convenience
feature should be applied (`struct` is returned unmodified if `y_pred`
isn't a flat list).
@@ -660,7 +660,7 @@ def apply_mask(y_p, sw, mask):
def get_custom_object_name(obj):
"""Returns the name to use for a custom loss or metric callable.
-Arguments:
+Args:
obj: Custom loss of metric callable
Returns:
@@ -1267,7 +1267,7 @@ def _make_class_weight_map_fn(class_weight):
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
-Arguments:
+Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
@@ -1335,7 +1335,7 @@ def train_validation_split(arrays, validation_split):
The last part of data will become validation data.
-Arguments:
+Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
@@ -1433,7 +1433,7 @@ def unpack_x_y_sample_weight(data):
return {m.name: m.result() for m in self.metrics}
```
-Arguments:
+Args:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
@@ -1473,7 +1473,7 @@ def pack_x_y_sample_weight(x, y=None, sample_weight=None):
True
>>> x, y = data
-Arguments:
+Args:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
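
A sketch of the round trip between the two helpers in these hunks:

```python
import tensorflow as tf

features = tf.ones((8, 4))
labels = tf.zeros((8, 1))
data = tf.keras.utils.pack_x_y_sample_weight(features, labels)
# Missing components come back as None.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
```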
@@ -90,7 +90,7 @@ class Functional(training_lib.Model):
model = keras.Model(inputs, outputs)
```
-Arguments:
+Args:
inputs: List of input tensors (must be created via `tf.keras.Input()`).
outputs: List of outputs tensors.
name: String, optional. Name of the model.
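
A complete version of the `keras.Model(inputs, outputs)` pattern referenced above (the layer sizes are arbitrary):

```python
import tensorflow as tf

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(16, activation='relu')(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.summary()
```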
@@ -412,7 +412,7 @@ class Functional(training_lib.Model):
all ops in the graph to the new inputs
(e.g. build a new computational graph from the provided inputs).
-Arguments:
+Args:
inputs: A tensor or list of tensors.
training: Boolean or boolean scalar tensor, indicating whether to run
the `Network` in training mode or inference mode.
@@ -521,7 +521,7 @@ class Functional(training_lib.Model):
# Note:
- Can be run on non-Keras tensors.
-Arguments:
+Args:
inputs: Tensor or nested structure of Tensors.
training: Boolean learning phase.
mask: (Optional) Tensor or nested structure of Tensors.
@@ -655,7 +655,7 @@ class Functional(training_lib.Model):
def from_config(cls, config, custom_objects=None):
"""Instantiates a Model from its config (output of `get_config()`).
-Arguments:
+Args:
config: Model config dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
@@ -738,7 +738,7 @@ class Functional(training_lib.Model):
They will not be added to the Network's outputs.
-Arguments:
+Args:
layers: Arbitrary nested structure of Layers. Layers must be reachable
from one or more of the `keras.Input` Tensors that correspond to this
Network's inputs.
@@ -886,7 +886,7 @@ def _make_node_key(layer_name, node_index):
def _map_graph_network(inputs, outputs):
"""Validates a network's topology and gather its layers and nodes.
-Arguments:
+Args:
inputs: List of input tensors.
outputs: List of outputs tensors.
@@ -1187,7 +1187,7 @@ def reconstruct_from_config(config, custom_objects=None, created_layers=None):
def process_node(layer, node_data):
"""Deserialize a node.
-Arguments:
+Args:
layer: layer instance.
node_data: Nested structure of `ListWrapper`.
@@ -1243,7 +1243,7 @@ def reconstruct_from_config(config, custom_objects=None, created_layers=None):
def process_layer(layer_data):
"""Deserializes a layer, then call it on appropriate inputs.
-Arguments:
+Args:
layer_data: layer config dict.
Raises:
@@ -1405,7 +1405,7 @@ class ModuleWrapper(base_layer.Layer):
def __init__(self, module, method_name=None, **kwargs):
"""Initializes the wrapper Layer for this module.
-Arguments:
+Args:
module: The `tf.Module` instance to be wrapped.
method_name: (Optional) str. The name of the method to use as the forward
pass of the module. If not set, defaults to '__call__' if defined, or

View File

@@ -69,7 +69,7 @@ class InputLayer(base_layer.Layer):
 np.ones((10, 8)))
 ```
-Arguments:
+Args:
 input_shape: Shape tuple (not including the batch axis), or `TensorShape`
 instance (not including the batch axis).
 batch_size: Optional input batch size (integer or None).
@@ -224,7 +224,7 @@ def Input(  # pylint: disable=invalid-name
 it becomes possible to do:
 `model = Model(input=[a, b], output=c)`
-Arguments:
+Args:
 shape: A shape tuple (integers), not including the batch size.
 For instance, `shape=(32,)` indicates that the expected input
 will be batches of 32-dimensional vectors. Elements of this tuple
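
A small usage sketch of the `shape` argument described above (the dimension is the docstring's own example):

```python
import tensorflow as tf

# Expected input: batches of 32-dimensional vectors; batch size left out.
x = tf.keras.Input(shape=(32,))
print(x.shape)  # (None, 32); None is the unspecified batch dimension.
```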

View File

@@ -43,7 +43,7 @@ class InputSpec(object):
 A None entry in a shape is compatible with any dimension,
 a None shape is compatible with any shape.
-Arguments:
+Args:
 dtype: Expected DataType of the input.
 shape: Shape tuple, expected shape of the input
 (may include None for unchecked axes). Includes the batch size.
@@ -162,7 +162,7 @@ def assert_input_compatibility(input_spec, inputs, layer_name):
 This checks that the tensor(s) `inputs` verify the input assumptions
 of a layer (if any). If not, a clear and actionable exception gets raised.
-Arguments:
+Args:
 input_spec: An InputSpec instance, list of InputSpec instances, a nested
 structure of InputSpec instances, or None.
 inputs: Input tensor, list of input tensors, or a nested structure of
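
A sketch of how a custom layer typically declares an `InputSpec` so that the compatibility check above can fire; the layer and its expected rank are invented for the example:

```python
import tensorflow as tf

class FourDOnly(tf.keras.layers.Layer):
  """Toy layer that only accepts rank-4 float32 inputs."""

  def __init__(self, **kwargs):
    super().__init__(**kwargs)
    # Declared expectations; Keras validates them when the layer is called.
    self.input_spec = tf.keras.layers.InputSpec(ndim=4, dtype='float32')

  def call(self, inputs):
    return inputs

layer = FourDOnly()
layer(tf.zeros((2, 8, 8, 3)))   # OK: rank 4.
# layer(tf.zeros((2, 8)))       # Would raise a ValueError: wrong ndim.
```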

View File

@@ -43,7 +43,7 @@ class Node(object):
 Each time the output of a layer is used by another layer,
 a node is added to `layer._outbound_nodes`.
-Arguments:
+Args:
 layer: The Layer for the Layer.__call__ this node represents.
 call_args: The positional arguments the Layer was called with.
 call_kwargs: The keyword arguments the Layer was called with.

View File

@@ -160,7 +160,7 @@ class Sequential(functional.Functional):
 def add(self, layer):
 """Adds a layer instance on top of the layer stack.
-Arguments:
+Args:
 layer: layer instance.
 Raises:
@@ -422,7 +422,7 @@ class Sequential(functional.Functional):
 The input samples are processed batch by batch.
-Arguments:
+Args:
 x: input data, as a Numpy array or list of Numpy arrays
 (if the model has multiple inputs).
 batch_size: integer.
@@ -447,7 +447,7 @@ class Sequential(functional.Functional):
 The input samples are processed batch by batch.
-Arguments:
+Args:
 x: input data, as a Numpy array or list of Numpy arrays
 (if the model has multiple inputs).
 batch_size: integer.
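
A short sketch of `Sequential.add` and batch-by-batch prediction as documented in these hunks (layer sizes arbitrary):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(4, input_shape=(8,), activation='relu'))
model.add(tf.keras.layers.Dense(1))

# Input samples are processed batch by batch.
preds = model.predict(np.ones((10, 8)), batch_size=2)
print(preds.shape)  # (10, 1)
```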

View File

@@ -142,7 +142,7 @@ def is_functional_model_init_params(args, kwargs):
 class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 """`Model` groups layers into an object with training and inference features.
-Arguments:
+Args:
 inputs: The input(s) of the model: a `keras.Input` object or list of
 `keras.Input` objects.
 outputs: The output(s) of the model. See Functional API example below.
@@ -467,7 +467,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 To call a model on an input, always use the `__call__` method,
 i.e. `model(inputs)`, which relies on the underlying `call` method.
-Arguments:
+Args:
 inputs: A tensor or list of tensors.
 training: Boolean or boolean scalar tensor, indicating whether to run
 the `Network` in training mode or inference mode.
@@ -492,7 +492,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 **kwargs):
 """Configures the model for training.
-Arguments:
+Args:
 optimizer: String (name of optimizer) or optimizer instance. See
 `tf.keras.optimizers`.
 loss: String (name of objective function), objective function or
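
A sketch of the `compile` arguments listed above; the optimizer, loss, and metric names are standard Keras strings chosen for the example:

```python
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)

# optimizer: string name or instance; loss: string name or callable.
model.compile(optimizer='sgd', loss='mse', metrics=['mae'])
```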
@@ -770,7 +770,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 `tf.distribute.Strategy` settings), should be left to
 `Model.make_train_function`, which can also be overridden.
-Arguments:
+Args:
 data: A nested structure of `Tensor`s.
 Returns:
@@ -876,7 +876,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 use_multiprocessing=False):
 """Trains the model for a fixed number of epochs (iterations on a dataset).
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
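
A minimal sketch of `fit` with the first `x` form listed above, a Numpy array; the data is random and purely illustrative:

```python
import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='sgd', loss='mse')

x = np.random.random((32, 8)).astype('float32')
y = np.random.random((32, 1)).astype('float32')
history = model.fit(x, y, epochs=2, batch_size=8, verbose=0)
print(history.history['loss'])  # One loss value per epoch.
```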
@@ -1204,7 +1204,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 `tf.distribute.Strategy` settings), should be left to
 `Model.make_test_function`, which can also be overridden.
-Arguments:
+Args:
 data: A nested structure of `Tensor`s.
 Returns:
@@ -1298,7 +1298,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 Computation is done in batches (see the `batch_size` arg.)
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -1457,7 +1457,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 `tf.distribute.Strategy` settings), should be left to
 `Model.make_predict_function`, which can also be overridden.
-Arguments:
+Args:
 data: A nested structure of `Tensor`s.
 Returns:
@@ -1553,7 +1553,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 inference. Also, note the fact that test loss is not affected by
 regularization layers like noise and dropout.
-Arguments:
+Args:
 x: Input samples. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -1712,7 +1712,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 return_dict=False):
 """Runs a single gradient update on a single batch of data.
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -1780,7 +1780,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 return_dict=False):
 """Test the model on a single batch of samples.
-Arguments:
+Args:
 x: Input data. It could be: - A Numpy array (or array-like), or a list
 of arrays (in case the model has multiple inputs). - A TensorFlow
 tensor, or a list of tensors (in case the model has multiple inputs).
@@ -1834,7 +1834,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 def predict_on_batch(self, x):
 """Returns predictions for a single batch of samples.
-Arguments:
+Args:
 x: Input data. It could be: - A Numpy array (or array-like), or a list
 of arrays (in case the model has multiple inputs). - A TensorFlow
 tensor, or a list of tensors (in case the model has multiple inputs).
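
The three `*_on_batch` methods touched above, in one illustrative sketch (shapes and data arbitrary):

```python
import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='sgd', loss='mse')

x = np.ones((4, 8), dtype='float32')
y = np.zeros((4, 1), dtype='float32')

train_loss = model.train_on_batch(x, y)  # One gradient update.
test_loss = model.test_on_batch(x, y)    # Loss only, no weight update.
preds = model.predict_on_batch(x)        # Forward pass only.
```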
@@ -2011,7 +2011,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 [Serialization and Saving guide](https://keras.io/guides/serialization_and_saving/)
 for details.
-Arguments:
+Args:
 filepath: String, PathLike, path to SavedModel or H5 file to save the
 model.
 overwrite: Whether to silently overwrite any existing file at the
@@ -2097,7 +2097,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 checkpoints](https://www.tensorflow.org/guide/checkpoint) for details
 on the TensorFlow format.
-Arguments:
+Args:
 filepath: String or PathLike, path to the file to save the weights to.
 When saving in TensorFlow format, this is the prefix used for
 checkpoint files (multiple files are generated). Note that the '.h5'
@@ -2202,7 +2202,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 TensorFlow format loads based on the object-local names of attributes to
 which layers are assigned in the `Model`'s constructor.
-Arguments:
+Args:
 filepath: String, path to the weights file to load. For weight files in
 TensorFlow format, this is the file prefix (the same as was passed
 to `save_weights`). This can also be a path to a SavedModel
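
A sketch of the `save_weights`/`load_weights` round trip described above; the checkpoint prefix `/tmp/toy_ckpt` is a placeholder:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])

# TensorFlow checkpoint format: `filepath` is a prefix, not a single file.
model.save_weights('/tmp/toy_ckpt')

clone = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
clone.load_weights('/tmp/toy_ckpt')  # Same prefix passed to save_weights.
```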
@@ -2308,7 +2308,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 To load a network from a JSON save file, use
 `keras.models.model_from_json(json_string, custom_objects={})`.
-Arguments:
+Args:
 **kwargs: Additional keyword arguments
 to be passed to `json.dumps()`.
@@ -2329,7 +2329,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 the names of custom losses / layers / etc to the corresponding
 functions / classes.
-Arguments:
+Args:
 **kwargs: Additional keyword arguments
 to be passed to `yaml.dump()`.
@@ -2398,7 +2398,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 def summary(self, line_length=None, positions=None, print_fn=None):
 """Prints a string summary of the network.
-Arguments:
+Args:
 line_length: Total length of printed lines
 (e.g. set this to adapt the display to different
 terminal window sizes).
@@ -2434,7 +2434,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 If `name` and `index` are both provided, `index` will take precedence.
 Indices are based on order of horizontal graph traversal (bottom-up).
-Arguments:
+Args:
 name: String, name of layer.
 index: Integer, index of layer.
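
A sketch of `summary` and `get_layer` as documented above; the layer name `head` is invented:

```python
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(1, name='head')(inputs)
model = tf.keras.Model(inputs, outputs)

model.summary(line_length=60)          # Narrower table for small terminals.
layer = model.get_layer(name='head')   # Lookup by name...
same = model.get_layer(index=1)        # ...or by traversal index.
assert layer is same
```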
@@ -2611,7 +2611,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 Refer to tensorflow/python/keras/distribute/worker_training_state.py
 for more information.
-Arguments:
+Args:
 initial_epoch: The original initial_epoch the user passes in `fit()`.
 Returns:
@@ -2716,7 +2716,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 def reduce_per_replica(values, strategy, reduction='first'):
 """Reduce PerReplica objects.
-Arguments:
+Args:
 values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
 returned as-is.
 strategy: `tf.distribute.Strategy` object.

View File

@@ -66,7 +66,7 @@ def model_iteration(model,
 **kwargs):
 """Loop function for arrays of data with modes TRAIN/TEST/PREDICT.
-Arguments:
+Args:
 model: Keras Model instance.
 inputs: Either a list or dictionary of arrays, or a dataset instance.
 targets: List/dictionary of target arrays.
@@ -486,7 +486,7 @@ def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch):
 def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
 """Prepare feed values to the model execution function.
-Arguments:
+Args:
 model: Model to prepare feed values for.
 inputs: List or dict of model inputs.
 targets: Optional list of model targets.

View File

@@ -61,7 +61,7 @@ def _build_model(strategy, model, mode, inputs, targets=None):
 def _make_train_step_fn(model, mode, strategy, output_labels):
 """Create step fn.
-Arguments:
+Args:
 model: a Keras Model instance.
 mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
 strategy: a `tf.distribute.Strategy` instance.
@@ -133,7 +133,7 @@ def experimental_tpu_fit_loop(model,
 validation_freq=1):
 """Fit loop for training with TPU tf.distribute.Strategy.
-Arguments:
+Args:
 model: Keras Model instance.
 dataset: Dataset that returns inputs and targets.
 epochs: Number of times to iterate over the data.
@@ -298,7 +298,7 @@ def experimental_tpu_test_loop(model,
 callbacks=None):
 """Test loop for evaluating with TPU tf.distribute.Strategy.
-Arguments:
+Args:
 model: Keras Model instance.
 dataset: Dataset for input data.
 verbose: Integer, verbosity mode 0 or 1.
@@ -429,7 +429,7 @@ def experimental_tpu_predict_loop(model,
 callbacks=None):
 """Predict loop for predicting with TPU tf.distribute.Strategy.
-Arguments:
+Args:
 model: Keras Model instance.
 dataset: Dataset for input data.
 verbose: Integer, verbosity mode 0 or 1.

View File

@@ -42,7 +42,7 @@ def _eager_loss_fn(outputs, targets, loss_fn, output_name):
 def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None):
 """Calculates the metrics for each output of the given model.
-Arguments:
+Args:
 model: The model on which metrics are being calculated.
 outputs: The outputs of the given model.
 targets: The predictions or targets of the given model.
@@ -90,7 +90,7 @@ def _model_loss(model,
 training=False):
 """Calculates the loss for a given model.
-Arguments:
+Args:
 model: The model on which metrics are being calculated.
 inputs: Either a dictionary of inputs to the model or a list of input
 arrays.
@@ -231,7 +231,7 @@ def _process_single_batch(model,
 The model weights are updated if training is set to True.
-Arguments:
+Args:
 model: Model whose loss has to be calculated.
 inputs: List of input arrays.
 targets: List of target arrays.
@@ -291,7 +291,7 @@ def train_on_batch(model,
 output_loss_metrics=None):
 """Calculates the loss and gradient updates for one input batch.
-Arguments:
+Args:
 model: Model whose loss has to be calculated.
 inputs: Input batch data.
 targets: Target batch data.
@@ -332,7 +332,7 @@ def test_on_batch(model,
 output_loss_metrics=None):
 """Calculates the loss for one input batch.
-Arguments:
+Args:
 model: Model whose loss has to be calculated.
 inputs: Input batch data.
 targets: Target batch data.

View File

@@ -60,7 +60,7 @@ def model_iteration(model,
 **kwargs):
 """Loop function for arrays of data with modes TRAIN/TEST/PREDICT.
-Arguments:
+Args:
 model: Keras Model instance.
 data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or `(x, y)` or
 `(x, y, sample_weights)`) or a generator or
@@ -370,7 +370,7 @@ def _validate_arguments(is_sequence, is_dataset, use_multiprocessing, workers,
 mode, kwargs):
 """Raises errors if arguments are invalid.
-Arguments:
+Args:
 is_sequence: Boolean, whether data is a `keras.utils.data_utils.Sequence`
 instance.
 is_dataset: Boolean, whether data is a dataset instance.
@@ -429,7 +429,7 @@ def convert_to_generator_like(data,
 shuffle=False):
 """Make a generator out of NumPy or EagerTensor inputs.
-Arguments:
+Args:
 data: Either a generator or `keras.utils.data_utils.Sequence` object or
 `Dataset`, `Iterator`, or a {1,2,3}-tuple of NumPy arrays or EagerTensors.
 If a tuple, the elements represent `(x, y, sample_weights)` and may be

View File

@@ -35,7 +35,7 @@ def slice_arrays(arrays, indices, contiguous=True):
 and we have to implement this workaround based on `concat`. This has a
 performance cost.
-Arguments:
+Args:
 arrays: Single array or list of arrays.
 indices: List of indices in the array that should be included in the output
 batch.
@@ -206,7 +206,7 @@ def get_input_shape_and_dtype(layer):
 def get_static_batch_size(layer):
 """Gets the static batch size of a Layer.
-Arguments:
+Args:
 layer: a `Layer` instance.
 Returns:

View File

@@ -96,7 +96,7 @@ class Aggregator(object):
 def create(self, batch_outs):
 """Creates the initial results from the first batch outputs.
-Arguments:
+Args:
 batch_outs: A list of batch-level outputs.
 """
 raise NotImplementedError('Must be implemented in subclasses.')
@@ -105,7 +105,7 @@ class Aggregator(object):
 def aggregate(self, batch_outs, batch_start=None, batch_end=None):
 """Aggregates batch-level results into total results.
-Arguments:
+Args:
 batch_outs: A list of batch-level outputs.
 batch_start: The start index of this batch. Always `None` if `use_steps`
 is `True`.
@@ -227,7 +227,7 @@ def _append_composite_tensor(target, to_append):
 working with CompositeTensor Value objects that have no connection with the
 CompositeTensors that created them.
-Arguments:
+Args:
 target: CompositeTensor or CompositeTensor value object that will be
 appended to.
 to_append: CompositeTensor or CompositeTensor value object to append to.
@@ -487,7 +487,7 @@ def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):
 The number of samples is not defined when running with `steps`,
 in which case the number of samples is set to `None`.
-Arguments:
+Args:
 ins: List of tensors to be fed to the Keras function.
 batch_size: Integer batch size or `None` if not defined.
 steps: Total number of steps (batches of samples) before declaring
@@ -559,7 +559,7 @@ def standardize_input_data(data,
 arrays (same order as `names`), while checking that the provided
 arrays have shapes that match the network's expectations.
-Arguments:
+Args:
 data: User-provided input data (polymorphic).
 names: List of expected array names.
 shapes: Optional list of expected array shapes.
@@ -676,7 +676,7 @@ def standardize_input_data(data,
 def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
 """Maps `sample_weight` or `class_weight` to model outputs.
-Arguments:
+Args:
 x_weight: User-provided `sample_weight` or `class_weight` argument.
 output_names: List of output names (strings) in the model.
 weight_type: A string used purely for exception printing.
@@ -732,7 +732,7 @@ def standardize_sample_weights(sample_weight, output_names):
 def check_array_lengths(inputs, targets, weights=None):
 """Does user input validation for numpy arrays.
-Arguments:
+Args:
 inputs: list of Numpy arrays of inputs.
 targets: list of Numpy arrays of targets.
 weights: list of Numpy arrays of sample weights.
@@ -789,7 +789,7 @@ def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
 This helps prevent users from using loss functions incorrectly. This check
 is purely for UX purposes.
-Arguments:
+Args:
 targets: list of Numpy arrays of targets.
 loss_fns: list of loss functions.
 output_shapes: list of shapes of model outputs.
@@ -849,7 +849,7 @@ def collect_per_output_metric_info(metrics,
 is_weighted=False):
 """Maps metric names and functions to model outputs.
-Arguments:
+Args:
 metrics: a list or a list of lists or a dict of metric functions.
 output_names: a list of the names (strings) of model outputs.
 output_shapes: a list of the shapes (strings) of model outputs.
@@ -927,7 +927,7 @@ def batch_shuffle(index_array, batch_size):
 Useful for shuffling HDF5 arrays
 (where one cannot access arbitrary indices).
-Arguments:
+Args:
 index_array: array of indices to be shuffled.
 batch_size: integer.
@@ -955,7 +955,7 @@ def standardize_weights(y,
 weight array. If both `sample_weight` and `class_weight` are provided,
 the weights are multiplied.
-Arguments:
+Args:
 y: Numpy array or Tensor of model targets to be weighted.
 sample_weight: User-provided `sample_weight` argument.
 class_weight: User-provided `class_weight` argument.
@@ -1099,7 +1099,7 @@ def has_tensors(ls):
 def get_metric_name(metric, weighted=False):
 """Returns the name corresponding to the given metric input.
-Arguments:
+Args:
 metric: Metric function name or reference.
 weighted: Boolean indicating if the given metric is weighted.
@@ -1134,7 +1134,7 @@ def get_metric_name(metric, weighted=False):
 def get_metric_function(metric, output_shape=None, loss_fn=None):
 """Returns the metric function corresponding to the given metric input.
-Arguments:
+Args:
 metric: Metric function name or reference.
 output_shape: The shape of the output that this metric will be calculated
 for.
@@ -1232,7 +1232,7 @@ def get_loss_function(loss):
 def validate_dataset_input(x, y, sample_weight, validation_split=None):
 """Validates user input arguments when a dataset iterator is passed.
-Arguments:
+Args:
 x: Input data. A `tf.data` dataset or iterator.
 y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).
 Expected to be `None` when `x` is a dataset iterator.
@@ -1310,7 +1310,7 @@ def check_steps_argument(input_data, steps, steps_name):
 required and is `None`.
 3. input data passed is a symbolic tensor.
-Arguments:
+Args:
 input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or
 tf.data.Dataset iterator or `None`.
 steps: Integer or `None`. Total number of steps (batches of samples) to
@@ -1458,7 +1458,7 @@ def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):
 def prepare_loss_functions(loss, output_names):
 """Converts loss to a list of loss functions.
-Arguments:
+Args:
 loss: String (name of objective function), objective function or
 `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple
 outputs, you can use a different loss on each output by passing a
@@ -1502,7 +1502,7 @@ def prepare_loss_weights(training_endpoints, loss_weights=None):
 The result loss weights will be populated on the training endpoint.
-Arguments:
+Args:
 training_endpoints: List of model training endpoints.
 loss_weights: Optional list or dictionary specifying scalar coefficients
 (Python floats) to weight the loss contributions of different model
@@ -1609,7 +1609,7 @@ def initialize_iterator(iterator):
 def extract_tensors_from_dataset(dataset):
 """Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.
-Arguments:
+Args:
 dataset: Dataset instance.
 Returns:
@@ -1623,7 +1623,7 @@ def extract_tensors_from_dataset(dataset):
 def unpack_iterator_input(iterator):
 """Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.
-Arguments:
+Args:
 iterator: Instance of a dataset iterator.
 Returns:
@@ -1661,7 +1661,7 @@ def infer_steps_for_dataset(model,
 steps_name='steps'):
 """Infers steps_per_epoch needed to loop through a dataset.
-Arguments:
+Args:
 model: Keras model instance.
 dataset: Input data of type tf.data.Dataset.
 steps: Number of steps to draw from the dataset (may be None if unknown).
@@ -1805,7 +1805,7 @@ def generic_output_names(outputs_list):
 def should_run_validation(validation_freq, epoch):
 """Checks if validation should be run this epoch.
-Arguments:
+Args:
 validation_freq: Integer or list. If an integer, specifies how many training
 epochs to run before a new validation run is performed. If a list,
 specifies the epochs on which to run validation.
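
The `validation_freq` semantics above, paraphrased as a small re-implementation for clarity; this is a sketch, not the library's exact code:

```python
def should_run_validation(validation_freq, epoch):
  """True if validation should run this (1-indexed) epoch."""
  if isinstance(validation_freq, int):
    return epoch % validation_freq == 0  # Every N training epochs.
  return epoch in validation_freq        # Only on explicitly listed epochs.

assert should_run_validation(2, 4)       # Every 2 epochs: runs on epoch 4.
assert not should_run_validation(2, 3)
assert should_run_validation([1, 5], 5)  # Explicit epoch list.
```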

View File

@@ -202,7 +202,7 @@ class Model(training_lib.Model):
 TensorFlow format loads based on the object-local names of attributes to
 which layers are assigned in the `Model`'s constructor.
-Arguments:
+Args:
 filepath: String, path to the weights file to load. For weight files in
 TensorFlow format, this is the file prefix (the same as was passed
 to `save_weights`).
@@ -248,7 +248,7 @@ class Model(training_lib.Model):
 **kwargs):
 """Configures the model for training.
-Arguments:
+Args:
 optimizer: String (name of optimizer) or optimizer instance.
 See `tf.keras.optimizers`.
 loss: String (name of objective function), objective function or
@@ -637,7 +637,7 @@ class Model(training_lib.Model):
 **kwargs):
 """Trains the model for a fixed number of epochs (iterations on a dataset).
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -830,7 +830,7 @@ class Model(training_lib.Model):
 Computation is done in batches (see the `batch_size` arg.)
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -934,7 +934,7 @@ class Model(training_lib.Model):
 Computation is done in batches (see the `batch_size` arg.)
-Arguments:
+Args:
 x: Input samples. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -1016,7 +1016,7 @@ class Model(training_lib.Model):
 reset_metrics=True):
 """Runs a single gradient update on a single batch of data.
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -1105,7 +1105,7 @@ class Model(training_lib.Model):
 def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True):
 """Test the model on a single batch of samples.
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -1181,7 +1181,7 @@ class Model(training_lib.Model):
 def predict_on_batch(self, x):
 """Returns predictions for a single batch of samples.
-Arguments:
+Args:
 x: Input data. It could be:
 - A Numpy array (or array-like), or a list of arrays
 (in case the model has multiple inputs).
@@ -1570,7 +1570,7 @@ class Model(training_lib.Model):
 def _prepare_total_loss(self, masks):
 """Computes total loss from loss functions.
-Arguments:
+Args:
 masks: List of mask values corresponding to each model output.
 Returns:
@@ -1696,7 +1696,7 @@ class Model(training_lib.Model):
 raised if `x` is a tf.data.Dataset and `batch_size` is specified as we
 expect users to provide batched datasets.
-Arguments:
+Args:
 batch_size: The batch_size provided as an argument to
 fit/evaluate/predict.
 steps: The steps provided as an argument to fit/evaluate/predict.
@@ -1815,7 +1815,7 @@ class Model(training_lib.Model):
 If there are multiple outputs for which the metrics are calculated, the
 metric names have to be made unique by appending an integer.
-Arguments:
+Args:
 metric_name: Metric name that corresponds to the metric specified by the
 user. For example: 'acc'.
 output_index: The index of the model output for which the metric name is
@@ -1843,7 +1843,7 @@ class Model(training_lib.Model):
 def _set_per_output_metric_attributes(self, metrics_dict, output_index):
 """Sets the metric attributes on the model for the given output.
-Arguments:
+Args:
 metrics_dict: A dict with metric names as keys and metric fns as values.
 output_index: The index of the model output for which the metric
 attributes are added.
@@ -1899,7 +1899,7 @@ class Model(training_lib.Model):
 weights=None):
 """Calls metric functions for a single output.
-Arguments:
+Args:
 metrics_dict: A dict with metric names as keys and metric fns as values.
 y_true: Target output.
 y_pred: Predicted output.
@@ -1927,7 +1927,7 @@ class Model(training_lib.Model):
 return_weighted_and_unweighted_metrics=False):
 """Handles calling metric functions.
-Arguments:
+Args:
 outputs: List of outputs (predictions).
 targets: List of targets.
 skip_target_masks: Optional. List of boolean for whether the corresponding
@@ -2757,7 +2757,7 @@ class Model(training_lib.Model):
 Refer to tensorflow/python/keras/distribute/worker_training_state.py
 for more information.
-Arguments:
+Args:
 initial_epoch: The original initial_epoch the user passes in `fit()`.
 mode: The mode for running `model.fit()`.
@@ -3111,7 +3111,7 @@ class _TrainingEndpoint(object):
 class _TrainingTarget(object):
 """Container for a target tensor (y_true) and its metadata (shape, loss...).
-Arguments:
+Args:
 target: A target tensor for the model. It may be `None` if the
 output is excluded from loss computation. It is still kept as None
 since each output of the model should have a corresponding target. If
@@ -3155,7 +3155,7 @@ def _convert_scipy_sparse_tensor(value, expected_input):
 not a scipy sparse tensor, or scipy is not imported, we pass it through
 unchanged.
-Arguments:
+Args:
 value: An object that may be a scipy sparse tensor
 expected_input: The expected input placeholder.
@@ -3186,7 +3186,7 @@ def _get_metrics_from_layers(layers):
 This will not include the `compile` metrics of a model layer.
-Arguments:
+Args:
 layers: List of layers.
 Returns:

View File

@@ -617,7 +617,7 @@ class LecunNormal(VarianceScaling):
 >>> initializer = tf.keras.initializers.LecunNormal()
 >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
-Arguments:
+Args:
 seed: A Python integer. Used to seed the random generator.
 References:
@@ -661,7 +661,7 @@ class LecunUniform(VarianceScaling):
 >>> initializer = tf.keras.initializers.LecunUniform()
 >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
-Arguments:
+Args:
 seed: A Python integer. An initializer created with a given seed will
 always produce the same random tensor for a given shape and dtype.
@@ -704,7 +704,7 @@ class HeNormal(VarianceScaling):
 >>> initializer = tf.keras.initializers.HeNormal()
 >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
-Arguments:
+Args:
 seed: A Python integer. An initializer created with a given seed will
 always produce the same random tensor for a given shape and dtype.
@@ -744,7 +744,7 @@ class HeUniform(VarianceScaling):
 >>> initializer = tf.keras.initializers.HeUniform()
 >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
-Arguments:
+Args:
 seed: A Python integer. An initializer created with a given seed will
 always produce the same random tensor for a given shape and dtype.
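
A sketch of the documented `seed` behavior: two initializers built with the same seed should produce the same tensor for the same shape and dtype:

```python
import tensorflow as tf

init_a = tf.keras.initializers.HeNormal(seed=42)
init_b = tf.keras.initializers.HeNormal(seed=42)

# Same seed, same shape and dtype -> same tensor.
t1 = init_a(shape=(2, 3))
t2 = init_b(shape=(2, 3))
tf.debugging.assert_near(t1, t2)
```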

View File

@@ -64,7 +64,7 @@ class LeakyReLU(Layer):
 Output shape:
 Same shape as the input.
-Arguments:
+Args:
 alpha: Float >= 0. Negative slope coefficient. Defaults to 0.3.
 """
@@ -108,7 +108,7 @@ class PReLU(Layer):
 Output shape:
 Same shape as the input.
-Arguments:
+Args:
 alpha_initializer: Initializer function for the weights.
 alpha_regularizer: Regularizer for the weights.
 alpha_constraint: Constraint for the weights.
@@ -200,7 +200,7 @@ class ELU(Layer):
 Output shape:
 Same shape as the input.
-Arguments:
+Args:
 alpha: Scale for the negative factor.
 """
@@ -241,7 +241,7 @@ class ThresholdedReLU(Layer):
 Output shape:
 Same shape as the input.
-Arguments:
+Args:
 theta: Float >= 0. Threshold location of activation.
 """
@@ -303,7 +303,7 @@ class Softmax(Layer):
 Output shape:
 Same shape as the input.
-Arguments:
+Args:
 axis: Integer, or list of Integers, axis along which the softmax
 normalization is applied.
 Call arguments:
@@ -389,7 +389,7 @@ class ReLU(Layer):
 Output shape:
 Same shape as the input.
-Arguments:
+Args:
 max_value: Float >= 0. Maximum activation value. Defaults to None, which
 means unlimited.
 negative_slope: Float >= 0. Negative slope coefficient. Defaults to 0.
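
A sketch exercising the `max_value` and `negative_slope` arguments documented for `ReLU`; the input values are chosen to hit both the cap and the negative slope:

```python
import tensorflow as tf

relu = tf.keras.layers.ReLU(max_value=6.0, negative_slope=0.1)
x = tf.constant([-10.0, -1.0, 3.0, 100.0])
print(relu(x).numpy())  # Slope 0.1 below zero, identity in between, capped at 6.
```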

View File

@@ -61,7 +61,7 @@ class Conv(Layer):
 Note: layer attributes cannot be modified after the layer has been called
 once (except the `trainable` attribute).
-Arguments:
+Args:
 rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
 filters: Integer, the dimensionality of the output space (i.e. the number
 of filters in the convolution).
@@ -409,7 +409,7 @@ class Conv1D(Conv):
 >>> print(y.shape)
 (4, 7, 8, 32)
-Arguments:
+Args:
 filters: Integer, the dimensionality of the output space
 (i.e. the number of output filters in the convolution).
 kernel_size: An integer or tuple/list of a single integer,
@@ -564,7 +564,7 @@ class Conv2D(Conv):
 (4, 7, 26, 26, 2)
-Arguments:
+Args:
 filters: Integer, the dimensionality of the output space (i.e. the number of
 output filters in the convolution).
 kernel_size: An integer or tuple/list of 2 integers, specifying the height
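
A quick shape check for the `filters` and `kernel_size` arguments documented for `Conv2D` (sizes arbitrary):

```python
import tensorflow as tf

conv = tf.keras.layers.Conv2D(filters=2, kernel_size=3, activation='relu')
y = conv(tf.random.normal((4, 28, 28, 3)))
print(y.shape)  # (4, 26, 26, 2): 2 output filters, 3x3 valid convolution.
```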
@ -708,7 +708,7 @@ class Conv3D(Conv):
>>> print(y.shape) >>> print(y.shape)
(4, 7, 26, 26, 26, 2) (4, 7, 26, 26, 26, 2)
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number of filters: Integer, the dimensionality of the output space (i.e. the number of
output filters in the convolution). output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the depth, kernel_size: An integer or tuple/list of 3 integers, specifying the depth,
@ -835,7 +835,7 @@ class Conv1DTranspose(Conv1D):
(tuple of integers or `None`, does not include the sample axis), (tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels. e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels.
Arguments: Args:
filters: Integer, the dimensionality of the output space filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution). (i.e. the number of output filters in the convolution).
kernel_size: An integer length of the 1D convolution window. kernel_size: An integer length of the 1D convolution window.
@ -1083,7 +1083,7 @@ class Conv2DTranspose(Conv2D):
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`. in `data_format="channels_last"`.
Arguments: Args:
filters: Integer, the dimensionality of the output space filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
@@ -1386,7 +1386,7 @@ class Conv3DTranspose(Conv3D):
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
-Arguments:
+Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
@@ -1688,7 +1688,7 @@ class SeparableConv(Conv):
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
-Arguments:
+Args:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
@@ -1897,7 +1897,7 @@ class SeparableConv1D(SeparableConv):
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
-Arguments:
+Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
@@ -2081,7 +2081,7 @@ class SeparableConv2D(SeparableConv):
a way to factorize a convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
-Arguments:
+Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
@@ -2246,7 +2246,7 @@ class DepthwiseConv2D(Conv2D):
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
-Arguments:
+Args:
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
@@ -2480,7 +2480,7 @@ class UpSampling1D(Layer):
[ 9 10 11]
[ 9 10 11]]], shape=(2, 4, 3), dtype=int64)
-Arguments:
+Args:
size: Integer. Upsampling factor.
Input shape:
@@ -2538,7 +2538,7 @@ class UpSampling2D(Layer):
[[ 9 10 11]
[ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)
-Arguments:
+Args:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
@@ -2629,7 +2629,7 @@ class UpSampling3D(Layer):
>>> print(y.shape)
(2, 2, 4, 2, 3)
-Arguments:
+Args:
size: Int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
@@ -2724,7 +2724,7 @@ class ZeroPadding1D(Layer):
[ 0 0 0]
[ 0 0 0]]], shape=(2, 6, 3), dtype=int64)
-Arguments:
+Args:
padding: Int, or tuple of int (length 2), or dictionary.
- If int:
How many zeros to add at the beginning and end of
@@ -2791,7 +2791,7 @@ class ZeroPadding2D(Layer):
[0 0]
[0 0]]]], shape=(1, 3, 4, 2), dtype=int64)
-Arguments:
+Args:
padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
@@ -2898,7 +2898,7 @@ class ZeroPadding3D(Layer):
>>> print(y.shape)
(1, 5, 6, 6, 3)
-Arguments:
+Args:
padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
@@ -3035,7 +3035,7 @@ class Cropping1D(Layer):
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
-Arguments:
+Args:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
@@ -3087,7 +3087,7 @@ class Cropping2D(Layer):
>>> print(y.shape)
(2, 24, 20, 3)
-Arguments:
+Args:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to height and width.
@@ -3212,7 +3212,7 @@ class Cropping3D(Layer):
>>> print(y.shape)
(2, 24, 20, 6, 3)
-Arguments:
+Args:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
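The hunks above only retitle the docstring argument sections; layer behavior is untouched. As a quick check of the shapes those docstrings describe, a minimal sketch (layer sizes are illustrative, not taken from the diff):

```
import tensorflow as tf

x = tf.random.normal((2, 28, 28, 3))                           # NHWC batch
y = tf.keras.layers.ZeroPadding2D(padding=1)(x)                # (2, 30, 30, 3)
y = tf.keras.layers.SeparableConv2D(8, 3)(y)                   # (2, 28, 28, 8)
y = tf.keras.layers.UpSampling2D(size=(2, 2))(y)               # (2, 56, 56, 8)
y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(y)   # (2, 52, 48, 8)
print(y.shape)
```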
View File
@@ -40,7 +40,7 @@ from tensorflow.python.util.tf_export import keras_export
class ConvRNN2D(RNN):
"""Base class for convolutional-recurrent layers.
-Arguments:
+Args:
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
@@ -424,7 +424,7 @@ class ConvRNN2D(RNN):
class ConvLSTM2DCell(DropoutRNNCellMixin, Layer):
"""Cell class for the ConvLSTM2D layer.
-Arguments:
+Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
@@ -703,7 +703,7 @@ class ConvLSTM2D(ConvRNN2D):
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
-Arguments:
+Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
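For context, ConvLSTM2D consumes a 5-D batch of frame sequences; a minimal sketch with made-up sizes:

```
import tensorflow as tf

frames = tf.random.normal((2, 4, 32, 32, 3))   # (batch, time, H, W, C)
layer = tf.keras.layers.ConvLSTM2D(filters=8, kernel_size=3, padding="same")
print(layer(frames).shape)                     # (2, 32, 32, 8): last step only
```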
View File
@@ -175,7 +175,7 @@ class Dropout(Layer):
[ 7.5 8.75]
[10. 0. ]], shape=(5, 2), dtype=float32)
-Arguments:
+Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
@@ -255,7 +255,7 @@ class SpatialDropout1D(Dropout):
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
-Arguments:
+Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
Call arguments:
@@ -297,7 +297,7 @@ class SpatialDropout2D(Dropout):
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
-Arguments:
+Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension
@@ -356,7 +356,7 @@ class SpatialDropout3D(Dropout):
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
-Arguments:
+Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension (the depth)
@@ -406,7 +406,7 @@ class SpatialDropout3D(Dropout):
class Activation(Layer):
"""Applies an activation function to an output.
-Arguments:
+Args:
activation: Activation function, such as `tf.nn.relu`, or string name of
built-in activation function, such as "relu".
@@ -497,7 +497,7 @@ class Reshape(Layer):
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
-Arguments:
+Args:
input_shape: Shape of array being reshaped
output_shape: Desired shape of the array with at most
a single -1 which indicates a dimension that should be
@@ -577,7 +577,7 @@ class Permute(Layer):
# note: `None` is the batch dimension
```
-Arguments:
+Args:
dims: Tuple of integers. Permutation pattern does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimensions
@@ -627,7 +627,7 @@ class Flatten(Layer):
Note: If inputs are shaped `(batch,)` without a feature axis, then
flattening adds an extra channel dimension and output shape is `(batch, 1)`.
-Arguments:
+Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
@@ -724,7 +724,7 @@ class RepeatVector(Layer):
# now: model.output_shape == (None, 3, 32)
```
-Arguments:
+Args:
n: Integer, repetition factor.
Input shape:
@@ -821,7 +821,7 @@ class Lambda(Layer):
In general, Lambda layers can be convenient for simple stateless
computation, but anything more complex should use a subclass Layer instead.
-Arguments:
+Args:
function: The function to be evaluated. Takes input tensor as first
argument.
output_shape: Expected output shape from function. This argument can be
@@ -1113,7 +1113,7 @@ class Dense(Layer):
>>> model.output_shape
(None, 32)
-Arguments:
+Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
@@ -1250,7 +1250,7 @@ class Dense(Layer):
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based on input activity.
-Arguments:
+Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
@@ -1621,7 +1621,7 @@ def _delegate_property(keras_tensor_cls, property_name):  # pylint: disable=inva
`InstanceProperty` layer to access the property on the represented
intermediate values in the model.
-Arguments:
+Args:
keras_tensor_cls: The KerasTensor subclass that should expose the property.
property_name: The name of the property to expose and delegate to the
represented (Composite)Tensor.
@@ -1641,7 +1641,7 @@ def _delegate_method(keras_tensor_cls, method_name):  # pylint: disable=invalid-
an `InstanceMethod` layer to run the desired method on the represented
intermediate values in the model.
-Arguments:
+Args:
keras_tensor_cls: The KerasTensor subclass that should expose the property.
method_name: The name of the method to expose and delegate to the
represented (Composite)Tensor.
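The core layers touched above compose naturally; a small sketch of the shape transformations their docstrings describe (sizes are illustrative):

```
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(16,)),
    tf.keras.layers.Dense(32, activation="relu"),  # (None, 32)
    tf.keras.layers.Dropout(0.5),                  # active only in training
    tf.keras.layers.RepeatVector(3),               # (None, 3, 32)
    tf.keras.layers.Permute((2, 1)),               # (None, 32, 3)
    tf.keras.layers.Flatten(),                     # (None, 96)
])
print(model.output_shape)
```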
View File
@@ -37,7 +37,7 @@ from tensorflow.python.util.tf_export import keras_export
class _CuDNNRNN(RNN):
"""Private base class for CuDNNGRU and CuDNNLSTM layers.
-Arguments:
+Args:
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
@@ -166,7 +166,7 @@ class CuDNNGRU(_CuDNNRNN):
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
-Arguments:
+Args:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
@@ -346,7 +346,7 @@ class CuDNNLSTM(_CuDNNRNN):
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
-Arguments:
+Args:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
View File
@@ -50,7 +50,7 @@ class BaseDenseAttention(Layer):
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores.
-Call Arguments:
+Call Args:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
@@ -242,7 +242,7 @@ class Attention(BaseDenseAttention):
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores.
-Call Arguments:
+Call Args:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
@@ -381,7 +381,7 @@ class AdditiveAttention(BaseDenseAttention):
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores.
-Call Arguments:
+Call Args:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
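The renamed `Call Args:` sections all describe a `[query, value]` (optionally plus `key`) input list; a minimal sketch:

```
import tensorflow as tf

query = tf.random.normal((2, 4, 8))   # [batch_size, Tq, dim]
value = tf.random.normal((2, 6, 8))   # [batch_size, Tv, dim]
out = tf.keras.layers.Attention()([query, value])  # key defaults to value
print(out.shape)                      # (2, 4, 8)
```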
View File
@@ -36,7 +36,7 @@ class EinsumDense(Layer):
This layer can perform einsum calculations of arbitrary dimensionality.
-Arguments:
+Args:
equation: An equation describing the einsum to perform. This equation must
be a valid einsum string of the form `ab,bc->ac`, `...ab,bc->...ac`, or
`ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum axis
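A biased dense layer expressed this way, assuming the `tf.keras.layers.experimental` path the layer lived under in the TF 2.4-era tree this diff targets:

```
import tensorflow as tf

# 'a' is the batch axis, 'b' input features, 'c' output features.
layer = tf.keras.layers.experimental.EinsumDense(
    "ab,bc->ac", output_shape=4, bias_axes="c")
x = tf.random.normal((2, 32))
print(layer(x).shape)  # (2, 4)
```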
View File
@@ -57,7 +57,7 @@ class Embedding(Layer):
>>> print(output_array.shape)
(32, 10, 64)
-Arguments:
+Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
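The `(32, 10, 64)` shape in the docstring corresponds to this usage (vocabulary size chosen arbitrarily):

```
import numpy as np
import tensorflow as tf

layer = tf.keras.layers.Embedding(input_dim=1000, output_dim=64)
ids = np.random.randint(1000, size=(32, 10))   # integer indices < input_dim
print(layer(ids).shape)                        # (32, 10, 64)
```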
View File
@@ -121,7 +121,7 @@ class RandomFourierFeatures(base_layer.Layer):
...)
```
-Arguments:
+Args:
output_dim: Positive integer, the dimension of the layer's output, i.e., the
number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
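A sketch of the layer in isolation, assuming the `tf.keras.layers.experimental` export path and illustrative sizes:

```
import tensorflow as tf

rff = tf.keras.layers.experimental.RandomFourierFeatures(
    output_dim=4096, kernel_initializer="gaussian", scale=10.0)
x = tf.random.normal((8, 64))
print(rff(x).shape)   # (8, 4096): random features approximating an RBF kernel
```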
View File
@@ -58,7 +58,7 @@ class LocallyConnected1D(Layer):
# now model.output_shape == (None, 6, 32)
```
-Arguments:
+Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
@@ -350,7 +350,7 @@ class LocallyConnected2D(Layer):
# now model.output_shape == (None, 28, 28, 32)
```
-Arguments:
+Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the width
@@ -652,7 +652,7 @@ def get_locallyconnected_mask(input_shape, kernel_shape, strides, padding,
to make it perform an unshared convolution with given `kernel_shape`,
`strides`, `padding` and `data_format`.
-Arguments:
+Args:
input_shape: tuple of size N: `(d_in1, ..., d_inN)` spatial shape of the
input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
@@ -704,7 +704,7 @@ def local_conv_matmul(inputs, kernel, kernel_mask, output_shape):
(the remaining entries in `kernel`) weights. It also does the necessary
reshapes to make `inputs` and `kernel` 2-D and `output` (N+2)-D.
-Arguments:
+Args:
inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ...,
d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`.
kernel: the unshared weights for N-D convolution,
@@ -749,7 +749,7 @@ def local_conv_sparse_matmul(inputs, kernel, kernel_idxs, kernel_shape,
values=kernel, dense_shape=kernel_shape)`, with `.` standing for
matrix-multiply. It also reshapes `inputs` to 2-D and `output` to (N+2)-D.
-Arguments:
+Args:
inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ...,
d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`.
kernel: a 1-D tensor with shape `(len(kernel_idxs),)` containing all the
@@ -788,7 +788,7 @@ def make_2d(tensor, split_dim):
Dimensions before (excluding) and after (including) `split_dim` are grouped
together.
-Arguments:
+Args:
tensor: a tensor of shape `(d0, ..., d(N-1))`.
split_dim: an integer from 1 to N-1, index of the dimension to group
dimensions before (excluding) and after (including).
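A minimal sketch of the unshared convolution these docstrings describe (sizes are illustrative):

```
import tensorflow as tf

# Unshared convolution: each window position learns its own filter weights.
layer = tf.keras.layers.LocallyConnected1D(filters=32, kernel_size=3)
x = tf.random.normal((4, 8, 16))   # (batch, steps, channels)
print(layer(x).shape)              # (4, 6, 32): 8 - 3 + 1 'valid' windows
```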
View File
@@ -39,7 +39,7 @@ class _Merge(Layer):
def __init__(self, **kwargs):
"""Initializes a Merge layer.
-Arguments:
+Args:
**kwargs: standard layer keyword arguments.
"""
super(_Merge, self).__init__(**kwargs)
@@ -51,7 +51,7 @@ class _Merge(Layer):
def _compute_elemwise_op_output_shape(self, shape1, shape2):
"""Computes the shape of the resultant of an elementwise operation.
-Arguments:
+Args:
shape1: tuple or None. Shape of the first tensor
shape2: tuple or None. Shape of the second tensor
@@ -477,7 +477,7 @@ class Concatenate(_Merge):
[15, 16, 17, 18, 19],
[25, 26, 27, 28, 29]]])>
-Arguments:
+Args:
axis: Axis along which to concatenate.
**kwargs: standard layer keyword arguments.
"""
@@ -628,7 +628,7 @@ class Dot(_Merge):
array([[[260, 360],
[320, 445]]])>
-Arguments:
+Args:
axes: Integer or tuple of integers,
axis or axes along which to take the dot product. If a tuple, should
be two integers corresponding to the desired axis from the first input
@@ -741,7 +741,7 @@ class Dot(_Merge):
def add(inputs, **kwargs):
"""Functional interface to the `tf.keras.layers.Add` layer.
-Arguments:
+Args:
inputs: A list of input tensors (at least 2) with the same shape.
**kwargs: Standard layer keyword arguments.
@@ -775,7 +775,7 @@ def add(inputs, **kwargs):
def subtract(inputs, **kwargs):
"""Functional interface to the `Subtract` layer.
-Arguments:
+Args:
inputs: A list of input tensors (exactly 2).
**kwargs: Standard layer keyword arguments.
@@ -804,7 +804,7 @@ def subtract(inputs, **kwargs):
def multiply(inputs, **kwargs):
"""Functional interface to the `Multiply` layer.
-Arguments:
+Args:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
@@ -836,7 +836,7 @@ def average(inputs, **kwargs):
>>> out = tf.keras.layers.Dense(4)(avg)
>>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
-Arguments:
+Args:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
@@ -868,7 +868,7 @@ def maximum(inputs, **kwargs):
model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
```
-Arguments:
+Args:
inputs: A list of input tensors (at least 2) of same shape.
**kwargs: Standard layer keyword arguments.
@@ -886,7 +886,7 @@ def maximum(inputs, **kwargs):
def minimum(inputs, **kwargs):
"""Functional interface to the `Minimum` layer.
-Arguments:
+Args:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
@@ -920,7 +920,7 @@ def concatenate(inputs, axis=-1, **kwargs):
[15, 16, 17, 18, 19],
[25, 26, 27, 28, 29]]])>
-Arguments:
+Args:
inputs: A list of input tensors (at least 2).
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
@@ -935,7 +935,7 @@ def concatenate(inputs, axis=-1, **kwargs):
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
-Arguments:
+Args:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
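The functional interfaces renamed above all take a list of tensors; a minimal sketch:

```
import tensorflow as tf

a = tf.keras.Input(shape=(8,))
b = tf.keras.Input(shape=(8,))
summed = tf.keras.layers.add([a, b])                   # elementwise, (None, 8)
joined = tf.keras.layers.concatenate([a, b], axis=-1)  # (None, 16)
model = tf.keras.Model(inputs=[a, b], outputs=[summed, joined])
print(model.output_shape)
```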
View File
@@ -168,7 +168,7 @@ class MultiHeadAttention(Layer):
>>> print(output_tensor.shape)
(None, 5, 3, 4, 16)
-Arguments:
+Args:
num_heads: Number of attention heads.
key_dim: Size of each attention head for query and key.
value_dim: Size of each attention head for value.
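A minimal sketch of the basic call pattern (sizes are illustrative):

```
import tensorflow as tf

mha = tf.keras.layers.MultiHeadAttention(num_heads=2, key_dim=16)
query = tf.random.normal((3, 8, 16))   # (batch, target_seq, dim)
value = tf.random.normal((3, 4, 16))   # (batch, source_seq, dim)
print(mha(query, value).shape)         # (3, 8, 16); key defaults to value
```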
View File
@@ -39,7 +39,7 @@ class GaussianNoise(Layer):
As it is a regularization layer, it is only active at training time.
-Arguments:
+Args:
stddev: Float, standard deviation of the noise distribution.
Call arguments:
@@ -88,7 +88,7 @@ class GaussianDropout(Layer):
As it is a regularization layer, it is only active at training time.
-Arguments:
+Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
@@ -146,7 +146,7 @@ class AlphaDropout(Layer):
Alpha Dropout fits well to Scaled Exponential Linear Units
by randomly setting activations to the negative saturation value.
-Arguments:
+Args:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
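The "only active at training time" behavior mentioned above can be seen directly (stddev chosen arbitrarily):

```
import tensorflow as tf

noise = tf.keras.layers.GaussianNoise(stddev=0.1)
x = tf.ones((2, 4))
print(noise(x, training=True))    # input perturbed with N(0, 0.1) noise
print(noise(x, training=False))   # identity at inference
```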
View File
@@ -80,7 +80,7 @@ class BatchNormalizationBase(Layer):
*after having been trained on data that has similar statistics as the
inference data*.
-Arguments:
+Args:
axis: Integer or a list of integers, the axis that should be normalized
(typically the features axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
@@ -1063,7 +1063,7 @@ class LayerNormalization(Layer):
So, this Layer Normalization implementation will not match a Group
Normalization layer with group size set to 1.
-Arguments:
+Args:
axis: Integer or List/Tuple. The axis or axes to normalize across. Typically
this is the features axis/axes. The left-out axes are typically the batch
axis/axes. This argument defaults to `-1`, the last dimension in the
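A minimal sketch of the `axis` semantics for LayerNormalization (values chosen arbitrarily):

```
import tensorflow as tf

ln = tf.keras.layers.LayerNormalization(axis=-1)
x = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
y = ln(x)                          # each row -> ~zero mean, unit variance
print(tf.reduce_mean(y, axis=-1))  # ~[0., 0.]
```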
View File
@@ -55,7 +55,7 @@ class SyncBatchNormalization(normalization.BatchNormalizationBase):
model.add(tf.keras.layers.experimental.SyncBatchNormalization())
```
-Arguments:
+Args:
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
@@ -228,7 +228,7 @@ class BatchNormalization(normalization.BatchNormalizationBase):
*after having been trained on data that has similar statistics as the
inference data*.
-Arguments:
+Args:
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
View File
@@ -30,7 +30,7 @@ from tensorflow.python.ops import standard_ops
def dense(inputs, kernel, bias=None, activation=None, dtype=None):
"""Densely connected NN layer op.
-Arguments:
+Args:
inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation.
kernel: `tf.Variable`. Matrix kernel.
bias: (Optional) `tf.Variable`. Bias to add to outputs.
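The op documented here boils down to `activation(inputs @ kernel + bias)`; a rough standalone equivalent (a sketch only, not the internal implementation, which also handles sparse inputs and dtype casting):

```
import tensorflow as tf

def dense(inputs, kernel, bias=None, activation=None):
    outputs = tf.linalg.matmul(inputs, kernel)   # inputs @ kernel
    if bias is not None:
        outputs = tf.nn.bias_add(outputs, bias)
    return activation(outputs) if activation is not None else outputs

x = tf.random.normal((2, 3))
w = tf.Variable(tf.random.normal((3, 4)))
b = tf.Variable(tf.zeros(4))
print(dense(x, w, b, tf.nn.relu).shape)          # (2, 4)
```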
View File
@@ -36,7 +36,7 @@ class Pooling1D(Layer):
This class only exists for code reuse. It will never be an exposed API.
-Arguments:
+Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
@@ -158,7 +158,7 @@ class MaxPooling1D(Pooling1D):
[5.],
[5.]]], dtype=float32)>
-Arguments:
+Args:
pool_size: Integer, size of the max pooling window.
strides: Integer, or None. Specifies how much the pooling window moves
for each pooling step.
@@ -204,7 +204,7 @@ class MaxPooling1D(Pooling1D):
class AveragePooling1D(Pooling1D):
"""Average pooling for temporal data.
-Arguments:
+Args:
pool_size: Integer, size of the average pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
@@ -250,7 +250,7 @@ class Pooling2D(Layer):
This class only exists for code reuse. It will never be an exposed API.
-Arguments:
+Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
@@ -414,7 +414,7 @@ class MaxPooling2D(Pooling2D):
[9.],
[9.]]]], dtype=float32)>
-Arguments:
+Args:
pool_size: integer or tuple of 2 integers,
window size over which to take the maximum.
`(2, 2)` will take the max value over a 2x2 pooling window.
@@ -471,7 +471,7 @@ class MaxPooling2D(Pooling2D):
class AveragePooling2D(Pooling2D):
"""Average pooling operation for spatial data.
-Arguments:
+Args:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
`(2, 2)` will halve the input in both spatial dimensions.
@@ -525,7 +525,7 @@ class Pooling3D(Layer):
This class only exists for code reuse. It will never be an exposed API.
-Arguments:
+Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
@@ -620,7 +620,7 @@ class Pooling3D(Layer):
class MaxPooling3D(Pooling3D):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
-Arguments:
+Args:
pool_size: Tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
@@ -673,7 +673,7 @@ class MaxPooling3D(Pooling3D):
class AveragePooling3D(Pooling3D):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
-Arguments:
+Args:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
@@ -759,7 +759,7 @@ class GlobalAveragePooling1D(GlobalPooling1D):
>>> print(y.shape)
(2, 4)
-Arguments:
+Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
@@ -829,7 +829,7 @@ class GlobalMaxPooling1D(GlobalPooling1D):
[6.],
[9.], dtype=float32)>
-Arguments:
+Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
@@ -893,7 +893,7 @@ class GlobalAveragePooling2D(GlobalPooling2D):
>>> print(y.shape)
(2, 3)
-Arguments:
+Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
@@ -934,7 +934,7 @@ class GlobalMaxPooling2D(GlobalPooling2D):
>>> print(y.shape)
(2, 3)
-Arguments:
+Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
@@ -992,7 +992,7 @@ class GlobalPooling3D(Layer):
class GlobalAveragePooling3D(GlobalPooling3D):
"""Global Average pooling operation for 3D data.
-Arguments:
+Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
@@ -1027,7 +1027,7 @@ class GlobalAveragePooling3D(GlobalPooling3D):
class GlobalMaxPooling3D(GlobalPooling3D):
"""Global Max pooling operation for 3D data.
-Arguments:
+Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
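A minimal sketch contrasting windowed and global pooling (sizes are illustrative):

```
import tensorflow as tf

x = tf.random.normal((2, 32, 32, 3))
print(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x).shape)   # (2, 16, 16, 3)
print(tf.keras.layers.GlobalAveragePooling2D()(x).shape)         # (2, 3)
```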
View File
@@ -63,7 +63,7 @@ class CategoryCrossing(base_preprocessing_layer.PreprocessingLayer):
[b'b-e'],
[b'c-f']], dtype=object)>
-Arguments:
+Args:
depth: depth of input crossing. By default None, all inputs are crossed into
one output. It can also be an int or tuple/list of ints. Passing an
integer will create combinations of crossed outputs with depth up to that
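A sketch matching the docstring output above, assuming the `separator="-"` keyword that output appears to use (the default separator is `_X_`):

```
import tensorflow as tf

layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing(
    separator="-")
out = layer([tf.constant([["a"], ["b"], ["c"]]),
             tf.constant([["d"], ["e"], ["f"]])])
print(out)   # [[b'a-d'], [b'b-e'], [b'c-f']]
```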
View File
@@ -100,7 +100,7 @@ class CategoryEncoding(base_preprocessing_layer.CombinerPreprocessingLayer):
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]])>
-Arguments:
+Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary.
output_mode: Specification for the output of the layer.
@@ -193,7 +193,7 @@ class CategoryEncoding(base_preprocessing_layer.CombinerPreprocessingLayer):
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
-Arguments:
+Args:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
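A sketch of `output_mode="count"` under the TF 2.4-era experimental API this diff targets (inputs chosen arbitrarily):

```
import tensorflow as tf

layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
    max_tokens=4, output_mode="count")
print(layer([[0, 1], [0, 0], [1, 2], [3, 1]]))
# e.g. second row counts token 0 twice: [2., 0., 0., 0.]
```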
View File
@@ -55,7 +55,7 @@ def summarize(values, epsilon):
If the target num_bins is larger than the size of values, the whole array is
returned (with weights of 1).
-Arguments:
+Args:
values: 1-D `np.ndarray` to be summarized.
epsilon: A `'float32'` that determines the approximate desired precision.
@@ -87,7 +87,7 @@ def compress(summary, epsilon):
Taking the difference of the cumulative weights from the previous bin's
cumulative weight will give the new weight for that bin.
-Arguments:
+Args:
summary: 2-D `np.ndarray` summary to be compressed.
epsilon: A `'float32'` that determines the approximate desired precision.
@@ -115,7 +115,7 @@ def merge_summaries(prev_summary, next_summary, epsilon):
Given two summaries of distinct data, this function merges (and compresses)
them to stay within `epsilon` error tolerance.
-Arguments:
+Args:
prev_summary: 2-D `np.ndarray` summary to be merged with `next_summary`.
next_summary: 2-D `np.ndarray` summary to be merged with `prev_summary`.
epsilon: A `'float32'` that determines the approximate desired precision.
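To make the 2-D summary format concrete, assuming row 0 holds sorted values and row 1 their weights: a toy merge (illustrative only; the real helper also re-compresses to stay within `epsilon`):

```
import numpy as np

prev_summary = np.array([[1.0, 4.0, 9.0],    # values
                         [1.0, 1.0, 1.0]])   # weights
next_summary = np.array([[2.0, 8.0],
                         [1.0, 1.0]])
merged = np.concatenate([prev_summary, next_summary], axis=1)
merged = merged[:, np.argsort(merged[0])]    # keep values sorted
print(merged)
```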
View File
@@ -113,7 +113,7 @@ class Hashing(base_preprocessing_layer.PreprocessingLayer):
Reference: [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
-Arguments:
+Args:
num_bins: Number of hash bins.
salt: A single unsigned integer or None.
If passed, the hash function used will be SipHash64, with these values
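A minimal sketch of the saltless (FarmHash, deterministic) default:

```
import tensorflow as tf

layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3)
print(layer([["A"], ["B"], ["C"], ["D"], ["E"]]))
# deterministic bin ids in [0, 3)
```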
View File
@@ -76,7 +76,7 @@ class Resizing(PreprocessingLayer):
Resize the batched image input to target height and width. The input should
be a 4-D tensor in the format of NHWC.
-Arguments:
+Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
interpolation: String, the interpolation method. Defaults to `bilinear`.
@@ -136,7 +136,7 @@ class CenterCrop(PreprocessingLayer):
If the input height/width is even and the target height/width is odd (or
inversely), the input image is left-padded by 1 pixel.
-Arguments:
+Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
name: A string, the name of the layer.
@@ -208,7 +208,7 @@ class RandomCrop(PreprocessingLayer):
4D tensor with shape:
`(samples, target_height, target_width, channels)`.
-Arguments:
+Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
@@ -317,7 +317,7 @@ class Rescaling(PreprocessingLayer):
Output shape:
Same as input.
-Arguments:
+Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
name: A string, the name of the layer.
@@ -437,7 +437,7 @@ class RandomFlip(PreprocessingLayer):
class RandomTranslation(PreprocessingLayer):
"""Randomly translate each image during training.
-Arguments:
+Args:
height_factor: a float represented as fraction of value, or a tuple
of size 2 representing lower and upper bound for shifting vertically.
A negative value means shifting image up, while a positive value
@@ -889,7 +889,7 @@ class RandomRotation(PreprocessingLayer):
class RandomZoom(PreprocessingLayer):
"""Randomly zoom each image during training.
-Arguments:
+Args:
height_factor: a float represented as fraction of value, or a tuple
of size 2 representing lower and upper bound for zooming vertically.
When represented as a single float, this value is used for both the
@@ -1166,7 +1166,7 @@ class RandomHeight(PreprocessingLayer):
By default, this layer is inactive during inference.
-Arguments:
+Args:
factor: A positive float (fraction of original height), or a tuple of size 2
representing lower and upper bound for resizing vertically. When
represented as a single float, this value is used for both the upper and
@@ -1265,7 +1265,7 @@ class RandomWidth(PreprocessingLayer):
By default, this layer is inactive during inference.
-Arguments:
+Args:
factor: A positive float (fraction of original width), or a tuple of size 2
representing lower and upper bound for resizing horizontally. When
represented as a single float, this value is used for both the upper and
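These image layers are typically chained; a minimal sketch under the TF 2.4-era experimental paths (sizes are illustrative):

```
import tensorflow as tf

augment = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.Rescaling(1.0 / 255),
    tf.keras.layers.experimental.preprocessing.Resizing(64, 64),
    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
])
images = tf.random.uniform((8, 100, 80, 3), maxval=255.0)
print(augment(images, training=True).shape)   # (8, 64, 64, 3)
```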
View File
@@ -64,7 +64,7 @@ class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer):
vocabulary size, the most frequent terms will be used to create the
vocabulary.
-Arguments:
+Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
includes the OOV and mask tokens, so the effective number of tokens is
@@ -213,7 +213,7 @@ class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer):
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
-Arguments:
+Args:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
@@ -393,7 +393,7 @@ class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer):
information is already known. If vocabulary data is already present in the
layer, this method will either replace it
-Arguments:
+Args:
vocab: An array of hashable tokens.
Raises:
View File
@@ -40,7 +40,7 @@ class IntegerLookup(index_lookup.IndexLookup):
vocabulary size, the most frequent values will be used to create the
vocabulary (and the values that don't make the cut will be treated as OOV).
-Arguments:
+Args:
max_values: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
includes the OOV and mask values, so the effective number of values is
View File
@@ -60,7 +60,7 @@ class Normalization(base_preprocessing_layer.CombinerPreprocessingLayer):
as the layer's weights. `adapt` should be called before `fit`, `evaluate`,
or `predict`.
-Arguments:
+Args:
axis: Integer or tuple of integers, the axis or axes that should be
"kept". These axes are not summed over when calculating the
normalization statistics. By default the last axis, the `features` axis
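The `adapt`-then-call workflow described above, as a minimal sketch (data chosen arbitrarily):

```
import numpy as np
import tensorflow as tf

norm = tf.keras.layers.experimental.preprocessing.Normalization()
data = np.array([[1.0], [2.0], [3.0]], dtype="float32")
norm.adapt(data)                 # mean/variance stored as layer weights
print(norm(data))                # standardized: ~[[-1.22], [0.], [1.22]]
```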
View File
@@ -37,7 +37,7 @@ class PreprocessingStage(base_preprocessing_layer.PreprocessingLayer,
Sequential-like object that enables you to `adapt()` the whole list via
a single `adapt()` call on the preprocessing stage.
-Arguments:
+Args:
layers: List of layers. Can include layers that aren't preprocessing layers.
name: String. Optional name for the preprocessing stage object.
"""
@@ -45,7 +45,7 @@ class PreprocessingStage(base_preprocessing_layer.PreprocessingLayer,
def adapt(self, data, reset_state=True):
"""Adapt the state of the layers of the preprocessing stage to the data.
-Arguments:
+Args:
data: A batched Dataset object, or a NumPy array, or an EagerTensor.
Data to be iterated over to adapt the state of the layers in this
preprocessing stage.
@@ -125,7 +125,7 @@ class FunctionalPreprocessingStage(base_preprocessing_layer.PreprocessingLayer,
>>> outputs = [inputs['x1'], [y, z]]
>>> stage = FunctionalPreprocessingStage(inputs, outputs)
-Arguments:
+Args:
inputs: An input tensor (must be created via `tf.keras.Input()`), or a list,
a dict, or a nested structure of input tensors.
outputs: An output tensor, or a list, a dict or a nested structure of output
@@ -142,7 +142,7 @@ class FunctionalPreprocessingStage(base_preprocessing_layer.PreprocessingLayer,
def adapt(self, data, reset_state=True):
"""Adapt the state of the layers of the preprocessing stage to the data.
-Arguments:
+Args:
data: A batched Dataset object, a NumPy array, an EagerTensor, or a list,
dict or nested structure of Numpy Arrays or EagerTensors. The elements
of Dataset object need to conform with inputs of the stage. The first
@@ -242,7 +242,7 @@ class FunctionalPreprocessingStage(base_preprocessing_layer.PreprocessingLayer,
def _unzip_dataset(ds):
"""Unzip dataset into a list of single element datasets.
-Arguments:
+Args:
ds: A Dataset object.
Returns:
View File
@@ -48,7 +48,7 @@ class Reduction(Layer):
This layer performs a reduction across one axis of its input data. This
data may optionally be weighted by passing in an identical float tensor.
-Arguments:
+Args:
reduction: The type of reduction to perform. Can be one of the following:
"max", "mean", "min", "prod", or "sum". This layer uses the Tensorflow
reduce op which corresponds to that reduction (so, for "mean", we use
View File
@@ -40,7 +40,7 @@ class StringLookup(index_lookup.IndexLookup):
vocabulary size, the most frequent terms will be used to create the
vocabulary (and the terms that don't make the cut will be treated as OOV).
-Arguments:
+Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
includes the OOV and mask tokens, so the effective number of tokens is
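A minimal sketch of the OOV/mask index layout the docstring refers to, under the 2.4-era defaults (mask at 0, OOV at 1):

```
import numpy as np
import tensorflow as tf

layer = tf.keras.layers.experimental.preprocessing.StringLookup(
    vocabulary=["a", "b", "c", "d"])
data = np.array([["a", "c", "d"], ["d", "z", "b"]])
print(layer(data))   # OOV "z" -> 1; vocabulary terms start at index 2
```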
View File
@ -118,7 +118,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
["another", "string", "to", "split"]]`. This makes the callable site ["another", "string", "to", "split"]]`. This makes the callable site
natively compatible with `tf.strings.split()`. natively compatible with `tf.strings.split()`.
Arguments: Args:
max_tokens: The maximum size of the vocabulary for this layer. If None, max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary there is no cap on the size of the vocabulary. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is `(max_tokens - contains 1 OOV token, so the effective number of tokens is `(max_tokens -
@ -400,7 +400,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
Overrides the default adapt method to apply relevant preprocessing to the Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner. inputs before passing to the combiner.
Arguments: Args:
data: The data to train on. It can be passed either as a tf.data Dataset, data: The data to train on. It can be passed either as a tf.data Dataset,
as a NumPy array, a string tensor, or as a list of texts. as a NumPy array, a string tensor, or as a list of texts.
reset_state: Optional argument specifying whether to clear the state of reset_state: Optional argument specifying whether to clear the state of
@ -485,7 +485,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
vocabulary data is already present in the layer, this method will replace vocabulary data is already present in the layer, this method will replace
it. it.
Arguments: Args:
vocab: An array of string tokens, or a path to a file containing one vocab: An array of string tokens, or a path to a file containing one
token per line. token per line.
df_data: An array of document frequency data. Only necessary if the layer df_data: An array of document frequency data. Only necessary if the layer
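A short sketch of the `adapt()` / `set_vocabulary()` pair described above, again assuming the `experimental.preprocessing` path:

```python
import tensorflow as tf

# Either learn the vocabulary from data with adapt()...
layer = tf.keras.layers.experimental.preprocessing.TextVectorization(max_tokens=10)
layer.adapt(tf.constant(["string to split", "another string to split"]))
print(layer(tf.constant(["string to split"])))

# ...or bypass adapt() and supply the vocabulary directly.
layer2 = tf.keras.layers.experimental.preprocessing.TextVectorization()
layer2.set_vocabulary(["string", "to", "split"])
```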
View File
@ -62,7 +62,7 @@ class StackedRNNCells(Layer):
Used to implement efficient stacked RNNs. Used to implement efficient stacked RNNs.
Arguments: Args:
cells: List of RNN cell instances. cells: List of RNN cell instances.
Examples: Examples:
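A minimal sketch of the stacked-cells pattern (unit counts are illustrative):

```python
import tensorflow as tf

# Stack two cells into one composite cell, then wrap it in an RNN layer;
# this is the "efficient stacked RNN" pattern the docstring refers to.
cells = [tf.keras.layers.LSTMCell(32), tf.keras.layers.LSTMCell(16)]
stacked = tf.keras.layers.StackedRNNCells(cells)
layer = tf.keras.layers.RNN(stacked)
print(layer(tf.zeros([8, 10, 4])).shape)  # (8, 16): the last cell's units
```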
@ -205,7 +205,7 @@ class RNN(Layer):
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API. for details about the usage of RNN API.
Arguments: Args:
cell: An RNN cell instance or a list of RNN cell instances. cell: An RNN cell instance or a list of RNN cell instances.
An RNN cell is a class that has: An RNN cell is a class that has:
- A `call(input_at_t, states_at_t)` method, returning - A `call(input_at_t, states_at_t)` method, returning
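A minimal cell satisfying this protocol, closely following the example from the Keras RNN documentation (`MinimalRNNCell` is illustrative):

```python
import tensorflow as tf

class MinimalRNNCell(tf.keras.layers.Layer):
    """A cell with a `call(input_at_t, states_at_t)` method and `state_size`."""

    def __init__(self, units, **kwargs):
        super(MinimalRNNCell, self).__init__(**kwargs)
        self.units = units
        self.state_size = units  # required by the RNN layer

    def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform', name='kernel')
        self.recurrent_kernel = self.add_weight(shape=(self.units, self.units),
                                                initializer='uniform',
                                                name='recurrent_kernel')

    def call(self, inputs, states):
        prev_output = states[0]
        output = tf.matmul(inputs, self.kernel) + tf.matmul(
            prev_output, self.recurrent_kernel)
        return output, [output]  # (output_at_t, states_at_t_plus_1)

layer = tf.keras.layers.RNN(MinimalRNNCell(32))
print(layer(tf.zeros([8, 10, 5])).shape)  # (8, 32)
```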
@ -1240,7 +1240,7 @@ class SimpleRNNCell(DropoutRNNCellMixin, Layer):
This class processes one step within the whole time sequence input, whereas This class processes one step within the whole time sequence input, whereas
`tf.keras.layer.SimpleRNN` processes the whole sequence. `tf.keras.layer.SimpleRNN` processes the whole sequence.
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). Default: hyperbolic tangent (`tanh`).
@ -1439,7 +1439,7 @@ class SimpleRNN(RNN):
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API. for details about the usage of RNN API.
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). Default: hyperbolic tangent (`tanh`).
@ -1690,7 +1690,7 @@ class SimpleRNN(RNN):
class GRUCell(DropoutRNNCellMixin, Layer): class GRUCell(DropoutRNNCellMixin, Layer):
"""Cell class for the GRU layer. """Cell class for the GRU layer.
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). Default: hyperbolic tangent (`tanh`).
@ -1974,7 +1974,7 @@ class GRU(RNN):
`recurrent_kernel`. Use `reset_after=True` and `recurrent_kernel`. Use `reset_after=True` and
`recurrent_activation='sigmoid'`. `recurrent_activation='sigmoid'`.
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). Default: hyperbolic tangent (`tanh`).
@ -2244,7 +2244,7 @@ class GRU(RNN):
class LSTMCell(DropoutRNNCellMixin, Layer): class LSTMCell(DropoutRNNCellMixin, Layer):
"""Cell class for the LSTM layer. """Cell class for the LSTM layer.
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). Default: hyperbolic tangent (`tanh`).
@ -2647,7 +2647,7 @@ class LSTM(RNN):
Note that this cell is not optimized for performance on GPU. Please use Note that this cell is not optimized for performance on GPU. Please use
`tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU. `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). Default: hyperbolic tangent (`tanh`).
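A short sketch of the cell/layer split these docstrings describe: the cell advances one timestep, while the layer runs the whole sequence and can also return its final states:

```python
import tensorflow as tf

inputs = tf.random.normal([32, 10, 8])  # (batch, timesteps, features)
lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)
whole_seq, final_h, final_c = lstm(inputs)
print(whole_seq.shape, final_h.shape, final_c.shape)  # (32,10,4) (32,4) (32,4)
```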
@ -2937,7 +2937,7 @@ def _standardize_args(inputs, initial_state, constants, num_constants):
makes sure the arguments are separated and that `initial_state` and makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None). `constants` are lists of tensors (or None).
Arguments: Args:
inputs: Tensor or list/tuple of tensors, which may include constants inputs: Tensor or list/tuple of tensors, which may include constants
and initial states. In that case `num_constants` must be specified. and initial states. In that case `num_constants` must be specified.
initial_state: Tensor or list of tensors or None, initial states. initial_state: Tensor or list of tensors or None, initial states.
View File
@ -133,7 +133,7 @@ class GRUCell(recurrent.GRUCell):
>>> print(final_state.shape) >>> print(final_state.shape)
(32, 4) (32, 4)
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied (`tanh`). If you pass None, no activation is applied
@ -266,7 +266,7 @@ class GRU(recurrent.DropoutRNNCellMixin, recurrent.GRU):
>>> print(final_state.shape) >>> print(final_state.shape)
(32, 4) (32, 4)
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). Default: hyperbolic tangent (`tanh`).
@ -566,7 +566,7 @@ def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask,
counterpart. The RNN step logic has been simplified, e.g. dropout and mask are counterpart. The RNN step logic has been simplified, e.g. dropout and mask are
removed since the CuDNN implementation does not support them. removed since the CuDNN implementation does not support them.
Arguments: Args:
inputs: Input tensor of GRU layer. inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output. init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel. kernel: Weights for cell kernel.
@ -882,7 +882,7 @@ class LSTMCell(recurrent.LSTMCell):
>>> print(final_carry_state.shape) >>> print(final_carry_state.shape)
(32, 4) (32, 4)
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass `None`, no activation is applied (i.e. "linear" (`tanh`). If you pass `None`, no activation is applied (i.e. "linear"
@ -1008,7 +1008,7 @@ class LSTM(recurrent.DropoutRNNCellMixin, recurrent.LSTM):
>>> print(final_carry_state.shape) >>> print(final_carry_state.shape)
(32, 4) (32, 4)
Arguments: Args:
units: Positive integer, dimensionality of the output space. units: Positive integer, dimensionality of the output space.
activation: Activation function to use. activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation
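A hedged note on the v2 layers: they select a fused cuDNN kernel only when the constructor arguments match the cuDNN-supported configuration, and otherwise fall back to a standard implementation such as `standard_gru` above. Sketch:

```python
import tensorflow as tf

# cuDNN-compatible defaults (tanh activation, sigmoid recurrent activation).
gru = tf.keras.layers.GRU(4)
# A non-default recurrent activation forces the standard (non-fused) path.
gru_fallback = tf.keras.layers.GRU(4, recurrent_activation='relu')
print(gru(tf.zeros([32, 10, 8])).shape)  # (32, 4)
```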
View File
@ -161,7 +161,7 @@ def serialize(layer):
def deserialize(config, custom_objects=None): def deserialize(config, custom_objects=None):
"""Instantiates a layer from a config dictionary. """Instantiates a layer from a config dictionary.
Arguments: Args:
config: dict of the form {'class_name': str, 'config': dict} config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names) custom_objects: dict mapping class names (or function names)
of custom (non-Keras) objects to class/functions of custom (non-Keras) objects to class/functions
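A minimal round-trip sketch of the config dict format:

```python
import tensorflow as tf

# serialize() emits {'class_name': ..., 'config': ...}; deserialize()
# rebuilds an equivalent layer from that dict.
config = tf.keras.layers.serialize(tf.keras.layers.Dense(3, activation='relu'))
rebuilt = tf.keras.layers.deserialize(config)
print(type(rebuilt).__name__, rebuilt.units)  # Dense 3
```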
View File
@ -45,7 +45,7 @@ class Wrapper(Layer):
Do not use this class as a layer, it is only an abstract base class. Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.
Arguments: Args:
layer: The layer to be wrapped. layer: The layer to be wrapped.
""" """
@ -105,7 +105,7 @@ class TimeDistributed(Wrapper):
Because `TimeDistributed` applies the same instance of `Conv2D` to each of the Because `TimeDistributed` applies the same instance of `Conv2D` to each of the
timestamps, the same set of weights are used at each timestamp. timestamps, the same set of weights are used at each timestamp.
Arguments: Args:
layer: a `tf.keras.layers.Layer` instance. layer: a `tf.keras.layers.Layer` instance.
Call arguments: Call arguments:
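A sketch of the weight-sharing behavior described above; the shapes are illustrative:

```python
import tensorflow as tf

# One Conv2D instance is applied to each of the 10 frames, so the same
# weights are reused at every timestep.
frames = tf.zeros([4, 10, 128, 128, 3])  # (batch, time, H, W, C)
td = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(64, 3))
print(td(frames).shape)  # (4, 10, 126, 126, 64)
```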
@ -142,7 +142,7 @@ class TimeDistributed(Wrapper):
The static shapes are replaced with the corresponding dynamic shapes of the The static shapes are replaced with the corresponding dynamic shapes of the
tensor. tensor.
Arguments: Args:
init_tuple: a tuple, the first part of the output shape init_tuple: a tuple, the first part of the output shape
tensor: the tensor from which to get the (static and dynamic) shapes tensor: the tensor from which to get the (static and dynamic) shapes
as the last part of the output shape as the last part of the output shape
@ -310,7 +310,7 @@ class TimeDistributed(Wrapper):
(E.g., `mask` is not used at all) (E.g., `mask` is not used at all)
Return `None`. Return `None`.
Arguments: Args:
inputs: Tensor with shape [batch size, timesteps, ...] indicating the inputs: Tensor with shape [batch size, timesteps, ...] indicating the
input to TimeDistributed. If static shape information is available for input to TimeDistributed. If static shape information is available for
"batch size", `mask` is returned unmodified. "batch size", `mask` is returned unmodified.
@ -384,7 +384,7 @@ class TimeDistributed(Wrapper):
class Bidirectional(Wrapper): class Bidirectional(Wrapper):
"""Bidirectional wrapper for RNNs. """Bidirectional wrapper for RNNs.
Arguments: Args:
layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or
`keras.layers.GRU`. It could also be a `keras.layers.Layer` instance `keras.layers.GRU`. It could also be a `keras.layers.Layer` instance
that meets the following criteria: that meets the following criteria:
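A minimal sketch; with the default `merge_mode='concat'`, forward and backward outputs are concatenated, doubling the feature width:

```python
import tensorflow as tf

bidir = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(10, return_sequences=True))
print(bidir(tf.zeros([32, 5, 8])).shape)  # (32, 5, 20)
```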
View File
@ -163,7 +163,7 @@ class Layer(base_layer.Layer):
It is considered legacy, and we recommend the use of `tf.keras.layers.Layer` It is considered legacy, and we recommend the use of `tf.keras.layers.Layer`
instead. instead.
Arguments: Args:
trainable: Boolean, whether the layer's variables should be trainable. trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer. name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the dtype: Default dtype of the layer's weights (default of `None` means use the
@ -334,7 +334,7 @@ class Layer(base_layer.Layer):
**kwargs): **kwargs):
"""Adds a new variable to the layer, or gets an existing one; returns it. """Adds a new variable to the layer, or gets an existing one; returns it.
Arguments: Args:
name: variable name. name: variable name.
shape: variable shape. shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`. dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
@ -489,7 +489,7 @@ class Layer(base_layer.Layer):
def __call__(self, inputs, *args, **kwargs): def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps. """Wraps `call`, applying pre- and post-processing steps.
Arguments: Args:
inputs: input tensor(s). inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`. *args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`.
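A minimal sketch of the legacy pattern these docstrings describe: `add_weight()` in `build()`, with `__call__` wrapping `call()`. The `Scale` layer is hypothetical, and graph mode is assumed for the v1 API:

```python
import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

class Scale(tf1.layers.Layer):

    def build(self, input_shape):
        # add_weight() creates (or reuses) the layer's variable.
        self.alpha = self.add_weight('alpha', shape=[],
                                     initializer=tf1.ones_initializer())

    def call(self, inputs):
        return self.alpha * inputs

x = tf1.placeholder(tf1.float32, [None, 4])
y = Scale()(x)  # __call__ builds the layer, then invokes call()
```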
View File
@ -37,7 +37,7 @@ class Conv1D(keras_layers.Conv1D, base.Layer):
a bias vector is created and added to the outputs. Finally, if a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well. `activation` is not `None`, it is applied to the outputs as well.
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the kernel_size: An integer or tuple/list of a single integer, specifying the
@ -147,7 +147,7 @@ def conv1d(inputs,
a bias vector is created and added to the outputs. Finally, if a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well. `activation` is not `None`, it is applied to the outputs as well.
Arguments: Args:
inputs: Tensor input. inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
@ -235,7 +235,7 @@ class Conv2D(keras_layers.Conv2D, base.Layer):
a bias vector is created and added to the outputs. Finally, if a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well. `activation` is not `None`, it is applied to the outputs as well.
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the kernel_size: An integer or tuple/list of 2 integers, specifying the
@ -352,7 +352,7 @@ def conv2d(inputs,
a bias vector is created and added to the outputs. Finally, if a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well. `activation` is not `None`, it is applied to the outputs as well.
Arguments: Args:
inputs: Tensor input. inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
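A short sketch of the deprecated functional interface (graph mode assumed):

```python
import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

# conv2d builds the layer and applies it in one call; passing reuse=True
# with the same name would share the created variables.
images = tf1.placeholder(tf1.float32, [None, 28, 28, 1])
net = tf1.layers.conv2d(images, filters=32, kernel_size=3,
                        padding='same', activation=tf1.nn.relu, name='conv1')
print(net.shape)  # (None, 28, 28, 32)
```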
@ -447,7 +447,7 @@ class Conv3D(keras_layers.Conv3D, base.Layer):
a bias vector is created and added to the outputs. Finally, if a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well. `activation` is not `None`, it is applied to the outputs as well.
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the kernel_size: An integer or tuple/list of 3 integers, specifying the
@ -565,7 +565,7 @@ def conv3d(inputs,
a bias vector is created and added to the outputs. Finally, if a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well. `activation` is not `None`, it is applied to the outputs as well.
Arguments: Args:
inputs: Tensor input. inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
@ -661,7 +661,7 @@ class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer):
it adds a bias vector to the output. it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output. It then optionally applies an activation function to produce the final output.
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
kernel_size: A single integer specifying the spatial kernel_size: A single integer specifying the spatial
@ -771,7 +771,7 @@ class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer):
it adds a bias vector to the output. it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output. It then optionally applies an activation function to produce the final output.
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial kernel_size: A tuple or list of 2 integers specifying the spatial
@ -908,7 +908,7 @@ def separable_conv1d(inputs,
it adds a bias vector to the output. it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output. It then optionally applies an activation function to produce the final output.
Arguments: Args:
inputs: Input tensor. inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
@ -1031,7 +1031,7 @@ def separable_conv2d(inputs,
it adds a bias vector to the output. it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output. It then optionally applies an activation function to produce the final output.
Arguments: Args:
inputs: Input tensor. inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
@ -1138,7 +1138,7 @@ class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer):
while maintaining a connectivity pattern that is compatible with while maintaining a connectivity pattern that is compatible with
said convolution. said convolution.
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
kernel_size: A tuple or list of 2 positive integers specifying the spatial kernel_size: A tuple or list of 2 positive integers specifying the spatial
@ -1243,7 +1243,7 @@ def conv2d_transpose(inputs,
while maintaining a connectivity pattern that is compatible with while maintaining a connectivity pattern that is compatible with
said convolution. said convolution.
Arguments: Args:
inputs: Input tensor. inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
@ -1320,7 +1320,7 @@ def conv2d_transpose(inputs,
class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer): class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer):
"""Transposed 3D convolution layer (sometimes called 3D Deconvolution). """Transposed 3D convolution layer (sometimes called 3D Deconvolution).
Arguments: Args:
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the kernel_size: An integer or tuple/list of 3 integers, specifying the
@ -1422,7 +1422,7 @@ def conv3d_transpose(inputs,
reuse=None): reuse=None):
"""Functional interface for transposed 3D convolution layer. """Functional interface for transposed 3D convolution layer.
Arguments: Args:
inputs: Input tensor. inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). of filters in the convolution).
View File
@ -40,7 +40,7 @@ class Dense(keras_layers.Dense, base.Layer):
and `bias` is a bias vector created by the layer and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`). (only if `use_bias` is `True`).
Arguments: Args:
units: Integer or Long, dimensionality of the output space. units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a activation: Activation function (callable). Set it to None to maintain a
linear activation. linear activation.
@ -134,7 +134,7 @@ def dense(
and `bias` is a bias vector created by the layer and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`). (only if `use_bias` is `True`).
Arguments: Args:
inputs: Tensor input. inputs: Tensor input.
units: Integer or Long, dimensionality of the output space. units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a activation: Activation function (callable). Set it to None to maintain a
@ -197,7 +197,7 @@ class Dropout(keras_layers.Dropout, base.Layer):
The units that are kept are scaled by `1 / (1 - rate)`, so that their The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time. sum is unchanged at training time and inference time.
Arguments: Args:
rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units. 10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the noise_shape: 1D tensor of type `int32` representing the shape of the
@ -241,7 +241,7 @@ def dropout(inputs,
The units that are kept are scaled by `1 / (1 - rate)`, so that their The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time. sum is unchanged at training time and inference time.
Arguments: Args:
inputs: Tensor input. inputs: Tensor input.
rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units. 10% of input units.
@ -276,7 +276,7 @@ def dropout(inputs,
class Flatten(keras_layers.Flatten, base.Layer): class Flatten(keras_layers.Flatten, base.Layer):
"""Flattens an input tensor while preserving the batch axis (axis 0). """Flattens an input tensor while preserving the batch axis (axis 0).
Arguments: Args:
data_format: A string, one of `channels_last` (default) or `channels_first`. data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `channels_last` corresponds to inputs with shape
@ -302,7 +302,7 @@ class Flatten(keras_layers.Flatten, base.Layer):
def flatten(inputs, name=None, data_format='channels_last'): def flatten(inputs, name=None, data_format='channels_last'):
"""Flattens an input tensor while preserving the batch axis (axis 0). """Flattens an input tensor while preserving the batch axis (axis 0).
Arguments: Args:
inputs: Tensor input. inputs: Tensor input.
name: The name of the layer (string). name: The name of the layer (string).
data_format: A string, one of `channels_last` (default) or `channels_first`. data_format: A string, one of `channels_last` (default) or `channels_first`.
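A sketch combining the three layers documented in this file (graph mode assumed; shapes illustrative):

```python
import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

x = tf1.placeholder(tf1.float32, [None, 28, 28])
training = tf1.placeholder_with_default(False, [])
net = tf1.layers.flatten(x)  # (None, 784): batch axis preserved
net = tf1.layers.dense(net, units=128, activation=tf1.nn.relu)
# `training` gates dropout; kept units are scaled by 1/(1 - rate).
net = tf1.layers.dropout(net, rate=0.1, training=training)
```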
View File
@ -43,7 +43,7 @@ class BatchNormalization(keras_normalization.BatchNormalization, base.Layer):
train_op = tf.group([train_op, update_ops]) train_op = tf.group([train_op, update_ops])
``` ```
Arguments: Args:
axis: An `int` or list of `int`, the axis or axes that should be normalized, axis: An `int` or list of `int`, the axis or axes that should be normalized,
typically the features axis/axes. For instance, after a `Conv2D` layer typically the features axis/axes. For instance, after a `Conv2D` layer
with `data_format="channels_first"`, set `axis=1`. If a list of axes is with `data_format="channels_first"`, set `axis=1`. If a list of axes is
@ -216,7 +216,7 @@ def batch_normalization(inputs,
train_op = tf.group([train_op, update_ops]) train_op = tf.group([train_op, update_ops])
``` ```
Arguments: Args:
inputs: Tensor input. inputs: Tensor input.
axis: An `int`, the axis that should be normalized (typically the features axis: An `int`, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with axis). For instance, after a `Convolution2D` layer with
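A sketch of the update-ops idiom shown in the docstring: the moving-average updates collected under `UPDATE_OPS` must run alongside the train op (graph mode assumed):

```python
import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

x = tf1.placeholder(tf1.float32, [None, 16])
training = tf1.placeholder_with_default(True, [])
net = tf1.layers.batch_normalization(x, training=training)
loss = tf1.reduce_mean(tf1.square(net))
# Without this dependency, the moving mean/variance are never updated.
update_ops = tf1.get_collection(tf1.GraphKeys.UPDATE_OPS)
with tf1.control_dependencies(update_ops):
    train_op = tf1.train.GradientDescentOptimizer(0.1).minimize(loss)
```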
View File
@ -30,7 +30,7 @@ from tensorflow.python.util.tf_export import tf_export
class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer): class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):
"""Average Pooling layer for 1D inputs. """Average Pooling layer for 1D inputs.
Arguments: Args:
pool_size: An integer or tuple/list of a single integer, pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window. representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the strides: An integer or tuple/list of a single integer, specifying the
@ -65,7 +65,7 @@ def average_pooling1d(inputs, pool_size, strides,
name=None): name=None):
"""Average Pooling layer for 1D inputs. """Average Pooling layer for 1D inputs.
Arguments: Args:
inputs: The tensor over which to pool. Must have rank 3. inputs: The tensor over which to pool. Must have rank 3.
pool_size: An integer or tuple/list of a single integer, pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window. representing the size of the pooling window.
@ -101,7 +101,7 @@ def average_pooling1d(inputs, pool_size, strides,
class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer): class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):
"""Max Pooling layer for 1D inputs. """Max Pooling layer for 1D inputs.
Arguments: Args:
pool_size: An integer or tuple/list of a single integer, pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window. representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the strides: An integer or tuple/list of a single integer, specifying the
@ -136,7 +136,7 @@ def max_pooling1d(inputs, pool_size, strides,
name=None): name=None):
"""Max Pooling layer for 1D inputs. """Max Pooling layer for 1D inputs.
Arguments: Args:
inputs: The tensor over which to pool. Must have rank 3. inputs: The tensor over which to pool. Must have rank 3.
pool_size: An integer or tuple/list of a single integer, pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window. representing the size of the pooling window.
@ -172,7 +172,7 @@ def max_pooling1d(inputs, pool_size, strides,
class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer): class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):
"""Average pooling layer for 2D inputs (e.g. images). """Average pooling layer for 2D inputs (e.g. images).
Arguments: Args:
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window. specifying the size of the pooling window.
Can be a single integer to specify the same value for Can be a single integer to specify the same value for
@ -208,7 +208,7 @@ def average_pooling2d(inputs,
name=None): name=None):
"""Average pooling layer for 2D inputs (e.g. images). """Average pooling layer for 2D inputs (e.g. images).
Arguments: Args:
inputs: The tensor over which to pool. Must have rank 4. inputs: The tensor over which to pool. Must have rank 4.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window. specifying the size of the pooling window.
@ -246,7 +246,7 @@ def average_pooling2d(inputs,
class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer): class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):
"""Max pooling layer for 2D inputs (e.g. images). """Max pooling layer for 2D inputs (e.g. images).
Arguments: Args:
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window. specifying the size of the pooling window.
Can be a single integer to specify the same value for Can be a single integer to specify the same value for
@ -282,7 +282,7 @@ def max_pooling2d(inputs,
name=None): name=None):
"""Max pooling layer for 2D inputs (e.g. images). """Max pooling layer for 2D inputs (e.g. images).
Arguments: Args:
inputs: The tensor over which to pool. Must have rank 4. inputs: The tensor over which to pool. Must have rank 4.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window. specifying the size of the pooling window.
@ -320,7 +320,7 @@ def max_pooling2d(inputs,
class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer): class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):
"""Average pooling layer for 3D inputs (e.g. volumes). """Average pooling layer for 3D inputs (e.g. volumes).
Arguments: Args:
pool_size: An integer or tuple/list of 3 integers: pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width) (pool_depth, pool_height, pool_width)
specifying the size of the pooling window. specifying the size of the pooling window.
@ -358,7 +358,7 @@ def average_pooling3d(inputs,
name=None): name=None):
"""Average pooling layer for 3D inputs (e.g. volumes). """Average pooling layer for 3D inputs (e.g. volumes).
Arguments: Args:
inputs: The tensor over which to pool. Must have rank 5. inputs: The tensor over which to pool. Must have rank 5.
pool_size: An integer or tuple/list of 3 integers: pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width) (pool_depth, pool_height, pool_width)
@ -398,7 +398,7 @@ def average_pooling3d(inputs,
class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer): class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):
"""Max pooling layer for 3D inputs (e.g. volumes). """Max pooling layer for 3D inputs (e.g. volumes).
Arguments: Args:
pool_size: An integer or tuple/list of 3 integers: pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width) (pool_depth, pool_height, pool_width)
specifying the size of the pooling window. specifying the size of the pooling window.
@ -438,7 +438,7 @@ def max_pooling3d(inputs,
volumes). volumes).
Arguments: Args:
inputs: The tensor over which to pool. Must have rank 5. inputs: The tensor over which to pool. Must have rank 5.
pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height,
pool_width) specifying the size of the pooling window. Can be a single pool_width) specifying the size of the pooling window. Can be a single

Some files were not shown because too many files have changed in this diff.