diff --git a/tensorflow/cc/framework/cc_op_gen.cc b/tensorflow/cc/framework/cc_op_gen.cc index 8cb1a8ad3d5..467202250c8 100644 --- a/tensorflow/cc/framework/cc_op_gen.cc +++ b/tensorflow/cc/framework/cc_op_gen.cc @@ -586,7 +586,7 @@ OpInfo::OpInfo(const OpDef& graph_op_def, const ApiDef& api_def, if (!api_def.description().empty()) { strings::StrAppend(&comment, "\n", api_def.description(), "\n"); } - strings::StrAppend(&comment, "\nArguments:\n* scope: A Scope object\n"); + strings::StrAppend(&comment, "\nArgs:\n* scope: A Scope object\n"); // Process inputs for (int i = 0; i < api_def.arg_order_size(); ++i) { diff --git a/tensorflow/compiler/xla/python/xla_client.py b/tensorflow/compiler/xla/python/xla_client.py index d1d3de9041d..4350ce232cd 100644 --- a/tensorflow/compiler/xla/python/xla_client.py +++ b/tensorflow/compiler/xla/python/xla_client.py @@ -352,7 +352,7 @@ def execute_with_python_values(executable, arguments, backend): def execute_with_python_values_replicated(executable, arguments, backend): """Execute on many replicas with Python values as arguments and output. - Arguments: + Args: executable: the program to run. arguments: a list of lists of Python values indexed by `[replica][arg_num]` to pass as inputs. diff --git a/tensorflow/lite/python/tflite_keras_util.py b/tensorflow/lite/python/tflite_keras_util.py index e8c3e45d1a1..3fee75b9b1e 100644 --- a/tensorflow/lite/python/tflite_keras_util.py +++ b/tensorflow/lite/python/tflite_keras_util.py @@ -115,7 +115,7 @@ def _create_pseudo_names(tensors, prefix): `[x, y]` becomes: `['output_1', 'output_2']` - Arguments: + Args: tensors: `Model`'s outputs or inputs. prefix: 'output_' for outputs, 'input_' for inputs. diff --git a/tensorflow/python/distribute/multi_worker_test_base.py b/tensorflow/python/distribute/multi_worker_test_base.py index e07e19f621b..b5ffe7c69f1 100644 --- a/tensorflow/python/distribute/multi_worker_test_base.py +++ b/tensorflow/python/distribute/multi_worker_test_base.py @@ -387,7 +387,7 @@ def create_cluster_spec(has_chief=False, This util is useful when creating the `cluster_spec` arg for `tf.__internal__.distribute.multi_process_runner.run`. - Arguments: + Args: has_chief: Whether the generated cluster spec should contain "chief" task type. num_workers: Number of workers to use in the cluster spec. @@ -699,7 +699,7 @@ class IndependentWorkerTestBase(test.TestCase): from `cluster_spec`, `task_type`, and `task_id`, and provide it to the new thread to be set as `TF_CONFIG` environment. - Arguments: + Args: task_fn: The function to run in the new thread. cluster_spec: The cluster spec. task_type: The task type. @@ -810,7 +810,7 @@ class MultiWorkerMultiProcessTest(test.TestCase): In that case, this function only prints stderr from the first process of each type. - Arguments: + Args: processes: A dictionary from process type string -> list of processes. print_only_first: If true, only print output from first process of each type. diff --git a/tensorflow/python/eager/monitoring.py b/tensorflow/python/eager/monitoring.py index 552b3d9b78b..d5792a79a9a 100644 --- a/tensorflow/python/eager/monitoring.py +++ b/tensorflow/python/eager/monitoring.py @@ -484,7 +484,7 @@ class MonitoredTimer(object): def monitored_timer(cell): """A function decorator for adding MonitoredTimer support. - Arguments: + Args: cell: the cell associated with the time metric that will be incremented. Returns: A decorator that measures the function runtime and increments the specified
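For context on the hunk above, a minimal usage sketch of the `monitored_timer` decorator it documents. This is a hedged illustration only: the metric name, description, and the decorated function are hypothetical, and `Counter`/`get_cell` are assumed to come from the same private `tensorflow.python.eager.monitoring` module.

```python
from tensorflow.python.eager import monitoring

# Hypothetical metric; name and description are illustrative only.
step_time = monitoring.Counter(
    '/myapp/train/step_time_us',
    'Cumulative train-step wall time in microseconds.')

@monitoring.monitored_timer(step_time.get_cell())
def train_step():
  ...  # each call is timed and the cell incremented by the elapsed time
```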
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py index 8f0d18a3ab7..4df8b2944b6 100644 --- a/tensorflow/python/framework/ops.py +++ b/tensorflow/python/framework/ops.py @@ -1201,7 +1201,7 @@ class _EagerTensorBase(Tensor): def gpu(self, gpu_index=0): """A copy of this Tensor with contents backed by memory on the GPU. - Arguments: + Args: gpu_index: Identifies which GPU to place the contents on the returned Tensor in. @@ -2335,7 +2335,7 @@ class Operation(object): Note: this is generally unsafe to use. This is used in certain situations in conjunction with _set_type_list_attr. - Arguments: + Args: types: list of DTypes shapes: list of TensorShapes """ diff --git a/tensorflow/python/framework/smart_cond.py b/tensorflow/python/framework/smart_cond.py index cecd3a113a4..0cdd1b4d8ef 100644 --- a/tensorflow/python/framework/smart_cond.py +++ b/tensorflow/python/framework/smart_cond.py @@ -30,7 +30,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None): If `pred` is a bool or has a constant value, we return either `true_fn()` or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both. - Arguments: + Args: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. @@ -62,7 +62,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None): def smart_constant_value(pred): """Return the bool value for `pred`, or None if `pred` had a dynamic value. - Arguments: + Args: pred: A scalar, either a Python bool or tensor. Returns: diff --git a/tensorflow/python/framework/tensor_shape.py b/tensorflow/python/framework/tensor_shape.py index 972b49709cb..497695e28cf 100644 --- a/tensorflow/python/framework/tensor_shape.py +++ b/tensorflow/python/framework/tensor_shape.py @@ -118,7 +118,7 @@ def dimension_value(dimension): value = tensor_shape[i] # Warning: this will return the dim value in V2! ``` - Arguments: + Args: dimension: Either a `Dimension` instance, an integer, or None. Returns: @@ -164,7 +164,7 @@ def dimension_at_index(shape, index): # instantiated on the fly. ``` - Arguments: + Args: shape: A TensorShape instance. index: An integer index. Returns:
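A short sketch of the `smart_cond` behavior documented above. It assumes the private `tensorflow.python.framework.smart_cond` module path (not a public API); the semantics shown follow the docstrings in the hunks.

```python
import tensorflow as tf
from tensorflow.python.framework import smart_cond  # private module, not a public API

x = tf.constant(2)
# pred is a plain Python bool here, so the chosen branch is called
# directly and no tf.cond op is built.
y = smart_cond.smart_cond(True, lambda: x * 2, lambda: x + 1)
print(y.numpy())                              # 4
# With a statically known predicate the bool is recovered; a dynamic
# tensor predicate would make smart_constant_value return None.
print(smart_cond.smart_constant_value(True))  # True
```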
diff --git a/tensorflow/python/framework/test_combinations.py b/tensorflow/python/framework/test_combinations.py index 09b6ba478db..9d4ecbd3f4e 100644 --- a/tensorflow/python/framework/test_combinations.py +++ b/tensorflow/python/framework/test_combinations.py @@ -81,7 +81,7 @@ class TestCombination(object): If the environment doesn't satisfy the dependencies of the test combination, then it can be skipped. - Arguments: + Args: kwargs: Arguments that are passed to the test combination. Returns: @@ -103,7 +103,7 @@ class TestCombination(object): The test combination will run under all context managers that all `TestCombination` instances return. - Arguments: + Args: kwargs: Arguments and their values that are passed to the test combination. @@ -141,7 +141,7 @@ class ParameterModifier(object): def __init__(self, parameter_name=None): """Construct a parameter modifier that may be specific to a parameter. - Arguments: + Args: parameter_name: A `ParameterModifier` instance may operate on a class of parameters or on a parameter with a particular name. Only `ParameterModifier` instances that are of a unique type or were @@ -157,7 +157,7 @@ class ParameterModifier(object): This makes it possible to adjust user-provided arguments before passing them to the test method. - Arguments: + Args: kwargs: The combined arguments for the test. requested_parameters: The set of parameters that are defined in the signature of the test method. diff --git a/tensorflow/python/keras/activations.py b/tensorflow/python/keras/activations.py index b1433120749..5b33d96df0a 100644 --- a/tensorflow/python/keras/activations.py +++ b/tensorflow/python/keras/activations.py @@ -61,7 +61,7 @@ def softmax(x, axis=-1): The input values are the log-odds of the resulting probability. - Arguments: + Args: x: Input tensor. axis: Integer, axis along which the softmax normalization is applied. @@ -121,7 +121,7 @@ def elu(x, alpha=1.0): - Arguments: + Args: x: Input tensor. alpha: A scalar, slope of negative section. `alpha` controls the value to which an ELU saturates for negative net inputs. @@ -174,7 +174,7 @@ def selu(x): ... activation='selu')) >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax')) - Arguments: + Args: x: A tensor or variable to compute the activation function for. Returns: @@ -205,7 +205,7 @@ def softplus(x): array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00, 2.0000000e+01], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -226,7 +226,7 @@ def softsign(x): >>> b.numpy() array([-0.5, 0. , 0.5], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -254,7 +254,7 @@ def swish(x): array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01, 2.0000000e+01], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -290,7 +290,7 @@ def relu(x, alpha=0., max_value=None, threshold=0): >>> tf.keras.activations.relu(foo, threshold=5).numpy() array([-0., -0., 0., 0., 10.], dtype=float32) - Arguments: + Args: x: Input `tensor` or `variable`. alpha: A `float` that governs the slope for values lower than the threshold. @@ -329,7 +329,7 @@ def gelu(x, approximate=False): array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ], dtype=float32) - Arguments: + Args: x: Input tensor. approximate: A `bool`, whether to enable approximation. @@ -359,7 +359,7 @@ def tanh(x): >>> b.numpy() array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -390,7 +390,7 @@ def sigmoid(x): array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01, 1.0000000e+00], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -414,7 +414,7 @@ def exponential(x): >>> b.numpy() array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -437,7 +437,7 @@ def hard_sigmoid(x): >>> b.numpy() array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -462,7 +462,7 @@ def linear(x): >>> b.numpy() array([-3., -1., 0., 1., 3.], dtype=float32) - Arguments: + Args: x: Input tensor. Returns: @@ -476,7 +476,7 @@ def linear(x): def serialize(activation): """Returns the string identifier of an activation function. - Arguments: + Args: activation: Function object. Returns: @@ -550,7 +550,7 @@ def deserialize(name, custom_objects=None): def get(identifier): """Returns function.
- Arguments: + Args: identifier: Function or string Returns: diff --git a/tensorflow/python/keras/applications/densenet.py b/tensorflow/python/keras/applications/densenet.py index 482181cc38d..189b209864d 100644 --- a/tensorflow/python/keras/applications/densenet.py +++ b/tensorflow/python/keras/applications/densenet.py @@ -57,7 +57,7 @@ layers = VersionAwareLayers() def dense_block(x, blocks, name): """A dense block. - Arguments: + Args: x: input tensor. blocks: integer, the number of building blocks. name: string, block label. @@ -73,7 +73,7 @@ def dense_block(x, blocks, name): def transition_block(x, reduction, name): """A transition block. - Arguments: + Args: x: input tensor. reduction: float, compression rate at transition layers. name: string, block label. @@ -99,7 +99,7 @@ def transition_block(x, reduction, name): def conv_block(x, growth_rate, name): """A building block for a dense block. - Arguments: + Args: x: input tensor. growth_rate: float, growth rate at dense layers. name: string, block label. @@ -149,7 +149,7 @@ def DenseNet( For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: blocks: numbers of building blocks for the four dense layers. include_top: whether to include the fully-connected layer at the top of the network. @@ -388,7 +388,7 @@ DOC = """ For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), diff --git a/tensorflow/python/keras/applications/efficientnet.py b/tensorflow/python/keras/applications/efficientnet.py index a289b27e8ce..745420d16a0 100644 --- a/tensorflow/python/keras/applications/efficientnet.py +++ b/tensorflow/python/keras/applications/efficientnet.py @@ -154,7 +154,7 @@ BASE_DOCSTRING = """Instantiates the {name} architecture. the one specified in your Keras config at `~/.keras/keras.json`. If you have never configured it, it defaults to `"channels_last"`. - Arguments: + Args: include_top: Whether to include the fully-connected layer at the top of the network. Defaults to True. weights: One of `None` (random initialization), @@ -218,7 +218,7 @@ def EfficientNet( Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. - Arguments: + Args: width_coefficient: float, scaling coefficient for network width. depth_coefficient: float, scaling coefficient for network depth. default_size: integer, default input image size. @@ -423,7 +423,7 @@ def block(inputs, id_skip=True): """An inverted residual block. - Arguments: + Args: inputs: input tensor. activation: activation function. drop_rate: float between 0 and 1, fraction of the input units to drop. diff --git a/tensorflow/python/keras/applications/imagenet_utils.py b/tensorflow/python/keras/applications/imagenet_utils.py index 45cccfdb2b0..15c2ac0a0d8 100644 --- a/tensorflow/python/keras/applications/imagenet_utils.py +++ b/tensorflow/python/keras/applications/imagenet_utils.py @@ -50,7 +50,7 @@ PREPROCESS_INPUT_DOC = """ result = model(image) ``` - Arguments: + Args: x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color channels, with values in the range [0, 255]. 
The preprocessed data are written over the input data @@ -129,7 +129,7 @@ preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format( def decode_predictions(preds, top=5): """Decodes the prediction of an ImageNet model. - Arguments: + Args: preds: Numpy array encoding a batch of predictions. top: Integer, how many top-guesses to return. Defaults to 5. @@ -169,7 +169,7 @@ def decode_predictions(preds, top=5): def _preprocess_numpy_input(x, data_format, mode): """Preprocesses a Numpy array encoding a batch of images. - Arguments: + Args: x: Input array, 3D or 4D. data_format: Data format of the image array. mode: One of "caffe", "tf" or "torch". @@ -242,7 +242,7 @@ def _preprocess_numpy_input(x, data_format, mode): def _preprocess_symbolic_input(x, data_format, mode): """Preprocesses a tensor encoding a batch of images. - Arguments: + Args: x: Input tensor, 3D or 4D. data_format: Data format of the image tensor. mode: One of "caffe", "tf" or "torch". @@ -301,7 +301,7 @@ def obtain_input_shape(input_shape, weights=None): """Internal utility to compute/validate a model's input shape. - Arguments: + Args: input_shape: Either None (will return the default network input shape), or a user-provided shape to be validated. default_size: Default input width/height for the model. @@ -388,7 +388,7 @@ def obtain_input_shape(input_shape, def correct_pad(inputs, kernel_size): """Returns a tuple for zero-padding for 2D convolution with downsampling. - Arguments: + Args: inputs: Input tensor. kernel_size: An integer or tuple/list of 2 integers. diff --git a/tensorflow/python/keras/applications/inception_resnet_v2.py b/tensorflow/python/keras/applications/inception_resnet_v2.py index 5e46d97fdd2..494a2b0a9e0 100644 --- a/tensorflow/python/keras/applications/inception_resnet_v2.py +++ b/tensorflow/python/keras/applications/inception_resnet_v2.py @@ -66,7 +66,7 @@ def InceptionResNetV2(include_top=True, `tf.keras.applications.inception_resnet_v2.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), @@ -260,7 +260,7 @@ def conv2d_bn(x, name=None): """Utility function to apply conv + BN. - Arguments: + Args: x: input tensor. filters: filters in `Conv2D`. kernel_size: kernel size as in `Conv2D`. @@ -302,7 +302,7 @@ def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'): - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` - Arguments: + Args: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut diff --git a/tensorflow/python/keras/applications/inception_v3.py b/tensorflow/python/keras/applications/inception_v3.py index 94e1ab558b8..e6ebab3530f 100644 --- a/tensorflow/python/keras/applications/inception_v3.py +++ b/tensorflow/python/keras/applications/inception_v3.py @@ -67,7 +67,7 @@ def InceptionV3( For InceptionV3, call `tf.keras.applications.inception_v3.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: Boolean, whether to include the fully-connected layer at the top, as the last layer of the network. Default to `True`. weights: One of `None` (random initialization), @@ -369,7 +369,7 @@ def conv2d_bn(x, name=None): """Utility function to apply conv + BN. - Arguments: + Args: x: input tensor. filters: filters in `Conv2D`. 
num_row: height of the convolution kernel. diff --git a/tensorflow/python/keras/applications/mobilenet.py b/tensorflow/python/keras/applications/mobilenet.py index d434a801e52..f8272ddd6a7 100644 --- a/tensorflow/python/keras/applications/mobilenet.py +++ b/tensorflow/python/keras/applications/mobilenet.py @@ -108,7 +108,7 @@ def MobileNet(input_shape=None, For MobileNet, call `tf.keras.applications.mobilenet.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or (3, 224, 224) (with `channels_first` @@ -315,7 +315,7 @@ def MobileNet(input_shape=None, def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)): """Adds an initial convolution layer (with batch normalization and relu6). - Arguments: + Args: inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last` data format) or (3, rows, cols) (with `channels_first` data format). It should have exactly 3 input channels, and width and height should @@ -373,7 +373,7 @@ def _depthwise_conv_block(inputs, batch normalization, relu6, pointwise convolution, batch normalization and relu6 activation. - Arguments: + Args: inputs: Input tensor of shape `(rows, cols, channels)` (with `channels_last` data format) or (channels, rows, cols) (with `channels_first` data format). diff --git a/tensorflow/python/keras/applications/mobilenet_v2.py b/tensorflow/python/keras/applications/mobilenet_v2.py index 3b15a5747e0..4004539d874 100644 --- a/tensorflow/python/keras/applications/mobilenet_v2.py +++ b/tensorflow/python/keras/applications/mobilenet_v2.py @@ -115,7 +115,7 @@ def MobileNetV2(input_shape=None, For MobileNetV2, call `tf.keras.applications.mobilenet_v2.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: input_shape: Optional shape tuple, to be specified if you would like to use a model with an input image resolution that is not (224, 224, 3). diff --git a/tensorflow/python/keras/applications/mobilenet_v3.py b/tensorflow/python/keras/applications/mobilenet_v3.py index 055d277a29b..04ec7d9b8f0 100644 --- a/tensorflow/python/keras/applications/mobilenet_v3.py +++ b/tensorflow/python/keras/applications/mobilenet_v3.py @@ -77,7 +77,7 @@ BASE_DOCSTRING = """Instantiates the {name} architecture. Optionally loads weights pre-trained on ImageNet. - Arguments: + Args: input_shape: Optional shape tuple, to be specified if you would like to use a model with an input image resolution that is not (224, 224, 3). diff --git a/tensorflow/python/keras/applications/nasnet.py b/tensorflow/python/keras/applications/nasnet.py index 67f837dfc58..7ad93a374ad 100644 --- a/tensorflow/python/keras/applications/nasnet.py +++ b/tensorflow/python/keras/applications/nasnet.py @@ -85,7 +85,7 @@ def NASNet(input_shape=None, Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. - Arguments: + Args: input_shape: Optional shape tuple, the input shape is by default `(331, 331, 3)` for NASNetLarge and `(224, 224, 3)` for NASNetMobile. @@ -340,7 +340,7 @@ def NASNetMobile(input_shape=None, For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your inputs before passing them to the model.
- Arguments: + Args: input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` for NASNetMobile @@ -417,7 +417,7 @@ def NASNetLarge(input_shape=None, For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(331, 331, 3)` for NASNetLarge. @@ -479,7 +479,7 @@ def _separable_conv_block(ip, block_id=None): """Adds 2 blocks of [relu-separable conv-batchnorm]. - Arguments: + Args: ip: Input tensor filters: Number of output filters per layer kernel_size: Kernel size of separable convolutions @@ -538,7 +538,7 @@ def _adjust_block(p, ip, filters, block_id=None): Used in situations where the output number of filters needs to be changed. - Arguments: + Args: p: Input tensor which needs to be modified ip: Input tensor whose shape needs to be matched filters: Number of output filters to be matched @@ -621,7 +621,7 @@ def _adjust_block(p, ip, filters, block_id=None): def _normal_a_cell(ip, p, filters, block_id=None): """Adds a Normal cell for NASNet-A (Fig. 4 in the paper). - Arguments: + Args: ip: Input tensor `x` p: Input tensor `p` filters: Number of output filters @@ -700,7 +700,7 @@ def _normal_a_cell(ip, p, filters, block_id=None): def _reduction_a_cell(ip, p, filters, block_id=None): """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper). - Arguments: + Args: ip: Input tensor `x` p: Input tensor `p` filters: Number of output filters diff --git a/tensorflow/python/keras/applications/resnet.py b/tensorflow/python/keras/applications/resnet.py index 9c50b8a7c65..486761df37a 100644 --- a/tensorflow/python/keras/applications/resnet.py +++ b/tensorflow/python/keras/applications/resnet.py @@ -79,7 +79,7 @@ def ResNet(stack_fn, Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. - Arguments: + Args: stack_fn: a function that returns output tensor for the stacked residual blocks. preact: whether to use pre-activation or not @@ -226,7 +226,7 @@ def ResNet(stack_fn, def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None): """A residual block. - Arguments: + Args: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. @@ -271,7 +271,7 @@ def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None): def stack1(x, filters, blocks, stride1=2, name=None): """A set of stacked residual blocks. - Arguments: + Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. @@ -290,7 +290,7 @@ def stack1(x, filters, blocks, stride1=2, name=None): def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None): """A residual block. - Arguments: + Args: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. @@ -339,7 +339,7 @@ def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None): def stack2(x, filters, blocks, stride1=2, name=None): """A set of stacked residual blocks. - Arguments: + Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. @@ -365,7 +365,7 @@ def block3(x, name=None): """A residual block. 
- Arguments: + Args: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. @@ -428,7 +428,7 @@ def block3(x, def stack3(x, filters, blocks, stride1=2, groups=32, name=None): """A set of stacked residual blocks. - Arguments: + Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. @@ -547,7 +547,7 @@ DOC = """ For ResNet, call `tf.keras.applications.resnet.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), diff --git a/tensorflow/python/keras/applications/resnet_v2.py b/tensorflow/python/keras/applications/resnet_v2.py index 83f6e674cc8..b621c77be2d 100644 --- a/tensorflow/python/keras/applications/resnet_v2.py +++ b/tensorflow/python/keras/applications/resnet_v2.py @@ -152,7 +152,7 @@ DOC = """ For ResNetV2, call `tf.keras.applications.resnet_v2.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), diff --git a/tensorflow/python/keras/applications/vgg16.py b/tensorflow/python/keras/applications/vgg16.py index 33bf8d25b24..9d43514ea03 100644 --- a/tensorflow/python/keras/applications/vgg16.py +++ b/tensorflow/python/keras/applications/vgg16.py @@ -70,7 +70,7 @@ def VGG16( For VGG16, call `tf.keras.applications.vgg16.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization), diff --git a/tensorflow/python/keras/applications/vgg19.py b/tensorflow/python/keras/applications/vgg19.py index ad6c9b84c00..f5e260ea7ec 100644 --- a/tensorflow/python/keras/applications/vgg19.py +++ b/tensorflow/python/keras/applications/vgg19.py @@ -70,7 +70,7 @@ def VGG19( For VGG19, call `tf.keras.applications.vgg19.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization), diff --git a/tensorflow/python/keras/applications/xception.py b/tensorflow/python/keras/applications/xception.py index 3d595ffa419..f51b0eb8998 100644 --- a/tensorflow/python/keras/applications/xception.py +++ b/tensorflow/python/keras/applications/xception.py @@ -72,7 +72,7 @@ def Xception( For Xception, call `tf.keras.applications.xception.preprocess_input` on your inputs before passing them to the model. - Arguments: + Args: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), diff --git a/tensorflow/python/keras/backend.py b/tensorflow/python/keras/backend.py index 62dff46c44a..a5b077be984 100644 --- a/tensorflow/python/keras/backend.py +++ b/tensorflow/python/keras/backend.py @@ -195,7 +195,7 @@ def backend(): def cast_to_floatx(x): """Cast a Numpy array to the default Keras float type. - Arguments: + Args: x: Numpy array or TensorFlow tensor. Returns: @@ -227,7 +227,7 @@ def cast_to_floatx(x): def get_uid(prefix=''): """Associates a string prefix with an integer counter in a TensorFlow graph. - Arguments: + Args: prefix: String prefix to index. 
Returns: @@ -335,7 +335,7 @@ def manual_variable_initialization(value): the user should handle the initialization (e.g. via `tf.compat.v1.initialize_all_variables()`). - Arguments: + Args: value: Python boolean. """ global _MANUAL_VAR_INIT @@ -430,7 +430,7 @@ def set_learning_phase(value): training = backend.learning_phase() ``` - Arguments: + Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train @@ -459,7 +459,7 @@ def deprecated_internal_set_learning_phase(value): sets learning phase just for compatibility with code that relied on explicitly setting the learning phase for other values. - Arguments: + Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Raises: @@ -485,7 +485,7 @@ def learning_phase_scope(value): The learning phase gets restored to its original value upon exiting the scope. - Arguments: + Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train @@ -519,7 +519,7 @@ def deprecated_internal_learning_phase_scope(value): We can get rid of this method and its usages when the public API is removed. - Arguments: + Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: @@ -564,7 +564,7 @@ def deprecated_internal_learning_phase_scope(value): def eager_learning_phase_scope(value): """Internal scope that sets the learning phase in eager / tf.function only. - Arguments: + Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train @@ -736,7 +736,7 @@ def get_session(op_input_list=()): Note that you can manually set the global session via `K.set_session(sess)`. - Arguments: + Args: op_input_list: An optional sequence of tensors or ops, which will be used to determine the current graph. Otherwise the default graph will be used. @@ -809,7 +809,7 @@ def _scratch_graph(graph=None): def set_session(session): """Sets the global TensorFlow session. - Arguments: + Args: session: A TF Session. """ global _SESSION @@ -876,7 +876,7 @@ def _get_current_tf_device(): def _is_current_explicit_device(device_type): """Check if the current device is explicitly set on the device type specified. - Arguments: + Args: device_type: A string containing `GPU` or `CPU` (case-insensitive). Returns: @@ -934,7 +934,7 @@ def _constant_to_tensor(x, dtype): This is slightly faster than the _to_tensor function, at the cost of handling fewer cases. - Arguments: + Args: x: An object to be converted (numpy arrays, floats, ints and lists of them). dtype: The destination type. @@ -948,7 +948,7 @@ def _to_tensor(x, dtype): def _to_tensor(x, dtype): """Convert the input `x` to a tensor of type `dtype`. - Arguments: + Args: x: An object to be converted (numpy array, list, tensors). dtype: The destination type. @@ -963,7 +963,7 @@ def is_sparse(tensor): def is_sparse(tensor): """Returns whether a tensor is a sparse tensor. - Arguments: + Args: tensor: A tensor instance. Returns: @@ -992,7 +992,7 @@ def is_sparse(tensor): def to_dense(tensor): """Converts a sparse tensor into a dense tensor and returns it. - Arguments: + Args: tensor: A tensor instance (potentially sparse). Returns: @@ -1052,7 +1052,7 @@ keras_export(v1=['keras.backend.name_scope'])(ops.name_scope_v1) def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. - Arguments: + Args: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor.
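A minimal sketch of the `variable`/`eval` pair whose docstrings the hunk above touches; the variable name is illustrative:

```python
import numpy as np
from tensorflow.keras import backend as K

kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float64', name='example_var')
print(K.dtype(kvar))  # 'float64'
print(K.eval(kvar))   # [[1. 2.] [3. 4.]]
```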
@@ -1127,7 +1127,7 @@ def unique_object_name(name, avoid_observed_names=False): """Makes an object name (or arbitrary string) unique within a TensorFlow graph. - Arguments: + Args: name: String name to make unique. name_uid_map: An optional defaultdict(int) to use when creating unique names. If None (default), uses a per-Graph dictionary. @@ -1216,7 +1216,7 @@ def _initialize_variables(session): def constant(value, dtype=None, shape=None, name=None): """Creates a constant tensor. - Arguments: + Args: value: A constant value (or list) dtype: The type of the elements of the resulting tensor. shape: Optional dimensions of resulting tensor. @@ -1238,7 +1238,7 @@ def is_keras_tensor(x): A "Keras tensor" is a tensor that was returned by a Keras layer (`Layer` class) or by `Input`. - Arguments: + Args: x: A candidate tensor. Returns: @@ -1295,7 +1295,7 @@ def placeholder(shape=None, ragged=False): """Instantiates a placeholder tensor and returns it. - Arguments: + Args: shape: Shape of the placeholder (integer tuple, may include `None` entries). ndim: Number of axes of the tensor. @@ -1388,7 +1388,7 @@ def placeholder(shape=None, def is_placeholder(x): """Returns whether `x` is a placeholder. - Arguments: + Args: x: A candidate placeholder. Returns: @@ -1413,7 +1413,7 @@ def is_placeholder(x): def shape(x): """Returns the symbolic shape of a tensor or variable. - Arguments: + Args: x: A tensor or variable. Returns: @@ -1438,7 +1438,7 @@ def shape(x): def int_shape(x): """Returns the shape of tensor or variable as a tuple of int or None entries. - Arguments: + Args: x: Tensor or variable. Returns: @@ -1469,7 +1469,7 @@ def int_shape(x): def ndim(x): """Returns the number of axes in a tensor, as an integer. - Arguments: + Args: x: Tensor or variable. Returns: @@ -1499,7 +1499,7 @@ def ndim(x): def dtype(x): """Returns the dtype of a Keras tensor or variable, as a string. - Arguments: + Args: x: Tensor or variable. Returns: @@ -1532,7 +1532,7 @@ def dtype(x): def eval(x): """Evaluates the value of a variable. - Arguments: + Args: x: A variable. Returns: @@ -1555,7 +1555,7 @@ def eval(x): def zeros(shape, dtype=None, name=None): """Instantiates an all-zeros variable and returns it. - Arguments: + Args: shape: Tuple or list of integers, shape of returned Keras variable dtype: data type of returned Keras variable name: name of returned Keras variable @@ -1601,7 +1601,7 @@ def zeros(shape, dtype=None, name=None): def ones(shape, dtype=None, name=None): """Instantiates an all-ones variable and returns it. - Arguments: + Args: shape: Tuple of integers, shape of returned Keras variable. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. @@ -1637,7 +1637,7 @@ def ones(shape, dtype=None, name=None): def eye(size, dtype=None, name=None): """Instantiates an identity matrix and returns it. - Arguments: + Args: size: Integer, number of rows/columns. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. @@ -1667,7 +1667,7 @@ def eye(size, dtype=None, name=None): def zeros_like(x, dtype=None, name=None): """Instantiates an all-zeros variable of the same shape as another tensor. - Arguments: + Args: x: Keras variable or Keras tensor. dtype: dtype of returned Keras variable. `None` uses the dtype of `x`. @@ -1695,7 +1695,7 @@ def zeros_like(x, dtype=None, name=None): def ones_like(x, dtype=None, name=None): """Instantiates an all-ones variable of the same shape as another tensor.
- Arguments: + Args: x: Keras variable or tensor. dtype: String, dtype of returned Keras variable. None uses the dtype of x. @@ -1719,7 +1719,7 @@ def ones_like(x, dtype=None, name=None): def identity(x, name=None): """Returns a tensor with the same content as the input tensor. - Arguments: + Args: x: The input tensor. name: String, name for the variable to create. @@ -1734,7 +1734,7 @@ def identity(x, name=None): def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): """Instantiates a variable with values drawn from a uniform distribution. - Arguments: + Args: shape: Tuple of integers, shape of returned Keras variable. low: Float, lower boundary of the output interval. high: Float, upper boundary of the output interval. @@ -1770,7 +1770,7 @@ def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None): """Instantiates a variable with values drawn from a normal distribution. - Arguments: + Args: shape: Tuple of integers, shape of returned Keras variable. mean: Float, mean of the normal distribution. scale: Float, standard deviation of the normal distribution. @@ -1805,7 +1805,7 @@ def random_normal_variable(shape, mean, scale, dtype=None, name=None, def count_params(x): """Returns the static number of elements in a variable or tensor. - Arguments: + Args: x: Variable or tensor. Returns: @@ -1832,7 +1832,7 @@ def cast(x, dtype): You can cast a Keras variable but it still returns a Keras tensor. - Arguments: + Args: x: Keras tensor (or variable). dtype: String, either (`'float16'`, `'float32'`, or `'float64'`). @@ -1868,7 +1868,7 @@ def update(x, new_x): def update_add(x, increment): """Update the value of `x` by adding `increment`. - Arguments: + Args: x: A Variable. increment: A tensor of same shape as `x`. @@ -1883,7 +1883,7 @@ def update_add(x, increment): def update_sub(x, decrement): """Update the value of `x` by subtracting `decrement`. - Arguments: + Args: x: A Variable. decrement: A tensor of same shape as `x`. @@ -1923,7 +1923,7 @@ def moving_average_update(x, value, momentum): >>> x_zdb.numpy() 2.0 - Arguments: + Args: x: A Variable, the moving average. value: A tensor with the same shape as `x`, the new value to be averaged in. @@ -1948,7 +1948,7 @@ def dot(x, y): This operation corresponds to `numpy.dot(a, b, out=None)`. - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2020,7 +2020,7 @@ def batch_dot(x, y, axes=None): than the input. If the number of dimensions is reduced to 1, we use `expand_dims` to make sure that ndim is at least 2. - Arguments: + Args: x: Keras tensor or variable with `ndim >= 2`. y: Keras tensor or variable with `ndim >= 2`. axes: Tuple or list of integers with target dimensions, or single integer. @@ -2203,7 +2203,7 @@ def batch_dot(x, y, axes=None): def transpose(x): """Transposes a tensor and returns it. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2236,7 +2236,7 @@ def transpose(x): def gather(reference, indices): """Retrieves the elements of indices `indices` in the tensor `reference`. - Arguments: + Args: reference: A tensor. indices: An integer tensor of indices. @@ -2273,7 +2273,7 @@ def gather(reference, indices): def max(x, axis=None, keepdims=False): """Maximum value in a tensor. - Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. @@ -2293,7 +2293,7 @@ def max(x, axis=None, keepdims=False): def min(x, axis=None, keepdims=False): """Minimum value in a tensor. 
- Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to find minimum values. keepdims: A boolean, whether to keep the dimensions or not. @@ -2313,7 +2313,7 @@ def min(x, axis=None, keepdims=False): def sum(x, axis=None, keepdims=False): """Sum of the values in a tensor, alongside the specified axis. - Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to sum over. keepdims: A boolean, whether to keep the dimensions or not. @@ -2333,7 +2333,7 @@ def sum(x, axis=None, keepdims=False): def prod(x, axis=None, keepdims=False): """Multiplies the values in a tensor, alongside the specified axis. - Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to compute the product. keepdims: A boolean, whether to keep the dimensions or not. @@ -2353,7 +2353,7 @@ def prod(x, axis=None, keepdims=False): def cumsum(x, axis=0): """Cumulative sum of the values in a tensor, alongside the specified axis. - Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to compute the sum. @@ -2369,7 +2369,7 @@ def cumsum(x, axis=0): def cumprod(x, axis=0): """Cumulative product of the values in a tensor, alongside the specified axis. - Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to compute the product. @@ -2384,7 +2384,7 @@ def cumprod(x, axis=0): def var(x, axis=None, keepdims=False): """Variance of a tensor, alongside the specified axis. - Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to compute the variance. keepdims: A boolean, whether to keep the dimensions or not. @@ -2408,7 +2408,7 @@ def std(x, axis=None, keepdims=False): It is an alias to `tf.math.reduce_std`. - Arguments: + Args: x: A tensor or variable. It should have numerical dtypes. Boolean type inputs will be converted to float. axis: An integer, the axis to compute the standard deviation. If `None` @@ -2434,7 +2434,7 @@ def std(x, axis=None, keepdims=False): def mean(x, axis=None, keepdims=False): """Mean of a tensor, alongside the specified axis. - Arguments: + Args: x: A tensor or variable. axis: A list of integers. Axes to compute the mean. keepdims: A boolean, whether to keep the dimensions or not. @@ -2456,7 +2456,7 @@ def mean(x, axis=None, keepdims=False): def any(x, axis=None, keepdims=False): """Bitwise reduction (logical OR). - Arguments: + Args: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether to drop or broadcast the reduction axes. @@ -2474,7 +2474,7 @@ def any(x, axis=None, keepdims=False): def all(x, axis=None, keepdims=False): """Bitwise reduction (logical AND). - Arguments: + Args: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether to drop or broadcast the reduction axes. @@ -2492,7 +2492,7 @@ def all(x, axis=None, keepdims=False): def argmax(x, axis=-1): """Returns the index of the maximum value along an axis. - Arguments: + Args: x: Tensor or variable. axis: axis along which to perform the reduction. @@ -2508,7 +2508,7 @@ def argmax(x, axis=-1): def argmin(x, axis=-1): """Returns the index of the minimum value along an axis. - Arguments: + Args: x: Tensor or variable. axis: axis along which to perform the reduction. @@ -2524,7 +2524,7 @@ def argmin(x, axis=-1): def square(x): """Element-wise square. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2539,7 +2539,7 @@ def square(x): def abs(x): """Element-wise absolute value. - Arguments: + Args: x: Tensor or variable.
Returns: @@ -2557,7 +2557,7 @@ def sqrt(x): This function clips negative tensor values to 0 before computing the square root. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2574,7 +2574,7 @@ def sqrt(x): def exp(x): """Element-wise exponential. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2589,7 +2589,7 @@ def exp(x): def log(x): """Element-wise log. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2605,7 +2605,7 @@ def logsumexp(x, axis=None, keepdims=False): It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. - Arguments: + Args: x: A tensor or variable. axis: An integer, the axis to reduce over. keepdims: A boolean, whether to keep the dimensions or not. @@ -2627,7 +2627,7 @@ def round(x): In case of tie, the rounding mode used is "half to even". - Arguments: + Args: x: Tensor or variable. Returns: @@ -2642,7 +2642,7 @@ def round(x): def sign(x): """Element-wise sign. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2657,7 +2657,7 @@ def sign(x): def pow(x, a): """Element-wise exponentiation. - Arguments: + Args: x: Tensor or variable. a: Python integer. @@ -2673,7 +2673,7 @@ def pow(x, a): def clip(x, min_value, max_value): """Element-wise value clipping. - Arguments: + Args: x: Tensor or variable. min_value: Python float, integer, or tensor. max_value: Python float, integer, or tensor. @@ -2698,7 +2698,7 @@ def clip(x, min_value, max_value): def equal(x, y): """Element-wise equality between two tensors. - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2714,7 +2714,7 @@ def equal(x, y): def not_equal(x, y): """Element-wise inequality between two tensors. - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2730,7 +2730,7 @@ def not_equal(x, y): def greater(x, y): """Element-wise truth value of (x > y). - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2746,7 +2746,7 @@ def greater(x, y): def greater_equal(x, y): """Element-wise truth value of (x >= y). - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2762,7 +2762,7 @@ def greater_equal(x, y): def less(x, y): """Element-wise truth value of (x < y). - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2778,7 +2778,7 @@ def less(x, y): def less_equal(x, y): """Element-wise truth value of (x <= y). - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2794,7 +2794,7 @@ def less_equal(x, y): def maximum(x, y): """Element-wise maximum of two tensors. - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2820,7 +2820,7 @@ def maximum(x, y): def minimum(x, y): """Element-wise minimum of two tensors. - Arguments: + Args: x: Tensor or variable. y: Tensor or variable. @@ -2836,7 +2836,7 @@ def minimum(x, y): def sin(x): """Computes sin of x element-wise. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2851,7 +2851,7 @@ def sin(x): def cos(x): """Computes cos of x element-wise. - Arguments: + Args: x: Tensor or variable. Returns: @@ -2867,7 +2867,7 @@ def _regular_normalize_batch_in_training(x, epsilon=1e-3): """Non-fused version of `normalize_batch_in_training`. - Arguments: + Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. @@ -2890,7 +2890,7 @@ def _broadcast_normalize_batch_in_training(x, epsilon=1e-3): """Non-fused, broadcast version of `normalize_batch_in_training`. - Arguments: + Args: x: Input tensor or variable. 
gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. @@ -2933,7 +2933,7 @@ def _broadcast_normalize_batch_in_training(x, epsilon=1e-3): """Fused version of `normalize_batch_in_training`. - Arguments: + Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. @@ -2967,7 +2967,7 @@ def _fused_normalize_batch_in_training(x, def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): """Computes mean and std for batch then apply batch_normalization on batch. - Arguments: + Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. @@ -3002,7 +3002,7 @@ def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3): I.e. returns: `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta` - Arguments: + Args: x: Input tensor or variable. mean: Mean of batch. var: Variance of batch. @@ -3063,7 +3063,7 @@ def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3): def concatenate(tensors, axis=-1): """Concatenates a list of tensors alongside the specified axis. - Arguments: + Args: tensors: list of tensors to concatenate. axis: concatenation axis. @@ -3102,7 +3102,7 @@ def concatenate(tensors, axis=-1): def reshape(x, shape): """Reshapes a tensor to the specified shape. - Arguments: + Args: x: Tensor or variable. shape: Target shape tuple. @@ -3133,7 +3133,7 @@ def reshape(x, shape): def permute_dimensions(x, pattern): """Permutes axes in a tensor. - Arguments: + Args: x: Tensor or variable. pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`. @@ -3167,7 +3167,7 @@ def permute_dimensions(x, pattern): def resize_images(x, height_factor, width_factor, data_format, interpolation='nearest'): """Resizes the images contained in a 4D tensor. - Arguments: + Args: x: Tensor or variable to resize. height_factor: Positive integer. width_factor: Positive integer. @@ -3231,7 +3231,7 @@ def resize_images(x, height_factor, width_factor, data_format, def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): """Resizes the volume contained in a 5D tensor. - Arguments: + Args: x: Tensor or variable to resize. depth_factor: Positive integer. height_factor: Positive integer. @@ -3268,7 +3268,7 @@ def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output will have shape `(s1, s2 * rep, s3)`. - Arguments: + Args: x: Tensor or variable. rep: Python integer, number of times to repeat. axis: Axis along which to repeat. @@ -3331,7 +3331,7 @@ def repeat_elements(x, rep, axis): if `x` has shape (samples, dim) and `n` is `2`, the output will have shape `(samples, 2, dim)`. - Arguments: + Args: x: Tensor or variable. n: Python integer, number of times to repeat. @@ -3372,7 +3372,7 @@ def repeat(x, n): The default type of the returned tensor is `'int32'` to match TensorFlow's default. - Arguments: + Args: start: Start value. stop: Stop value. step: Difference between two successive values. @@ -3405,7 +3405,7 @@ def arange(start, stop=None, step=1, dtype='int32'): def tile(x, n): """Creates a tensor by tiling `x` by `n`. - Arguments: + Args: x: A tensor or variable n: A list of integers. The length must be the same as the number of dimensions in `x`. @@ -3424,7 +3424,7 @@ def tile(x, n): def flatten(x): """Flatten a tensor. - Arguments: + Args: x: A tensor or variable. Returns: @@ -3453,7 +3453,7 @@ def batch_flatten(x): In other words, it flattens each data sample of a batch.
- Arguments: + Args: x: A tensor or variable. Returns: @@ -3478,7 +3478,7 @@ def batch_flatten(x): def expand_dims(x, axis=-1): """Adds a 1-sized dimension at index "axis". - Arguments: + Args: x: A tensor or variable. axis: Position where to add a new axis. @@ -3494,7 +3494,7 @@ def expand_dims(x, axis=-1): def squeeze(x, axis): """Removes a 1-dimension from the tensor at index "axis". - Arguments: + Args: x: A tensor or variable. axis: Axis to drop. @@ -3510,7 +3510,7 @@ def squeeze(x, axis): def temporal_padding(x, padding=(1, 1)): """Pads the middle dimension of a 3D tensor. - Arguments: + Args: x: Tensor or variable. padding: Tuple of 2 integers, how many zeros to add at the start and end of dim 1. @@ -3529,7 +3529,7 @@ def temporal_padding(x, padding=(1, 1)): def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): """Pads the 2nd and 3rd dimensions of a 4D tensor. - Arguments: + Args: x: Tensor or variable. padding: Tuple of 2 tuples, padding pattern. data_format: One of `channels_last` or `channels_first`. @@ -3570,7 +3570,7 @@ def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): For 'channels_first' data_format, the 3rd, 4th and 5th dimension will be padded. - Arguments: + Args: x: Tensor or variable. padding: Tuple of 3 tuples, padding pattern. data_format: One of `channels_last` or `channels_first`. @@ -3608,7 +3608,7 @@ def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): def stack(x, axis=0): """Stacks a list of rank `R` tensors into a rank `R+1` tensor. - Arguments: + Args: x: List of tensors. axis: Axis along which to perform stacking. @@ -3636,7 +3636,7 @@ def stack(x, axis=0): def one_hot(indices, num_classes): """Computes the one-hot representation of an integer tensor. - Arguments: + Args: indices: nD integer tensor of shape `(batch_size, dim1, dim2, ... dim(n-1))` num_classes: Integer, number of classes to consider. @@ -3657,7 +3657,7 @@ def one_hot(indices, num_classes): def reverse(x, axes): """Reverse a tensor along the specified axes. - Arguments: + Args: x: Tensor to reverse. axes: Integer or iterable of integers. Axes to reverse. @@ -3710,7 +3710,7 @@ def get_value(x): {snippet} - Arguments: + Args: x: input variable. Returns: @@ -3740,7 +3740,7 @@ def get_value(x): def batch_get_value(tensors): """Returns the value of more than one tensor variable. - Arguments: + Args: tensors: list of ops to run. Returns: @@ -3770,7 +3770,7 @@ def set_value(x, value): {snippet} - Arguments: + Args: x: Variable to set to a new value. value: Value to set the tensor to, as a Numpy array (of the same shape). @@ -3804,7 +3804,7 @@ def set_value(x, value): def batch_set_value(tuples): """Sets the values of many tensor variables at once. - Arguments: + Args: tuples: a list of tuples `(tensor, value)`. `value` should be a Numpy array. """ @@ -3860,7 +3860,7 @@ def print_tensor(x, message=''): array([[1., 2.], [3., 4.]], dtype=float32)> - Arguments: + Args: x: Tensor to print. message: Message to print jointly with the tensor. @@ -3890,7 +3890,7 @@ class GraphExecutionFunction(object): we can modify the values in the dictionary. Through this feed_dict we can provide additional substitutions besides Keras inputs. - Arguments: + Args: inputs: Feed placeholders to the computation graph. outputs: Output tensors to fetch. updates: Additional update ops to be run at function call. 
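The value get/set helpers documented in the hunks above are the numpy bridge for Keras variables; a short sketch of the round trip:

```python
import numpy as np
from tensorflow.keras import backend as K

v = K.variable(np.zeros((2, 2)))
K.set_value(v, np.ones((2, 2)))                 # write a numpy array into the variable
print(K.get_value(v))                           # read it back as a numpy array
K.batch_set_value([(v, np.full((2, 2), 7.0))])  # many assignments in one call
```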
@@ -3958,7 +3958,7 @@ class GraphExecutionFunction(object): def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session): """Generates a callable that runs the graph. - Arguments: + Args: feed_arrays: List of input tensors to be fed Numpy arrays at runtime. feed_symbols: List of input tensors to be fed symbolic tensors at runtime. symbol_vals: List of symbolic tensors to be fed to `feed_symbols`. @@ -4087,7 +4087,7 @@ def eval_in_eager_or_function(outputs): longer needed, after Keras switches to KerasTensors and op layers work via dispatch. - Arguments: + Args: outputs: tensors to fetch. Returns: The value of the tensors (as numpy arrays). @@ -4151,7 +4151,7 @@ def eval_in_eager_or_function(outputs): def function(inputs, outputs, updates=None, name=None, **kwargs): """Instantiates a Keras function. - Arguments: + Args: inputs: List of placeholder tensors. outputs: List of output tensors. updates: List of update ops. @@ -4199,7 +4199,7 @@ def function(inputs, outputs, updates=None, name=None, **kwargs): def gradients(loss, variables): """Returns the gradients of `loss` w.r.t. `variables`. - Arguments: + Args: loss: Scalar tensor to minimize. variables: List of variables. @@ -4216,7 +4216,7 @@ def gradients(loss, variables): def stop_gradient(variables): """Returns `variables` but with zero gradient w.r.t. every other variable. - Arguments: + Args: variables: Tensor or list of tensors to consider constant with respect to any other variable. @@ -4247,7 +4247,7 @@ def rnn(step_function, zero_output_for_mask=False): """Iterates over the time dimension of a tensor. - Arguments: + Args: step_function: RNN step function. Args; input; Tensor with shape `(samples, ...)` (no time dimension), @@ -4525,7 +4525,7 @@ def rnn(step_function, def _step(time, output_ta_t, prev_output, *states): """RNN step function. - Arguments: + Args: time: Current timestep value. output_ta_t: TensorArray. prev_output: tuple of outputs from time - 1. @@ -4573,7 +4573,7 @@ def rnn(step_function, def _step(time, output_ta_t, *states): """RNN step function. - Arguments: + Args: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. @@ -4637,7 +4637,7 @@ def switch(condition, then_expression, else_expression): Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. - Arguments: + Args: condition: tensor (`int` or `bool`). then_expression: either a tensor, or a callable that returns a tensor. else_expression: either a tensor, or a callable that returns a tensor. @@ -4701,7 +4701,7 @@ def in_train_phase(x, alt, training=None): Note that `alt` should have the *same shape* as `x`. - Arguments: + Args: x: What to return in train phase (tensor or callable that returns a tensor). alt: What to return otherwise @@ -4747,7 +4747,7 @@ def in_test_phase(x, alt, training=None): Note that `alt` should have the *same shape* as `x`. - Arguments: + Args: x: What to return in test phase (tensor or callable that returns a tensor). alt: What to return otherwise @@ -4778,7 +4778,7 @@ def relu(x, alpha=0., max_value=None, threshold=0): `f(x) = x` for `threshold <= x < max_value`, `f(x) = alpha * (x - threshold)` otherwise. - Arguments: + Args: x: A tensor or variable. alpha: A scalar, slope of negative section (default=`0.`). max_value: float. Saturation threshold. @@ -4829,7 +4829,7 @@ def relu(x, alpha=0., max_value=None, threshold=0): def elu(x, alpha=1.): """Exponential linear unit. 
- Arguments: + Args: x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of negative section. @@ -4849,7 +4849,7 @@ def elu(x, alpha=1.): def softmax(x, axis=-1): """Softmax of a tensor. - Arguments: + Args: x: A tensor or variable. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. @@ -4866,7 +4866,7 @@ def softmax(x, axis=-1): def softplus(x): """Softplus of a tensor. - Arguments: + Args: x: A tensor or variable. Returns: @@ -4881,7 +4881,7 @@ def softplus(x): def softsign(x): """Softsign of a tensor. - Arguments: + Args: x: A tensor or variable. Returns: @@ -4896,7 +4896,7 @@ def softsign(x): def categorical_crossentropy(target, output, from_logits=False, axis=-1): """Categorical crossentropy between an output tensor and a target tensor. - Arguments: + Args: target: A tensor of the same shape as `output`. output: A tensor resulting from a softmax (unless `from_logits` is True, in which @@ -4979,7 +4979,7 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1): def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): """Categorical crossentropy with integer targets. - Arguments: + Args: target: An integer tensor. output: A tensor resulting from a softmax (unless `from_logits` is True, in which @@ -5073,7 +5073,7 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): def binary_crossentropy(target, output, from_logits=False): """Binary crossentropy between an output tensor and a target tensor. - Arguments: + Args: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. @@ -5124,7 +5124,7 @@ def binary_crossentropy(target, output, from_logits=False): def sigmoid(x): """Element-wise sigmoid. - Arguments: + Args: x: A tensor or variable. Returns: @@ -5143,7 +5143,7 @@ def hard_sigmoid(x): Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`. In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`. - Arguments: + Args: x: A tensor or variable. Returns: @@ -5163,7 +5163,7 @@ def hard_sigmoid(x): def tanh(x): """Element-wise tanh. - Arguments: + Args: x: A tensor or variable. Returns: @@ -5178,7 +5178,7 @@ def tanh(x): def dropout(x, level, noise_shape=None, seed=None): """Sets entries in `x` to zero at random, while scaling the entire tensor. - Arguments: + Args: x: tensor level: fraction of the entries in the tensor that will be set to 0. @@ -5200,7 +5200,7 @@ def dropout(x, level, noise_shape=None, seed=None): def l2_normalize(x, axis=None): """Normalizes a tensor wrt the L2 norm alongside the specified axis. - Arguments: + Args: x: Tensor or variable. axis: axis along which to perform normalization. @@ -5216,7 +5216,7 @@ def l2_normalize(x, axis=None): def in_top_k(predictions, targets, k): """Returns whether the `targets` are in the top `k` `predictions`. - Arguments: + Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. @@ -5235,7 +5235,7 @@ def in_top_k(predictions, targets, k): def _preprocess_conv1d_input(x, data_format): """Transpose and cast the input before the conv1d. - Arguments: + Args: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. 
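A sketch of the crossentropy entry points documented above; the tensors are made up, and the `from_logits` behavior follows the docstrings in these hunks:

```python
from tensorflow.keras import backend as K

target = K.constant([[1., 0., 0.], [0., 1., 0.]])
logits = K.constant([[2., 1., -1.], [0., 3., 1.]])
# With from_logits=True the softmax is folded into the loss computation,
# which is more numerically stable than passing softmax output.
loss = K.categorical_crossentropy(target, logits, from_logits=True)
print(K.eval(loss))  # one loss value per sample
```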
@@ -5254,7 +5254,7 @@ def _preprocess_conv1d_input(x, data_format): def _preprocess_conv2d_input(x, data_format, force_transpose=False): """Transpose and cast the input before the conv2d. - Arguments: + Args: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. force_transpose: Boolean. If True, the input will always be transposed @@ -5277,7 +5277,7 @@ def _preprocess_conv2d_input(x, data_format, force_transpose=False): def _preprocess_conv3d_input(x, data_format): """Transpose and cast the input before the conv3d. - Arguments: + Args: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. @@ -5296,7 +5296,7 @@ def _preprocess_conv3d_input(x, data_format): def _preprocess_padding(padding): """Convert keras' padding to TensorFlow's padding. - Arguments: + Args: padding: string, one of 'same' , 'valid' Returns: @@ -5325,7 +5325,7 @@ def conv1d(x, dilation_rate=1): """1D convolution. - Arguments: + Args: x: Tensor or variable. kernel: kernel tensor. strides: stride integer. @@ -5377,7 +5377,7 @@ def conv2d(x, dilation_rate=(1, 1)): """2D convolution. - Arguments: + Args: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. @@ -5425,7 +5425,7 @@ def conv2d_transpose(x, transposed convolution). - Arguments: + Args: x: Tensor or variable. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. @@ -5495,7 +5495,7 @@ def separable_conv1d(x, dilation_rate=1): """1D convolution with separable filters. - Arguments: + Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. @@ -5565,7 +5565,7 @@ def separable_conv2d(x, dilation_rate=(1, 1)): """2D convolution with separable filters. - Arguments: + Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. @@ -5623,7 +5623,7 @@ def depthwise_conv2d(x, dilation_rate=(1, 1)): """2D convolution with separable filters. - Arguments: + Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. strides: strides tuple (length 2). @@ -5674,7 +5674,7 @@ def conv3d(x, dilation_rate=(1, 1, 1)): """3D convolution. - Arguments: + Args: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. @@ -5718,7 +5718,7 @@ def conv3d_transpose(x, transposed convolution). - Arguments: + Args: x: input tensor. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. @@ -5778,7 +5778,7 @@ def pool2d(x, pool_mode='max'): """2D Pooling. - Arguments: + Args: x: Tensor or variable. pool_size: tuple of 2 integers. strides: tuple of 2 integers. @@ -5839,7 +5839,7 @@ def pool3d(x, pool_mode='max'): """3D Pooling. - Arguments: + Args: x: Tensor or variable. pool_size: tuple of 3 integers. strides: tuple of 3 integers. @@ -5891,7 +5891,7 @@ def local_conv(inputs, data_format=None): """Apply N-D convolution with un-shared weights. - Arguments: + Args: inputs: (N+2)-D tensor with shape (batch_size, channels_in, d_in1, ..., d_inN) if data_format='channels_first', or @@ -5966,7 +5966,7 @@ def local_conv(inputs, def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): """Apply 1D conv with un-shared weights. - Arguments: + Args: inputs: 3D tensor with shape: (batch_size, steps, input_dim) if data_format is "channels_last" or @@ -6008,7 +6008,7 @@ def local_conv2d(inputs, data_format=None): """Apply 2D conv with un-shared weights. 
- Arguments: + Args: inputs: 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' @@ -6046,7 +6046,7 @@ def local_conv2d(inputs, def bias_add(x, bias, data_format=None): """Adds a bias vector to a tensor. - Arguments: + Args: x: Tensor or variable. bias: Bias tensor to add. data_format: string, `"channels_last"` or `"channels_first"`. @@ -6094,7 +6094,7 @@ def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): It is an alias to `tf.random.normal`. - Arguments: + Args: shape: A tuple of integers, the shape of tensor to create. mean: A float, the mean value of the normal distribution to draw samples. Default to 0.0. @@ -6130,7 +6130,7 @@ def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): """Returns a tensor with uniform distribution of values. - Arguments: + Args: shape: A tuple of integers, the shape of tensor to create. minval: A float, lower boundary of the uniform distribution to draw samples. @@ -6170,7 +6170,7 @@ def random_binomial(shape, p=0.0, dtype=None, seed=None): distribution of the number of successful Bernoulli process. Only supports `n` = 1 for now. - Arguments: + Args: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of binomial distribution. dtype: String, dtype of returned tensor. @@ -6199,7 +6199,7 @@ def random_binomial(shape, p=0.0, dtype=None, seed=None): def random_bernoulli(shape, p=0.0, dtype=None, seed=None): """Returns a tensor with random bernoulli distribution of values. - Arguments: + Args: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of bernoulli distribution. dtype: String, dtype of returned tensor. @@ -6228,7 +6228,7 @@ def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): except that values whose magnitude is more than two standard deviations from the mean are dropped and re-picked. - Arguments: + Args: shape: A tuple of integers, the shape of tensor to create. mean: Mean of the values. stddev: Standard deviation of the values. @@ -6259,7 +6259,7 @@ def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): def ctc_label_dense_to_sparse(labels, label_lengths): """Converts CTC labels from dense to sparse. - Arguments: + Args: labels: dense CTC labels. label_lengths: length of the labels. @@ -6307,7 +6307,7 @@ def ctc_label_dense_to_sparse(labels, label_lengths): def ctc_batch_cost(y_true, y_pred, input_length, label_length): """Runs CTC loss algorithm on each batch element. - Arguments: + Args: y_true: tensor `(samples, max_string_length)` containing the truth labels. y_pred: tensor `(samples, time_steps, num_categories)` @@ -6344,7 +6344,7 @@ def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): Can use either greedy search (also known as best path) or a constrained dictionary search. - Arguments: + Args: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for @@ -6398,7 +6398,7 @@ def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): def map_fn(fn, elems, name=None, dtype=None): """Map the function fn over the elements elems and return the outputs. 
- Arguments: + Args: fn: Callable that will be called upon each element in elems elems: tensor name: A string name for the map node in the graph @@ -6415,7 +6415,7 @@ def map_fn(fn, elems, name=None, dtype=None): def foldl(fn, elems, initializer=None, name=None): """Reduce elems using fn to combine them from left to right. - Arguments: + Args: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor @@ -6433,7 +6433,7 @@ def foldl(fn, elems, initializer=None, name=None): def foldr(fn, elems, initializer=None, name=None): """Reduce elems using fn to combine them from right to left. - Arguments: + Args: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor diff --git a/tensorflow/python/keras/backend_config.py b/tensorflow/python/keras/backend_config.py index cd1f1e4b423..4c411256734 100644 --- a/tensorflow/python/keras/backend_config.py +++ b/tensorflow/python/keras/backend_config.py @@ -49,7 +49,7 @@ def epsilon(): def set_epsilon(value): """Sets the value of the fuzz factor used in numeric expressions. - Arguments: + Args: value: float. New value of epsilon. Example: @@ -91,7 +91,7 @@ def set_floatx(value): [mixed precision guide]( https://www.tensorflow.org/guide/keras/mixed_precision) for details. - Arguments: + Args: value: String; `'float16'`, `'float32'`, or `'float64'`. Example: @@ -130,7 +130,7 @@ def image_data_format(): def set_image_data_format(data_format): """Sets the value of the image data format convention. - Arguments: + Args: data_format: string. `'channels_first'` or `'channels_last'`. Example: diff --git a/tensorflow/python/keras/benchmarks/benchmark_util.py b/tensorflow/python/keras/benchmarks/benchmark_util.py index 0bbd7ba8947..969ad733a52 100644 --- a/tensorflow/python/keras/benchmarks/benchmark_util.py +++ b/tensorflow/python/keras/benchmarks/benchmark_util.py @@ -33,7 +33,7 @@ def get_benchmark_name(name): This is to generate the metadata of the benchmark test. - Arguments: + Args: name: A string, the benchmark name. Returns: @@ -47,7 +47,7 @@ def get_benchmark_name(name): def generate_benchmark_params_cpu_gpu(*params_list): """Extend the benchmark names with CPU and GPU suffix. - Arguments: + Args: *params_list: A list of tuples represents the benchmark parameters. Returns: @@ -109,7 +109,7 @@ def measure_performance(model_fn, distribution_strategy='off'): """Run models and measure the performance. - Arguments: + Args: model_fn: Model function to be benchmarked. x: Input data. See `x` in the `fit()` method of `keras.Model`. y: Target data. See `y` in the `fit()` method of `keras.Model`. diff --git a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py index 7b28087c984..f1b431a35db 100644 --- a/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py +++ b/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py @@ -67,7 +67,7 @@ class CustomMnistBenchmark(tf.test.Benchmark): def train_step(self, inputs, model, loss_fn, optimizer, batch_size): """Compute loss and optimize model by optimizer. - Arguments: + Args: inputs: `tf.data`. model: See `model` in `train_function()` method. loss_fn: See `loss_fn` in `train_function()` method. 
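The `train_step` being benchmarked above follows the standard custom-training pattern. A minimal sketch of that pattern, with `model`, `loss_fn`, and `optimizer` mirroring the docstring argument names (the tiny model and random data are illustrative only):

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.SGD()

@tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        loss = loss_fn(y, logits)
    # Differentiate w.r.t. the trainable variables and apply one update.
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

x = tf.random.normal((32, 8))
y = tf.random.uniform((32,), maxval=10, dtype=tf.int32)
print(train_step(x, y))
```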
@@ -90,7 +90,7 @@ class CustomMnistBenchmark(tf.test.Benchmark): batch_size, distribution_strategy): """Train step in distribution strategy setting. - Arguments: + Args: batch_dataset: `tf.data`. model: See `model` in `train_function()` method. loss_fn: See `loss_fn` in `train_function()` method. @@ -126,7 +126,7 @@ class CustomMnistBenchmark(tf.test.Benchmark): train_step_time. - Arguments: + Args: model: Model function to be benchmarked. train_dataset: `tf.data` dataset. Should return a tuple of either (inputs, targets) or (inputs, targets, sample_weights). @@ -181,7 +181,7 @@ class CustomMnistBenchmark(tf.test.Benchmark): distribution_strategy=None): """Run models and measure the performance. - Arguments: + Args: model_fn: Model function to be benchmarked. dataset: `tf.data` dataset. Should return a tuple of either (inputs, targets) or (inputs, targets, sample_weights). diff --git a/tensorflow/python/keras/benchmarks/optimizer_benchmarks_test.py b/tensorflow/python/keras/benchmarks/optimizer_benchmarks_test.py index a5ba7711bea..dc5f23aaa04 100644 --- a/tensorflow/python/keras/benchmarks/optimizer_benchmarks_test.py +++ b/tensorflow/python/keras/benchmarks/optimizer_benchmarks_test.py @@ -58,7 +58,7 @@ class KerasOptimizerBenchmark( def benchmark_optimizer(self, optimizer, num_iters): """Optimizer benchmark with Bidirectional LSTM model on IMDB data. - Arguments: + Args: optimizer: The optimizer instance to be benchmarked. num_iters: The number of iterations to run for performance measurement. """ diff --git a/tensorflow/python/keras/callbacks.py b/tensorflow/python/keras/callbacks.py index 96d903178ec..ae81157f00b 100644 --- a/tensorflow/python/keras/callbacks.py +++ b/tensorflow/python/keras/callbacks.py @@ -85,7 +85,7 @@ def configure_callbacks(callbacks, mode=ModeKeys.TRAIN): """Configures callbacks for use in various training loops. - Arguments: + Args: callbacks: List of Callbacks. model: Model being trained. do_validation: Whether or not validation loop will be run. @@ -146,7 +146,7 @@ def set_callback_parameters(callback_list, mode=ModeKeys.TRAIN): """Sets callback parameters. - Arguments: + Args: callback_list: CallbackList instance. model: Model being trained. do_validation: Whether or not validation loop will be run. @@ -216,7 +216,7 @@ class CallbackList(object): to call them all at once via a single endpoint (e.g. `callback_list.on_epoch_end(...)`). - Arguments: + Args: callbacks: List of `Callback` instances. add_history: Whether a `History` callback should be added, if one does not already exist in the `callbacks` list. @@ -397,7 +397,7 @@ class CallbackList(object): This function should only be called during TRAIN mode. - Arguments: + Args: epoch: Integer, index of epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. @@ -417,7 +417,7 @@ class CallbackList(object): This function should only be called during TRAIN mode. - Arguments: + Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys @@ -436,7 +436,7 @@ class CallbackList(object): def on_train_batch_begin(self, batch, logs=None): """Calls the `on_train_batch_begin` methods of its callbacks. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.train_step`. Typically, the values of the `Model`'s metrics are returned. 
Example: @@ -448,7 +448,7 @@ class CallbackList(object): def on_train_batch_end(self, batch, logs=None): """Calls the `on_train_batch_end` methods of its callbacks. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ @@ -458,7 +458,7 @@ class CallbackList(object): def on_test_batch_begin(self, batch, logs=None): """Calls the `on_test_batch_begin` methods of its callbacks. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.test_step`. Typically, the values of the `Model`'s metrics are returned. Example: @@ -470,7 +470,7 @@ class CallbackList(object): def on_test_batch_end(self, batch, logs=None): """Calls the `on_test_batch_end` methods of its callbacks. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ @@ -480,7 +480,7 @@ class CallbackList(object): def on_predict_batch_begin(self, batch, logs=None): """Calls the `on_predict_batch_begin` methods of its callbacks. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.predict_step`, it typically returns a dict with a key 'outputs' containing @@ -492,7 +492,7 @@ class CallbackList(object): def on_predict_batch_end(self, batch, logs=None): """Calls the `on_predict_batch_end` methods of its callbacks. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ @@ -502,7 +502,7 @@ class CallbackList(object): def on_train_begin(self, logs=None): """Calls the `on_train_begin` methods of its callbacks. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -519,7 +519,7 @@ class CallbackList(object): def on_train_end(self, logs=None): """Calls the `on_train_end` methods of its callbacks. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -536,7 +536,7 @@ class CallbackList(object): def on_test_begin(self, logs=None): """Calls the `on_test_begin` methods of its callbacks. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -553,7 +553,7 @@ class CallbackList(object): def on_test_end(self, logs=None): """Calls the `on_test_end` methods of its callbacks. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -570,7 +570,7 @@ class CallbackList(object): def on_predict_begin(self, logs=None): """Calls the 'on_predict_begin` methods of its callbacks. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -587,7 +587,7 @@ class CallbackList(object): def on_predict_end(self, logs=None): """Calls the `on_predict_end` methods of its callbacks. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -652,7 +652,7 @@ class Callback(object): Subclasses should override for any actions to run. This function should only be called during TRAIN mode. - Arguments: + Args: epoch: Integer, index of epoch. logs: Dict. 
Currently no data is passed to this argument for this method but that may change in the future. @@ -665,7 +665,7 @@ class Callback(object): Subclasses should override for any actions to run. This function should only be called during TRAIN mode. - Arguments: + Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys @@ -684,7 +684,7 @@ class Callback(object): `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.train_step`. Typically, the values of the `Model`'s metrics are returned. Example: @@ -704,7 +704,7 @@ class Callback(object): `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ @@ -725,7 +725,7 @@ class Callback(object): `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.test_step`. Typically, the values of the `Model`'s metrics are returned. Example: @@ -746,7 +746,7 @@ class Callback(object): `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ @@ -762,7 +762,7 @@ class Callback(object): `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of `model.predict_step`, it typically returns a dict with a key 'outputs' containing @@ -780,7 +780,7 @@ class Callback(object): `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. - Arguments: + Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ @@ -791,7 +791,7 @@ class Callback(object): Subclasses should override for any actions to run. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -802,7 +802,7 @@ class Callback(object): Subclasses should override for any actions to run. - Arguments: + Args: logs: Dict. Currently the output of the last call to `on_epoch_end()` is passed to this argument for this method but that may change in the future. @@ -814,7 +814,7 @@ class Callback(object): Subclasses should override for any actions to run. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -825,7 +825,7 @@ class Callback(object): Subclasses should override for any actions to run. - Arguments: + Args: logs: Dict. Currently the output of the last call to `on_test_batch_end()` is passed to this argument for this method but that may change in the future. @@ -837,7 +837,7 @@ class Callback(object): Subclasses should override for any actions to run. - Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -848,7 +848,7 @@ class Callback(object): Subclasses should override for any actions to run. 
- Arguments: + Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ @@ -877,7 +877,7 @@ class BaseLogger(Callback): This callback is automatically applied to every Keras model. - Arguments: + Args: stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is in `on_epoch_end`. @@ -943,7 +943,7 @@ class TerminateOnNaN(Callback): class ProgbarLogger(Callback): """Callback that prints metrics to stdout. - Arguments: + Args: count_mode: One of `"steps"` or `"samples"`. Whether the progress bar should count samples seen or steps (batches) seen. @@ -1172,7 +1172,7 @@ class ModelCheckpoint(Callback): model.load_weights(checkpoint_filepath) ``` - Arguments: + Args: filepath: string or `PathLike`, path to save the model file. e.g. filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath` can contain named formatting options, which will be filled the value of @@ -1367,7 +1367,7 @@ class ModelCheckpoint(Callback): def _save_model(self, epoch, logs): """Saves the model. - Arguments: + Args: epoch: the epoch this iteration is in. logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`. """ @@ -1488,7 +1488,7 @@ class ModelCheckpoint(Callback): file_paths[-1]) ``` - Arguments: + Args: pattern: The file pattern that may optionally contain python placeholder such as `{epoch:02d}`. @@ -1592,7 +1592,7 @@ class BackupAndRestore(Callback): >>> len(history.history['loss']) 6 - Arguments: + Args: backup_dir: String, path to store the checkpoint. e.g. backup_dir = os.path.join(working_dir, 'backup') This is the directory in which the system stores temporary files to @@ -1675,7 +1675,7 @@ class EarlyStopping(Callback): The quantity to be monitored needs to be available in `logs` dict. To make it so, pass the loss or metrics at `model.compile()`. - Arguments: + Args: monitor: Quantity to be monitored. min_delta: Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute @@ -1808,7 +1808,7 @@ class RemoteMonitor(Callback): `"application/json"`. Otherwise the serialized JSON will be sent within a form. - Arguments: + Args: root: String; root url of the target server. path: String; path relative to `root` to which the events will be sent. field: String; JSON field under which the data will be stored. @@ -1868,7 +1868,7 @@ class LearningRateScheduler(Callback): and current learning rate, and applies the updated learning rate on the optimizer. - Arguments: + Args: schedule: a function that takes an epoch index (integer, indexed from 0) and current learning rate (float) as inputs and returns a new learning rate as output (float). @@ -1994,7 +1994,7 @@ class TensorBoard(Callback, version_utils.TensorBoardVersionSelector): You can find more information about TensorBoard [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard). - Arguments: + Args: log_dir: the path of the directory where to save the log files to be parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir, 'logs') This directory should not be reused by any other callbacks. @@ -2300,7 +2300,7 @@ class TensorBoard(Callback, version_utils.TensorBoardVersionSelector): def _init_profile_batch(self, profile_batch): """Validate profile_batch value and set the range of batches to profile. - Arguments: + Args: profile_batch: The range of batches to profile. Should be a non-negative integer or a comma separated string of pair of positive integers. 
A pair of positive integers signify a range of batches to profile. @@ -2441,7 +2441,7 @@ class TensorBoard(Callback, version_utils.TensorBoardVersionSelector): def _log_epoch_metrics(self, epoch, logs): """Writes epoch metrics out as scalar summaries. - Arguments: + Args: epoch: Int. The global step to use for TensorBoard. logs: Dict. Keys are scalar summary names, values are scalars. """ @@ -2524,7 +2524,7 @@ class ReduceLROnPlateau(Callback): model.fit(X_train, Y_train, callbacks=[reduce_lr]) ``` - Arguments: + Args: monitor: quantity to be monitored. factor: factor by which the learning rate will be reduced. `new_lr = lr * factor`. @@ -2645,7 +2645,7 @@ class CSVLogger(Callback): model.fit(X_train, Y_train, callbacks=[csv_logger]) ``` - Arguments: + Args: filename: Filename of the CSV file, e.g. `'run/log.csv'`. separator: String used to separate elements in the CSV file. append: Boolean. True: append if file exists (useful for continuing @@ -2737,7 +2737,7 @@ class LambdaCallback(Callback): - `on_train_begin` and `on_train_end` expect one positional argument: `logs` - Arguments: + Args: on_epoch_begin: called at the beginning of every epoch. on_epoch_end: called at the end of every epoch. on_batch_begin: called at the beginning of every batch. diff --git a/tensorflow/python/keras/callbacks_v1.py b/tensorflow/python/keras/callbacks_v1.py index 79a8e9d2efc..f38bee13c5d 100644 --- a/tensorflow/python/keras/callbacks_v1.py +++ b/tensorflow/python/keras/callbacks_v1.py @@ -61,7 +61,7 @@ class TensorBoard(callbacks.TensorBoard): You can find more information about TensorBoard [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard). - Arguments: + Args: log_dir: the path of the directory where to save the log files to be parsed by TensorBoard. histogram_freq: frequency (in epochs) at which to compute activation and @@ -318,7 +318,7 @@ class TensorBoard(callbacks.TensorBoard): def _write_custom_summaries(self, step, logs=None): """Writes metrics out as custom scalar summaries. - Arguments: + Args: step: the global step to use for TensorBoard. logs: dict. Keys are scalar summary names, values are NumPy scalars. diff --git a/tensorflow/python/keras/constraints.py b/tensorflow/python/keras/constraints.py index 7cdc00151a6..8f42d9f937f 100644 --- a/tensorflow/python/keras/constraints.py +++ b/tensorflow/python/keras/constraints.py @@ -51,7 +51,7 @@ class MaxNorm(Constraint): Also available via the shortcut function `tf.keras.constraints.max_norm`. - Arguments: + Args: max_value: the maximum norm value for the incoming weights. axis: integer, axis along which to calculate weight norms. For instance, in a `Dense` layer the weight matrix @@ -100,7 +100,7 @@ class UnitNorm(Constraint): Also available via the shortcut function `tf.keras.constraints.unit_norm`. - Arguments: + Args: axis: integer, axis along which to calculate weight norms. For instance, in a `Dense` layer the weight matrix has shape `(input_dim, output_dim)`, @@ -138,7 +138,7 @@ class MinMaxNorm(Constraint): Also available via the shortcut function `tf.keras.constraints.min_max_norm`. - Arguments: + Args: min_value: the minimum norm for the incoming weights. max_value: the maximum norm for the incoming weights. 
rate: rate for enforcing the constraint: weights will be diff --git a/tensorflow/python/keras/datasets/boston_housing.py b/tensorflow/python/keras/datasets/boston_housing.py index 8886634a4b7..43c9bc22d47 100644 --- a/tensorflow/python/keras/datasets/boston_housing.py +++ b/tensorflow/python/keras/datasets/boston_housing.py @@ -38,7 +38,7 @@ def load_data(path='boston_housing.npz', test_split=0.2, seed=113): The attributes themselves are defined in the [StatLib website](http://lib.stat.cmu.edu/datasets/boston). - Arguments: + Args: path: path where to cache the dataset locally (relative to `~/.keras/datasets`). test_split: fraction of the data to reserve as test set. diff --git a/tensorflow/python/keras/datasets/cifar.py b/tensorflow/python/keras/datasets/cifar.py index 02344897f77..c769c594420 100644 --- a/tensorflow/python/keras/datasets/cifar.py +++ b/tensorflow/python/keras/datasets/cifar.py @@ -26,7 +26,7 @@ from six.moves import cPickle def load_batch(fpath, label_key='labels'): """Internal utility for parsing CIFAR data. - Arguments: + Args: fpath: path the file to parse. label_key: key for label data in the retrieve dictionary. diff --git a/tensorflow/python/keras/datasets/cifar100.py b/tensorflow/python/keras/datasets/cifar100.py index 5596f6ebb9b..c83d427948d 100644 --- a/tensorflow/python/keras/datasets/cifar100.py +++ b/tensorflow/python/keras/datasets/cifar100.py @@ -37,7 +37,7 @@ def load_data(label_mode='fine'): grouped into 20 coarse-grained classes. See more info at the [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html). - Arguments: + Args: label_mode: one of "fine", "coarse". If it is "fine" the category labels are the fine-grained labels, if it is "coarse" the output labels are the coarse-grained superclasses. diff --git a/tensorflow/python/keras/datasets/imdb.py b/tensorflow/python/keras/datasets/imdb.py index e359d691a5d..44aafdeb40c 100644 --- a/tensorflow/python/keras/datasets/imdb.py +++ b/tensorflow/python/keras/datasets/imdb.py @@ -52,7 +52,7 @@ def load_data(path='imdb.npz', As a convention, "0" does not stand for a specific word, but instead is used to encode any unknown word. - Arguments: + Args: path: where to cache the data (relative to `~/.keras/dataset`). num_words: integer or None. Words are ranked by how often they occur (in the training set) and only @@ -166,7 +166,7 @@ def load_data(path='imdb.npz', def get_word_index(path='imdb_word_index.json'): """Retrieves a dict mapping words to their index in the IMDB dataset. - Arguments: + Args: path: where to cache the data (relative to `~/.keras/dataset`). Returns: diff --git a/tensorflow/python/keras/datasets/mnist.py b/tensorflow/python/keras/datasets/mnist.py index f371ad4ece5..1e7bfc08c1a 100644 --- a/tensorflow/python/keras/datasets/mnist.py +++ b/tensorflow/python/keras/datasets/mnist.py @@ -34,7 +34,7 @@ def load_data(path='mnist.npz'): [MNIST homepage](http://yann.lecun.com/exdb/mnist/). - Arguments: + Args: path: path where to cache the dataset locally (relative to `~/.keras/datasets`). diff --git a/tensorflow/python/keras/datasets/reuters.py b/tensorflow/python/keras/datasets/reuters.py index b71440fd632..a1319a91450 100644 --- a/tensorflow/python/keras/datasets/reuters.py +++ b/tensorflow/python/keras/datasets/reuters.py @@ -60,7 +60,7 @@ def load_data(path='reuters.npz', to encode any unknown word. - Arguments: + Args: path: where to cache the data (relative to `~/.keras/dataset`). num_words: integer or None. 
Words are ranked by how often they occur (in the training set) and only @@ -155,7 +155,7 @@ def load_data(path='reuters.npz', def get_word_index(path='reuters_word_index.json'): """Retrieves a dict mapping words to their index in the Reuters dataset. - Arguments: + Args: path: where to cache the data (relative to `~/.keras/dataset`). Returns: diff --git a/tensorflow/python/keras/distribute/distributed_training_utils.py b/tensorflow/python/keras/distribute/distributed_training_utils.py index 9f707f57668..8f4b62b9e54 100644 --- a/tensorflow/python/keras/distribute/distributed_training_utils.py +++ b/tensorflow/python/keras/distribute/distributed_training_utils.py @@ -41,7 +41,7 @@ def call_replica_local_fn(fn, *args, **kwargs): This function correctly handles calling `fn` in a cross-replica context. - Arguments: + Args: fn: The function to call. *args: Positional arguments to the `fn`. **kwargs: Keyword argument to `fn`. diff --git a/tensorflow/python/keras/distribute/distributed_training_utils_v1.py b/tensorflow/python/keras/distribute/distributed_training_utils_v1.py index c631ae07b19..68a999e2afe 100644 --- a/tensorflow/python/keras/distribute/distributed_training_utils_v1.py +++ b/tensorflow/python/keras/distribute/distributed_training_utils_v1.py @@ -611,7 +611,7 @@ def _get_input_from_iterator(iterator, model): def _prepare_feed_values(model, inputs, targets, sample_weights, mode): """Prepare feed values to the model execution function. - Arguments: + Args: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. @@ -1097,7 +1097,7 @@ def is_current_worker_chief(): def filter_distributed_callbacks(callbacks_list, model): """Filter Callbacks based on the worker context when running multi-worker. - Arguments: + Args: callbacks_list: A list of `Callback` instances. model: Keras model instance. diff --git a/tensorflow/python/keras/distribute/keras_correctness_test_base.py b/tensorflow/python/keras/distribute/keras_correctness_test_base.py index 37a63a5774b..a11216cf3ad 100644 --- a/tensorflow/python/keras/distribute/keras_correctness_test_base.py +++ b/tensorflow/python/keras/distribute/keras_correctness_test_base.py @@ -415,7 +415,7 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase, We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. - Arguments: + Args: **kwargs: key word arguments about how to create the input dictionaries Returns: @@ -522,7 +522,7 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase, We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. - Arguments: + Args: **kwargs: key word arguments about how to create the input dictionaries Returns: diff --git a/tensorflow/python/keras/distribute/worker_training_state.py b/tensorflow/python/keras/distribute/worker_training_state.py index 41377a27936..6f86616651d 100644 --- a/tensorflow/python/keras/distribute/worker_training_state.py +++ b/tensorflow/python/keras/distribute/worker_training_state.py @@ -88,7 +88,7 @@ class WorkerTrainingState(object): def back_up(self, epoch): """Back up the current state of training into a checkpoint file. - Arguments: + Args: epoch: The current epoch information to be saved. 
""" K.set_value(self._ckpt_saved_epoch, epoch) @@ -125,7 +125,7 @@ class WorkerTrainingState(object): infer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous unfinished training from certain epoch. - Arguments: + Args: initial_epoch: The original initial_epoch user passes in in `fit()`. mode: The mode for running `model.fit()`. diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py index 894751c92ff..ce27810cdc1 100644 --- a/tensorflow/python/keras/engine/base_layer.py +++ b/tensorflow/python/keras/engine/base_layer.py @@ -122,7 +122,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): Users will just instantiate a layer and then treat it as a callable. - Arguments: + Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights. Can also be a @@ -459,7 +459,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): This is typically used to create the weights of `Layer` subclasses. - Arguments: + Args: input_shape: Instance of `TensorShape`, or list of instances of `TensorShape` if the layer expects a list of inputs (one instance per input). @@ -478,7 +478,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): layers as additional arguments. Whereas `tf.keras` has `compute_mask()` method to support masking. - Arguments: + Args: inputs: Input tensor, or list/tuple of input tensors. **kwargs: Additional keyword arguments. Currently unused. @@ -491,7 +491,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def _add_trackable(self, trackable_object, trainable): """Adds a Trackable object to this layer's state. - Arguments: + Args: trackable_object: The tf.tracking.Trackable object to add. trainable: Boolean, whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or @@ -522,7 +522,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): **kwargs): """Adds a new variable to the layer. - Arguments: + Args: name: Variable name. shape: Variable shape. Defaults to scalar if unspecified. dtype: The type of the variable. Defaults to `self.dtype`. @@ -717,7 +717,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by `set_weights`). - Arguments: + Args: config: A Python dictionary, typically the output of get_config. @@ -733,7 +733,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): layer. This assumes that the layer will later be used with inputs that match the input shape provided here. - Arguments: + Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, @@ -886,7 +886,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument """Computes an output mask tensor. - Arguments: + Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. @@ -907,7 +907,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def __call__(self, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. - Arguments: + Args: *args: Positional arguments to be passed to `self.call`. **kwargs: Keyword arguments to be passed to `self.call`. 
@@ -1531,7 +1531,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): model.add_loss(lambda: tf.reduce_mean(d.kernel)) ``` - Arguments: + Args: losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses may also be zero-argument callables which create a loss tensor. **kwargs: Additional keyword arguments for backward compatibility. @@ -1773,7 +1773,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): updates are run on the fly and thus do not need to be tracked for later execution). - Arguments: + Args: updates: Update op, or list/tuple of update ops, or zero-arg callable that returns an update op. A zero-arg callable should be passed in order to disable running the updates by setting `trainable=False` @@ -1828,7 +1828,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): [1.], [1.]], dtype=float32), array([0.], dtype=float32)] - Arguments: + Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights @@ -1925,7 +1925,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): Retrieves updates relevant to a specific set of inputs. - Arguments: + Args: inputs: Input tensor or list/tuple of input tensors. Returns: @@ -1942,7 +1942,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): Retrieves losses relevant to a specific set of inputs. - Arguments: + Args: inputs: Input tensor or list/tuple of input tensors. Returns: @@ -1957,7 +1957,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def get_input_mask_at(self, node_index): """Retrieves the input mask tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -1977,7 +1977,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def get_output_mask_at(self, node_index): """Retrieves the output mask tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -2041,7 +2041,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def get_input_shape_at(self, node_index): """Retrieves the input shape(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -2061,7 +2061,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def get_output_shape_at(self, node_index): """Retrieves the output shape(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -2081,7 +2081,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def get_input_at(self, node_index): """Retrieves the input tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -2100,7 +2100,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def get_output_at(self, node_index): """Retrieves the output tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. 
`node_index=0` will correspond to the @@ -2261,7 +2261,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): This is an alias of `self.__call__`. - Arguments: + Args: inputs: Input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. @@ -2650,7 +2650,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): - get_input_at etc... - Arguments: + Args: node_index: Integer index of the node from which to retrieve the attribute. attr: Exact node attribute name. @@ -2916,7 +2916,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): def _flatten_modules(self, recursive=True, include_self=True): """Flattens `tf.Module` instances (excluding `Metrics`). - Arguments: + Args: recursive: Whether to recursively flatten through submodules. include_self: Whether to include this `Layer` instance. diff --git a/tensorflow/python/keras/engine/base_layer_utils.py b/tensorflow/python/keras/engine/base_layer_utils.py index 98aee7568ca..525d6c7e92e 100644 --- a/tensorflow/python/keras/engine/base_layer_utils.py +++ b/tensorflow/python/keras/engine/base_layer_utils.py @@ -76,7 +76,7 @@ def make_variable(name, TODO(fchollet): remove this method when no longer needed. - Arguments: + Args: name: Variable name. shape: Variable shape. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. @@ -145,7 +145,7 @@ def make_variable(name, def collect_previous_mask(input_tensors): """Retrieves the output mask(s) of the previous node. - Arguments: + Args: input_tensors: An arbitrary structure of Tensors. Returns: @@ -177,7 +177,7 @@ def create_keras_history(tensors): Any Tensors not originating from a Keras `Input` Layer will be treated as constants when constructing `TensorFlowOpLayer` instances. - Arguments: + Args: tensors: A structure of Tensors, some of which come from raw TensorFlow operations and need to have Keras metadata assigned to them. @@ -205,7 +205,7 @@ _UNSAFE_GRAPH_OP_LAYER_CREATION = False def _create_keras_history_helper(tensors, processed_ops, created_layers): """Helper method for `create_keras_history`. - Arguments: + Args: tensors: A structure of Tensors for which to create Keras metadata. processed_ops: Set. TensorFlow operations that have already been wrapped in `TensorFlowOpLayer` instances. @@ -312,7 +312,7 @@ def needs_keras_history(tensors, ignore_call_context=False): if one or more of `tensors` originates from a `keras.Input` and does not have `_keras_history` set. - Arguments: + Args: tensors: An arbitrary nested structure of Tensors. ignore_call_context: Whether to ignore the check of if currently outside of a `call` context. This is `True` when creating @@ -370,7 +370,7 @@ def uses_keras_history(tensors): already been checked to not originate from a `keras.Input` are marked as `_keras_history_checked`. - Arguments: + Args: tensors: An arbitrary nested structure of Tensors. Returns: @@ -412,7 +412,7 @@ def mark_checked(tensors): This prevents Layers from attempting to create TensorFlowOpLayers for these Tensors. - Arguments: + Args: tensors: An arbitrary structure of Tensors. """ @@ -469,7 +469,7 @@ class CallContext(object): def enter(self, layer, inputs, build_graph, training, saving=None): """Push a Layer and its inputs and state onto the current call context. - Arguments: + Args: layer: The `Layer` whose `call` is currently active. inputs: The inputs to the currently active `Layer`. build_graph: Whether currently inside a Graph or FuncGraph. 
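The zero-argument-callable form of `add_loss` shown above defers loss construction until the variables it references exist. A small sketch of that usage (the 0.01 penalty factor is arbitrary):

```python
import tensorflow as tf

inputs = tf.keras.Input((8,))
dense = tf.keras.layers.Dense(4)
outputs = dense(inputs)
model = tf.keras.Model(inputs, outputs)

# The lambda is evaluated lazily, so it can reference dense.kernel even
# though no loss tensor exists at add_loss() time.
model.add_loss(lambda: 0.01 * tf.reduce_sum(tf.square(dense.kernel)))
print(len(model.losses))  # 1
```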
@@ -584,7 +584,7 @@ def check_graph_consistency(tensor=None, method='add_loss', force_raise=False): the underlying tensor gets created in a FuncGraph managed by control_flow_v2. We need to raise clear error messages in such cases. - Arguments: + Args: tensor: Tensor to check, or `False` if it is known that an error should be raised. method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}. diff --git a/tensorflow/python/keras/engine/base_layer_v1.py b/tensorflow/python/keras/engine/base_layer_v1.py index 217d04a9651..39cdfbb8f7f 100644 --- a/tensorflow/python/keras/engine/base_layer_v1.py +++ b/tensorflow/python/keras/engine/base_layer_v1.py @@ -96,7 +96,7 @@ class Layer(base_layer.Layer): once. Should actually perform the logic of applying the layer to the input tensors (which should be passed in as the first argument). - Arguments: + Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights (default of @@ -274,7 +274,7 @@ class Layer(base_layer.Layer): This is typically used to create the weights of `Layer` subclasses. - Arguments: + Args: input_shape: Instance of `TensorShape`, or list of instances of `TensorShape` if the layer expects a list of inputs (one instance per input). @@ -287,7 +287,7 @@ class Layer(base_layer.Layer): def call(self, inputs, **kwargs): # pylint: disable=unused-argument """This is where the layer's logic lives. - Arguments: + Args: inputs: Input tensor, or list/tuple of input tensors. **kwargs: Additional keyword arguments. @@ -300,7 +300,7 @@ class Layer(base_layer.Layer): def _add_trackable(self, trackable_object, trainable): """Adds a Trackable object to this layer's state. - Arguments: + Args: trackable_object: The tf.tracking.Trackable object to add. trainable: Boolean, whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or @@ -332,7 +332,7 @@ class Layer(base_layer.Layer): **kwargs): """Adds a new variable to the layer. - Arguments: + Args: name: Variable name. shape: Variable shape. Defaults to scalar if unspecified. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. @@ -524,7 +524,7 @@ class Layer(base_layer.Layer): dictionary. It does not handle layer connectivity (handled by Network), nor weights (handled by `set_weights`). - Arguments: + Args: config: A Python dictionary, typically the output of get_config. @@ -540,7 +540,7 @@ class Layer(base_layer.Layer): layer. This assumes that the layer will later be used with inputs that match the input shape provided here. - Arguments: + Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one per output tensor of the layer). Shape tuples can include None for free dimensions, @@ -619,7 +619,7 @@ class Layer(base_layer.Layer): def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument """Computes an output mask tensor. - Arguments: + Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. @@ -640,7 +640,7 @@ class Layer(base_layer.Layer): def __call__(self, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. - Arguments: + Args: *args: Positional arguments to be passed to `self.call`. **kwargs: Keyword arguments to be passed to `self.call`. @@ -1015,7 +1015,7 @@ class Layer(base_layer.Layer): The `get_losses_for` method allows to retrieve the losses relevant to a specific set of inputs. 
- Arguments: + Args: losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses may also be zero-argument callables which create a loss tensor. inputs: Ignored when executing eagerly. If anything other than None is @@ -1168,7 +1168,7 @@ class Layer(base_layer.Layer): updates are run on the fly and thus do not need to be tracked for later execution). - Arguments: + Args: updates: Update op, or list/tuple of update ops, or zero-arg callable that returns an update op. A zero-arg callable should be passed in order to disable running the updates by setting `trainable=False` @@ -1200,7 +1200,7 @@ class Layer(base_layer.Layer): def process_update(x): """Standardize update ops. - Arguments: + Args: x: Tensor, op, or callable. Returns: @@ -1256,7 +1256,7 @@ class Layer(base_layer.Layer): [1.], [1.]], dtype=float32), array([0.], dtype=float32)] - Arguments: + Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights @@ -1350,7 +1350,7 @@ class Layer(base_layer.Layer): def get_updates_for(self, inputs): """Retrieves updates relevant to a specific set of inputs. - Arguments: + Args: inputs: Input tensor or list/tuple of input tensors. Returns: @@ -1369,7 +1369,7 @@ class Layer(base_layer.Layer): def get_losses_for(self, inputs): """Retrieves losses relevant to a specific set of inputs. - Arguments: + Args: inputs: Input tensor or list/tuple of input tensors. Returns: @@ -1388,7 +1388,7 @@ class Layer(base_layer.Layer): def get_input_mask_at(self, node_index): """Retrieves the input mask tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -1407,7 +1407,7 @@ class Layer(base_layer.Layer): def get_output_mask_at(self, node_index): """Retrieves the output mask tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -1468,7 +1468,7 @@ class Layer(base_layer.Layer): def get_input_shape_at(self, node_index): """Retrieves the input shape(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -1487,7 +1487,7 @@ class Layer(base_layer.Layer): def get_output_shape_at(self, node_index): """Retrieves the output shape(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -1506,7 +1506,7 @@ class Layer(base_layer.Layer): def get_input_at(self, node_index): """Retrieves the input tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -1524,7 +1524,7 @@ class Layer(base_layer.Layer): def get_output_at(self, node_index): """Retrieves the output tensor(s) of a layer at a given node. - Arguments: + Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the @@ -1683,7 +1683,7 @@ class Layer(base_layer.Layer): This is an alias of `self.__call__`. - Arguments: + Args: inputs: Input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. 
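`get_weights`/`set_weights` round-trip through NumPy arrays whose shapes must match the layer's existing variables, as the docstring example quoted above shows. A compact version of that example:

```python
import tensorflow as tf

layer = tf.keras.layers.Dense(1, kernel_initializer='zeros')
layer.build((None, 3))
kernel, bias = layer.get_weights()       # a list of NumPy arrays
layer.set_weights([kernel + 1.0, bias])  # shapes must match the variables
print(layer.get_weights()[0].ravel())    # [1. 1. 1.]
```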
@@ -2029,7 +2029,7 @@ class Layer(base_layer.Layer): - get_input_at etc... - Arguments: + Args: node_index: Integer index of the node from which to retrieve the attribute. attr: Exact node attribute name. diff --git a/tensorflow/python/keras/engine/base_preprocessing_layer.py b/tensorflow/python/keras/engine/base_preprocessing_layer.py index 09fca11bd59..5b3927c1351 100644 --- a/tensorflow/python/keras/engine/base_preprocessing_layer.py +++ b/tensorflow/python/keras/engine/base_preprocessing_layer.py @@ -55,7 +55,7 @@ class PreprocessingLayer(Layer): # TODO(momernick): Add examples. """Fits the state of the preprocessing layer to the data being passed. - Arguments: + Args: data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array. reset_state: Optional argument specifying whether to clear the state of @@ -137,7 +137,7 @@ class CombinerPreprocessingLayer(PreprocessingLayer): def adapt(self, data, reset_state=True): """Fits the state of the preprocessing layer to the data being passed. - Arguments: + Args: data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array. reset_state: Optional argument specifying whether to clear the state of diff --git a/tensorflow/python/keras/engine/compile_utils.py b/tensorflow/python/keras/engine/compile_utils.py index 259e6e4694a..e71dfc57be2 100644 --- a/tensorflow/python/keras/engine/compile_utils.py +++ b/tensorflow/python/keras/engine/compile_utils.py @@ -52,7 +52,7 @@ class Container(object): (2) Fill missing keys in a dict w/ `None`s. (3) Map a single item to all outputs. - Arguments: + Args: outputs: Model predictions. struct: Arbitrary nested structure (e.g. of labels, sample_weights, losses, or metrics). @@ -73,7 +73,7 @@ class Container(object): NOTE: This method should only be called for Metrics / Losses, not for y_true / sample_weight. - Arguments: + Args: outputs: Model predictions. objects: Arbitrary nested structure (e.g. of losses or metrics) @@ -168,7 +168,7 @@ class LossesContainer(Container): regularization_losses=None): """Computes the overall loss. - Arguments: + Args: y_true: An arbitrary structure of Tensors representing the ground truth. y_pred: An arbitrary structure of Tensors representing a Model's outputs. sample_weight: An arbitrary structure of Tensors representing the @@ -251,7 +251,7 @@ class LossesContainer(Container): Converts the user-supplied loss to a `Loss` object. Also allows `SUM_OVER_BATCH_SIZE` reduction to be used for this loss. - Arguments: + Args: loss: A string, function, or `Loss` object. Returns: @@ -437,7 +437,7 @@ class MetricsContainer(Container): def _get_metric_object(self, metric, y_t, y_p): """Converts user-supplied metric to a `Metric` object. - Arguments: + Args: metric: A string, function, or `Metric` object. y_t: Sample of label. y_p: Sample of output. @@ -534,7 +534,7 @@ def _create_pseudo_names(tensors, prefix): `[x, y]` becomes: `['output_1', 'output_2']` - Arguments: + Args: tensors: `Model`'s outputs or inputs. prefix: 'output_' for outputs, 'input_' for inputs. @@ -579,7 +579,7 @@ def map_to_output_names(y_pred, output_names, struct): This mapping preserves backwards compatibility for `compile` and `fit`. - Arguments: + Args: y_pred: Sample outputs of the Model, to determine if this convenience feature should be applied (`struct` is returned unmodified if `y_pred` isn't a flat list). 
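`adapt()` as documented above fits a preprocessing layer's state to the data before training. A minimal sketch using the `Normalization` layer, which lived under `experimental.preprocessing` in this era of the codebase:

```python
import numpy as np
import tensorflow as tf

norm = tf.keras.layers.experimental.preprocessing.Normalization()
data = np.array([[0.0], [2.0], [4.0]], dtype='float32')
norm.adapt(data)                   # learns mean/variance from the data
print(norm(data).numpy().ravel())  # standardized to mean 0, unit variance
```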
@@ -660,7 +660,7 @@ def apply_mask(y_p, sw, mask): def get_custom_object_name(obj): """Returns the name to use for a custom loss or metric callable. - Arguments: + Args: obj: Custom loss of metric callable Returns: diff --git a/tensorflow/python/keras/engine/data_adapter.py b/tensorflow/python/keras/engine/data_adapter.py index 968b3108a57..3e62f53c5cd 100644 --- a/tensorflow/python/keras/engine/data_adapter.py +++ b/tensorflow/python/keras/engine/data_adapter.py @@ -1267,7 +1267,7 @@ def _make_class_weight_map_fn(class_weight): The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where `y` must be a single `Tensor`. - Arguments: + Args: class_weight: A map where the keys are integer class ids and values are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}` @@ -1335,7 +1335,7 @@ def train_validation_split(arrays, validation_split): The last part of data will become validation data. - Arguments: + Args: arrays: Tensors to split. Allowed inputs are arbitrarily nested structures of Tensors and NumPy arrays. validation_split: Float between 0 and 1. The proportion of the dataset to @@ -1433,7 +1433,7 @@ def unpack_x_y_sample_weight(data): return {m.name: m.result() for m in self.metrics} ``` - Arguments: + Args: data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`. Returns: @@ -1473,7 +1473,7 @@ def pack_x_y_sample_weight(x, y=None, sample_weight=None): True >>> x, y = data - Arguments: + Args: x: Features to pass to `Model`. y: Ground-truth targets to pass to `Model`. sample_weight: Sample weight for each element. diff --git a/tensorflow/python/keras/engine/functional.py b/tensorflow/python/keras/engine/functional.py index f633afaae5b..fc199835b21 100644 --- a/tensorflow/python/keras/engine/functional.py +++ b/tensorflow/python/keras/engine/functional.py @@ -90,7 +90,7 @@ class Functional(training_lib.Model): model = keras.Model(inputs, outputs) ``` - Arguments: + Args: inputs: List of input tensors (must be created via `tf.keras.Input()`). outputs: List of outputs tensors. name: String, optional. Name of the model. @@ -412,7 +412,7 @@ class Functional(training_lib.Model): all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). - Arguments: + Args: inputs: A tensor or list of tensors. training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. @@ -521,7 +521,7 @@ class Functional(training_lib.Model): # Note: - Can be run on non-Keras tensors. - Arguments: + Args: inputs: Tensor or nested structure of Tensors. training: Boolean learning phase. mask: (Optional) Tensor or nested structure of Tensors. @@ -655,7 +655,7 @@ class Functional(training_lib.Model): def from_config(cls, config, custom_objects=None): """Instantiates a Model from its config (output of `get_config()`). - Arguments: + Args: config: Model config dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be @@ -738,7 +738,7 @@ class Functional(training_lib.Model): They will not be added to the Network's outputs. - Arguments: + Args: layers: Arbitrary nested structure of Layers. Layers must be reachable from one or more of the `keras.Input` Tensors that correspond to this Network's inputs. @@ -886,7 +886,7 @@ def _make_node_key(layer_name, node_index): def _map_graph_network(inputs, outputs): """Validates a network's topology and gather its layers and nodes. - Arguments: + Args: inputs: List of input tensors. outputs: List of outputs tensors. 
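`pack_x_y_sample_weight` and `unpack_x_y_sample_weight` above define the `(x, y, sample_weight)` convention used throughout `fit()` and custom `train_step` overrides. A short sketch against the internal module path (internal utilities like this can move between releases):

```python
import numpy as np
from tensorflow.python.keras.engine import data_adapter

x = np.ones((2, 3))
y = np.zeros((2,))
data = data_adapter.pack_x_y_sample_weight(x, y)  # -> (x, y)
x2, y2, sw = data_adapter.unpack_x_y_sample_weight(data)
print(sw is None)  # True: no sample_weight was packed
```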
@@ -1187,7 +1187,7 @@ def reconstruct_from_config(config, custom_objects=None, created_layers=None): def process_node(layer, node_data): """Deserialize a node. - Arguments: + Args: layer: layer instance. node_data: Nested structure of `ListWrapper`. @@ -1243,7 +1243,7 @@ def reconstruct_from_config(config, custom_objects=None, created_layers=None): def process_layer(layer_data): """Deserializes a layer, then calls it on appropriate inputs. - Arguments: + Args: layer_data: layer config dict. Raises: @@ -1405,7 +1405,7 @@ class ModuleWrapper(base_layer.Layer): def __init__(self, module, method_name=None, **kwargs): """Initializes the wrapper Layer for this module. - Arguments: + Args: module: The `tf.Module` instance to be wrapped. method_name: (Optional) str. The name of the method to use as the forward pass of the module. If not set, defaults to '__call__' if defined, or diff --git a/tensorflow/python/keras/engine/input_layer.py b/tensorflow/python/keras/engine/input_layer.py index f92709a1128..75e0cc879f3 100644 --- a/tensorflow/python/keras/engine/input_layer.py +++ b/tensorflow/python/keras/engine/input_layer.py @@ -69,7 +69,7 @@ class InputLayer(base_layer.Layer): np.ones((10, 8))) ``` - Arguments: + Args: input_shape: Shape tuple (not including the batch axis), or `TensorShape` instance (not including the batch axis). batch_size: Optional input batch size (integer or None). @@ -224,7 +224,7 @@ def Input( # pylint: disable=invalid-name it becomes possible to do: `model = Model(input=[a, b], output=c)` - Arguments: + Args: shape: A shape tuple (integers), not including the batch size. For instance, `shape=(32,)` indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple diff --git a/tensorflow/python/keras/engine/input_spec.py b/tensorflow/python/keras/engine/input_spec.py index 52a2829ffdb..30a0c33660f 100644 --- a/tensorflow/python/keras/engine/input_spec.py +++ b/tensorflow/python/keras/engine/input_spec.py @@ -43,7 +43,7 @@ class InputSpec(object): A None entry in a shape is compatible with any dimension, a None shape is compatible with any shape. - Arguments: + Args: dtype: Expected DataType of the input. shape: Shape tuple, expected shape of the input (may include None for unchecked axes). Includes the batch size. @@ -162,7 +162,7 @@ def assert_input_compatibility(input_spec, inputs, layer_name): This checks that the tensor(s) `inputs` verify the input assumptions of a layer (if any). If not, a clear and actionable exception gets raised. - Arguments: + Args: input_spec: An InputSpec instance, list of InputSpec instances, a nested structure of InputSpec instances, or None. inputs: Input tensor, list of input tensors, or a nested structure of diff --git a/tensorflow/python/keras/engine/node.py b/tensorflow/python/keras/engine/node.py index 2a35477eea2..3087ecc782a 100644 --- a/tensorflow/python/keras/engine/node.py +++ b/tensorflow/python/keras/engine/node.py @@ -43,7 +43,7 @@ class Node(object): Each time the output of a layer is used by another layer, a node is added to `layer._outbound_nodes`. - Arguments: + Args: layer: The Layer for the Layer.__call__ this node represents. call_args: The positional arguments the Layer was called with. call_kwargs: The keyword arguments the Layer was called with.
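Several hunks above touch `InputSpec` and its compatibility check; a short usage sketch via the public `tf.keras.layers.InputSpec` alias, with a hypothetical layer name:

```python
import tensorflow as tf

class ProjectTo8(tf.keras.layers.Layer):
  """Toy layer that declares its input assumptions via InputSpec."""

  def __init__(self, **kwargs):
    super(ProjectTo8, self).__init__(**kwargs)
    # Rank-2 inputs whose last axis must be 8; None axes stay unchecked.
    self.input_spec = tf.keras.layers.InputSpec(ndim=2, axes={-1: 8})

  def call(self, inputs):
    return inputs

layer = ProjectTo8()
layer(tf.zeros((4, 8)))    # passes the compatibility check
# layer(tf.zeros((4, 3)))  # would raise a clear, actionable ValueError
```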
diff --git a/tensorflow/python/keras/engine/sequential.py b/tensorflow/python/keras/engine/sequential.py index 474dce8307b..293ad9ca9ed 100644 --- a/tensorflow/python/keras/engine/sequential.py +++ b/tensorflow/python/keras/engine/sequential.py @@ -160,7 +160,7 @@ class Sequential(functional.Functional): def add(self, layer): """Adds a layer instance on top of the layer stack. - Arguments: + Args: layer: layer instance. Raises: @@ -422,7 +422,7 @@ class Sequential(functional.Functional): The input samples are processed batch by batch. - Arguments: + Args: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. @@ -447,7 +447,7 @@ class Sequential(functional.Functional): The input samples are processed batch by batch. - Arguments: + Args: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. diff --git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py index 91c1182eb5c..d2c931d78aa 100644 --- a/tensorflow/python/keras/engine/training.py +++ b/tensorflow/python/keras/engine/training.py @@ -142,7 +142,7 @@ def is_functional_model_init_params(args, kwargs): class Model(base_layer.Layer, version_utils.ModelVersionSelector): """`Model` groups layers into an object with training and inference features. - Arguments: + Args: inputs: The input(s) of the model: a `keras.Input` object or list of `keras.Input` objects. outputs: The output(s) of the model. See Functional API example below. @@ -467,7 +467,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): To call a model on an input, always use the `__call__` method, i.e. `model(inputs)`, which relies on the underlying `call` method. - Arguments: + Args: inputs: A tensor or list of tensors. training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. @@ -492,7 +492,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): **kwargs): """Configures the model for training. - Arguments: + Args: optimizer: String (name of optimizer) or optimizer instance. See `tf.keras.optimizers`. loss: String (name of objective function), objective function or @@ -770,7 +770,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): `tf.distribute.Strategy` settings), should be left to `Model.make_train_function`, which can also be overridden. - Arguments: + Args: data: A nested structure of `Tensor`s. Returns: @@ -876,7 +876,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): use_multiprocessing=False): """Trains the model for a fixed number of epochs (iterations on a dataset). - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -1204,7 +1204,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): `tf.distribute.Strategy` settings), should be left to `Model.make_test_function`, which can also be overridden. - Arguments: + Args: data: A nested structure of `Tensor`s. Returns: @@ -1298,7 +1298,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): Computation is done in batches (see the `batch_size` arg.) - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). 
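A compact sketch of the `Sequential.add` / `compile` / `fit` flow whose docstrings are retitled above (illustrative only; all argument values are arbitrary):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)))  # layer instance
model.add(tf.keras.layers.Dense(1))

# optimizer, loss, and metrics accept string names or instances.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

x = np.random.rand(32, 4).astype('float32')  # a Numpy array; a tf.data.Dataset also works
y = np.random.rand(32, 1).astype('float32')
model.fit(x, y, batch_size=8, epochs=1, verbose=0)
```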
@@ -1457,7 +1457,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): `tf.distribute.Strategy` settings), should be left to `Model.make_predict_function`, which can also be overridden. - Arguments: + Args: data: A nested structure of `Tensor`s. Returns: @@ -1553,7 +1553,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): inference. Also, note the fact that test loss is not affected by regularization layers like noise and dropout. - Arguments: + Args: x: Input samples. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -1712,7 +1712,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): return_dict=False): """Runs a single gradient update on a single batch of data. - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -1780,7 +1780,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): return_dict=False): """Test the model on a single batch of samples. - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). @@ -1834,7 +1834,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): def predict_on_batch(self, x): """Returns predictions for a single batch of samples. - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). @@ -2011,7 +2011,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): [Serialization and Saving guide](https://keras.io/guides/serialization_and_saving/) for details. - Arguments: + Args: filepath: String, PathLike, path to SavedModel or H5 file to save the model. overwrite: Whether to silently overwrite any existing file at the @@ -2097,7 +2097,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): checkpoints](https://www.tensorflow.org/guide/checkpoint) for details on the TensorFlow format. - Arguments: + Args: filepath: String or PathLike, path to the file to save the weights to. When saving in TensorFlow format, this is the prefix used for checkpoint files (multiple files are generated). Note that the '.h5' @@ -2202,7 +2202,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): TensorFlow format loads based on the object-local names of attributes to which layers are assigned in the `Model`'s constructor. - Arguments: + Args: filepath: String, path to the weights file to load. For weight files in TensorFlow format, this is the file prefix (the same as was passed to `save_weights`). This can also be a path to a SavedModel @@ -2308,7 +2308,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): To load a network from a JSON save file, use `keras.models.model_from_json(json_string, custom_objects={})`. - Arguments: + Args: **kwargs: Additional keyword arguments to be passed to `json.dumps()`. @@ -2329,7 +2329,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): the names of custom losses / layers / etc to the corresponding functions / classes. - Arguments: + Args: **kwargs: Additional keyword arguments to be passed to `yaml.dump()`. 
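The serialization entry points retitled above compose as follows; a sketch with a hypothetical `/tmp/toy_ckpt` checkpoint prefix:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])

# to_json() round trip: architecture only, no weights or optimizer state.
rebuilt = tf.keras.models.model_from_json(model.to_json())

# TensorFlow-format weights: the filepath acts as a prefix; several files are written.
model.save_weights('/tmp/toy_ckpt')
rebuilt.load_weights('/tmp/toy_ckpt')
```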
@@ -2398,7 +2398,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): def summary(self, line_length=None, positions=None, print_fn=None): """Prints a string summary of the network. - Arguments: + Args: line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). @@ -2434,7 +2434,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): If `name` and `index` are both provided, `index` will take precedence. Indices are based on order of horizontal graph traversal (bottom-up). - Arguments: + Args: name: String, name of layer. index: Integer, index of layer. @@ -2611,7 +2611,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): Refer to tensorflow/python/keras/distribute/worker_training_state.py for more information. - Arguments: + Args: initial_epoch: The original initial_epoch the user passes in `fit()`. Returns: @@ -2716,7 +2716,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector): def reduce_per_replica(values, strategy, reduction='first'): """Reduce PerReplica objects. - Arguments: + Args: values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are returned as-is. strategy: `tf.distribute.Strategy` object. @@ -2882,4 +2882,3 @@ def _is_readable_tf_checkpoint(filepath): except errors_impl.DataLossError: # The checkpoint is not readable in TensorFlow format. return False - diff --git a/tensorflow/python/keras/engine/training_arrays_v1.py b/tensorflow/python/keras/engine/training_arrays_v1.py index b08d6dc59bc..7f20baa70fe 100644 --- a/tensorflow/python/keras/engine/training_arrays_v1.py +++ b/tensorflow/python/keras/engine/training_arrays_v1.py @@ -66,7 +66,7 @@ def model_iteration(model, **kwargs): """Loop function for arrays of data with modes TRAIN/TEST/PREDICT. - Arguments: + Args: model: Keras Model instance. inputs: Either a list or dictionary of arrays, or a dataset instance. targets: List/dictionary of target arrays. @@ -486,7 +486,7 @@ def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch): def _prepare_feed_values(model, inputs, targets, sample_weights, mode): """Prepare feed values to the model execution function. - Arguments: + Args: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. diff --git a/tensorflow/python/keras/engine/training_distributed_v1.py b/tensorflow/python/keras/engine/training_distributed_v1.py index 4a40fdc2015..005cdfd8dae 100644 --- a/tensorflow/python/keras/engine/training_distributed_v1.py +++ b/tensorflow/python/keras/engine/training_distributed_v1.py @@ -61,7 +61,7 @@ def _build_model(strategy, model, mode, inputs, targets=None): def _make_train_step_fn(model, mode, strategy, output_labels): """Create step fn. - Arguments: + Args: model: a Keras Model instance. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. strategy: a `tf.distribute.Strategy` instance. @@ -133,7 +133,7 @@ def experimental_tpu_fit_loop(model, validation_freq=1): """Fit loop for training with TPU tf.distribute.Strategy. - Arguments: + Args: model: Keras Model instance. dataset: Dataset that returns inputs and targets epochs: Number of times to iterate over the data @@ -298,7 +298,7 @@ def experimental_tpu_test_loop(model, callbacks=None): """Test loop for evaluating with TPU tf.distribute.Strategy. - Arguments: + Args: model: Keras Model instance. dataset: Dataset for input data. verbose: Integer, Verbosity mode 0 or 1.
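For the `summary` and `get_layer` hunks above, a brief usage sketch (layer names are arbitrary):

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, input_shape=(8,), name='hidden'),
    tf.keras.layers.Dense(1, name='head'),
])

model.summary(line_length=60)            # line_length tunes the printed width
hidden = model.get_layer(name='hidden')  # if name and index were both given, index wins
```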
@@ -429,7 +429,7 @@ def experimental_tpu_predict_loop(model, callbacks=None): """Predict loop for predicting with TPU tf.distribute.Strategy. - Arguments: + Args: model: Keras Model instance. dataset: Dataset for input data. verbose: Integer, Verbosity mode 0 or 1. diff --git a/tensorflow/python/keras/engine/training_eager_v1.py b/tensorflow/python/keras/engine/training_eager_v1.py index a52b20c5aa0..b0bc81b10a2 100644 --- a/tensorflow/python/keras/engine/training_eager_v1.py +++ b/tensorflow/python/keras/engine/training_eager_v1.py @@ -42,7 +42,7 @@ def _eager_loss_fn(outputs, targets, loss_fn, output_name): def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None): """Calculates the metrics for each output of the given model. - Arguments: + Args: model: The model on which metrics are being calculated. outputs: The outputs of the given model. targets: The predictions or targets of the given model. @@ -90,7 +90,7 @@ def _model_loss(model, training=False): """Calculates the loss for a given model. - Arguments: + Args: model: The model on which metrics are being calculated. inputs: Either a dictionary of inputs to the model or a list of input arrays. @@ -231,7 +231,7 @@ def _process_single_batch(model, The model weights are updated if training is set to True. - Arguments: + Args: model: Model whose loss has to be calculated. inputs: List of input arrays. targets: List of target arrays. @@ -291,7 +291,7 @@ def train_on_batch(model, output_loss_metrics=None): """Calculates the loss and gradient updates for one input batch. - Arguments: + Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. @@ -332,7 +332,7 @@ def test_on_batch(model, output_loss_metrics=None): """Calculates the loss for one input batch. - Arguments: + Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. diff --git a/tensorflow/python/keras/engine/training_generator_v1.py b/tensorflow/python/keras/engine/training_generator_v1.py index e6ae352c416..dcfbf6438f3 100644 --- a/tensorflow/python/keras/engine/training_generator_v1.py +++ b/tensorflow/python/keras/engine/training_generator_v1.py @@ -60,7 +60,7 @@ def model_iteration(model, **kwargs): """Loop function for arrays of data with modes TRAIN/TEST/PREDICT. - Arguments: + Args: model: Keras Model instance. data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or `(x, y)` or `(x, y, sample_weights)`) or a generator or @@ -370,7 +370,7 @@ def _validate_arguments(is_sequence, is_dataset, use_multiprocessing, workers, mode, kwargs): """Raises errors if arguments are invalid. - Arguments: + Args: is_sequence: Boolean, whether data is a `keras.utils.data_utils.Sequence` instance. is_dataset: Boolean, whether data is a dataset instance. @@ -429,7 +429,7 @@ def convert_to_generator_like(data, shuffle=False): """Make a generator out of NumPy or EagerTensor inputs. - Arguments: + Args: data: Either a generator or `keras.utils.data_utils.Sequence` object or `Dataset`, `Iterator`, or a {1,2,3}-tuple of NumPy arrays or EagerTensors. 
If a tuple, the elements represent `(x, y, sample_weights)` and may be diff --git a/tensorflow/python/keras/engine/training_utils.py b/tensorflow/python/keras/engine/training_utils.py index d75b6a125bf..a5653452ce2 100644 --- a/tensorflow/python/keras/engine/training_utils.py +++ b/tensorflow/python/keras/engine/training_utils.py @@ -35,7 +35,7 @@ def slice_arrays(arrays, indices, contiguous=True): and we have to implement this workaround based on `concat`. This has a performance cost. - Arguments: + Args: arrays: Single array or list of arrays. indices: List of indices in the array that should be included in the output batch. @@ -206,7 +206,7 @@ def get_input_shape_and_dtype(layer): def get_static_batch_size(layer): """Gets the static batch size of a Layer. - Arguments: + Args: layer: a `Layer` instance. Returns: diff --git a/tensorflow/python/keras/engine/training_utils_v1.py b/tensorflow/python/keras/engine/training_utils_v1.py index c2e1b5e652f..3aade19543b 100644 --- a/tensorflow/python/keras/engine/training_utils_v1.py +++ b/tensorflow/python/keras/engine/training_utils_v1.py @@ -96,7 +96,7 @@ class Aggregator(object): def create(self, batch_outs): """Creates the initial results from the first batch outputs. - Arguments: + Args: batch_outs: A list of batch-level outputs. """ raise NotImplementedError('Must be implemented in subclasses.') @@ -105,7 +105,7 @@ class Aggregator(object): def aggregate(self, batch_outs, batch_start=None, batch_end=None): """Aggregates batch-level results into total results. - Arguments: + Args: batch_outs: A list of batch-level outputs. batch_start: The start index of this batch. Always `None` if `use_steps` is `True`. @@ -227,7 +227,7 @@ def _append_composite_tensor(target, to_append): working with CompositeTensor Value objects that have no connection with the CompositeTensors that created them. - Arguments: + Args: target: CompositeTensor or CompositeTensor value object that will be appended to. to_append: CompositeTensor or CompositeTensor value object to append to. @@ -487,7 +487,7 @@ def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'): The number of samples is not defined when running with `steps`, in which case the number of samples is set to `None`. - Arguments: + Args: ins: List of tensors to be fed to the Keras function. batch_size: Integer batch size or `None` if not defined. steps: Total number of steps (batches of samples) before declaring @@ -559,7 +559,7 @@ def standardize_input_data(data, arrays (same order as `names`), while checking that the provided arrays have shapes that match the network's expectations. - Arguments: + Args: data: User-provided input data (polymorphic). names: List of expected array names. shapes: Optional list of expected array shapes. @@ -676,7 +676,7 @@ def standardize_input_data(data, def standardize_sample_or_class_weights(x_weight, output_names, weight_type): """Maps `sample_weight` or `class_weight` to model outputs. - Arguments: + Args: x_weight: User-provided `sample_weight` or `class_weight` argument. output_names: List of output names (strings) in the model. weight_type: A string used purely for exception printing. @@ -732,7 +732,7 @@ def standardize_sample_weights(sample_weight, output_names): def check_array_lengths(inputs, targets, weights=None): """Does user input validation for numpy arrays. - Arguments: + Args: inputs: list of Numpy arrays of inputs. targets: list of Numpy arrays of targets. weights: list of Numpy arrays of sample weights. 
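A NumPy-only sketch of the contiguous-versus-gather slicing described for `slice_arrays` above (assumed behavior, not the TensorFlow implementation, which also handles tensors and `None` entries):

```python
import numpy as np

def slice_arrays(arrays, indices, contiguous=True):
  """Slices a single array or a list of arrays by the given batch indices."""
  single = not isinstance(arrays, list)
  arrays = [arrays] if single else arrays
  if contiguous:
    # Basic slicing from the first to the last index: cheap, no copy in NumPy.
    out = [a[indices[0]:indices[-1] + 1] for a in arrays]
  else:
    # Arbitrary indices require a gather (a copy), the performance cost
    # mentioned in the docstring.
    idx = np.asarray(indices)
    out = [a[idx] for a in arrays]
  return out[0] if single else out
```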
@@ -789,7 +789,7 @@ def check_loss_and_target_compatibility(targets, loss_fns, output_shapes): This helps prevent users from using loss functions incorrectly. This check is purely for UX purposes. - Arguments: + Args: targets: list of Numpy arrays of targets. loss_fns: list of loss functions. output_shapes: list of shapes of model outputs. @@ -849,7 +849,7 @@ def collect_per_output_metric_info(metrics, is_weighted=False): """Maps metric names and functions to model outputs. - Arguments: + Args: metrics: a list or a list of lists or a dict of metric functions. output_names: a list of the names (strings) of model outputs. output_shapes: a list of the shapes (strings) of model outputs. @@ -927,7 +927,7 @@ def batch_shuffle(index_array, batch_size): Useful for shuffling HDF5 arrays (where one cannot access arbitrary indices). - Arguments: + Args: index_array: array of indices to be shuffled. batch_size: integer. @@ -955,7 +955,7 @@ def standardize_weights(y, weight array. If both `sample_weight` and `class_weight` are provided, the weights are multiplied. - Arguments: + Args: y: Numpy array or Tensor of model targets to be weighted. sample_weight: User-provided `sample_weight` argument. class_weight: User-provided `class_weight` argument. @@ -1099,7 +1099,7 @@ def has_tensors(ls): def get_metric_name(metric, weighted=False): """Returns the name corresponding to the given metric input. - Arguments: + Args: metric: Metric function name or reference. weighted: Boolean indicating if the given metric is weighted. @@ -1134,7 +1134,7 @@ def get_metric_name(metric, weighted=False): def get_metric_function(metric, output_shape=None, loss_fn=None): """Returns the metric function corresponding to the given metric input. - Arguments: + Args: metric: Metric function name or reference. output_shape: The shape of the output that this metric will be calculated for. @@ -1232,7 +1232,7 @@ def get_loss_function(loss): def validate_dataset_input(x, y, sample_weight, validation_split=None): """Validates user input arguments when a dataset iterator is passed. - Arguments: + Args: x: Input data. A `tf.data` dataset or iterator. y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s). Expected to be `None` when `x` is a dataset iterator. @@ -1310,7 +1310,7 @@ def check_steps_argument(input_data, steps, steps_name): required and is `None`. 3. input data passed is a symbolic tensor. - Arguments: + Args: input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or tf.data.Dataset iterator or `None`. steps: Integer or `None`. Total number of steps (batches of samples) to @@ -1458,7 +1458,7 @@ def prepare_sample_weight_modes(training_endpoints, sample_weight_mode): def prepare_loss_functions(loss, output_names): """Converts loss to a list of loss functions. - Arguments: + Args: loss: String (name of objective function), objective function or `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple outputs, you can use a different loss on each output by passing a @@ -1502,7 +1502,7 @@ def prepare_loss_weights(training_endpoints, loss_weights=None): The result loss weights will be populated on the training endpoint. - Arguments: + Args: training_endpoints: List of model training endpoints. 
loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model @@ -1609,7 +1609,7 @@ def initialize_iterator(iterator): def extract_tensors_from_dataset(dataset): """Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. - Arguments: + Args: dataset: Dataset instance. Returns: @@ -1623,7 +1623,7 @@ def extract_tensors_from_dataset(dataset): def unpack_iterator_input(iterator): """Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. - Arguments: + Args: iterator: Instance of a dataset iterator. Returns: @@ -1661,7 +1661,7 @@ def infer_steps_for_dataset(model, steps_name='steps'): """Infers steps_per_epoch needed to loop through a dataset. - Arguments: + Args: model: Keras model instance. dataset: Input data of type tf.data.Dataset. steps: Number of steps to draw from the dataset (may be None if unknown). @@ -1805,7 +1805,7 @@ def generic_output_names(outputs_list): def should_run_validation(validation_freq, epoch): """Checks if validation should be run this epoch. - Arguments: + Args: validation_freq: Integer or list. If an integer, specifies how many training epochs to run before a new validation run is performed. If a list, specifies the epochs on which to run validation. diff --git a/tensorflow/python/keras/engine/training_v1.py b/tensorflow/python/keras/engine/training_v1.py index 576e8c8469c..ab5859d8c37 100644 --- a/tensorflow/python/keras/engine/training_v1.py +++ b/tensorflow/python/keras/engine/training_v1.py @@ -202,7 +202,7 @@ class Model(training_lib.Model): TensorFlow format loads based on the object-local names of attributes to which layers are assigned in the `Model`'s constructor. - Arguments: + Args: filepath: String, path to the weights file to load. For weight files in TensorFlow format, this is the file prefix (the same as was passed to `save_weights`). @@ -248,7 +248,7 @@ class Model(training_lib.Model): **kwargs): """Configures the model for training. - Arguments: + Args: optimizer: String (name of optimizer) or optimizer instance. See `tf.keras.optimizers`. loss: String (name of objective function), objective function or @@ -637,7 +637,7 @@ class Model(training_lib.Model): **kwargs): """Trains the model for a fixed number of epochs (iterations on a dataset). - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -830,7 +830,7 @@ class Model(training_lib.Model): Computation is done in batches (see the `batch_size` arg.) - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -934,7 +934,7 @@ class Model(training_lib.Model): Computation is done in batches (see the `batch_size` arg.) - Arguments: + Args: x: Input samples. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -1016,7 +1016,7 @@ class Model(training_lib.Model): reset_metrics=True): """Runs a single gradient update on a single batch of data. - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -1105,7 +1105,7 @@ class Model(training_lib.Model): def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True): """Test the model on a single batch of samples. - Arguments: + Args: x: Input data. 
It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -1181,7 +1181,7 @@ class Model(training_lib.Model): def predict_on_batch(self, x): """Returns predictions for a single batch of samples. - Arguments: + Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). @@ -1570,7 +1570,7 @@ class Model(training_lib.Model): def _prepare_total_loss(self, masks): """Computes total loss from loss functions. - Arguments: + Args: masks: List of mask values corresponding to each model output. Returns: @@ -1696,7 +1696,7 @@ class Model(training_lib.Model): raised if `x` is a tf.data.Dataset and `batch_size` is specified as we expect users to provide batched datasets. - Arguments: + Args: batch_size: The batch_size provided as an argument to fit/evaluate/predict. steps: The steps provided as an argument to fit/evaluate/predict. @@ -1815,7 +1815,7 @@ class Model(training_lib.Model): If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. - Arguments: + Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. output_index: The index of the model output for which the metric name is @@ -1843,7 +1843,7 @@ class Model(training_lib.Model): def _set_per_output_metric_attributes(self, metrics_dict, output_index): """Sets the metric attributes on the model for the given output. - Arguments: + Args: metrics_dict: A dict with metric names as keys and metric fns as values. output_index: The index of the model output for which the metric attributes are added. @@ -1899,7 +1899,7 @@ class Model(training_lib.Model): weights=None): """Calls metric functions for a single output. - Arguments: + Args: metrics_dict: A dict with metric names as keys and metric fns as values. y_true: Target output. y_pred: Predicted output. @@ -1927,7 +1927,7 @@ class Model(training_lib.Model): return_weighted_and_unweighted_metrics=False): """Handles calling metric functions. - Arguments: + Args: outputs: List of outputs (predictions). targets: List of targets. skip_target_masks: Optional. List of booleans for whether the corresponding @@ -2757,7 +2757,7 @@ class Model(training_lib.Model): Refer to tensorflow/python/keras/distribute/worker_training_state.py for more information. - Arguments: + Args: initial_epoch: The original initial_epoch the user passes in `fit()`. mode: The mode for running `model.fit()`. @@ -3111,7 +3111,7 @@ class _TrainingEndpoint(object): class _TrainingTarget(object): """Container for a target tensor (y_true) and its metadata (shape, loss...). - Arguments: + Args: target: A target tensor for the model. It may be `None` if the output is excluded from loss computation. It is still kept as None since each output of the model should have a corresponding target. If @@ -3155,7 +3155,7 @@ def _convert_scipy_sparse_tensor(value, expected_input): not a scipy sparse tensor, or scipy is not imported, we pass it through unchanged. - Arguments: + Args: value: An object that may be a scipy sparse tensor expected_input: The expected input placeholder. @@ -3186,7 +3186,7 @@ def _get_metrics_from_layers(layers): This will not include the `compile` metrics of a model layer. - Arguments: + Args: layers: List of layers.
Returns: diff --git a/tensorflow/python/keras/initializers/initializers_v2.py b/tensorflow/python/keras/initializers/initializers_v2.py index 1eaf0af90f7..e01d2a6645c 100644 --- a/tensorflow/python/keras/initializers/initializers_v2.py +++ b/tensorflow/python/keras/initializers/initializers_v2.py @@ -736,7 +736,7 @@ class LecunNormal(VarianceScaling): >>> initializer = tf.keras.initializers.LecunNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) - Arguments: + Args: seed: A Python integer. Used to seed the random generator. References: @@ -780,7 +780,7 @@ class LecunUniform(VarianceScaling): >>> initializer = tf.keras.initializers.LecunUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) - Arguments: + Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. @@ -823,7 +823,7 @@ class HeNormal(VarianceScaling): >>> initializer = tf.keras.initializers.HeNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) - Arguments: + Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. @@ -863,7 +863,7 @@ class HeUniform(VarianceScaling): >>> initializer = tf.keras.initializers.HeUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) - Arguments: + Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. diff --git a/tensorflow/python/keras/layers/advanced_activations.py b/tensorflow/python/keras/layers/advanced_activations.py index 06e369dab21..f588dad1574 100644 --- a/tensorflow/python/keras/layers/advanced_activations.py +++ b/tensorflow/python/keras/layers/advanced_activations.py @@ -64,7 +64,7 @@ class LeakyReLU(Layer): Output shape: Same shape as the input. - Arguments: + Args: alpha: Float >= 0. Negative slope coefficient. Defaults to 0.3. """ @@ -108,7 +108,7 @@ class PReLU(Layer): Output shape: Same shape as the input. - Arguments: + Args: alpha_initializer: Initializer function for the weights. alpha_regularizer: Regularizer for the weights. alpha_constraint: Constraint for the weights. @@ -200,7 +200,7 @@ class ELU(Layer): Output shape: Same shape as the input. - Arguments: + Args: alpha: Scale for the negative factor. """ @@ -241,7 +241,7 @@ class ThresholdedReLU(Layer): Output shape: Same shape as the input. - Arguments: + Args: theta: Float >= 0. Threshold location of activation. """ @@ -303,7 +303,7 @@ class Softmax(Layer): Output shape: Same shape as the input. - Arguments: + Args: axis: Integer, or list of integers, axis along which the softmax normalization is applied. Call arguments: @@ -389,7 +389,7 @@ class ReLU(Layer): Output shape: Same shape as the input. - Arguments: + Args: max_value: Float >= 0. Maximum activation value. Defaults to None, which means unlimited. negative_slope: Float >= 0. Negative slope coefficient. Defaults to 0. diff --git a/tensorflow/python/keras/layers/convolutional.py b/tensorflow/python/keras/layers/convolutional.py index fc071ad51fa..731b51e2862 100644 --- a/tensorflow/python/keras/layers/convolutional.py +++ b/tensorflow/python/keras/layers/convolutional.py @@ -61,7 +61,7 @@ class Conv(Layer): Note: layer attributes cannot be modified after the layer has been called once (except the `trainable` attribute). - Arguments: + Args: rank: An integer, the rank of the convolution, e.g.
"2" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -409,7 +409,7 @@ class Conv1D(Conv): >>> print(y.shape) (4, 7, 8, 32) - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of a single integer, @@ -564,7 +564,7 @@ class Conv2D(Conv): (4, 7, 26, 26, 2) - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height @@ -708,7 +708,7 @@ class Conv3D(Conv): >>> print(y.shape) (4, 7, 26, 26, 26, 2) - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, @@ -835,7 +835,7 @@ class Conv1DTranspose(Conv1D): (tuple of integers or `None`, does not include the sample axis), e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer length of the 1D convolution window. @@ -1083,7 +1083,7 @@ class Conv2DTranspose(Conv2D): e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in `data_format="channels_last"`. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the @@ -1386,7 +1386,7 @@ class Conv3DTranspose(Conv3D): e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels if `data_format="channels_last"`. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the @@ -1688,7 +1688,7 @@ class SeparableConv(Conv): it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. - Arguments: + Args: rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -1897,7 +1897,7 @@ class SeparableConv1D(SeparableConv): it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial @@ -2081,7 +2081,7 @@ class SeparableConv2D(SeparableConv): a way to factorize a convolution kernel into two smaller kernels, or as an extreme version of an Inception block. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the @@ -2246,7 +2246,7 @@ class DepthwiseConv2D(Conv2D): The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step. - Arguments: + Args: kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. 
Can be a single integer to specify the same value for @@ -2480,7 +2480,7 @@ class UpSampling1D(Layer): [ 9 10 11] [ 9 10 11]]], shape=(2, 4, 3), dtype=int64) - Arguments: + Args: size: Integer. Upsampling factor. Input shape: @@ -2538,7 +2538,7 @@ class UpSampling2D(Layer): [[ 9 10 11] [ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64) - Arguments: + Args: size: Int, or tuple of 2 integers. The upsampling factors for rows and columns. data_format: A string, @@ -2629,7 +2629,7 @@ class UpSampling3D(Layer): >>> print(y.shape) (2, 2, 4, 2, 3) - Arguments: + Args: size: Int, or tuple of 3 integers. The upsampling factors for dim1, dim2 and dim3. data_format: A string, @@ -2724,7 +2724,7 @@ class ZeroPadding1D(Layer): [ 0 0 0] [ 0 0 0]]], shape=(2, 6, 3), dtype=int64) - Arguments: + Args: padding: Int, or tuple of int (length 2), or dictionary. - If int: How many zeros to add at the beginning and end of @@ -2791,7 +2791,7 @@ class ZeroPadding2D(Layer): [0 0] [0 0]]]], shape=(1, 3, 4, 2), dtype=int64) - Arguments: + Args: padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. @@ -2898,7 +2898,7 @@ class ZeroPadding3D(Layer): >>> print(y.shape) (1, 5, 6, 6, 3) - Arguments: + Args: padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. @@ -3035,7 +3035,7 @@ class Cropping1D(Layer): [[[2 3]] [[8 9]]], shape=(2, 1, 2), dtype=int64) - Arguments: + Args: cropping: Int or tuple of int (length 2) How many units should be trimmed off at the beginning and end of the cropping dimension (axis 1). @@ -3087,7 +3087,7 @@ class Cropping2D(Layer): >>> print(y.shape) (2, 24, 20, 3) - Arguments: + Args: cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric cropping is applied to height and width. @@ -3212,7 +3212,7 @@ class Cropping3D(Layer): >>> print(y.shape) (2, 24, 20, 6, 3) - Arguments: + Args: cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric cropping is applied to depth, height, and width. diff --git a/tensorflow/python/keras/layers/convolutional_recurrent.py b/tensorflow/python/keras/layers/convolutional_recurrent.py index 04ae43c1879..1da37a4cf13 100644 --- a/tensorflow/python/keras/layers/convolutional_recurrent.py +++ b/tensorflow/python/keras/layers/convolutional_recurrent.py @@ -40,7 +40,7 @@ from tensorflow.python.util.tf_export import keras_export class ConvRNN2D(RNN): """Base class for convolutional-recurrent layers. - Arguments: + Args: cell: A RNN cell instance. A RNN cell is a class that has: - a `call(input_at_t, states_at_t)` method, returning `(output_at_t, states_at_t_plus_1)`. The call method of the @@ -424,7 +424,7 @@ class ConvRNN2D(RNN): class ConvLSTM2DCell(DropoutRNNCellMixin, Layer): """Cell class for the ConvLSTM2D layer. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the @@ -703,7 +703,7 @@ class ConvLSTM2D(ConvRNN2D): It is similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). 
kernel_size: An integer or tuple/list of n integers, specifying the diff --git a/tensorflow/python/keras/layers/core.py b/tensorflow/python/keras/layers/core.py index e64a2a6b8c4..d42a021ec4f 100644 --- a/tensorflow/python/keras/layers/core.py +++ b/tensorflow/python/keras/layers/core.py @@ -175,7 +175,7 @@ class Dropout(Layer): [ 7.5 8.75] [10. 0. ]], shape=(5, 2), dtype=float32) - Arguments: + Args: rate: Float between 0 and 1. Fraction of the input units to drop. noise_shape: 1D integer tensor representing the shape of the binary dropout mask that will be multiplied with the input. @@ -255,7 +255,7 @@ class SpatialDropout1D(Dropout): decrease. In this case, SpatialDropout1D will help promote independence between feature maps and should be used instead. - Arguments: + Args: rate: Float between 0 and 1. Fraction of the input units to drop. Call arguments: @@ -297,7 +297,7 @@ class SpatialDropout2D(Dropout): decrease. In this case, SpatialDropout2D will help promote independence between feature maps and should be used instead. - Arguments: + Args: rate: Float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension @@ -356,7 +356,7 @@ class SpatialDropout3D(Dropout): decrease. In this case, SpatialDropout3D will help promote independence between feature maps and should be used instead. - Arguments: + Args: rate: Float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension (the depth) @@ -406,7 +406,7 @@ class SpatialDropout3D(Dropout): class Activation(Layer): """Applies an activation function to an output. - Arguments: + Args: activation: Activation function, such as `tf.nn.relu`, or string name of built-in activation function, such as "relu". @@ -497,7 +497,7 @@ class Reshape(Layer): This is a near direct port of the internal Numpy function `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c` - Arguments: + Args: input_shape: Shape of array being reshaped output_shape: Desired shape of the array with at most a single -1 which indicates a dimension that should be @@ -577,7 +577,7 @@ class Permute(Layer): # note: `None` is the batch dimension ``` - Arguments: + Args: dims: Tuple of integers. Permutation pattern does not include the samples dimension. Indexing starts at 1. For instance, `(2, 1)` permutes the first and second dimensions @@ -627,7 +627,7 @@ class Flatten(Layer): Note: If inputs are shaped `(batch,)` without a feature axis, then flattening adds an extra channel dimension and output shape is `(batch, 1)`. - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. @@ -724,7 +724,7 @@ class RepeatVector(Layer): # now: model.output_shape == (None, 3, 32) ``` - Arguments: + Args: n: Integer, repetition factor. Input shape: @@ -821,7 +821,7 @@ class Lambda(Layer): In general, Lambda layers can be convenient for simple stateless computation, but anything more complex should use a subclass Layer instead. - Arguments: + Args: function: The function to be evaluated. Takes input tensor as first argument. output_shape: Expected output shape from function. This argument can be @@ -1113,7 +1113,7 @@ class Dense(Layer): >>> model.output_shape (None, 32) - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. 
If you don't specify anything, no activation is applied @@ -1250,7 +1250,7 @@ class Dense(Layer): class ActivityRegularization(Layer): """Layer that applies an update to the cost function based on input activity. - Arguments: + Args: l1: L1 regularization factor (positive float). l2: L2 regularization factor (positive float). @@ -1621,7 +1621,7 @@ def _delegate_property(keras_tensor_cls, property_name): # pylint: disable=inva `InstanceProperty` layer to access the property on the represented intermediate values in the model. - Arguments: + Args: keras_tensor_cls: The KerasTensor subclass that should expose the property. property_name: The name of the property to expose and delegate to the represented (Composite)Tensor. @@ -1641,7 +1641,7 @@ def _delegate_method(keras_tensor_cls, method_name): # pylint: disable=invalid- an `InstanceMethod` layer to run the desired method on the represented intermediate values in the model. - Arguments: + Args: keras_tensor_cls: The KerasTensor subclass that should expose the property. method_name: The name of the method to expose and delegate to the represented (Composite)Tensor. diff --git a/tensorflow/python/keras/layers/cudnn_recurrent.py b/tensorflow/python/keras/layers/cudnn_recurrent.py index 7ecc7ac1996..72cc89e92c8 100644 --- a/tensorflow/python/keras/layers/cudnn_recurrent.py +++ b/tensorflow/python/keras/layers/cudnn_recurrent.py @@ -37,7 +37,7 @@ from tensorflow.python.util.tf_export import keras_export class _CuDNNRNN(RNN): """Private base class for CuDNNGRU and CuDNNLSTM layers. - Arguments: + Args: return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state @@ -166,7 +166,7 @@ class CuDNNGRU(_CuDNNRNN): developer website](https://developer.nvidia.com/cudnn). Can only be run on GPU. - Arguments: + Args: units: Positive integer, dimensionality of the output space. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. @@ -346,7 +346,7 @@ class CuDNNLSTM(_CuDNNRNN): developer website](https://developer.nvidia.com/cudnn). Can only be run on GPU. - Arguments: + Args: units: Positive integer, dimensionality of the output space. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. diff --git a/tensorflow/python/keras/layers/dense_attention.py b/tensorflow/python/keras/layers/dense_attention.py index 490d1eac01a..34879524b64 100644 --- a/tensorflow/python/keras/layers/dense_attention.py +++ b/tensorflow/python/keras/layers/dense_attention.py @@ -50,7 +50,7 @@ class BaseDenseAttention(Layer): dropout: Float between 0 and 1. Fraction of the units to drop for the attention scores. - Call Arguments: + Call Args: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. @@ -242,7 +242,7 @@ class Attention(BaseDenseAttention): dropout: Float between 0 and 1. Fraction of the units to drop for the attention scores. - Call Arguments: + Call Args: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. @@ -381,7 +381,7 @@ class AdditiveAttention(BaseDenseAttention): dropout: Float between 0 and 1. Fraction of the units to drop for the attention scores. - Call Arguments: + Call Args: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
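The `[query, value]` call contract documented for the attention layers above can be exercised like this (a sketch; shapes are arbitrary):

```python
import tensorflow as tf

query = tf.random.normal((2, 5, 16))  # [batch_size, Tq, dim]
value = tf.random.normal((2, 8, 16))  # [batch_size, Tv, dim]

attention = tf.keras.layers.Attention(dropout=0.0)
# key defaults to value when only [query, value] is passed.
context = attention([query, value])   # -> [batch_size, Tq, dim], here (2, 5, 16)
```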
diff --git a/tensorflow/python/keras/layers/einsum_dense.py b/tensorflow/python/keras/layers/einsum_dense.py index f8f2e01058d..fd29314150f 100644 --- a/tensorflow/python/keras/layers/einsum_dense.py +++ b/tensorflow/python/keras/layers/einsum_dense.py @@ -36,7 +36,7 @@ class EinsumDense(Layer): This layer can perform einsum calculations of arbitrary dimensionality. - Arguments: + Args: equation: An equation describing the einsum to perform. This equation must be a valid einsum string of the form `ab,bc->ac`, `...ab,bc->...ac`, or `ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum axis diff --git a/tensorflow/python/keras/layers/embeddings.py b/tensorflow/python/keras/layers/embeddings.py index 2f73074f4a0..3ccaf2a2bd2 100644 --- a/tensorflow/python/keras/layers/embeddings.py +++ b/tensorflow/python/keras/layers/embeddings.py @@ -57,7 +57,7 @@ class Embedding(Layer): >>> print(output_array.shape) (32, 10, 64) - Arguments: + Args: input_dim: Integer. Size of the vocabulary, i.e. maximum integer index + 1. output_dim: Integer. Dimension of the dense embedding. diff --git a/tensorflow/python/keras/layers/kernelized.py b/tensorflow/python/keras/layers/kernelized.py index f6160cf0981..f499b4b0518 100644 --- a/tensorflow/python/keras/layers/kernelized.py +++ b/tensorflow/python/keras/layers/kernelized.py @@ -121,7 +121,7 @@ class RandomFourierFeatures(base_layer.Layer): ...) ``` - Arguments: + Args: output_dim: Positive integer, the dimension of the layer's output, i.e., the number of random features used to approximate the kernel. kernel_initializer: Determines the distribution of the parameters of the diff --git a/tensorflow/python/keras/layers/local.py b/tensorflow/python/keras/layers/local.py index 69ec0af6d9f..06ba00ad08b 100644 --- a/tensorflow/python/keras/layers/local.py +++ b/tensorflow/python/keras/layers/local.py @@ -58,7 +58,7 @@ class LocallyConnected1D(Layer): # now model.output_shape == (None, 6, 32) ``` - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the @@ -350,7 +350,7 @@ class LocallyConnected2D(Layer): # now model.output_shape == (None, 28, 28, 32) ``` - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the width @@ -652,7 +652,7 @@ def get_locallyconnected_mask(input_shape, kernel_shape, strides, padding, to make it perform an unshared convolution with given `kernel_shape`, `strides`, `padding` and `data_format`. - Arguments: + Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)` spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / @@ -704,7 +704,7 @@ def local_conv_matmul(inputs, kernel, kernel_mask, output_shape): (the remaining entries in `kernel`) weights. It also does the necessary reshapes to make `inputs` and `kernel` 2-D and `output` (N+2)-D. - Arguments: + Args: inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ..., d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`. kernel: the unshared weights for N-D convolution, @@ -749,7 +749,7 @@ def local_conv_sparse_matmul(inputs, kernel, kernel_idxs, kernel_shape, values=kernel, dense_shape=kernel_shape)`, with `.` standing for matrix-multiply. It also reshapes `inputs` to 2-D and `output` to (N+2)-D. 
- Arguments: + Args: inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ..., d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`. kernel: a 1-D tensor with shape `(len(kernel_idxs),)` containing all the @@ -788,7 +788,7 @@ def make_2d(tensor, split_dim): Dimensions before (excluding) and after (including) `split_dim` are grouped together. - Arguments: + Args: tensor: a tensor of shape `(d0, ..., d(N-1))`. split_dim: an integer from 1 to N-1, index of the dimension to group dimensions before (excluding) and after (including). diff --git a/tensorflow/python/keras/layers/merge.py b/tensorflow/python/keras/layers/merge.py index 2ac0fe5d584..490629a6005 100644 --- a/tensorflow/python/keras/layers/merge.py +++ b/tensorflow/python/keras/layers/merge.py @@ -39,7 +39,7 @@ class _Merge(Layer): def __init__(self, **kwargs): """Initializes a Merge layer. - Arguments: + Args: **kwargs: standard layer keyword arguments. """ super(_Merge, self).__init__(**kwargs) @@ -51,7 +51,7 @@ class _Merge(Layer): def _compute_elemwise_op_output_shape(self, shape1, shape2): """Computes the shape of the resultant of an elementwise operation. - Arguments: + Args: shape1: tuple or None. Shape of the first tensor shape2: tuple or None. Shape of the second tensor @@ -477,7 +477,7 @@ class Concatenate(_Merge): [15, 16, 17, 18, 19], [25, 26, 27, 28, 29]]])> - Arguments: + Args: axis: Axis along which to concatenate. **kwargs: standard layer keyword arguments. """ @@ -628,7 +628,7 @@ class Dot(_Merge): array([[[260, 360], [320, 445]]])> - Arguments: + Args: axes: Integer or tuple of integers, axis or axes along which to take the dot product. If a tuple, should be two integers corresponding to the desired axis from the first input @@ -741,7 +741,7 @@ class Dot(_Merge): def add(inputs, **kwargs): """Functional interface to the `tf.keras.layers.Add` layer. - Arguments: + Args: inputs: A list of input tensors (at least 2) with the same shape. **kwargs: Standard layer keyword arguments. @@ -775,7 +775,7 @@ def add(inputs, **kwargs): def subtract(inputs, **kwargs): """Functional interface to the `Subtract` layer. - Arguments: + Args: inputs: A list of input tensors (exactly 2). **kwargs: Standard layer keyword arguments. @@ -804,7 +804,7 @@ def subtract(inputs, **kwargs): def multiply(inputs, **kwargs): """Functional interface to the `Multiply` layer. - Arguments: + Args: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. @@ -836,7 +836,7 @@ def average(inputs, **kwargs): >>> out = tf.keras.layers.Dense(4)(avg) >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out) - Arguments: + Args: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. @@ -868,7 +868,7 @@ def maximum(inputs, **kwargs): model = tf.keras.models.Model(inputs=[input1, input2], outputs=out) ``` - Arguments: + Args: inputs: A list of input tensors (at least 2) of same shape. **kwargs: Standard layer keyword arguments. @@ -886,7 +886,7 @@ def maximum(inputs, **kwargs): def minimum(inputs, **kwargs): """Functional interface to the `Minimum` layer. - Arguments: + Args: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. @@ -920,7 +920,7 @@ def concatenate(inputs, axis=-1, **kwargs): [15, 16, 17, 18, 19], [25, 26, 27, 28, 29]]])> - Arguments: + Args: inputs: A list of input tensors (at least 2). axis: Concatenation axis. **kwargs: Standard layer keyword arguments.
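The functional merge interfaces retitled above combine symbolic tensors directly; a short sketch:

```python
import tensorflow as tf

a = tf.keras.Input(shape=(8,))
b = tf.keras.Input(shape=(8,))

summed = tf.keras.layers.add([a, b])                        # at least 2 tensors, same shape
merged = tf.keras.layers.concatenate([a, summed], axis=-1)  # -> shape (None, 16)
out = tf.keras.layers.Dense(4)(merged)
model = tf.keras.Model(inputs=[a, b], outputs=out)
```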
@@ -935,7 +935,7 @@ def concatenate(inputs, axis=-1, **kwargs): def dot(inputs, axes, normalize=False, **kwargs): """Functional interface to the `Dot` layer. - Arguments: + Args: inputs: A list of input tensors (at least 2). axes: Integer or tuple of integers, axis or axes along which to take the dot product. diff --git a/tensorflow/python/keras/layers/multi_head_attention.py b/tensorflow/python/keras/layers/multi_head_attention.py index bda0056fe7e..164a5f0b9a7 100644 --- a/tensorflow/python/keras/layers/multi_head_attention.py +++ b/tensorflow/python/keras/layers/multi_head_attention.py @@ -168,7 +168,7 @@ class MultiHeadAttention(Layer): >>> print(output_tensor.shape) (None, 5, 3, 4, 16) - Arguments: + Args: num_heads: Number of attention heads. key_dim: Size of each attention head for query and key. value_dim: Size of each attention head for value. diff --git a/tensorflow/python/keras/layers/noise.py b/tensorflow/python/keras/layers/noise.py index 9623be84f56..880d4c9cdef 100644 --- a/tensorflow/python/keras/layers/noise.py +++ b/tensorflow/python/keras/layers/noise.py @@ -39,7 +39,7 @@ class GaussianNoise(Layer): As it is a regularization layer, it is only active at training time. - Arguments: + Args: stddev: Float, standard deviation of the noise distribution. Call arguments: @@ -88,7 +88,7 @@ class GaussianDropout(Layer): As it is a regularization layer, it is only active at training time. - Arguments: + Args: rate: Float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. @@ -146,7 +146,7 @@ class AlphaDropout(Layer): Alpha Dropout fits well to Scaled Exponential Linear Units by randomly setting activations to the negative saturation value. - Arguments: + Args: rate: float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. diff --git a/tensorflow/python/keras/layers/normalization.py b/tensorflow/python/keras/layers/normalization.py index 1e3ff7ae674..b03ce54f78b 100644 --- a/tensorflow/python/keras/layers/normalization.py +++ b/tensorflow/python/keras/layers/normalization.py @@ -80,7 +80,7 @@ class BatchNormalizationBase(Layer): *after having been trained on data that has similar statistics as the inference data*. - Arguments: + Args: axis: Integer or a list of integers, the axis that should be normalized (typically the features axis). For instance, after a `Conv2D` layer with `data_format="channels_first"`, set `axis=1` in `BatchNormalization`. @@ -1063,7 +1063,7 @@ class LayerNormalization(Layer): So, this Layer Normalization implementation will not match a Group Normalization layer with group size set to 1. - Arguments: + Args: axis: Integer or List/Tuple. The axis or axes to normalize across. Typically this is the features axis/axes. The left-out axes are typically the batch axis/axes. This argument defaults to `-1`, the last dimension in the diff --git a/tensorflow/python/keras/layers/normalization_v2.py b/tensorflow/python/keras/layers/normalization_v2.py index 59aab30f427..9b6d3b6f0f6 100644 --- a/tensorflow/python/keras/layers/normalization_v2.py +++ b/tensorflow/python/keras/layers/normalization_v2.py @@ -55,7 +55,7 @@ class SyncBatchNormalization(normalization.BatchNormalizationBase): model.add(tf.keras.layers.experimental.SyncBatchNormalization()) ``` - Arguments: + Args: axis: Integer, the axis that should be normalized (typically the features axis). 
For instance, after a `Conv2D` layer with @@ -228,7 +228,7 @@ class BatchNormalization(normalization.BatchNormalizationBase): *after having been trained on data that has similar statistics as the inference data*. - Arguments: + Args: axis: Integer, the axis that should be normalized (typically the features axis). For instance, after a `Conv2D` layer with `data_format="channels_first"`, set `axis=1` in `BatchNormalization`. diff --git a/tensorflow/python/keras/layers/ops/core.py b/tensorflow/python/keras/layers/ops/core.py index d53fb4d2aab..df9fbcd45e2 100644 --- a/tensorflow/python/keras/layers/ops/core.py +++ b/tensorflow/python/keras/layers/ops/core.py @@ -30,7 +30,7 @@ from tensorflow.python.ops import standard_ops def dense(inputs, kernel, bias=None, activation=None, dtype=None): """Densely connected NN layer op. - Arguments: + Args: inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation. kernel: `tf.Variable`. Matrix kernel. bias: (Optional) `tf.Variable`. Bias to add to outputs. diff --git a/tensorflow/python/keras/layers/pooling.py b/tensorflow/python/keras/layers/pooling.py index 6eb3e8e97ac..dcf6dd8d84a 100644 --- a/tensorflow/python/keras/layers/pooling.py +++ b/tensorflow/python/keras/layers/pooling.py @@ -36,7 +36,7 @@ class Pooling1D(Layer): This class only exists for code reuse. It will never be an exposed API. - Arguments: + Args: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. @@ -158,7 +158,7 @@ class MaxPooling1D(Pooling1D): [5.], [5.]]], dtype=float32)> - Arguments: + Args: pool_size: Integer, size of the max pooling window. strides: Integer, or None. Specifies how much the pooling window moves for each pooling step. @@ -204,7 +204,7 @@ class MaxPooling1D(Pooling1D): class AveragePooling1D(Pooling1D): """Average pooling for temporal data. - Arguments: + Args: pool_size: Integer, size of the average pooling windows. strides: Integer, or None. Factor by which to downscale. E.g. 2 will halve the input. @@ -250,7 +250,7 @@ class Pooling2D(Layer): This class only exists for code reuse. It will never be an exposed API. - Arguments: + Args: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. @@ -414,7 +414,7 @@ class MaxPooling2D(Pooling2D): [9.], [9.]]]], dtype=float32)> - Arguments: + Args: pool_size: integer or tuple of 2 integers, window size over which to take the maximum. `(2, 2)` will take the max value over a 2x2 pooling window. @@ -471,7 +471,7 @@ class MaxPooling2D(Pooling2D): class AveragePooling2D(Pooling2D): """Average pooling operation for spatial data. - Arguments: + Args: pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal). `(2, 2)` will halve the input in both spatial dimension. @@ -525,7 +525,7 @@ class Pooling3D(Layer): This class only exists for code reuse. It will never be an exposed API. - Arguments: + Args: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) @@ -620,7 +620,7 @@ class Pooling3D(Layer): class MaxPooling3D(Pooling3D): """Max pooling operation for 3D data (spatial or spatio-temporal). - Arguments: + Args: pool_size: Tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). 
`(2, 2, 2)` will halve the size of the 3D input in each dimension. @@ -673,7 +673,7 @@ class MaxPooling3D(Pooling3D): class AveragePooling3D(Pooling3D): """Average pooling operation for 3D data (spatial or spatio-temporal). - Arguments: + Args: pool_size: tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). `(2, 2, 2)` will halve the size of the 3D input in each dimension. @@ -759,7 +759,7 @@ class GlobalAveragePooling1D(GlobalPooling1D): >>> print(y.shape) (2, 4) - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. @@ -829,7 +829,7 @@ class GlobalMaxPooling1D(GlobalPooling1D): [6.], [9.], dtype=float32)> - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. @@ -893,7 +893,7 @@ class GlobalAveragePooling2D(GlobalPooling2D): >>> print(y.shape) (2, 3) - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. @@ -934,7 +934,7 @@ class GlobalMaxPooling2D(GlobalPooling2D): >>> print(y.shape) (2, 3) - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. @@ -992,7 +992,7 @@ class GlobalPooling3D(Layer): class GlobalAveragePooling3D(GlobalPooling3D): """Global Average pooling operation for 3D data. - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. @@ -1027,7 +1027,7 @@ class GlobalAveragePooling3D(GlobalPooling3D): class GlobalMaxPooling3D(GlobalPooling3D): """Global Max pooling operation for 3D data. - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. diff --git a/tensorflow/python/keras/layers/preprocessing/category_crossing.py b/tensorflow/python/keras/layers/preprocessing/category_crossing.py index b8fc6dd2b53..7cc6db3c16b 100644 --- a/tensorflow/python/keras/layers/preprocessing/category_crossing.py +++ b/tensorflow/python/keras/layers/preprocessing/category_crossing.py @@ -63,7 +63,7 @@ class CategoryCrossing(base_preprocessing_layer.PreprocessingLayer): [b'b-e'], [b'c-f']], dtype=object)> - Arguments: + Args: depth: depth of input crossing. By default None, all inputs are crossed into one output. It can also be an int or tuple/list of ints. Passing an integer will create combinations of crossed outputs with depth up to that diff --git a/tensorflow/python/keras/layers/preprocessing/category_encoding.py b/tensorflow/python/keras/layers/preprocessing/category_encoding.py index 5230faa379e..9085ccdc779 100644 --- a/tensorflow/python/keras/layers/preprocessing/category_encoding.py +++ b/tensorflow/python/keras/layers/preprocessing/category_encoding.py @@ -100,7 +100,7 @@ class CategoryEncoding(base_preprocessing_layer.CombinerPreprocessingLayer): [0. , 0.2, 0.3, 0. ], [0. , 0.2, 0. , 0.4]])> - Arguments: + Args: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. output_mode: Specification for the output of the layer. @@ -193,7 +193,7 @@ class CategoryEncoding(base_preprocessing_layer.CombinerPreprocessingLayer): Overrides the default adapt method to apply relevant preprocessing to the inputs before passing to the combiner. - Arguments: + Args: data: The data to train on. 
It can be passed either as a tf.data Dataset, or as a numpy array. reset_state: Optional argument specifying whether to clear the state of diff --git a/tensorflow/python/keras/layers/preprocessing/discretization.py b/tensorflow/python/keras/layers/preprocessing/discretization.py index 181325b9e9c..22bd7e3ff4e 100644 --- a/tensorflow/python/keras/layers/preprocessing/discretization.py +++ b/tensorflow/python/keras/layers/preprocessing/discretization.py @@ -55,7 +55,7 @@ def summarize(values, epsilon): If the target num_bins is larger than the size of values, the whole array is returned (with weights of 1). - Arguments: + Args: values: 1-D `np.ndarray` to be summarized. epsilon: A `'float32'` that determines the approximate desired precision. @@ -87,7 +87,7 @@ def compress(summary, epsilon): Taking the difference of the cumulative weights from the previous bin's cumulative weight will give the new weight for that bin. - Arguments: + Args: summary: 2-D `np.ndarray` summary to be compressed. epsilon: A `'float32'` that determines the approximate desired precision. @@ -115,7 +115,7 @@ def merge_summaries(prev_summary, next_summary, epsilon): Given two summaries of distinct data, this function merges (and compresses) them to stay within `epsilon` error tolerance. - Arguments: + Args: prev_summary: 2-D `np.ndarray` summary to be merged with `next_summary`. next_summary: 2-D `np.ndarray` summary to be merged with `prev_summary`. epsilon: A `'float32'` that determines the approximate desired precision. diff --git a/tensorflow/python/keras/layers/preprocessing/hashing.py b/tensorflow/python/keras/layers/preprocessing/hashing.py index f453eeff339..573c860c0c8 100644 --- a/tensorflow/python/keras/layers/preprocessing/hashing.py +++ b/tensorflow/python/keras/layers/preprocessing/hashing.py @@ -113,7 +113,7 @@ class Hashing(base_preprocessing_layer.PreprocessingLayer): Reference: [SipHash with salt](https://www.131002.net/siphash/siphash.pdf) - Arguments: + Args: num_bins: Number of hash bins. salt: A single unsigned integer or None. If passed, the hash function used will be SipHash64, with these values diff --git a/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py b/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py index f9d6ca74c3e..b7ef4df7475 100644 --- a/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py +++ b/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py @@ -76,7 +76,7 @@ class Resizing(PreprocessingLayer): Resize the batched image input to target height and width. The input should be a 4-D tensor in the format of NHWC. - Arguments: + Args: height: Integer, the height of the output shape. width: Integer, the width of the output shape. interpolation: String, the interpolation method. Defaults to `bilinear`. @@ -136,7 +136,7 @@ class CenterCrop(PreprocessingLayer): If the input height/width is even and the target height/width is odd (or inversely), the input image is left-padded by 1 pixel. - Arguments: + Args: height: Integer, the height of the output shape. width: Integer, the width of the output shape. name: A string, the name of the layer. @@ -208,7 +208,7 @@ class RandomCrop(PreprocessingLayer): 4D tensor with shape: `(samples, target_height, target_width, channels)`. - Arguments: + Args: height: Integer, the height of the output shape. width: Integer, the width of the output shape. seed: Integer. Used to create a random seed. @@ -317,7 +317,7 @@ class Rescaling(PreprocessingLayer): Output shape: Same as input.
- Arguments: + Args: scale: Float, the scale to apply to the inputs. offset: Float, the offset to apply to the inputs. name: A string, the name of the layer. @@ -437,7 +437,7 @@ class RandomFlip(PreprocessingLayer): class RandomTranslation(PreprocessingLayer): """Randomly translate each image during training. - Arguments: + Args: height_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for shifting vertically. A negative value means shifting image up, while a positive value @@ -889,7 +889,7 @@ class RandomRotation(PreprocessingLayer): class RandomZoom(PreprocessingLayer): """Randomly zoom each image during training. - Arguments: + Args: height_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for zooming vertically. When represented as a single float, this value is used for both the @@ -1166,7 +1166,7 @@ class RandomHeight(PreprocessingLayer): By default, this layer is inactive during inference. - Arguments: + Args: factor: A positive float (fraction of original height), or a tuple of size 2 representing lower and upper bound for resizing vertically. When represented as a single float, this value is used for both the upper and @@ -1265,7 +1265,7 @@ class RandomWidth(PreprocessingLayer): By default, this layer is inactive during inference. - Arguments: + Args: factor: A positive float (fraction of original width), or a tuple of size 2 representing lower and upper bound for resizing horizontally. When represented as a single float, this value is used for both the upper and diff --git a/tensorflow/python/keras/layers/preprocessing/index_lookup.py b/tensorflow/python/keras/layers/preprocessing/index_lookup.py index db358a1400e..844c6ca0e50 100644 --- a/tensorflow/python/keras/layers/preprocessing/index_lookup.py +++ b/tensorflow/python/keras/layers/preprocessing/index_lookup.py @@ -64,7 +64,7 @@ class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer): vocabulary size, the most frequent terms will be used to create the vocabulary. - Arguments: + Args: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary includes the OOV and mask tokens, so the effective number of tokens is @@ -213,7 +213,7 @@ class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer): Overrides the default adapt method to apply relevant preprocessing to the inputs before passing to the combiner. - Arguments: + Args: data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array. reset_state: Optional argument specifying whether to clear the state of @@ -393,7 +393,7 @@ class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer): information is already known. If vocabulary data is already present in the layer, this method will replace it. - Arguments: + Args: vocab: An array of hashable tokens. Raises: diff --git a/tensorflow/python/keras/layers/preprocessing/integer_lookup.py b/tensorflow/python/keras/layers/preprocessing/integer_lookup.py index cef9bc95ba8..06c426504c1 100644 --- a/tensorflow/python/keras/layers/preprocessing/integer_lookup.py +++ b/tensorflow/python/keras/layers/preprocessing/integer_lookup.py @@ -40,7 +40,7 @@ class IntegerLookup(index_lookup.IndexLookup): vocabulary size, the most frequent values will be used to create the vocabulary (and the values that don't make the cut will be treated as OOV).
- Arguments: + Args: max_values: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary includes the OOV and mask values, so the effective number of values is diff --git a/tensorflow/python/keras/layers/preprocessing/normalization.py b/tensorflow/python/keras/layers/preprocessing/normalization.py index fcbee80e2db..0bdd0d4999c 100644 --- a/tensorflow/python/keras/layers/preprocessing/normalization.py +++ b/tensorflow/python/keras/layers/preprocessing/normalization.py @@ -60,7 +60,7 @@ class Normalization(base_preprocessing_layer.CombinerPreprocessingLayer): as the layer's weights. `adapt` should be called before `fit`, `evaluate`, or `predict`. - Arguments: + Args: axis: Integer or tuple of integers, the axis or axes that should be "kept". These axes are not summed over when calculating the normalization statistics. By default the last axis, the `features` axis diff --git a/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py b/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py index 6bcae297d51..4e297e7c029 100644 --- a/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py +++ b/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py @@ -37,7 +37,7 @@ class PreprocessingStage(base_preprocessing_layer.PreprocessingLayer, Sequential-like object that enables you to `adapt()` the whole list via a single `adapt()` call on the preprocessing stage. - Arguments: + Args: layers: List of layers. Can include layers that aren't preprocessing layers. name: String. Optional name for the preprocessing stage object. """ @@ -45,7 +45,7 @@ class PreprocessingStage(base_preprocessing_layer.PreprocessingLayer, def adapt(self, data, reset_state=True): """Adapt the state of the layers of the preprocessing stage to the data. - Arguments: + Args: data: A batched Dataset object, or a NumPy array, or an EagerTensor. Data to be iterated over to adapt the state of the layers in this preprocessing stage. @@ -125,7 +125,7 @@ class FunctionalPreprocessingStage(base_preprocessing_layer.PreprocessingLayer, >>> outputs = [inputs['x1'], [y, z]] >>> stage = FunctionalPreprocessingStage(inputs, outputs) - Arguments: + Args: inputs: An input tensor (must be created via `tf.keras.Input()`), or a list, a dict, or a nested structure of input tensors. outputs: An output tensor, or a list, a dict or a nested structure of output @@ -142,7 +142,7 @@ class FunctionalPreprocessingStage(base_preprocessing_layer.PreprocessingLayer, def adapt(self, data, reset_state=True): """Adapt the state of the layers of the preprocessing stage to the data. - Arguments: + Args: data: A batched Dataset object, a NumPy array, an EagerTensor, or a list, dict or nested structure of Numpy Arrays or EagerTensors. The elements of Dataset object need to conform with inputs of the stage. The first @@ -242,7 +242,7 @@ class FunctionalPreprocessingStage(base_preprocessing_layer.PreprocessingLayer, def _unzip_dataset(ds): """Unzip dataset into a list of single element datasets. - Arguments: + Args: ds: A Dataset object. Returns: diff --git a/tensorflow/python/keras/layers/preprocessing/reduction.py b/tensorflow/python/keras/layers/preprocessing/reduction.py index 5b1e83bf27e..177167fb056 100644 --- a/tensorflow/python/keras/layers/preprocessing/reduction.py +++ b/tensorflow/python/keras/layers/preprocessing/reduction.py @@ -48,7 +48,7 @@ class Reduction(Layer): This layer performs a reduction across one axis of its input data.
This data may optionally be weighted by passing in an identical float tensor. - Arguments: + Args: reduction: The type of reduction to perform. Can be one of the following: "max", "mean", "min", "prod", or "sum". This layer uses the Tensorflow reduce op which corresponds to that reduction (so, for "mean", we use diff --git a/tensorflow/python/keras/layers/preprocessing/string_lookup.py b/tensorflow/python/keras/layers/preprocessing/string_lookup.py index 5d6d2bcee32..4c4bbd531bb 100644 --- a/tensorflow/python/keras/layers/preprocessing/string_lookup.py +++ b/tensorflow/python/keras/layers/preprocessing/string_lookup.py @@ -40,7 +40,7 @@ class StringLookup(index_lookup.IndexLookup): vocabulary size, the most frequent terms will be used to create the vocabulary (and the terms that don't make the cut will be treated as OOV). - Arguments: + Args: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary includes the OOV and mask tokens, so the effective number of tokens is diff --git a/tensorflow/python/keras/layers/preprocessing/text_vectorization.py b/tensorflow/python/keras/layers/preprocessing/text_vectorization.py index 62225fce053..e13fe8195a5 100644 --- a/tensorflow/python/keras/layers/preprocessing/text_vectorization.py +++ b/tensorflow/python/keras/layers/preprocessing/text_vectorization.py @@ -118,7 +118,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer): ["another", "string", "to", "split"]]`. This makes the callable site natively compatible with `tf.strings.split()`. - Arguments: + Args: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary contains 1 OOV token, so the effective number of tokens is `(max_tokens - @@ -400,7 +400,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer): Overrides the default adapt method to apply relevant preprocessing to the inputs before passing to the combiner. - Arguments: + Args: data: The data to train on. It can be passed either as a tf.data Dataset, as a NumPy array, a string tensor, or as a list of texts. reset_state: Optional argument specifying whether to clear the state of @@ -485,7 +485,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer): vocabulary data is already present in the layer, this method will replace it. - Arguments: + Args: vocab: An array of string tokens, or a path to a file containing one token per line. df_data: An array of document frequency data. Only necessary if the layer diff --git a/tensorflow/python/keras/layers/recurrent.py b/tensorflow/python/keras/layers/recurrent.py index 88a6d0060d8..cc20fe825f8 100644 --- a/tensorflow/python/keras/layers/recurrent.py +++ b/tensorflow/python/keras/layers/recurrent.py @@ -62,7 +62,7 @@ class StackedRNNCells(Layer): Used to implement efficient stacked RNNs. - Arguments: + Args: cells: List of RNN cell instances. Examples: @@ -205,7 +205,7 @@ class RNN(Layer): See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. - Arguments: + Args: cell: A RNN cell instance or a list of RNN cell instances. 
A RNN cell is a class that has: - A `call(input_at_t, states_at_t)` method, returning @@ -1240,7 +1240,7 @@ class SimpleRNNCell(DropoutRNNCellMixin, Layer): This class processes one step within the whole time sequence input, whereas `tf.keras.layers.SimpleRNN` processes the whole sequence. - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). @@ -1439,7 +1439,7 @@ class SimpleRNN(RNN): See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). @@ -1690,7 +1690,7 @@ class SimpleRNN(RNN): class GRUCell(DropoutRNNCellMixin, Layer): """Cell class for the GRU layer. - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). @@ -1974,7 +1974,7 @@ class GRU(RNN): `recurrent_kernel`. Use `'reset_after'=True` and `recurrent_activation='sigmoid'`. - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). @@ -2244,7 +2244,7 @@ class GRU(RNN): class LSTMCell(DropoutRNNCellMixin, Layer): """Cell class for the LSTM layer. - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). @@ -2647,7 +2647,7 @@ class LSTM(RNN): Note that this cell is not optimized for performance on GPU. Please use `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU. - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). @@ -2937,7 +2937,7 @@ def _standardize_args(inputs, initial_state, constants, num_constants): makes sure the arguments are separated and that `initial_state` and `constants` are lists of tensors (or None). - Arguments: + Args: inputs: Tensor or list/tuple of tensors, which may include constants and initial states. In that case `num_constants` must be specified. initial_state: Tensor or list of tensors or None, initial states. diff --git a/tensorflow/python/keras/layers/recurrent_v2.py b/tensorflow/python/keras/layers/recurrent_v2.py index 7c112e0f726..76aae3bb926 100644 --- a/tensorflow/python/keras/layers/recurrent_v2.py +++ b/tensorflow/python/keras/layers/recurrent_v2.py @@ -133,7 +133,7 @@ class GRUCell(recurrent.GRUCell): >>> print(final_state.shape) (32, 4) - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass None, no activation is applied @@ -266,7 +266,7 @@ class GRU(recurrent.DropoutRNNCellMixin, recurrent.GRU): >>> print(final_state.shape) (32, 4) - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). @@ -566,7 +566,7 @@ def standard_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, counterpart. The RNN step logic has been simplified, e.g. dropout and masking are removed since the CuDNN implementation does not support them. - Arguments: + Args: inputs: Input tensor of GRU layer. init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel. @@ -882,7 +882,7 @@ class LSTMCell(recurrent.LSTMCell): >>> print(final_carry_state.shape) (32, 4) - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" @@ -1008,7 +1008,7 @@ class LSTM(recurrent.DropoutRNNCellMixin, recurrent.LSTM): >>> print(final_carry_state.shape) (32, 4) - Arguments: + Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation diff --git a/tensorflow/python/keras/layers/serialization.py b/tensorflow/python/keras/layers/serialization.py index e47d9f59e08..b748b3b3b1d 100644 --- a/tensorflow/python/keras/layers/serialization.py +++ b/tensorflow/python/keras/layers/serialization.py @@ -161,7 +161,7 @@ def serialize(layer): def deserialize(config, custom_objects=None): """Instantiates a layer from a config dictionary. - Arguments: + Args: config: dict of the form {'class_name': str, 'config': dict} custom_objects: dict mapping class names (or function names) of custom (non-Keras) objects to class/functions diff --git a/tensorflow/python/keras/layers/wrappers.py b/tensorflow/python/keras/layers/wrappers.py index 2c19cecec0c..aa0b5c7a544 100644 --- a/tensorflow/python/keras/layers/wrappers.py +++ b/tensorflow/python/keras/layers/wrappers.py @@ -45,7 +45,7 @@ class Wrapper(Layer): Do not use this class as a layer, it is only an abstract base class. Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. - Arguments: + Args: layer: The layer to be wrapped. """ @@ -105,7 +105,7 @@ class TimeDistributed(Wrapper): Because `TimeDistributed` applies the same instance of `Conv2D` to each of the timestamps, the same set of weights are used at each timestamp. - Arguments: + Args: layer: a `tf.keras.layers.Layer` instance. Call arguments: @@ -142,7 +142,7 @@ class TimeDistributed(Wrapper): The static shapes are replaced with the corresponding dynamic shapes of the tensor. - Arguments: + Args: init_tuple: a tuple, the first part of the output shape tensor: the tensor from which to get the (static and dynamic) shapes as the last part of the output shape @@ -310,7 +310,7 @@ class TimeDistributed(Wrapper): (E.g., `mask` is not used at all) Return `None`. - Arguments: + Args: inputs: Tensor with shape [batch size, timesteps, ...] indicating the input to TimeDistributed. If static shape information is available for "batch size", `mask` is returned unmodified. @@ -384,7 +384,7 @@ class TimeDistributed(Wrapper): class Bidirectional(Wrapper): """Bidirectional wrapper for RNNs. - Arguments: + Args: layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or `keras.layers.GRU`. It could also be a `keras.layers.Layer` instance that meets the following criteria: diff --git a/tensorflow/python/keras/legacy_tf_layers/base.py b/tensorflow/python/keras/legacy_tf_layers/base.py index 50e8ae683a3..dce676ef743 100644 --- a/tensorflow/python/keras/legacy_tf_layers/base.py +++ b/tensorflow/python/keras/legacy_tf_layers/base.py @@ -163,7 +163,7 @@ class Layer(base_layer.Layer): It is considered legacy, and we recommend the use of `tf.keras.layers.Layer` instead. - Arguments: + Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. 
dtype: Default dtype of the layer's weights (default of `None` means use the @@ -334,7 +334,7 @@ class Layer(base_layer.Layer): **kwargs): """Adds a new variable to the layer, or gets an existing one; returns it. - Arguments: + Args: name: variable name. shape: variable shape. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. @@ -489,7 +489,7 @@ class Layer(base_layer.Layer): def __call__(self, inputs, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. - Arguments: + Args: inputs: input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. diff --git a/tensorflow/python/keras/legacy_tf_layers/convolutional.py b/tensorflow/python/keras/legacy_tf_layers/convolutional.py index 4f3732510a0..759cb1f6977 100644 --- a/tensorflow/python/keras/legacy_tf_layers/convolutional.py +++ b/tensorflow/python/keras/legacy_tf_layers/convolutional.py @@ -37,7 +37,7 @@ class Conv1D(keras_layers.Conv1D, base.Layer): a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the @@ -147,7 +147,7 @@ def conv1d(inputs, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. - Arguments: + Args: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -235,7 +235,7 @@ class Conv2D(keras_layers.Conv2D, base.Layer): a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the @@ -352,7 +352,7 @@ def conv2d(inputs, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. - Arguments: + Args: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -447,7 +447,7 @@ class Conv3D(keras_layers.Conv3D, base.Layer): a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the @@ -565,7 +565,7 @@ def conv3d(inputs, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. - Arguments: + Args: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -661,7 +661,7 @@ class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer): it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). 
kernel_size: A single integer specifying the spatial @@ -771,7 +771,7 @@ class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer): it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 integers specifying the spatial @@ -908,7 +908,7 @@ def separable_conv1d(inputs, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. - Arguments: + Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -1031,7 +1031,7 @@ def separable_conv2d(inputs, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. - Arguments: + Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -1138,7 +1138,7 @@ class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer): while maintaining a connectivity pattern that is compatible with said convolution. - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 positive integers specifying the spatial @@ -1243,7 +1243,7 @@ def conv2d_transpose(inputs, while maintaining a connectivity pattern that is compatible with said convolution. - Arguments: + Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). @@ -1320,7 +1320,7 @@ def conv2d_transpose(inputs, class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer): """Transposed 3D convolution layer (sometimes called 3D Deconvolution). - Arguments: + Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the @@ -1422,7 +1422,7 @@ def conv3d_transpose(inputs, reuse=None): """Functional interface for transposed 3D convolution layer. - Arguments: + Args: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). diff --git a/tensorflow/python/keras/legacy_tf_layers/core.py b/tensorflow/python/keras/legacy_tf_layers/core.py index b401801bd4a..a7e624120d5 100644 --- a/tensorflow/python/keras/legacy_tf_layers/core.py +++ b/tensorflow/python/keras/legacy_tf_layers/core.py @@ -40,7 +40,7 @@ class Dense(keras_layers.Dense, base.Layer): and `bias` is a bias vector created by the layer (only if `use_bias` is `True`). - Arguments: + Args: units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a linear activation. @@ -134,7 +134,7 @@ def dense( and `bias` is a bias vector created by the layer (only if `use_bias` is `True`). - Arguments: + Args: inputs: Tensor input. units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a @@ -197,7 +197,7 @@ class Dropout(keras_layers.Dropout, base.Layer): The units that are kept are scaled by `1 / (1 - rate)`, so that their sum is unchanged at training time and inference time. - Arguments: + Args: rate: The dropout rate, between 0 and 1. E.g. 
`rate=0.1` would drop out 10% of input units. noise_shape: 1D tensor of type `int32` representing the shape of the @@ -241,7 +241,7 @@ def dropout(inputs, The units that are kept are scaled by `1 / (1 - rate)`, so that their sum is unchanged at training time and inference time. - Arguments: + Args: inputs: Tensor input. rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out 10% of input units. @@ -276,7 +276,7 @@ def dropout(inputs, class Flatten(keras_layers.Flatten, base.Layer): """Flattens an input tensor while preserving the batch axis (axis 0). - Arguments: + Args: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape @@ -302,7 +302,7 @@ class Flatten(keras_layers.Flatten, base.Layer): def flatten(inputs, name=None, data_format='channels_last'): """Flattens an input tensor while preserving the batch axis (axis 0). - Arguments: + Args: inputs: Tensor input. name: The name of the layer (string). data_format: A string, one of `channels_last` (default) or `channels_first`. diff --git a/tensorflow/python/keras/legacy_tf_layers/normalization.py b/tensorflow/python/keras/legacy_tf_layers/normalization.py index 4b16ad62336..732e65643bc 100644 --- a/tensorflow/python/keras/legacy_tf_layers/normalization.py +++ b/tensorflow/python/keras/legacy_tf_layers/normalization.py @@ -43,7 +43,7 @@ class BatchNormalization(keras_normalization.BatchNormalization, base.Layer): train_op = tf.group([train_op, update_ops]) ``` - Arguments: + Args: axis: An `int` or list of `int`, the axis or axes that should be normalized, typically the features axis/axes. For instance, after a `Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a list of axes is @@ -216,7 +216,7 @@ def batch_normalization(inputs, train_op = tf.group([train_op, update_ops]) ``` - Arguments: + Args: inputs: Tensor input. axis: An `int`, the axis that should be normalized (typically the features axis). For instance, after a `Convolution2D` layer with diff --git a/tensorflow/python/keras/legacy_tf_layers/pooling.py b/tensorflow/python/keras/legacy_tf_layers/pooling.py index a989cb30e01..2d7402f7e11 100644 --- a/tensorflow/python/keras/legacy_tf_layers/pooling.py +++ b/tensorflow/python/keras/legacy_tf_layers/pooling.py @@ -30,7 +30,7 @@ from tensorflow.python.util.tf_export import tf_export class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer): """Average Pooling layer for 1D inputs. - Arguments: + Args: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the @@ -65,7 +65,7 @@ def average_pooling1d(inputs, pool_size, strides, name=None): """Average Pooling layer for 1D inputs. - Arguments: + Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. @@ -101,7 +101,7 @@ def average_pooling1d(inputs, pool_size, strides, class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer): """Max Pooling layer for 1D inputs. - Arguments: + Args: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the @@ -136,7 +136,7 @@ def max_pooling1d(inputs, pool_size, strides, name=None): """Max Pooling layer for 1D inputs. 
- Arguments: + Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. @@ -172,7 +172,7 @@ def max_pooling1d(inputs, pool_size, strides, class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer): """Average pooling layer for 2D inputs (e.g. images). - Arguments: + Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for @@ -208,7 +208,7 @@ def average_pooling2d(inputs, name=None): """Average pooling layer for 2D inputs (e.g. images). - Arguments: + Args: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. @@ -246,7 +246,7 @@ def average_pooling2d(inputs, class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer): """Max pooling layer for 2D inputs (e.g. images). - Arguments: + Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for @@ -282,7 +282,7 @@ def max_pooling2d(inputs, name=None): """Max pooling layer for 2D inputs (e.g. images). - Arguments: + Args: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. @@ -320,7 +320,7 @@ def max_pooling2d(inputs, class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer): """Average pooling layer for 3D inputs (e.g. volumes). - Arguments: + Args: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. @@ -358,7 +358,7 @@ def average_pooling3d(inputs, name=None): """Average pooling layer for 3D inputs (e.g. volumes). - Arguments: + Args: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) @@ -398,7 +398,7 @@ def average_pooling3d(inputs, class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer): """Max pooling layer for 3D inputs (e.g. volumes). - Arguments: + Args: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. @@ -438,7 +438,7 @@ def max_pooling3d(inputs, volumes). - Arguments: + Args: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single diff --git a/tensorflow/python/keras/losses.py b/tensorflow/python/keras/losses.py index f7ea1e5daf8..58761790fdd 100644 --- a/tensorflow/python/keras/losses.py +++ b/tensorflow/python/keras/losses.py @@ -1839,7 +1839,7 @@ def is_categorical_crossentropy(loss): def serialize(loss): """Serializes loss function or `Loss` instance. - Arguments: + Args: loss: A Keras `Loss` instance or a loss function. Returns: @@ -1852,7 +1852,7 @@ def serialize(loss): def deserialize(name, custom_objects=None): """Deserializes a serialized loss class/function instance. - Arguments: + Args: name: Loss configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. 
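Illustrative sketch (not part of the patch) of the losses `serialize`/`deserialize` round trip documented in the hunks above, assuming the standard `tf.keras.losses` endpoints:

```python
import tensorflow as tf

loss = tf.keras.losses.MeanSquaredError()
config = tf.keras.losses.serialize(loss)        # {'class_name': 'MeanSquaredError', 'config': {...}}
restored = tf.keras.losses.deserialize(config)  # rebuilds an equivalent Loss instance

# `get` (next hunk) accepts a string name, a config dict, a function,
# or a Loss instance.
loss_fn = tf.keras.losses.get('mean_squared_error')
```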
@@ -1890,7 +1890,7 @@ def get(identifier): >>> type(loss) - Arguments: + Args: identifier: A loss identifier. One of None or string name of a loss function/class or loss configuration dictionary or a loss function or a loss class instance diff --git a/tensorflow/python/keras/metrics.py b/tensorflow/python/keras/metrics.py index 5b7019793e6..32eec49b6ec 100644 --- a/tensorflow/python/keras/metrics.py +++ b/tensorflow/python/keras/metrics.py @@ -3498,7 +3498,7 @@ def clone_metrics(metrics): def serialize(metric): """Serializes metric function or `Metric` instance. - Arguments: + Args: metric: A Keras `Metric` instance or a metric function. Returns: @@ -3511,7 +3511,7 @@ def serialize(metric): def deserialize(config, custom_objects=None): """Deserializes a serialized metric class/function instance. - Arguments: + Args: config: Metric configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. @@ -3549,7 +3549,7 @@ def get(identifier): >>> type(metric) - Arguments: + Args: identifier: A metric identifier. One of None or string name of a metric function/class or metric configuration dictionary or a metric function or a metric class instance diff --git a/tensorflow/python/keras/models.py b/tensorflow/python/keras/models.py index a6558f74e0b..b16e0d6fb60 100644 --- a/tensorflow/python/keras/models.py +++ b/tensorflow/python/keras/models.py @@ -139,7 +139,7 @@ def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer): Input layers are always cloned. - Arguments: + Args: model: Instance of `Model`. input_tensors: optional list of input tensors to build the model upon. If not provided, @@ -287,7 +287,7 @@ def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer): except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. - Arguments: + Args: model: Instance of `Sequential`. input_tensors: optional list of input tensors to build the model upon. If not provided, @@ -393,7 +393,7 @@ def clone_model(model, input_tensors=None, clone_function=None): except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. - Arguments: + Args: model: Instance of `Model` (could be a functional model or a Sequential model). input_tensors: optional list of input tensors or InputLayer objects diff --git a/tensorflow/python/keras/optimizer_v1.py b/tensorflow/python/keras/optimizer_v1.py index 24cb0aaecff..866185c15df 100644 --- a/tensorflow/python/keras/optimizer_v1.py +++ b/tensorflow/python/keras/optimizer_v1.py @@ -85,7 +85,7 @@ class Optimizer(object): def get_gradients(self, loss, params): """Returns gradients of `loss` with respect to `params`. - Arguments: + Args: loss: Loss tensor. params: List of variables. @@ -118,7 +118,7 @@ class Optimizer(object): Should only be called after computing the gradients (otherwise the optimizer has no weights). - Arguments: + Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the optimizer (i.e. it should match the output of `get_weights`). @@ -169,7 +169,7 @@ class SGD(Optimizer): Includes support for momentum, learning rate decay, and Nesterov momentum. - Arguments: + Args: lr: float >= 0. Learning rate. momentum: float >= 0. Parameter that accelerates SGD in the relevant direction and dampens oscillations. 
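Illustrative sketch (not part of the patch) of the `clone_model` behavior documented in the `models.py` hunks above: new layers and fresh weights, same architecture. Assumes the standard TF 2.x endpoints:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])

# clone_model builds new layers (and thus new weights) from the same config.
clone = tf.keras.models.clone_model(model)
clone.set_weights(model.get_weights())  # optionally copy the weights across
```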
@@ -239,7 +239,7 @@ class RMSprop(Optimizer): at their default values (except the learning rate, which can be freely tuned). - Arguments: + Args: lr: float >= 0. Learning rate. rho: float >= 0. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`. @@ -466,7 +466,7 @@ class Adam(Optimizer): Default parameters follow those provided in the original paper. - Arguments: + Args: lr: float >= 0. Learning rate. beta_1: float, 0 < beta < 1. Generally close to 1. beta_2: float, 0 < beta < 1. Generally close to 1. @@ -565,7 +565,7 @@ class Adamax(Optimizer): It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. - Arguments: + Args: lr: float >= 0. Learning rate. beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`. @@ -657,7 +657,7 @@ class Nadam(Optimizer): It is recommended to leave the parameters of this optimizer at their default values. - Arguments: + Args: lr: float >= 0. Learning rate. beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`. diff --git a/tensorflow/python/keras/optimizer_v2/adagrad.py b/tensorflow/python/keras/optimizer_v2/adagrad.py index f18b02bf7cb..708def2971e 100644 --- a/tensorflow/python/keras/optimizer_v2/adagrad.py +++ b/tensorflow/python/keras/optimizer_v2/adagrad.py @@ -109,7 +109,7 @@ class Adagrad(optimizer_v2.OptimizerV2): capable of instantiating the same optimizer from the config dictionary. - Arguments: + Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a diff --git a/tensorflow/python/keras/optimizer_v2/optimizer_v2.py b/tensorflow/python/keras/optimizer_v2/optimizer_v2.py index 06af303ef37..7df0fe47d64 100644 --- a/tensorflow/python/keras/optimizer_v2/optimizer_v2.py +++ b/tensorflow/python/keras/optimizer_v2/optimizer_v2.py @@ -738,7 +738,7 @@ class OptimizerV2(trackable.Trackable): Should be used only in legacy v1 graph mode. - Arguments: + Args: loss: Loss tensor. params: List of variables. @@ -1037,7 +1037,7 @@ class OptimizerV2(trackable.Trackable): capable of instantiating the same optimizer from the config dictionary. - Arguments: + Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a @@ -1131,7 +1131,7 @@ class OptimizerV2(trackable.Trackable): >>> opt.iterations - Arguments: + Args: weights: weight values as a list of numpy arrays. """ params = self.weights diff --git a/tensorflow/python/keras/optimizers.py b/tensorflow/python/keras/optimizers.py index ab779b161de..4a840820086 100644 --- a/tensorflow/python/keras/optimizers.py +++ b/tensorflow/python/keras/optimizers.py @@ -50,7 +50,7 @@ def serialize(optimizer): def deserialize(config, custom_objects=None): """Inverse of the `serialize` function. - Arguments: + Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. @@ -91,7 +91,7 @@ def deserialize(config, custom_objects=None): def get(identifier): """Retrieves a Keras Optimizer instance. 
- Arguments: + Args: identifier: Optimizer identifier, one of - String: name of an optimizer - Dictionary: configuration dictionary. - Keras Optimizer instance (it diff --git a/tensorflow/python/keras/preprocessing/dataset_utils.py b/tensorflow/python/keras/preprocessing/dataset_utils.py index 5000f5f798a..dc51bf92053 100644 --- a/tensorflow/python/keras/preprocessing/dataset_utils.py +++ b/tensorflow/python/keras/preprocessing/dataset_utils.py @@ -131,7 +131,7 @@ def iter_valid_files(directory, follow_links, formats): def index_subdirectory(directory, class_indices, follow_links, formats): """Recursively walks directory and list image paths and their class index. - Arguments: + Args: directory: string, target directory. class_indices: dict mapping class names to their index. follow_links: boolean, whether to recursively follow subdirectories diff --git a/tensorflow/python/keras/preprocessing/image.py b/tensorflow/python/keras/preprocessing/image.py index 4052f583631..feff5d783bc 100644 --- a/tensorflow/python/keras/preprocessing/image.py +++ b/tensorflow/python/keras/preprocessing/image.py @@ -95,7 +95,7 @@ def smart_resize(x, size, interpolation='bilinear'): 2. Resize the cropped image to the target size. In the example above, we resize the `(340, 340)` crop to `(200, 200)`. - Arguments: + Args: x: Input image (as a tensor or NumPy array). Must be in format `(height, width, channels)`. size: Tuple of `(height, width)` integer. Target size. @@ -161,7 +161,7 @@ def array_to_img(x, data_format=None, scale=True, dtype=None): ``` - Arguments: + Args: x: Input Numpy array. data_format: Image data format, can be either "channels_first" or "channels_last". Defaults to `None`, in which case the global setting @@ -205,7 +205,7 @@ def img_to_array(img, data_format=None, dtype=None): ``` - Arguments: + Args: img: Input PIL Image instance. data_format: Image data format, can be either "channels_first" or "channels_last". Defaults to `None`, in which case the global setting @@ -241,7 +241,7 @@ def save_img(path, **kwargs): """Saves an image stored as a Numpy array to a path or file object. - Arguments: + Args: path: Path or file object. x: Numpy array. data_format: Image data format, @@ -275,7 +275,7 @@ def load_img(path, grayscale=False, color_mode='rgb', target_size=None, predictions = model.predict(input_arr) ``` - Arguments: + Args: path: Path to image file. grayscale: DEPRECATED use `color_mode="grayscale"`. color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". @@ -309,7 +309,7 @@ class Iterator(image.Iterator, data_utils.Sequence): class DirectoryIterator(image.DirectoryIterator, Iterator): """Iterator capable of reading images from a directory on disk. - Arguments: + Args: directory: Path to the directory to read images from. Each subdirectory in this directory will be considered to contain images from one class, @@ -400,7 +400,7 @@ class DirectoryIterator(image.DirectoryIterator, Iterator): class NumpyArrayIterator(image.NumpyArrayIterator, Iterator): """Iterator yielding data from a Numpy array. - Arguments: + Args: x: Numpy array of input data or tuple. If tuple, the second elements is either another numpy array or a list of numpy arrays, @@ -463,7 +463,7 @@ class NumpyArrayIterator(image.NumpyArrayIterator, Iterator): class DataFrameIterator(image.DataFrameIterator, Iterator): """Iterator capable of reading images from a directory on disk as a dataframe. 
- Arguments: + Args: dataframe: Pandas dataframe containing the filepaths relative to `directory` (or absolute paths if `directory` is None) of the images in a string column. It should include other column/s @@ -583,7 +583,7 @@ class ImageDataGenerator(image.ImageDataGenerator): The data will be looped over (in batches). - Arguments: + Args: featurewise_center: Boolean. Set input mean to 0 over the dataset, feature-wise. samplewise_center: Boolean. Set each sample mean to 0. @@ -817,7 +817,7 @@ class ImageDataGenerator(image.ImageDataGenerator): subset=None): """Takes data & label arrays, generates batches of augmented data. - Arguments: + Args: x: Input data. Numpy array of rank 4 or a tuple. If tuple, the first element should contain the images and the second element another numpy array or a list of numpy arrays that gets passed to the output without @@ -881,7 +881,7 @@ class ImageDataGenerator(image.ImageDataGenerator): interpolation='nearest'): """Takes the path to a directory & generates batches of augmented data. - Arguments: + Args: directory: string, path to the target directory. It should contain one subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside each of the subdirectories directory tree will be included in the @@ -984,7 +984,7 @@ class ImageDataGenerator(image.ImageDataGenerator): **A simple tutorial can be found **[here]( http://bit.ly/keras_flow_from_dataframe). - Arguments: + Args: dataframe: Pandas dataframe containing the filepaths relative to `directory` (or absolute paths if `directory` is None) of the images in a string column. It should include other column/s diff --git a/tensorflow/python/keras/preprocessing/image_dataset.py b/tensorflow/python/keras/preprocessing/image_dataset.py index 66164086e7e..f2c4d5a374d 100644 --- a/tensorflow/python/keras/preprocessing/image_dataset.py +++ b/tensorflow/python/keras/preprocessing/image_dataset.py @@ -67,7 +67,7 @@ def image_dataset_from_directory(directory, Supported image formats: jpeg, png, bmp, gif. Animated gifs are truncated to the first frame. - Arguments: + Args: directory: Directory where the data is located. If `labels` is "inferred", it should contain subdirectories, each containing images for a class. diff --git a/tensorflow/python/keras/preprocessing/sequence.py b/tensorflow/python/keras/preprocessing/sequence.py index 5ba2e2b47d5..2e531e6b56b 100644 --- a/tensorflow/python/keras/preprocessing/sequence.py +++ b/tensorflow/python/keras/preprocessing/sequence.py @@ -132,7 +132,7 @@ def pad_sequences(sequences, maxlen=None, dtype='int32', [2, 3], [5, 6]], dtype=int32) - Arguments: + Args: sequences: List of sequences (each sequence is a list of integers). maxlen: Optional Int, maximum length of all sequences. If not provided, sequences will be padded to the length of the longest individual diff --git a/tensorflow/python/keras/preprocessing/text.py b/tensorflow/python/keras/preprocessing/text.py index 2d49fc14427..40c4265a9d7 100644 --- a/tensorflow/python/keras/preprocessing/text.py +++ b/tensorflow/python/keras/preprocessing/text.py @@ -42,7 +42,7 @@ def text_to_word_sequence(input_text, >>> tf.keras.preprocessing.text.text_to_word_sequence(sample_text) ['this', 'is', 'a', 'sample', 'sentence'] - Arguments: + Args: input_text: Input text (string). filters: list (or concatenation) of characters to filter out, such as punctuation. 
Default: ``'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n'``, @@ -69,7 +69,7 @@ def one_hot(input_text, list of encoded integers each corresponding to a word (or token) in the given input string. - Arguments: + Args: input_text: Input text (string). n: int. Size of vocabulary. filters: list (or concatenation) of characters to filter out, such as diff --git a/tensorflow/python/keras/preprocessing/text_dataset.py b/tensorflow/python/keras/preprocessing/text_dataset.py index c634df86edd..c9f3e405a28 100644 --- a/tensorflow/python/keras/preprocessing/text_dataset.py +++ b/tensorflow/python/keras/preprocessing/text_dataset.py @@ -59,7 +59,7 @@ def text_dataset_from_directory(directory, Only `.txt` files are supported at this time. - Arguments: + Args: directory: Directory where the data is located. If `labels` is "inferred", it should contain subdirectories, each containing text files for a class. diff --git a/tensorflow/python/keras/preprocessing/timeseries.py b/tensorflow/python/keras/preprocessing/timeseries.py index 4c77655ad56..1b309a5fa7a 100644 --- a/tensorflow/python/keras/preprocessing/timeseries.py +++ b/tensorflow/python/keras/preprocessing/timeseries.py @@ -45,7 +45,7 @@ def timeseries_dataset_from_array( length of the sequences/windows, spacing between two sequence/windows, etc., to produce batches of timeseries inputs and targets. - Arguments: + Args: data: Numpy array or eager tensor containing consecutive data points (timesteps). Axis 0 is expected to be the time dimension. diff --git a/tensorflow/python/keras/regularizers.py b/tensorflow/python/keras/regularizers.py index 40fcfb1392a..9243e0a23c8 100644 --- a/tensorflow/python/keras/regularizers.py +++ b/tensorflow/python/keras/regularizers.py @@ -183,7 +183,7 @@ class Regularizer(object): loading models to HDF5 formats, Keras model cloning, some visualization utilities, and exporting models to and from JSON. - Arguments: + Args: config: A Python dictionary, typically the output of get_config. Returns: @@ -335,7 +335,7 @@ def l1_l2(l1=0.01, l2=0.01): # pylint: disable=redefined-outer-name The L2 regularization penalty is computed as: `loss = l2 * reduce_sum(square(x))` - Arguments: + Args: l1: Float; L1 regularization factor. l2: Float; L2 regularization factor. diff --git a/tensorflow/python/keras/saving/hdf5_format.py b/tensorflow/python/keras/saving/hdf5_format.py index 400f830d085..313ed5952a9 100644 --- a/tensorflow/python/keras/saving/hdf5_format.py +++ b/tensorflow/python/keras/saving/hdf5_format.py @@ -66,7 +66,7 @@ def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): the exact same state, without any of the code used for model definition or training. - Arguments: + Args: model: Keras model instance to be saved. filepath: One of the following: - String, path where to save the model @@ -139,7 +139,7 @@ def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): def load_model_from_hdf5(filepath, custom_objects=None, compile=True): # pylint: disable=redefined-builtin """Loads a model saved via `save_model_to_hdf5`. - Arguments: + Args: filepath: One of the following: - String, path to the saved model - `h5py.File` object from which to load the model @@ -237,7 +237,7 @@ def preprocess_weights_for_loading(layer, Converts layers weights from Keras 1 format to Keras 2 and also weights of CuDNN layers in Keras 2. - Arguments: + Args: layer: Layer instance. weights: List of weights values (Numpy arrays). original_keras_version: Keras version for the weights, as a string. 
@@ -253,7 +253,7 @@ def preprocess_weights_for_loading(layer, This function uses `preprocess_weights_for_loading()` for converting layers. - Arguments: + Args: weights: List of weights values (Numpy arrays). Returns: @@ -274,7 +274,7 @@ def preprocess_weights_for_loading(layer, This function uses `preprocess_weights_for_loading()` for converting nested layers. - Arguments: + Args: weights: List of weights values (Numpy arrays). Returns: @@ -289,7 +289,7 @@ def preprocess_weights_for_loading(layer, This function uses `preprocess_weights_for_loading()` for converting nested layers. - Arguments: + Args: weights: List of weights values (Numpy arrays). Returns: @@ -427,7 +427,7 @@ def _convert_rnn_weights(layer, weights): For missing biases in `LSTM`/`GRU` (`use_bias=False`) no conversion is made. - Arguments: + Args: layer: Target layer instance. weights: List of source weights values (input kernels, recurrent kernels, [biases]) (Numpy arrays). @@ -442,7 +442,7 @@ def _convert_rnn_weights(layer, weights): def transform_kernels(kernels, func, n_gates): """Transforms kernel for each gate separately using given function. - Arguments: + Args: kernels: Stacked array of kernels for individual gates. func: Function applied to kernel of each gate. n_gates: Number of gates (4 for LSTM, 3 for GRU). @@ -465,7 +465,7 @@ def _convert_rnn_weights(layer, weights): It can be passed to `transform_kernels()`. - Arguments: + Args: from_cudnn: `True` if source weights are in CuDNN format, `False` if they're in plain Keras format. @@ -501,7 +501,7 @@ def _convert_rnn_weights(layer, weights): def convert_lstm_weights(weights, from_cudnn=True): """Converts the weights between CuDNNLSTM and LSTM. - Arguments: + Args: weights: Original weights. from_cudnn: Indicates whether original weights are from CuDNN layer. @@ -538,7 +538,7 @@ def _convert_rnn_weights(layer, weights): def convert_gru_weights(weights, from_cudnn=True): """Converts the weights between CuDNNGRU and GRU. - Arguments: + Args: weights: Original weights. from_cudnn: Indicates whether original weights are from CuDNN layer. @@ -584,7 +584,7 @@ def _convert_rnn_weights(layer, weights): def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer): """Saves the weights of an optimizer to an HDF5 group. - Arguments: + Args: hdf5_group: HDF5 group. optimizer: optimizer instance. """ @@ -608,7 +608,7 @@ def save_optimizer_weights_to_hdf5_group(hdf5_group): def load_optimizer_weights_from_hdf5_group(hdf5_group): """Loads optimizer weights from an HDF5 group. - Arguments: + Args: hdf5_group: A pointer to an HDF5 group. Returns: @@ -623,7 +623,7 @@ def load_optimizer_weights_from_hdf5_group(hdf5_group): def save_weights_to_hdf5_group(f, layers): """Saves the weights of a list of layers to an HDF5 group. - Arguments: + Args: f: HDF5 group. layers: List of layer instances. """ @@ -654,7 +654,7 @@ def save_weights_to_hdf5_group(f, layers): def load_weights_from_hdf5_group(f, layers): """Implements topological (order-based) weight loading. - Arguments: + Args: f: A pointer to an HDF5 group. layers: a list of target layers. @@ -726,7 +726,7 @@ def load_weights_from_hdf5_group_by_name( Layers that have no matching name are skipped. - Arguments: + Args: f: A pointer to an HDF5 group. layers: a list of target layers.
skip_mismatch: Boolean, whether to skip loading of layers @@ -810,7 +810,7 @@ def save_attributes_to_hdf5_group(group, name, data): This method deals with an inherent problem of the HDF5 format, which cannot store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. - Arguments: + Args: group: A pointer to an HDF5 group. name: The name of the attributes to save. data: Attributes data to store. @@ -853,7 +853,7 @@ def load_attributes_from_hdf5_group(group, name): of the HDF5 format, which cannot store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. - Arguments: + Args: group: A pointer to an HDF5 group. name: The name of the attributes to load. diff --git a/tensorflow/python/keras/saving/model_config.py b/tensorflow/python/keras/saving/model_config.py index facc95b22f9..ab9eb2816d4 100644 --- a/tensorflow/python/keras/saving/model_config.py +++ b/tensorflow/python/keras/saving/model_config.py @@ -44,7 +44,7 @@ def model_from_config(config, custom_objects=None): tf.keras.Sequential().from_config(model.get_config()) ``` - Arguments: + Args: config: Configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be @@ -80,7 +80,7 @@ def model_from_yaml(yaml_string, custom_objects=None): ... except ImportError: ... pass - Arguments: + Args: yaml_string: YAML string or open file encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be @@ -117,7 +117,7 @@ def model_from_json(json_string, custom_objects=None): >>> config = model.to_json() >>> loaded_model = tf.keras.models.model_from_json(config) - Arguments: + Args: json_string: JSON string encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be diff --git a/tensorflow/python/keras/saving/save.py b/tensorflow/python/keras/saving/save.py index 4a4c345d3ec..d4749fcb4e8 100644 --- a/tensorflow/python/keras/saving/save.py +++ b/tensorflow/python/keras/saving/save.py @@ -98,7 +98,7 @@ def save_model(model, option, then you _must_ provide all custom class definitions when loading the model. See the `custom_objects` argument in `tf.keras.models.load_model`. - Arguments: + Args: model: Keras model instance to be saved. filepath: One of the following: - String or `pathlib.Path` object, path where to save the model @@ -171,7 +171,7 @@ def load_model(filepath, custom_objects=None, compile=True, options=None): # py `"dense_1/kernel:0"`. It is recommended that you use the layer properties to access specific variables, e.g. `model.get_layer("dense_1").kernel`. - Arguments: + Args: filepath: One of the following: - String or `pathlib.Path` object, path to the saved model - `h5py.File` object from which to load the model diff --git a/tensorflow/python/keras/saving/saved_model/json_utils.py b/tensorflow/python/keras/saving/saved_model/json_utils.py index cff1628d49c..7dd094e4f4e 100644 --- a/tensorflow/python/keras/saving/saved_model/json_utils.py +++ b/tensorflow/python/keras/saving/saved_model/json_utils.py @@ -83,7 +83,7 @@ def _decode_helper(obj): def get_json_type(obj): """Serializes any object to a JSON-serializable structure.
- Arguments: + Args: obj: the object to serialize Returns: @@ -141,4 +141,3 @@ def get_json_type(obj): .format(obj, type(obj))) raise TypeError('Not JSON Serializable:', obj) - diff --git a/tensorflow/python/keras/testing_utils.py b/tensorflow/python/keras/testing_utils.py index 9045f0ec3de..bf3de9a7f37 100644 --- a/tensorflow/python/keras/testing_utils.py +++ b/tensorflow/python/keras/testing_utils.py @@ -66,7 +66,7 @@ def get_test_data(train_samples, random_seed=None): """Generates test data to train a model on. - Arguments: + Args: train_samples: Integer, how many training samples to generate. test_samples: Integer, how many test samples to generate. input_shape: Tuple of integers, shape of the inputs. @@ -104,7 +104,7 @@ def layer_test(layer_cls, supports_masking=None): """Test routine for a layer with a single input and single output. - Arguments: + Args: layer_cls: Layer class object. kwargs: Optional dictionary of keyword arguments for instantiating the layer. @@ -323,7 +323,7 @@ def model_type_scope(value): The model type gets restored to its original value upon exiting the scope. - Arguments: + Args: value: model type value Yields: @@ -344,7 +344,7 @@ def run_eagerly_scope(value): The boolean gets restored to its original value upon exiting the scope. - Arguments: + Args: value: Bool specifying if we should run models eagerly in the active test. Should be True or False. @@ -366,7 +366,7 @@ def use_keras_tensors_scope(value): The boolean gets restored to its original value upon exiting the scope. - Arguments: + Args: value: Bool specifying if we should build functional models using KerasTensors in the active test. Should be True or False. @@ -400,7 +400,7 @@ def saved_model_format_scope(value, **kwargs): The saved model format gets restored to its original value upon exiting the scope. - Arguments: + Args: value: saved model format value **kwargs: optional kwargs to pass to the save function. diff --git a/tensorflow/python/keras/type/types.py b/tensorflow/python/keras/type/types.py index 78f9b9aa4b9..e36d2f9bfb0 100644 --- a/tensorflow/python/keras/type/types.py +++ b/tensorflow/python/keras/type/types.py @@ -160,7 +160,7 @@ class Layer(object): [Making new Layers and Models via subclassing]( https://www.tensorflow.org/guide/keras/custom_layers_and_models) - Arguments: + Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights (default of diff --git a/tensorflow/python/keras/utils/control_flow_util.py b/tensorflow/python/keras/utils/control_flow_util.py index 788b5731554..9a52149ca02 100644 --- a/tensorflow/python/keras/utils/control_flow_util.py +++ b/tensorflow/python/keras/utils/control_flow_util.py @@ -95,7 +95,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None): # pylint: disable If `pred` is a bool or has a constant value, we return either `true_fn()` or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both. - Arguments: + Args: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. @@ -118,7 +118,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None): # pylint: disable def constant_value(pred): # pylint: disable=invalid-name """Return the bool value for `pred`, or None if `pred` had a dynamic value. - Arguments: + Args: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. 
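Both `smart_cond` variants renamed in this region implement the same dispatch rule their docstrings describe. A hedged re-implementation of that rule, using only the public `tf.get_static_value` and `tf.cond` rather than the internal helpers (the function name is illustrative):

```python
import tensorflow as tf

def sketch_smart_cond(pred, true_fn, false_fn):
    # Plain Python bools (and the integers 0/1) pick a branch immediately,
    # without emitting any graph op.
    if isinstance(pred, (bool, int)):
        return true_fn() if pred else false_fn()
    # A tensor whose value is known at trace time is also resolved statically.
    static = tf.get_static_value(pred)
    if static is not None:
        return true_fn() if static else false_fn()
    # Otherwise both branches are traced and tf.cond routes at runtime.
    return tf.cond(pred, true_fn, false_fn)
```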
diff --git a/tensorflow/python/keras/utils/conv_utils.py b/tensorflow/python/keras/utils/conv_utils.py index 769ac654687..1d328c4422f 100644 --- a/tensorflow/python/keras/utils/conv_utils.py +++ b/tensorflow/python/keras/utils/conv_utils.py @@ -51,7 +51,7 @@ def convert_data_format(data_format, ndim): def normalize_tuple(value, n, name): """Transforms a single integer or iterable of integers into an integer tuple. - Arguments: + Args: value: The value to validate and convert. Could be an int, or any iterable of ints. n: The size of the tuple to be returned. @@ -90,7 +90,7 @@ def normalize_tuple(value, n, name): def conv_output_length(input_length, filter_size, padding, stride, dilation=1): """Determines output length of a convolution given input length. - Arguments: + Args: input_length: integer. filter_size: integer. padding: one of "same", "valid", "full", "causal" @@ -116,7 +116,7 @@ def conv_output_length(input_length, filter_size, padding, stride, dilation=1): def conv_input_length(output_length, filter_size, padding, stride): """Determines input length of a convolution given output length. - Arguments: + Args: output_length: integer. filter_size: integer. padding: one of "same", "valid", "full". @@ -145,7 +145,7 @@ def deconv_output_length(input_length, dilation=1): """Determines output length of a transposed convolution given input length. - Arguments: + Args: input_length: Integer. filter_size: Integer. padding: one of `"same"`, `"valid"`, `"full"`. diff --git a/tensorflow/python/keras/utils/data_utils.py b/tensorflow/python/keras/utils/data_utils.py index 7f15c3e8af5..32d315944a7 100644 --- a/tensorflow/python/keras/utils/data_utils.py +++ b/tensorflow/python/keras/utils/data_utils.py @@ -71,7 +71,7 @@ if sys.version_info[0] == 2: Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy `urllib` module, known to have issues with proxy management. - Arguments: + Args: url: url to retrieve. filename: where to store the retrieved data locally. reporthook: a hook function that will be called once on establishment of @@ -116,7 +116,7 @@ def is_generator_or_sequence(x): def _extract_archive(file_path, path='.', archive_format='auto'): """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats. - Arguments: + Args: file_path: path to the archive file path: path to extract the archive file archive_format: Archive format to try for extracting the file. @@ -193,7 +193,7 @@ def get_file(fname, untar=True) ``` - Arguments: + Args: fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. @@ -316,7 +316,7 @@ def _hash_file(fpath, algorithm='sha256', chunk_size=65535): 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` - Arguments: + Args: fpath: path to the file being validated algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`. The default `'auto'` detects the hash algorithm in use. @@ -340,7 +340,7 @@ def _hash_file(fpath, algorithm='sha256', chunk_size=65535): def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535): """Validates a file against a sha256 or md5 hash. - Arguments: + Args: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. @@ -456,7 +456,7 @@ class Sequence(object): def __getitem__(self, index): """Gets batch at position `index`. - Arguments: + Args: index: position of the batch in the Sequence.
Returns: @@ -487,7 +487,7 @@ class Sequence(object): def iter_sequence_infinite(seq): """Iterates indefinitely over a Sequence. - Arguments: + Args: seq: `Sequence` instance. Yields: @@ -557,7 +557,7 @@ def get_index(uid, i): get a specific one. A single Sequence would cause the validation to overwrite the training Sequence. - Arguments: + Args: uid: int, Sequence identifier i: index @@ -625,7 +625,7 @@ class SequenceEnqueuer(object): def start(self, workers=1, max_queue_size=10): """Starts the handler's workers. - Arguments: + Args: workers: Number of workers. max_queue_size: queue size (when full, workers could block on `put()`) @@ -652,7 +652,7 @@ class SequenceEnqueuer(object): Should be called by the same thread which called `start()`. - Arguments: + Args: timeout: maximum time to wait on `thread.join()` """ self.stop_signal.set() @@ -676,7 +676,7 @@ class SequenceEnqueuer(object): def _get_executor_init(self, workers): """Gets the Pool initializer for multiprocessing. - Arguments: + Args: workers: Number of workers. Returns: @@ -702,7 +702,7 @@ class OrderedEnqueuer(SequenceEnqueuer): Used in `fit_generator`, `evaluate_generator`, `predict_generator`. - Arguments: + Args: sequence: A `tf.keras.utils.data_utils.Sequence` object. use_multiprocessing: use multiprocessing if True, otherwise threading shuffle: whether to shuffle the data at the beginning of each epoch @@ -715,7 +715,7 @@ class OrderedEnqueuer(SequenceEnqueuer): def _get_executor_init(self, workers): """Gets the Pool initializer for multiprocessing. - Arguments: + Args: workers: Number of workers. Returns: @@ -822,7 +822,7 @@ def next_sample(uid): get a specific one. A single generator would cause the validation to overwrite the training generator. - Arguments: + Args: uid: int, generator identifier Returns: @@ -840,7 +840,7 @@ class GeneratorEnqueuer(SequenceEnqueuer): Used in `fit_generator`, `evaluate_generator`, `predict_generator`. - Arguments: + Args: generator: a generator function which yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` @@ -857,7 +857,7 @@ class GeneratorEnqueuer(SequenceEnqueuer): def _get_executor_init(self, workers): """Gets the Pool initializer for multiprocessing. - Arguments: + Args: workers: Number of workers. Returns: diff --git a/tensorflow/python/keras/utils/generic_utils.py b/tensorflow/python/keras/utils/generic_utils.py index 530db2f93a0..ecf382413ad 100644 --- a/tensorflow/python/keras/utils/generic_utils.py +++ b/tensorflow/python/keras/utils/generic_utils.py @@ -70,7 +70,7 @@ class CustomObjectScope(object): layer = Dense.from_config(config) ``` - Arguments: + Args: *args: Dictionary or dictionaries of `{name: object}` pairs. """ @@ -130,7 +130,7 @@ def register_keras_serializable(package='Custom', name=None): The object will be registered under the key 'package>name' where `name` defaults to the object name if not passed. - Arguments: + Args: package: The package that this class belongs to. name: The name to serialize this class under in this package. If None, the class' name will be used. @@ -402,7 +402,7 @@ def deserialize_keras_object(identifier, def func_dump(func): """Serializes a user defined function. - Arguments: + Args: func: the function to serialize. Returns: @@ -425,7 +425,7 @@ def func_dump(func): def func_load(code, defaults=None, closure=None, globs=None): """Deserializes a user defined function. - Arguments: + Args: code: bytecode of the function. defaults: defaults of the function.
closure: closure of the function. @@ -442,7 +442,7 @@ def func_load(code, defaults=None, closure=None, globs=None): def ensure_value_to_cell(value): """Ensures that a value is converted to a Python cell object. - Arguments: + Args: value: Any value that needs to be cast to the cell type Returns: @@ -474,7 +474,7 @@ def func_load(code, defaults=None, closure=None, globs=None): def has_arg(fn, name, accept_all=False): """Checks if a callable accepts a given keyword argument. - Arguments: + Args: fn: Callable to inspect. name: Check if `fn` can be called with `name` as a keyword argument. accept_all: What to return if there is no parameter called `name` but the @@ -493,7 +493,7 @@ def has_arg(fn, name, accept_all=False): class Progbar(object): """Displays a progress bar. - Arguments: + Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) @@ -540,7 +540,7 @@ class Progbar(object): def update(self, current, values=None, finalize=None): """Updates the progress bar. - Arguments: + Args: current: Index of current step. values: List of tuples: `(name, value_for_last_step)`. If `name` is in `stateful_metrics`, `value_for_last_step` will be displayed as-is. @@ -681,7 +681,7 @@ class Progbar(object): (i.e. `current == 0`) then zero is given as an estimate. The duration estimate ignores the duration of the (assumed to be non-representative) first step for estimates when more steps are available (i.e. `current>1`). - Arguments: + Args: current: Index of current step. now: The current time. Returns: Estimate of the duration of a single step. @@ -707,7 +707,7 @@ class Progbar(object): def make_batches(size, batch_size): """Returns a list of batch indices (tuples of indices). - Arguments: + Args: size: Integer, total size of the data to slice into batches. batch_size: Integer, batch size. @@ -729,7 +729,7 @@ def slice_arrays(arrays, start=None, stop=None): Can also work on list/array of indices: `slice_arrays(x, indices)` - Arguments: + Args: arrays: Single array or list of arrays. start: can be an integer index (start index) or a list/array of indices stop: integer (stop index); should be None if `start` was a list. @@ -771,7 +771,7 @@ def to_list(x): If a tensor is passed, we return a list of size 1 containing the tensor. - Arguments: + Args: x: target object to be normalized. Returns: diff --git a/tensorflow/python/keras/utils/io_utils.py b/tensorflow/python/keras/utils/io_utils.py index e70f8013ef8..6de08c73f5f 100644 --- a/tensorflow/python/keras/utils/io_utils.py +++ b/tensorflow/python/keras/utils/io_utils.py @@ -69,7 +69,7 @@ def path_to_string(path): def ask_to_proceed_with_overwrite(filepath): """Produces a prompt asking about overwriting a file. - Arguments: + Args: filepath: the path to the file to be overwritten. Returns: diff --git a/tensorflow/python/keras/utils/layer_utils.py b/tensorflow/python/keras/utils/layer_utils.py index 5606f616a7a..458a68a0050 100644 --- a/tensorflow/python/keras/utils/layer_utils.py +++ b/tensorflow/python/keras/utils/layer_utils.py @@ -36,7 +36,7 @@ def get_source_inputs(tensor, layer=None, node_index=None): Output will always be a list of tensors (potentially with 1 element). - Arguments: + Args: tensor: The tensor to start from. layer: Origin layer of the tensor. Will be determined via tensor._keras_history if not provided.
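`make_batches` and `Progbar`, both renamed in the generic_utils hunks above, compose naturally. A short sketch of that pairing: a re-implementation of `make_batches` under its documented contract, driving the public `tf.keras.utils.Progbar` (the function name and data are illustrative):

```python
import numpy as np
import tensorflow as tf

def sketch_make_batches(size, batch_size):
    # (start, stop) index pairs covering [0, size); the last batch may be short.
    return [(start, min(size, start + batch_size))
            for start in range(0, size, batch_size)]

data = np.arange(10)
batches = sketch_make_batches(len(data), batch_size=4)  # [(0, 4), (4, 8), (8, 10)]
bar = tf.keras.utils.Progbar(target=len(batches))
for step, (start, stop) in enumerate(batches, start=1):
    _ = data[start:stop].sum()  # stand-in for per-batch work
    bar.update(step)            # advance the progress bar by one step
```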
@@ -94,7 +94,7 @@ def validate_string_arg(input_data, def count_params(weights): """Count the total number of scalars composing the weights. - Arguments: + Args: weights: An iterable containing the weights on which to compute params Returns: @@ -111,7 +111,7 @@ def count_params(weights): def print_summary(model, line_length=None, positions=None, print_fn=None): """Prints a summary of a model. - Arguments: + Args: model: Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different @@ -196,7 +196,7 @@ def print_summary(model, line_length=None, positions=None, print_fn=None): def print_layer_summary(layer): """Prints a summary for a single layer. - Arguments: + Args: layer: target layer. """ try: @@ -219,7 +219,7 @@ def print_summary(model, line_length=None, positions=None, print_fn=None): def print_layer_summary_with_connections(layer): """Prints a summary for a single layer (including topological connections). - Arguments: + Args: layer: target layer. """ try: @@ -342,7 +342,7 @@ def convert_dense_weights_data_format(dense, followed by a `Dense` layer, the weights of that `Dense` layer should be updated to reflect the new dimension ordering. - Arguments: + Args: dense: The target `Dense` layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. `(512, 7, 7)`. The shape of the convolutional diff --git a/tensorflow/python/keras/utils/multi_gpu_utils.py b/tensorflow/python/keras/utils/multi_gpu_utils.py index 089ca98f6d0..a5b43ed3867 100644 --- a/tensorflow/python/keras/utils/multi_gpu_utils.py +++ b/tensorflow/python/keras/utils/multi_gpu_utils.py @@ -55,7 +55,7 @@ def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False): This function is only available with the TensorFlow backend for the time being. - Arguments: + Args: model: A Keras model instance. To avoid OOM errors, this model could have been built on CPU, for instance (see usage example below). @@ -180,7 +180,7 @@ def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False): def get_slice(data, i, parts): """Slice an array into `parts` slices and return slice `i`. - Arguments: + Args: data: array to slice. i: index of slice to return. parts: number of slices to make. diff --git a/tensorflow/python/keras/utils/np_utils.py b/tensorflow/python/keras/utils/np_utils.py index 1e8fcf34693..569f8a6c35e 100644 --- a/tensorflow/python/keras/utils/np_utils.py +++ b/tensorflow/python/keras/utils/np_utils.py @@ -27,7 +27,7 @@ def to_categorical(y, num_classes=None, dtype='float32'): E.g. for use with categorical_crossentropy. - Arguments: + Args: y: class vector to be converted into a matrix (integers from 0 to num_classes). num_classes: total number of classes. If `None`, this would be inferred @@ -85,7 +85,7 @@ def to_categorical(y, num_classes=None, dtype='float32'): def normalize(x, axis=-1, order=2): """Normalizes a Numpy array. - Arguments: + Args: x: Numpy array to normalize. axis: axis along which to normalize. order: Normalization order (e.g. `order=2` for L2 norm). diff --git a/tensorflow/python/keras/utils/tf_utils.py b/tensorflow/python/keras/utils/tf_utils.py index 01996f4bfea..3ba787c759d 100644 --- a/tensorflow/python/keras/utils/tf_utils.py +++ b/tensorflow/python/keras/utils/tf_utils.py @@ -107,7 +107,7 @@ def get_reachable_from_inputs(inputs, targets=None): def map_structure_with_atomic(is_atomic_fn, map_fn, nested): """Maps the atomic elements of a nested structure. 
- Arguments: + Args: is_atomic_fn: A function that determines if an element of `nested` is atomic. map_fn: The function to apply to atomic elements of `nested`. @@ -161,7 +161,7 @@ def convert_shapes(input_shape, to_tuples=True): - ints - None - Arguments: + Args: input_shape: A nested structure of objects to be converted to TensorShapes. to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts all tuples representing shapes to TensorShapes. @@ -211,7 +211,7 @@ class ListWrapper(object): def convert_inner_node_data(nested, wrap=False): """Either wraps or unwraps innermost node data lists in `ListWrapper` objects. - Arguments: + Args: nested: A nested data structure. wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`, unwraps `ListWrapper` objects into lists. @@ -258,7 +258,7 @@ def shape_type_conversion(fn): Used in `compute_output_shape` and `build`. - Arguments: + Args: fn: function to wrap. Returns: @@ -294,7 +294,7 @@ def is_extension_type(tensor): but this will be changed to use an appropriate extension type protocol check once ExtensionType is made public. - Arguments: + Args: tensor: An object to test Returns: @@ -309,7 +309,7 @@ def is_symbolic_tensor(tensor): A Variable can be seen as either symbolic or eager: it is considered symbolic when we are in a graph scope, and eager when we are in an eager scope. - Arguments: + Args: tensor: A tensor instance to test. Returns: @@ -361,7 +361,7 @@ def register_symbolic_tensor_type(cls): layer = tf.keras.layers.Lambda(lambda input_: Foo(input_)) ``` - Arguments: + Args: cls: A `class` type which shall be regarded as a symbolic `Tensor`. """ global _user_convertible_tensor_types @@ -423,7 +423,7 @@ def assert_no_legacy_layers(layers): def maybe_init_scope(layer): """Open an `init_scope` if in V2 mode and using the keras graph. - Arguments: + Args: layer: The Layer/Model that is currently active. Yields: diff --git a/tensorflow/python/keras/utils/vis_utils.py b/tensorflow/python/keras/utils/vis_utils.py index 8e587e0c80d..da9ec5b6a32 100644 --- a/tensorflow/python/keras/utils/vis_utils.py +++ b/tensorflow/python/keras/utils/vis_utils.py @@ -77,7 +77,7 @@ def model_to_dot(model, subgraph=False): """Convert a Keras model to dot format. - Arguments: + Args: model: A Keras model instance. show_shapes: whether to display shape information. show_dtype: whether to display layer dtypes. @@ -304,7 +304,7 @@ def plot_model(model, tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True) ``` - Arguments: + Args: model: A Keras model instance to_file: File name of the plot image. show_shapes: whether to display shape information. diff --git a/tensorflow/python/keras/wrappers/scikit_learn.py b/tensorflow/python/keras/wrappers/scikit_learn.py index 149ad06f57c..8bfac1939d0 100644 --- a/tensorflow/python/keras/wrappers/scikit_learn.py +++ b/tensorflow/python/keras/wrappers/scikit_learn.py @@ -36,7 +36,7 @@ class BaseWrapper(object): Warning: This class should not be used directly. Use descendant classes instead. - Arguments: + Args: build_fn: callable function or class instance **sk_params: model parameters & fitting parameters @@ -79,7 +79,7 @@ class BaseWrapper(object): def check_params(self, params): """Checks for user typos in `params`. - Arguments: + Args: params: dictionary; the parameters to be checked Raises: @@ -108,7 +108,7 @@ class BaseWrapper(object): def get_params(self, **params): # pylint: disable=unused-argument """Gets parameters for this estimator.
- Arguments: + Args: **params: ignored (exists for API compatibility). Returns: @@ -121,7 +121,7 @@ class BaseWrapper(object): def set_params(self, **params): """Sets the parameters of this estimator. - Arguments: + Args: **params: Dictionary of parameter names mapped to their values. Returns: @@ -134,7 +134,7 @@ class BaseWrapper(object): def fit(self, x, y, **kwargs): """Constructs a new model with `build_fn` & fits the model to `(x, y)`. - Arguments: + Args: x : array-like, shape `(n_samples, n_features)` Training samples where `n_samples` is the number of samples and `n_features` is the number of features. @@ -170,7 +170,7 @@ class BaseWrapper(object): def filter_sk_params(self, fn, override=None): """Filters `sk_params` and returns those in `fn`'s arguments. - Arguments: + Args: fn : arbitrary function override: dictionary, values to override `sk_params` @@ -195,7 +195,7 @@ class KerasClassifier(BaseWrapper): def fit(self, x, y, **kwargs): """Constructs a new model with `build_fn` & fits the model to `(x, y)`. - Arguments: + Args: x : array-like, shape `(n_samples, n_features)` Training samples where `n_samples` is the number of samples and `n_features` is the number of features. @@ -225,7 +225,7 @@ class KerasClassifier(BaseWrapper): def predict(self, x, **kwargs): """Returns the class predictions for the given test data. - Arguments: + Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. @@ -244,7 +244,7 @@ class KerasClassifier(BaseWrapper): def predict_proba(self, x, **kwargs): """Returns class probability estimates for the given test data. - Arguments: + Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. @@ -272,7 +272,7 @@ class KerasClassifier(BaseWrapper): def score(self, x, y, **kwargs): """Returns the mean accuracy on the given test data and labels. - Arguments: + Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. @@ -318,7 +318,7 @@ class KerasRegressor(BaseWrapper): def predict(self, x, **kwargs): """Returns predictions for the given test data. - Arguments: + Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. @@ -335,7 +335,7 @@ class KerasRegressor(BaseWrapper): def score(self, x, y, **kwargs): """Returns the mean loss on the given test data and labels. - Arguments: + Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. diff --git a/tensorflow/python/layers/utils.py b/tensorflow/python/layers/utils.py index 8fb5151fadf..e376de9eaae 100644 --- a/tensorflow/python/layers/utils.py +++ b/tensorflow/python/layers/utils.py @@ -49,7 +49,7 @@ def convert_data_format(data_format, ndim): def normalize_tuple(value, n, name): """Transforms a single integer or iterable of integers into an integer tuple. - Arguments: + Args: value: The value to validate and convert. Could be an int, or any iterable of ints. n: The size of the tuple to be returned. @@ -105,7 +105,7 @@ def normalize_padding(value): def conv_output_length(input_length, filter_size, padding, stride, dilation=1): """Determines output length of a convolution given input length. - Arguments: + Args: input_length: integer.
filter_size: integer. padding: one of "same", "valid", "full". @@ -131,7 +131,7 @@ def conv_output_length(input_length, filter_size, padding, stride, dilation=1): def conv_input_length(output_length, filter_size, padding, stride): """Determines input length of a convolution given output length. - Arguments: + Args: output_length: integer. filter_size: integer. padding: one of "same", "valid", "full". @@ -155,7 +155,7 @@ def conv_input_length(output_length, filter_size, padding, stride): def deconv_output_length(input_length, filter_size, padding, stride): """Determines output length of a transposed convolution given input length. - Arguments: + Args: input_length: integer. filter_size: integer. padding: one of "same", "valid", "full". @@ -180,7 +180,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None): If `pred` is a bool or has a constant value, we return either `true_fn()` or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both. - Arguments: + Args: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. @@ -203,7 +203,7 @@ def smart_cond(pred, true_fn=None, false_fn=None, name=None): def constant_value(pred): """Return the bool value for `pred`, or None if `pred` had a dynamic value. - Arguments: + Args: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py index b9041d23df1..c5a3611b1d0 100644 --- a/tensorflow/python/ops/image_ops_impl.py +++ b/tensorflow/python/ops/image_ops_impl.py @@ -4036,7 +4036,7 @@ def psnr(a, b, max_val, name=None): # psnr1 and psnr2 both have type tf.float32 and are almost equal. ``` - Arguments: + Args: a: First set of images. b: Second set of images. max_val: The dynamic range of the images (i.e., the difference between the @@ -4080,7 +4080,7 @@ def _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03): For SSIM measure with unbiased covariance estimators, pass as `compensation` argument (1 - \sum_i w_i ^ 2). - Arguments: + Args: x: First set of images. y: Second set of images. reducer: Function that computes 'local' averages from the set of images. For @@ -4337,7 +4337,7 @@ def ssim_multiscale(img1, structural similarity for image quality assessment." Signals, Systems and Computers, 2004. - Arguments: + Args: img1: First image batch. img2: Second image batch. Must have the same rank as img1. max_val: The dynamic range of the images (i.e., the difference between the @@ -4485,7 +4485,7 @@ def image_gradients(image): [1. 1. 1. 1. 0.]], shape=(5, 5), dtype=float32) ``` - Arguments: + Args: image: Tensor with shape [batch_size, h, w, d]. Returns: @@ -4548,7 +4548,7 @@ def sobel_edges(image): Image.fromarray(sobel_x[..., 0] / 4 + 0.5).show() ``` - Arguments: + Args: image: Image tensor with shape [batch_size, h, w, d] and type float32 or float64. The image(s) must be 2x2 or larger. diff --git a/tensorflow/python/ops/init_ops.py b/tensorflow/python/ops/init_ops.py index d418fa64c52..02b73ef2276 100644 --- a/tensorflow/python/ops/init_ops.py +++ b/tensorflow/python/ops/init_ops.py @@ -1300,7 +1300,7 @@ def lecun_normal(seed=None): `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. - Arguments: + Args: seed: A Python integer. Used to seed the random generator. 
Returns: @@ -1327,7 +1327,7 @@ def lecun_uniform(seed=None): where `limit` is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the weight tensor. - Arguments: + Args: seed: A Python integer. Used to seed the random generator. Returns: @@ -1355,7 +1355,7 @@ def he_normal(seed=None): `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. - Arguments: + Args: seed: A Python integer. Used to seed the random generator. Returns: @@ -1379,7 +1379,7 @@ def he_uniform(seed=None): where `limit` is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the weight tensor. - Arguments: + Args: seed: A Python integer. Used to seed the random generator. Returns: diff --git a/tensorflow/python/ops/init_ops_v2.py b/tensorflow/python/ops/init_ops_v2.py index 02ae3a0ac4b..0fabb2a8bed 100644 --- a/tensorflow/python/ops/init_ops_v2.py +++ b/tensorflow/python/ops/init_ops_v2.py @@ -886,7 +886,7 @@ def lecun_normal(seed=None): (
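The initializer docstrings renamed above all encode a fan-in scaling rule: stddev `sqrt(1 / fan_in)` (LeCun) or `sqrt(2 / fan_in)` (He) for the normal variants, limit `sqrt(3 / fan_in)` or `sqrt(6 / fan_in)` for the uniform ones. A quick empirical check of the `he_normal` case through the public Keras API (the shape and seed are arbitrary):

```python
import numpy as np
import tensorflow as tf

fan_in = 256
init = tf.keras.initializers.he_normal(seed=0)
w = init(shape=(fan_in, 1024)).numpy()

# Nominal stddev is sqrt(2 / fan_in); the sampled std lands a little below
# it because the underlying distribution is truncated at two standard
# deviations.
print(np.sqrt(2.0 / fan_in))  # nominal value
print(w.std())                # slightly smaller in practice
```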