Extensive fixes in metrics docstrings.

PiperOrigin-RevId: 304917158
Change-Id: I60ccca4b1a13d75012a95f8d7a2f43fb17682c22
Francois Chollet authored on 2020-04-05 13:45:55 -07:00; committed by TensorFlower Gardener
parent 248675bf77
commit 79e6396788
2 changed files with 247 additions and 263 deletions

tensorflow/python/keras/losses.py

@@ -48,12 +48,14 @@ class Loss(object):
   * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.
   Example subclass implementation:
   ```python
   class MeanSquaredError(Loss):
     def call(self, y_true, y_pred):
-      y_pred = ops.convert_to_tensor_v2(y_pred)
-      y_true = math_ops.cast(y_true, y_pred.dtype)
-      return K.mean(math_ops.square(y_pred - y_true), axis=-1)
+      y_pred = tf.convert_to_tensor_v2(y_pred)
+      y_true = tf.cast(y_true, y_pred.dtype)
+      return tf.reduce_mean(math_ops.square(y_pred - y_true), axis=-1)
   ```
   When used with `tf.distribute.Strategy`, outside of built-in training loops
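For reference, a runnable version of the subclass example in this hunk — a minimal sketch using only public TF 2.x ops (`tf.convert_to_tensor`, `tf.cast`, `tf.square`, `tf.reduce_mean`) in place of the internal `ops`/`math_ops`/`K` helpers; note the new docstring text itself still mixes the `tf.` and `math_ops.` namespaces:

```python
import tensorflow as tf

class MeanSquaredError(tf.keras.losses.Loss):
  """Custom loss built by overriding `call()`, as the docstring describes."""

  def call(self, y_true, y_pred):
    y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, y_pred.dtype)
    # One loss value per sample: mean over the last axis.
    return tf.reduce_mean(tf.square(y_pred - y_true), axis=-1)

mse = MeanSquaredError()
# __call__ applies the default SUM_OVER_BATCH_SIZE reduction, giving a scalar.
print(mse([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]]).numpy())  # 0.5
```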
@@ -259,7 +261,7 @@ class MeanSquaredError(LossFunctionWrapper):
   `loss = square(y_true - y_pred)`
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[1., 1.], [1., 0.]]
@@ -284,11 +286,10 @@ class MeanSquaredError(LossFunctionWrapper):
   >>> mse(y_true, y_pred).numpy()
   array([0.5, 0.5], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
   ```
   """
@@ -319,7 +320,7 @@ class MeanAbsoluteError(LossFunctionWrapper):
   `loss = abs(y_true - y_pred)`
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[1., 1.], [1., 0.]]
@@ -344,11 +345,10 @@ class MeanAbsoluteError(LossFunctionWrapper):
   >>> mae(y_true, y_pred).numpy()
   array([0.5, 0.5], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError())
   ```
   """
@@ -379,7 +379,7 @@ class MeanAbsolutePercentageError(LossFunctionWrapper):
   `loss = 100 * abs(y_true - y_pred) / y_true`
-  Usage:
+  Standalone usage:
   >>> y_true = [[2., 1.], [2., 3.]]
   >>> y_pred = [[1., 1.], [1., 0.]]
@@ -404,11 +404,11 @@ class MeanAbsolutePercentageError(LossFunctionWrapper):
   >>> mape(y_true, y_pred).numpy()
   array([25., 75.], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
+  model.compile(optimizer='sgd',
+                loss=tf.keras.losses.MeanAbsolutePercentageError())
   ```
   """
@@ -440,7 +440,7 @@ class MeanSquaredLogarithmicError(LossFunctionWrapper):
   `loss = square(log(y_true + 1.) - log(y_pred + 1.))`
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[1., 1.], [1., 0.]]
@@ -465,11 +465,11 @@ class MeanSquaredLogarithmicError(LossFunctionWrapper):
   >>> msle(y_true, y_pred).numpy()
   array([0.240, 0.240], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
+  model.compile(optimizer='sgd',
+                loss=tf.keras.losses.MeanSquaredLogarithmicError())
   ```
   """
@@ -507,7 +507,7 @@ class BinaryCrossentropy(LossFunctionWrapper):
   floating-pointing value, and both `y_pred` and `y_true` have the shape
   `[batch_size]`.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
@@ -535,8 +535,7 @@ class BinaryCrossentropy(LossFunctionWrapper):
   Usage with the `tf.keras` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.BinaryCrossentropy())
   ```
   """
@@ -589,7 +588,7 @@ class CategoricalCrossentropy(LossFunctionWrapper):
   example. The shape of both `y_pred` and `y_true` are
   `[batch_size, num_classes]`.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 1, 0], [0, 0, 1]]
   >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
@@ -614,11 +613,10 @@ class CategoricalCrossentropy(LossFunctionWrapper):
   >>> cce(y_true, y_pred).numpy()
   array([0.0513, 2.303], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalCrossentropy())
   ```
   """
@@ -671,7 +669,7 @@ class SparseCategoricalCrossentropy(LossFunctionWrapper):
   The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
   `[batch_size, num_classes]`.
-  Usage:
+  Standalone usage:
   >>> y_true = [1, 2]
   >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
@@ -696,11 +694,11 @@ class SparseCategoricalCrossentropy(LossFunctionWrapper):
   >>> scce(y_true, y_pred).numpy()
   array([0.0513, 2.303], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
+  model.compile(optimizer='sgd',
+                loss=tf.keras.losses.SparseCategoricalCrossentropy())
   ```
   """
@@ -742,7 +740,7 @@ class Hinge(LossFunctionWrapper):
   `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
   provided we will convert them to -1 or 1.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
@@ -767,11 +765,10 @@ class Hinge(LossFunctionWrapper):
   >>> h(y_true, y_pred).numpy()
   array([1.1, 1.5], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.Hinge())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.Hinge())
   ```
   """
@@ -802,7 +799,7 @@ class SquaredHinge(LossFunctionWrapper):
   `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
   provided we will convert them to -1 or 1.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
@@ -827,11 +824,10 @@ class SquaredHinge(LossFunctionWrapper):
   >>> h(y_true, y_pred).numpy()
   array([1.46, 2.26], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge())
   ```
   """
@@ -863,7 +859,7 @@ class CategoricalHinge(LossFunctionWrapper):
   `loss = maximum(neg - pos + 1, 0)`
   where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 1], [0, 0]]
   >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
@@ -888,11 +884,10 @@ class CategoricalHinge(LossFunctionWrapper):
   >>> h(y_true, y_pred).numpy()
   array([1.2, 1.6], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge())
   ```
   """
@@ -923,7 +918,7 @@ class Poisson(LossFunctionWrapper):
   `loss = y_pred - y_true * log(y_pred)`
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[1., 1.], [0., 0.]]
@@ -948,11 +943,10 @@ class Poisson(LossFunctionWrapper):
   >>> p(y_true, y_pred).numpy()
   array([0.999, 0.], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.Poisson())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson())
   ```
   """
@@ -981,7 +975,7 @@ class LogCosh(LossFunctionWrapper):
   `logcosh = log((exp(x) + exp(-x))/2)`,
   where x is the error `y_pred - y_true`.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [0., 0.]]
   >>> y_pred = [[1., 1.], [0., 0.]]
@@ -1006,11 +1000,10 @@ class LogCosh(LossFunctionWrapper):
   >>> l(y_true, y_pred).numpy()
   array([0.217, 0.], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.LogCosh())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh())
   ```
   """
@@ -1040,7 +1033,7 @@ class KLDivergence(LossFunctionWrapper):
   See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 1], [0, 0]]
   >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
@@ -1065,11 +1058,10 @@ class KLDivergence(LossFunctionWrapper):
   >>> kl(y_true, y_pred).numpy()
   array([0.916, -3.08e-06], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.KLDivergence())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence())
   ```
   """
@@ -1106,7 +1098,7 @@ class Huber(LossFunctionWrapper):
   ```
   where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 1], [0, 0]]
   >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
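To make the piecewise Huber definition above concrete, here is a small NumPy sketch (not the TF implementation) that reproduces the doctest values quoted in the next hunk: with the default `delta=1.0`, every error here falls in the quadratic branch, so the first row gives `0.5 * 0.6**2 = 0.18` and the second averages `(0.5 * 0.4**2 + 0.5 * 0.6**2) / 2 = 0.13`.

```python
import numpy as np

def huber(y_true, y_pred, delta=1.0):
  """Huber loss, averaged over the last axis (mirrors the formula above)."""
  error = np.asarray(y_pred, dtype=float) - np.asarray(y_true, dtype=float)
  quadratic = 0.5 * np.square(error)                 # branch for |error| <= delta
  linear = delta * np.abs(error) - 0.5 * delta ** 2  # branch for |error| > delta
  return np.mean(np.where(np.abs(error) <= delta, quadratic, linear), axis=-1)

print(huber([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]))  # [0.18 0.13]
```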
@@ -1131,11 +1123,10 @@ class Huber(LossFunctionWrapper):
   >>> h(y_true, y_pred).numpy()
   array([0.18, 0.13], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.Huber())
+  model.compile(optimizer='sgd', loss=tf.keras.losses.Huber())
   ```
   """
@@ -1177,7 +1168,7 @@ def mean_squared_error(y_true, y_pred):
   `loss = mean(square(y_true - y_pred), axis=-1)`
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.randint(0, 2, size=(2, 3))
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1209,7 +1200,7 @@ def mean_absolute_error(y_true, y_pred):
   `loss = mean(abs(y_true - y_pred), axis=-1)`
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.randint(0, 2, size=(2, 3))
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1241,7 +1232,7 @@ def mean_absolute_percentage_error(y_true, y_pred):
   `loss = 100 * mean(abs(y_true - y_pred) / y_true, axis=-1)`
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.random(size=(2, 3))
   >>> y_true = np.maximum(y_true, 1e-7)  # Prevent division by zero
@@ -1277,7 +1268,7 @@ def mean_squared_logarithmic_error(y_true, y_pred):
   `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.randint(0, 2, size=(2, 3))
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1325,7 +1316,7 @@ def squared_hinge(y_true, y_pred):
   `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.choice([-1, 1], size=(2, 3))
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1357,7 +1348,7 @@ def hinge(y_true, y_pred):
   `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.choice([-1, 1], size=(2, 3))
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1389,7 +1380,7 @@ def categorical_hinge(y_true, y_pred):
   `loss = maximum(neg - pos + 1, 0)`
   where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.randint(0, 3, size=(2,))
   >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)
@@ -1459,7 +1450,7 @@ def log_cosh(y_true, y_pred):
   like the mean squared error, but will not be so strongly affected by the
   occasional wildly incorrect prediction.
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.random(size=(2, 3))
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1495,7 +1486,7 @@ def categorical_crossentropy(y_true,
                              label_smoothing=0):
   """Computes the categorical crossentropy loss.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 1, 0], [0, 0, 1]]
   >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
@@ -1532,7 +1523,7 @@ def categorical_crossentropy(y_true,
 def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
   """Computes the sparse categorical crossentropy loss.
-  Usage:
+  Standalone usage:
   >>> y_true = [1, 2]
   >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
@@ -1563,7 +1554,7 @@ def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
 def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
   """Computes the binary crossentropy loss.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 1], [0, 0]]
   >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
@@ -1610,7 +1601,7 @@ def kl_divergence(y_true, y_pred):
   See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1645,7 +1636,7 @@ def poisson(y_true, y_pred):
   The Poisson loss is the mean of the elements of the `Tensor`
   `y_pred - y_true * log(y_pred)`.
-  Usage:
+  Standalone usage:
   >>> y_true = np.random.randint(0, 2, size=(2, 3))
   >>> y_pred = np.random.random(size=(2, 3))
@@ -1694,7 +1685,7 @@ def cosine_similarity(y_true, y_pred, axis=-1):
   `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
   >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
@@ -1728,7 +1719,7 @@ class CosineSimilarity(LossFunctionWrapper):
   `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`
-  Usage:
+  Standalone usage:
   >>> y_true = [[0., 1.], [1., 1.]]
   >>> y_pred = [[1., 0.], [1., 1.]]
@@ -1758,11 +1749,10 @@ class CosineSimilarity(LossFunctionWrapper):
   >>> cosine_loss(y_true, y_pred).numpy()
   array([-0., -0.999], dtype=float32)
-  Usage with the `compile` API:
+  Usage with the `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
+  model.compile(optimizer='sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
   ```
   Args:
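Note that the rewritten `compile()` snippets no longer construct `model` themselves; they assume one is already in scope. A minimal end-to-end sketch of the new style (the toy model and data here are illustrative, not from the commit):

```python
import numpy as np
import tensorflow as tf

# Hypothetical toy model, only so that `model` in the docstrings has a referent.
inputs = tf.keras.Input(shape=(8,))
x = tf.keras.layers.Dense(16, activation='relu')(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)

# The updated docstring style: explicit keyword arguments to compile().
model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
model.fit(np.random.random((32, 8)), np.random.random((32, 1)),
          epochs=1, verbose=0)
```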

tensorflow/python/keras/metrics.py

@@ -83,7 +83,7 @@ class Metric(base_layer.Layer):
     dtype: (Optional) data type of the metric result.
     **kwargs: Additional layer keywords arguments.
-  Usage:
+  Standalone usage:
   ```python
   m = SomeMetric(...)
@@ -92,7 +92,7 @@ class Metric(base_layer.Layer):
   print('Final result: ', m.result().numpy())
   ```
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
   model = tf.keras.Sequential()
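A runnable version of the standalone pattern that the `Metric` docstring sketches with `SomeMetric(...)`, using a built-in metric (the values here are illustrative):

```python
import tensorflow as tf

# Create once, update incrementally, read the result, reset between evaluations.
m = tf.keras.metrics.Accuracy()
m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
print('Intermediate result:', m.result().numpy())  # 0.75 (3 of 4 match)

m.update_state([[5]], [[5]])
print('Final result:', m.result().numpy())  # 0.8 (4 of 5 match)
m.reset_states()  # clears the accumulated state
```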
@@ -404,19 +404,18 @@ class Sum(Reduce):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.Sum()
   >>> m.update_state([1, 3, 5, 7])
   >>> m.result().numpy()
   16.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
-  model.compile('sgd', loss='mse')
+  model.compile(optimizer='sgd', loss='mse')
   ```
   """
@@ -443,7 +442,7 @@ class Mean(Reduce):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.Mean()
   >>> m.update_state([1, 3, 5, 7])
@@ -454,12 +453,11 @@ class Mean(Reduce):
   >>> m.result().numpy()
   2.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
-  model.compile('sgd', loss='mse')
+  model.compile(optimizer='sgd', loss='mse')
   ```
   """
@@ -485,7 +483,7 @@ class MeanRelativeError(Mean):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
   >>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8])
@@ -496,12 +494,11 @@ class MeanRelativeError(Mean):
   >>> m.result().numpy()
   1.25
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
   ```
@@ -638,7 +635,7 @@ class Accuracy(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.Accuracy()
   >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
@@ -651,11 +648,12 @@ class Accuracy(MeanMetricWrapper):
   >>> m.result().numpy()
   0.5
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Accuracy()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.Accuracy()])
   ```
   """
@@ -681,7 +679,7 @@ class BinaryAccuracy(MeanMetricWrapper):
     threshold: (Optional) Float representing the threshold for deciding
       whether prediction values are 1 or 0.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.BinaryAccuracy()
   >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
@@ -694,11 +692,12 @@ class BinaryAccuracy(MeanMetricWrapper):
   >>> m.result().numpy()
   0.5
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.BinaryAccuracy()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.BinaryAccuracy()])
   ```
   """
@@ -729,7 +728,7 @@ class CategoricalAccuracy(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.CategoricalAccuracy()
   >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
@@ -744,12 +743,11 @@ class CategoricalAccuracy(MeanMetricWrapper):
   >>> m.result().numpy()
   0.3
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.CategoricalAccuracy()])
   ```
@@ -783,7 +781,7 @@ class SparseCategoricalAccuracy(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
   >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
@@ -796,12 +794,11 @@ class SparseCategoricalAccuracy(MeanMetricWrapper):
   >>> m.result().numpy()
   0.3
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
   ```
@@ -822,7 +819,7 @@ class TopKCategoricalAccuracy(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
   >>> m.update_state([[0, 0, 1], [0, 1, 0]],
@@ -837,11 +834,12 @@ class TopKCategoricalAccuracy(MeanMetricWrapper):
   >>> m.result().numpy()
   0.3
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
   ```
   """
@@ -860,7 +858,7 @@ class SparseTopKCategoricalAccuracy(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
   >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
@@ -873,12 +871,12 @@ class SparseTopKCategoricalAccuracy(MeanMetricWrapper):
   >>> m.result().numpy()
   0.3
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
+      loss='mse',
       metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
   ```
   """
@@ -975,7 +973,7 @@ class FalsePositives(_ConfusionMatrixConditionCount):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.FalsePositives()
   >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
@@ -987,11 +985,12 @@ class FalsePositives(_ConfusionMatrixConditionCount):
   >>> m.result().numpy()
   1.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalsePositives()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.FalsePositives()])
   ```
   """
@@ -1023,7 +1022,7 @@ class FalseNegatives(_ConfusionMatrixConditionCount):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.FalseNegatives()
   >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
@@ -1035,11 +1034,12 @@ class FalseNegatives(_ConfusionMatrixConditionCount):
   >>> m.result().numpy()
   1.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.FalseNegatives()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.FalseNegatives()])
   ```
   """
@@ -1071,7 +1071,7 @@ class TrueNegatives(_ConfusionMatrixConditionCount):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.TrueNegatives()
   >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
@@ -1083,11 +1083,12 @@ class TrueNegatives(_ConfusionMatrixConditionCount):
   >>> m.result().numpy()
   1.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TrueNegatives()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.TrueNegatives()])
   ```
   """
@@ -1119,7 +1120,7 @@ class TruePositives(_ConfusionMatrixConditionCount):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.TruePositives()
   >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
@@ -1131,11 +1132,12 @@ class TruePositives(_ConfusionMatrixConditionCount):
   >>> m.result().numpy()
   1.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.TruePositives()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.TruePositives()])
   ```
   """
@@ -1183,7 +1185,7 @@ class Precision(Metric):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.Precision()
   >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
@@ -1207,11 +1209,12 @@ class Precision(Metric):
   >>> m.result().numpy()
   0.5
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Precision()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.Precision()])
   ```
   """
@@ -1319,7 +1322,7 @@ class Recall(Metric):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.Recall()
   >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
@@ -1331,11 +1334,12 @@ class Recall(Metric):
   >>> m.result().numpy()
   1.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Recall()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.Recall()])
   ```
   """
@@ -1529,7 +1533,7 @@ class SensitivityAtSpecificity(SensitivitySpecificityBase):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.SensitivityAtSpecificity(0.5)
   >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
@@ -1542,12 +1546,11 @@ class SensitivityAtSpecificity(SensitivitySpecificityBase):
   >>> m.result().numpy()
   0.333333
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.SensitivityAtSpecificity()])
   ```
@@ -1605,7 +1608,7 @@ class SpecificityAtSensitivity(SensitivitySpecificityBase):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.SpecificityAtSensitivity(0.5)
   >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
@@ -1618,12 +1621,11 @@ class SpecificityAtSensitivity(SensitivitySpecificityBase):
   >>> m.result().numpy()
   0.5
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.SpecificityAtSensitivity()])
   ```
@@ -1673,7 +1675,7 @@ class PrecisionAtRecall(SensitivitySpecificityBase):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.PrecisionAtRecall(0.5)
   >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
@@ -1686,12 +1688,11 @@ class PrecisionAtRecall(SensitivitySpecificityBase):
   >>> m.result().numpy()
   0.33333333
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)])
   ```
@@ -1744,7 +1745,7 @@ class RecallAtPrecision(SensitivitySpecificityBase):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.RecallAtPrecision(0.8)
   >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
@@ -1757,12 +1758,11 @@ class RecallAtPrecision(SensitivitySpecificityBase):
   >>> m.result().numpy()
   1.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.RecallAtPrecision(precision=0.8)])
   ```
@@ -1861,7 +1861,7 @@ class AUC(Metric):
   before flattening; therefore `label_weights` should not be used for
   multi-class data.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.AUC(num_thresholds=3)
   >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
@@ -1878,11 +1878,10 @@ class AUC(Metric):
   >>> m.result().numpy()
   1.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.AUC()])
+  model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.AUC()])
   ```
   """
@@ -2239,7 +2238,7 @@ class CosineSimilarity(MeanMetricWrapper):
     axis: (Optional) Defaults to -1. The dimension along which the cosine
       similarity is computed.
-  Usage:
+  Standalone usage:
   >>> # l2_norm(y_true) = [[0., 1.], [1./1.414], 1./1.414]]]
   >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414], 1./1.414]]]
@@ -2257,12 +2256,11 @@ class CosineSimilarity(MeanMetricWrapper):
   >>> m.result().numpy()
   0.6999999
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
   ```
@@ -2281,7 +2279,7 @@ class MeanAbsoluteError(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.MeanAbsoluteError()
   >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
@@ -2294,12 +2292,13 @@ class MeanAbsoluteError(MeanMetricWrapper):
   >>> m.result().numpy()
   0.5
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd', loss='mse', metrics=[tf.keras.metrics.MeanAbsoluteError()])
+      optimizer='sgd',
+      loss='mse',
+      metrics=[tf.keras.metrics.MeanAbsoluteError()])
   ```
   """
@@ -2316,7 +2315,7 @@ class MeanAbsolutePercentageError(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.MeanAbsolutePercentageError()
   >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
@@ -2329,12 +2328,11 @@ class MeanAbsolutePercentageError(MeanMetricWrapper):
   >>> m.result().numpy()
   500000000.0
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
   ```
@@ -2353,7 +2351,7 @@ class MeanSquaredError(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.MeanSquaredError()
   >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
@@ -2366,12 +2364,13 @@ class MeanSquaredError(MeanMetricWrapper):
   >>> m.result().numpy()
   0.5
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd', loss='mse', metrics=[tf.keras.metrics.MeanSquaredError()])
+      optimizer='sgd',
+      loss='mse',
+      metrics=[tf.keras.metrics.MeanSquaredError()])
   ```
   """
@@ -2388,7 +2387,7 @@ class MeanSquaredLogarithmicError(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
   >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
@@ -2401,12 +2400,11 @@ class MeanSquaredLogarithmicError(MeanMetricWrapper):
   >>> m.result().numpy()
   0.24022643
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
   ```
@@ -2428,7 +2426,7 @@ class Hinge(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.Hinge()
   >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
@@ -2441,11 +2439,10 @@ class Hinge(MeanMetricWrapper):
   >>> m.result().numpy()
   1.1
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()])
+  model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()])
   ```
   """
@@ -2464,7 +2461,7 @@ class SquaredHinge(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.SquaredHinge()
   >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
@@ -2477,12 +2474,11 @@ class SquaredHinge(MeanMetricWrapper):
   >>> m.result().numpy()
   1.46
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.SquaredHinge()])
   ```
@@ -2500,7 +2496,7 @@ class CategoricalHinge(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.CategoricalHinge()
   >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
@@ -2513,12 +2509,11 @@ class CategoricalHinge(MeanMetricWrapper):
   >>> m.result().numpy()
   1.2
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.CategoricalHinge()])
   ```
@@ -2532,7 +2527,7 @@ class CategoricalHinge(MeanMetricWrapper):
 class RootMeanSquaredError(Mean):
   """Computes root mean squared error metric between `y_true` and `y_pred`.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.RootMeanSquaredError()
   >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
@@ -2545,12 +2540,11 @@ class RootMeanSquaredError(Mean):
   >>> m.result().numpy()
   0.70710677
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.RootMeanSquaredError()])
   ```
@@ -2594,7 +2588,7 @@ class LogCoshError(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.LogCoshError()
   >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
@@ -2607,11 +2601,12 @@ class LogCoshError(MeanMetricWrapper):
   >>> m.result().numpy()
   0.21689045
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.LogCoshError()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.LogCoshError()])
   ```
   """
@@ -2629,7 +2624,7 @@ class Poisson(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.Poisson()
   >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
@@ -2642,11 +2637,12 @@ class Poisson(MeanMetricWrapper):
   >>> m.result().numpy()
   0.99999994
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.Poisson()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.Poisson()])
   ```
   """
@@ -2664,7 +2660,7 @@ class KLDivergence(MeanMetricWrapper):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.KLDivergence()
   >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
@@ -2677,11 +2673,12 @@ class KLDivergence(MeanMetricWrapper):
   >>> m.result().numpy()
   0.9162892
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
-  model.compile('sgd', loss='mse', metrics=[tf.keras.metrics.KLDivergence()])
+  model.compile(optimizer='sgd',
+                loss='mse',
+                metrics=[tf.keras.metrics.KLDivergence()])
   ```
   """
@@ -2711,7 +2708,7 @@ class MeanIoU(Metric):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> # cm = [[1, 1],
   >>> #       [1, 1]]
@@ -2729,12 +2726,11 @@ class MeanIoU(Metric):
   >>> m.result().numpy()
   0.23809525
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
   ```
@@ -2836,7 +2832,7 @@ class MeanTensor(Metric):
     name: (Optional) string name of the metric instance.
     dtype: (Optional) data type of the metric result.
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.MeanTensor()
   >>> m.update_state([0, 1, 2, 3])
@@ -2951,7 +2947,7 @@ class BinaryCrossentropy(MeanMetricWrapper):
   e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
   label `0` and `0.9` for label `1`".
-  Usage:
+  Standalone usage:
   >>> m = tf.keras.metrics.BinaryCrossentropy()
   >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
@@ -2964,12 +2960,11 @@ class BinaryCrossentropy(MeanMetricWrapper):
   >>> m.result().numpy()
   0.9162905
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.BinaryCrossentropy()])
   ```
@@ -3007,7 +3002,7 @@ class CategoricalCrossentropy(MeanMetricWrapper):
   `label_smoothing=0.2` means that we will use a value of `0.1` for label
   `0` and `0.9` for label `1`"
-  Usage:
+  Standalone usage:
   >>> # EPSILON = 1e-7, y = y_true, y` = y_pred
   >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
@@ -3029,12 +3024,11 @@ class CategoricalCrossentropy(MeanMetricWrapper):
   >>> m.result().numpy()
   1.6271976
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.CategoricalCrossentropy()])
   ```
@@ -3076,7 +3070,7 @@ class SparseCategoricalCrossentropy(MeanMetricWrapper):
     axis: (Optional) Defaults to -1. The dimension along which the metric is
       computed.
-  Usage:
+  Standalone usage:
   >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
   >>> # logits = log(y_pred)
@@ -3101,12 +3095,11 @@ class SparseCategoricalCrossentropy(MeanMetricWrapper):
   >>> m.result().numpy()
   1.6271976
-  Usage with tf.keras API:
+  Usage with `compile()` API:
   ```python
-  model = tf.keras.Model(inputs, outputs)
   model.compile(
-      'sgd',
+      optimizer='sgd',
       loss='mse',
       metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
   ```
@@ -3196,7 +3189,7 @@ def accuracy(y_true, y_pred):
 def binary_accuracy(y_true, y_pred, threshold=0.5):
   """Calculates how often predictions matches binary labels.
-  Usage:
+  Standalone usage:
   >>> y_true = [[1], [1], [0], [0]]
   >>> y_pred = [[1], [1], [0], [0]]
   >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
@@ -3223,7 +3216,7 @@ def binary_accuracy(y_true, y_pred, threshold=0.5):
 def categorical_accuracy(y_true, y_pred):
   """Calculates how often predictions matches one-hot labels.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 0, 1], [0, 1, 0]]
   >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
   >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
@@ -3251,7 +3244,7 @@ def categorical_accuracy(y_true, y_pred):
 def sparse_categorical_accuracy(y_true, y_pred):
   """Calculates how often predictions matches integer labels.
-  Usage:
+  Standalone usage:
   >>> y_true = [2, 1]
   >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
   >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
@@ -3291,7 +3284,7 @@ def sparse_categorical_accuracy(y_true, y_pred):
 def top_k_categorical_accuracy(y_true, y_pred, k=5):
   """Computes how often targets are in the top `K` predictions.
-  Usage:
+  Standalone usage:
   >>> y_true = [[0, 0, 1], [0, 1, 0]]
   >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
   >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
@@ -3316,7 +3309,7 @@ def top_k_categorical_accuracy(y_true, y_pred, k=5):
 def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
   """Computes how often integer targets are in the top `K` predictions.
-  Usage:
+  Standalone usage:
   >>> y_true = [2, 1]
   >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
   >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
@@ -3467,3 +3460,4 @@ def get(identifier):
 def is_built_in(cls):
   return cls.__module__ == Metric.__module__
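For context, `is_built_in()` supports the `get()` lookup named in the final hunk header: string identifiers resolve to the built-in metric functions of this module. A short sketch of the expected public behavior (worth verifying against your TF version):

```python
import tensorflow as tf

# A string identifier resolves to the corresponding built-in function.
fn = tf.keras.metrics.get('mse')
print(fn.__name__)  # mean_squared_error

# A callable passed to get() is returned as-is.
assert tf.keras.metrics.get(fn) is fn
```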