From c1065c4c790cb972322eacda6ec4615dc7ce7f34 Mon Sep 17 00:00:00 2001
From: Francois Chollet <fchollet@google.com>
Date: Mon, 30 Mar 2020 13:36:53 -0700
Subject: [PATCH] Regularize loss naming quirks.

Previously, some of our losses did not respect the rule "for every loss
class with name XxxYyy, there is an equivalent loss function with name
xxx_yyy". In particular:

KLDivergence class -> kullback_leibler_divergence function (expected: kl_divergence)
LogCosh class -> logcosh function (expected: log_cosh)
Huber class -> corresponding function not exported (expected: huber)

This change is backwards compatible (only adding aliases, and changing
default names for LogCosh and KLDivergence, which is fine as we make no
guarantees with regard to default names).

PiperOrigin-RevId: 303812304
Change-Id: I2f62d594d99f3fa30fbf04bf92c0dd5caadc0958
---
 tensorflow/python/keras/losses.py             | 31 +++++++++++--------
 tensorflow/python/keras/losses_test.py        |  4 +--
 ...sorflow.keras.losses.-k-l-divergence.pbtxt |  2 +-
 .../tensorflow.keras.losses.-log-cosh.pbtxt   |  2 +-
 .../golden/v1/tensorflow.keras.losses.pbtxt   |  8 +++++
 .../golden/v1/tensorflow.keras.metrics.pbtxt  |  4 +++
 ...sorflow.keras.losses.-k-l-divergence.pbtxt |  2 +-
 .../tensorflow.keras.losses.-log-cosh.pbtxt   |  2 +-
 .../golden/v2/tensorflow.keras.losses.pbtxt   | 12 +++++++
 .../golden/v2/tensorflow.keras.metrics.pbtxt  |  4 +++
 .../tensorflow.losses.-k-l-divergence.pbtxt   |  2 +-
 .../v2/tensorflow.losses.-log-cosh.pbtxt      |  2 +-
 .../api/golden/v2/tensorflow.losses.pbtxt     | 12 +++++++
 .../api/golden/v2/tensorflow.metrics.pbtxt    |  4 +++
 14 files changed, 70 insertions(+), 21 deletions(-)

diff --git a/tensorflow/python/keras/losses.py b/tensorflow/python/keras/losses.py
index f5948414029..9db3435fa50 100644
--- a/tensorflow/python/keras/losses.py
+++ b/tensorflow/python/keras/losses.py
@@ -1014,7 +1014,7 @@ class LogCosh(LossFunctionWrapper):
   ```
   """
 
-  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
+  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='log_cosh'):
     """Initializes `LogCosh` instance.
 
     Args:
@@ -1027,9 +1027,9 @@ class LogCosh(LossFunctionWrapper):
         will raise an error. Please see this custom training [tutorial]
         (https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
-      name: Optional name for the op. Defaults to 'logcosh'.
+      name: Optional name for the op. Defaults to 'log_cosh'.
     """
-    super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction)
+    super(LogCosh, self).__init__(log_cosh, name=name, reduction=reduction)
 
 
 @keras_export('keras.losses.KLDivergence')
@@ -1075,7 +1075,7 @@ class KLDivergence(LossFunctionWrapper):
 
   def __init__(self,
                reduction=losses_utils.ReductionV2.AUTO,
-               name='kullback_leibler_divergence'):
+               name='kl_divergence'):
     """Initializes `KLDivergence` instance.
 
     Args:
@@ -1088,10 +1088,10 @@ class KLDivergence(LossFunctionWrapper):
         will raise an error. Please see this custom training [tutorial]
         (https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
-      name: Optional name for the op. Defaults to 'kullback_leibler_divergence'.
+      name: Optional name for the op. Defaults to 'kl_divergence'.
     """
     super(KLDivergence, self).__init__(
-        kullback_leibler_divergence, name=name, reduction=reduction)
+        kl_divergence, name=name, reduction=reduction)
 
 
 @keras_export('keras.losses.Huber')
@@ -1160,7 +1160,7 @@ class Huber(LossFunctionWrapper):
       name: Optional name for the op. Defaults to 'huber_loss'.
     """
     super(Huber, self).__init__(
-        huber_loss, name=name, reduction=reduction, delta=delta)
+        huber, name=name, reduction=reduction, delta=delta)
 
 
 @keras_export('keras.metrics.mean_squared_error',
@@ -1414,7 +1414,8 @@ def categorical_hinge(y_true, y_pred):
   return math_ops.maximum(0., neg - pos + 1.)
 
 
-def huber_loss(y_true, y_pred, delta=1.0):
+@keras_export('keras.losses.huber', v1=[])
+def huber(y_true, y_pred, delta=1.0):
   """Computes Huber loss value.
 
   For each value x in `error = y_true - y_pred`:
@@ -1449,8 +1450,8 @@ def huber_loss(y_true, y_pred, delta=1.0):
       axis=-1)
 
 
-@keras_export('keras.losses.logcosh')
-def logcosh(y_true, y_pred):
+@keras_export('keras.losses.log_cosh', 'keras.losses.logcosh')
+def log_cosh(y_true, y_pred):
   """Logarithm of the hyperbolic cosine of the prediction error.
 
   `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
@@ -1594,13 +1595,15 @@ def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
       K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
 
 
-@keras_export('keras.metrics.kullback_leibler_divergence',
+@keras_export('keras.metrics.kl_divergence',
+              'keras.metrics.kullback_leibler_divergence',
               'keras.metrics.kld',
               'keras.metrics.KLD',
+              'keras.losses.kl_divergence',
               'keras.losses.kullback_leibler_divergence',
               'keras.losses.kld',
               'keras.losses.KLD')
-def kullback_leibler_divergence(y_true, y_pred):
+def kl_divergence(y_true, y_pred):
   """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
 
   `loss = y_true * log(y_true / y_pred)`
@@ -1795,7 +1798,9 @@ mse = MSE = mean_squared_error
 mae = MAE = mean_absolute_error
 mape = MAPE = mean_absolute_percentage_error
 msle = MSLE = mean_squared_logarithmic_error
-kld = KLD = kullback_leibler_divergence
+kld = KLD = kullback_leibler_divergence = kl_divergence
+logcosh = log_cosh
+huber_loss = huber
 
 
 def is_categorical_crossentropy(loss):
diff --git a/tensorflow/python/keras/losses_test.py b/tensorflow/python/keras/losses_test.py
index 855a1ed41a3..119cc5db87d 100644
--- a/tensorflow/python/keras/losses_test.py
+++ b/tensorflow/python/keras/losses_test.py
@@ -36,8 +36,8 @@ ALL_LOSSES = [
     losses.mean_absolute_percentage_error,
     losses.mean_squared_logarithmic_error, losses.squared_hinge, losses.hinge,
     losses.categorical_crossentropy, losses.binary_crossentropy,
-    losses.kullback_leibler_divergence, losses.poisson,
-    losses.cosine_similarity, losses.logcosh, losses.categorical_hinge
+    losses.kl_divergence, losses.poisson,
+    losses.cosine_similarity, losses.log_cosh, losses.categorical_hinge
 ]
 
 
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-k-l-divergence.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-k-l-divergence.pbtxt
index 30d68f097be..b15ba6f2d6d 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-k-l-divergence.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-k-l-divergence.pbtxt
@@ -6,7 +6,7 @@ tf_class {
   is_instance: "<type \'object\'>"
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'kullback_leibler_divergence\'], "
+    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'kl_divergence\'], "
   }
   member_method {
     name: "call"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-log-cosh.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-log-cosh.pbtxt
index 9310f07f509..1bdc6751a4a 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-log-cosh.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.-log-cosh.pbtxt
@@ -6,7 +6,7 @@ tf_class {
   is_instance: "<type \'object\'>"
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'logcosh\'], "
+    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'log_cosh\'], "
   }
   member_method {
     name: "call"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.pbtxt
index e24947ad19a..8f4c6a78b26 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.pbtxt
@@ -120,6 +120,10 @@ tf_module {
     name: "hinge"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "kl_divergence"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "kld"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
@@ -128,6 +132,10 @@ tf_module {
     name: "kullback_leibler_divergence"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "log_cosh"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "logcosh"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.pbtxt
index 2b09ccc48a2..57876312213 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.pbtxt
@@ -208,6 +208,10 @@ tf_module {
     name: "hinge"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "kl_divergence"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "kld"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-k-l-divergence.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-k-l-divergence.pbtxt
index 30d68f097be..b15ba6f2d6d 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-k-l-divergence.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-k-l-divergence.pbtxt
@@ -6,7 +6,7 @@ tf_class {
   is_instance: "<type \'object\'>"
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'kullback_leibler_divergence\'], "
+    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'kl_divergence\'], "
   }
   member_method {
     name: "call"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-log-cosh.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-log-cosh.pbtxt
index 9310f07f509..1bdc6751a4a 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-log-cosh.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.-log-cosh.pbtxt
@@ -6,7 +6,7 @@ tf_class {
   is_instance: "<type \'object\'>"
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'logcosh\'], "
+    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'log_cosh\'], "
  }
   member_method {
     name: "call"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt
index a8a4134df5e..dc97f818309 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt
@@ -116,6 +116,14 @@ tf_module {
     name: "hinge"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "huber"
+    argspec: "args=[\'y_true\', \'y_pred\', \'delta\'], varargs=None, keywords=None, defaults=[\'1.0\'], "
+  }
+  member_method {
+    name: "kl_divergence"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "kld"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
@@ -124,6 +132,10 @@ tf_module {
     name: "kullback_leibler_divergence"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "log_cosh"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "logcosh"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt
index 1b4976294ed..17768aeafbe 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt
@@ -200,6 +200,10 @@ tf_module {
     name: "hinge"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "kl_divergence"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "kld"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.losses.-k-l-divergence.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.losses.-k-l-divergence.pbtxt
index 21930e36fd9..d6f19bf3144 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.losses.-k-l-divergence.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.losses.-k-l-divergence.pbtxt
@@ -6,7 +6,7 @@ tf_class {
   is_instance: "<type \'object\'>"
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'kullback_leibler_divergence\'], "
+    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'kl_divergence\'], "
   }
   member_method {
     name: "call"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.losses.-log-cosh.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.losses.-log-cosh.pbtxt
index 44d1f898717..0fea0e6712f 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.losses.-log-cosh.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.losses.-log-cosh.pbtxt
@@ -6,7 +6,7 @@ tf_class {
   is_instance: "<type \'object\'>"
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'logcosh\'], "
+    argspec: "args=[\'self\', \'reduction\', \'name\'], varargs=None, keywords=None, defaults=[\'auto\', \'log_cosh\'], "
   }
   member_method {
     name: "call"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt
index e681f29b99c..88a473e7372 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt
@@ -116,6 +116,14 @@ tf_module {
     name: "hinge"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "huber"
+    argspec: "args=[\'y_true\', \'y_pred\', \'delta\'], varargs=None, keywords=None, defaults=[\'1.0\'], "
+  }
+  member_method {
+    name: "kl_divergence"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "kld"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
@@ -124,6 +132,10 @@ tf_module {
     name: "kullback_leibler_divergence"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "log_cosh"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "logcosh"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt
index eb0fc81133a..b3c87d67d2b 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt
@@ -200,6 +200,10 @@ tf_module {
     name: "hinge"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "kl_divergence"
+    argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "kld"
     argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
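Usage sketch (illustrative only, not part of the patch): the snippet below shows the snake_case aliases this change adds, assuming a TensorFlow build that includes it. Note that `tf.keras.losses.huber` is exported for the v2 API only, per the `keras_export('keras.losses.huber', v1=[])` call above; the tensor values are arbitrary example data.

```python
import tensorflow as tf

y_true = tf.constant([[0.0, 1.0], [1.0, 0.0]])
y_pred = tf.constant([[0.3, 0.7], [0.8, 0.2]])

# New snake_case names alias the pre-existing functions, so both spellings
# compute the same per-sample values.
print(tf.keras.losses.kl_divergence(y_true, y_pred))
print(tf.keras.losses.kullback_leibler_divergence(y_true, y_pred))  # same result

print(tf.keras.losses.log_cosh(y_true, y_pred))
print(tf.keras.losses.logcosh(y_true, y_pred))  # same result

# Huber is now also exported as a standalone function (v2 API only).
print(tf.keras.losses.huber(y_true, y_pred, delta=1.0))

# Loss classes behave as before; only their default `name` changed, e.g.
# KLDivergence now defaults to name='kl_divergence'.
print(tf.keras.losses.KLDivergence()(y_true, y_pred))
```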