Add v2 log loss.

PiperOrigin-RevId: 227755606
commit 2ae28cb35f (parent 7d57d32e44)
Author: Pavithra Vijay, 2019-01-03 14:57:47 -08:00
Committed by: TensorFlower Gardener
2 changed files with 137 additions and 0 deletions

@@ -500,6 +500,46 @@ class CategoricalHinge(Loss):
    return categorical_hinge(y_true, y_pred)


class LogLoss(Loss):
  """Computes the log loss between `y_true` and `y_pred`.

  logloss = -y log(p) - (1-y) log(1-p)

  Usage:

  ```python
  l = tf.losses.LogLoss()
  loss = l([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 10.745
  ```

  Usage with the tf.keras API:

  ```python
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', loss=tf.losses.LogLoss())
  ```

  Args:
    epsilon: A small increment to add to avoid taking a log of zero.
    reduction: Type of `tf.losses.Reduction` to apply to the loss. Default
      value is `SUM_OVER_BATCH_SIZE`.
    name: Optional name for the op.
  """

  def __init__(self,
               epsilon=1e-7,
               reduction=losses_impl.ReductionV2.SUM_OVER_BATCH_SIZE,
               name=None):
    super(LogLoss, self).__init__(reduction=reduction, name=name)
    self.epsilon = epsilon

  def call(self, y_true, y_pred):
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    return logloss(y_true, y_pred, epsilon=self.epsilon)


@keras_export('keras.metrics.mean_squared_error',
              'keras.metrics.mse',
              'keras.metrics.MSE',
@@ -562,6 +602,12 @@ def categorical_hinge(y_true, y_pred):
  return math_ops.maximum(0., neg - pos + 1.)


def logloss(y_true, y_pred, epsilon=1e-7):
  losses = math_ops.multiply(y_true, math_ops.log(y_pred + epsilon))
  losses += math_ops.multiply((1 - y_true), math_ops.log(1 - y_pred + epsilon))
  return K.mean(-losses, axis=-1)


@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

@@ -1003,5 +1003,96 @@ class CategoricalHingeTest(test.TestCase):
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)


@test_util.run_all_in_graph_and_eager_modes
class LogLossTest(test.TestCase):

  def setup(self):
    # TODO(psv): Change to setUp() after b/122319309 is fixed.
    y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
    y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3))
    epsilon = 1e-7  # to avoid log 0
    self.batch_size = 6
    self.expected_losses = np.multiply(y_true, np.log(y_pred + epsilon))
    self.expected_losses += np.multiply(1 - y_true,
                                        np.log(1 - y_pred + epsilon))
    self.expected_losses = -self.expected_losses
    self.y_pred = constant_op.constant(y_pred)
    self.y_true = constant_op.constant(y_true)

  def test_config(self):
    log_loss_obj = keras.losses.LogLoss(
        reduction=losses_impl.ReductionV2.SUM, name='log')
    self.assertEqual(log_loss_obj.name, 'log')
    self.assertEqual(log_loss_obj.reduction, losses_impl.ReductionV2.SUM)

  def test_all_correct(self):
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    loss = log_loss_obj(self.y_true, self.y_true)
    self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)

  def test_unweighted(self):
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    loss = log_loss_obj(self.y_true, self.y_pred)
    actual_loss = np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)

  def test_scalar_weighted(self):
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    sample_weight = 2.3
    loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)

    # Verify we get the same output when the same input is given.
    loss_2 = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)

  def test_sample_weighted(self):
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    sample_weight = constant_op.constant((1.2, 3.4), shape=(2, 1))
    loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    actual_loss = np.multiply(
        self.expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    actual_loss = np.sum(actual_loss) / self.batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)

  def test_timestep_weighted(self):
    log_loss_obj = keras.losses.LogLoss()
    y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3, 1))
    y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3, 1))
    epsilon = 1e-7  # to avoid log 0
    batch_size = 6
    expected_losses = np.multiply(y_true, np.log(y_pred + epsilon))
    expected_losses += np.multiply(1 - y_true, np.log(1 - y_pred + epsilon))
    y_pred = constant_op.constant(y_pred)
    y_true = constant_op.constant(y_true)

    sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
    loss = log_loss_obj(
        y_true,
        y_pred,
        sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
    actual_loss = np.multiply(-expected_losses, sample_weight)
    actual_loss = np.sum(actual_loss) / batch_size
    self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)

  def test_zero_weighted(self):
    self.setup()
    log_loss_obj = keras.losses.LogLoss()
    sample_weight = 0
    loss = log_loss_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
    self.assertAlmostEqual(self.evaluate(loss), 0., 3)


if __name__ == '__main__':
  test.main()
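
The weighted tests above all reduce to the same arithmetic: `call` averages per-element losses over the last axis into per-sample losses, the sample weights scale those, and `SUM_OVER_BATCH_SIZE` divides the weighted sum by the batch size. A minimal NumPy sketch of that reduction, not part of the commit, using the same fixture values as `LogLossTest` (variable names are illustrative):

```python
import numpy as np

# Fixture values from LogLossTest above.
y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3))
epsilon = 1e-7

per_element = -(y_true * np.log(y_pred + epsilon) +
                (1 - y_true) * np.log(1 - y_pred + epsilon))
per_sample = per_element.mean(axis=-1)  # shape (2,): one loss per sample
weights = np.asarray([1.2, 3.4])        # as in test_sample_weighted

# SUM_OVER_BATCH_SIZE: sum of weighted per-sample losses over the batch size.
reduced = np.sum(per_sample * weights) / 2.

# Same number the test computes element-wise: weighted per-element sum / 6.
assert np.isclose(reduced, np.sum(per_element * weights[:, None]) / 6.)
print(reduced)
```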