diff --git a/tensorflow/python/ops/nn.py b/tensorflow/python/ops/nn.py
index 60e9695dcb9..a80662c8b55 100644
--- a/tensorflow/python/ops/nn.py
+++ b/tensorflow/python/ops/nn.py
@@ -22,6 +22,7 @@ See the @{$python/nn} guide.
 @@relu6
 @@crelu
 @@elu
+@@leaky_relu
 @@selu
 @@softplus
 @@softsign
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index a2e75dd7f27..67b490fcfb2 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -1361,6 +1361,27 @@ def relu6(features, name=None):
   return gen_nn_ops._relu6(features, name=name)
 
 
+def leaky_relu(features, alpha=0.2, name=None):
+  """Compute the Leaky ReLU activation function.
+
+  "Rectifier Nonlinearities Improve Neural Network Acoustic Models"
+  AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
+  http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
+
+  Args:
+    features: A `Tensor` representing preactivation values.
+    alpha: Slope of the activation function at x < 0.
+    name: A name for the operation (optional).
+
+  Returns:
+    The activation value.
+  """
+  with ops.name_scope(name, "LeakyRelu", [features, alpha]):
+    features = ops.convert_to_tensor(features, name="features")
+    alpha = ops.convert_to_tensor(alpha, name="alpha")
+    return math_ops.maximum(alpha * features, features)
+
+
 def _flatten_outer_dims(logits):
   """Flattens logits' outer dimensions and keep its last dimension."""
   rank = array_ops.rank(logits)
diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py
index 809208e3a43..3528b60ca79 100644
--- a/tensorflow/python/ops/nn_test.py
+++ b/tensorflow/python/ops/nn_test.py
@@ -834,6 +834,32 @@ class ReluTest(test_lib.TestCase):
     self.assertTrue(np.isnan(z).all())
 
 
+class LeakyReluTest(test_lib.TestCase):
+
+  def testRange(self):
+    batch_size = 3
+    height, width = 4, 4
+    np.random.seed(1)  # Make it reproducible.
+    inputs = np.random.uniform(
+        size=(batch_size, height, width, 3)).astype(np.float32)
+    inputs = constant_op.constant(inputs)
+
+    outputs = nn_ops.leaky_relu(inputs)
+    self.assertEqual(inputs.shape, outputs.shape)
+    with self.test_session() as sess:
+      inputs, outputs = sess.run([inputs, outputs])
+    self.assertGreaterEqual(outputs.min(), 0.0)
+    self.assertLessEqual(outputs.max(), 1.0)
+    self.assertAllClose(inputs, outputs)
+
+  def testValues(self):
+    np_values = np.array([-1.0, 0.0, 0.5, 1.0, 2.0], dtype=np.float32)
+    outputs = nn_ops.leaky_relu(constant_op.constant(np_values))
+    with self.test_session() as sess:
+      outputs = sess.run(outputs)
+    self.assertAllClose(outputs, [-0.2, 0.0, 0.5, 1.0, 2.0])
+
+
 class MomentsTest(test_lib.TestCase):
 
   def doOutputTest(self, input_shape, moments_axes, tol=1e-4,
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.pbtxt b/tensorflow/tools/api/golden/tensorflow.nn.pbtxt
index d4e83ced7fd..f10299377b3 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.nn.pbtxt
@@ -168,6 +168,10 @@ tf_module {
   member_method {
     name: "l2_normalize"
     argspec: "args=[\'x\', \'dim\', \'epsilon\', \'name\'], varargs=None, keywords=None, defaults=[\'1e-12\', \'None\'], "
   }
+  member_method {
+    name: "leaky_relu"
+    argspec: "args=[\'features\', \'alpha\', \'name\'], varargs=None, keywords=None, defaults=[\'0.2\', \'None\'], "
+  }
   member_method {
     name: "learned_unigram_candidate_sampler"
     argspec: "args=[\'true_classes\', \'num_true\', \'num_sampled\', \'unique\', \'range_max\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
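
Usage sketch (not part of the patch): after this change the op is exposed as tf.nn.leaky_relu, computing max(alpha * x, x), i.e. the identity for x >= 0 and alpha * x for x < 0 when 0 <= alpha <= 1. A minimal example against the TF 1.x graph/session API this diff targets; the tensor values are illustrative:

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 0.5, 1.0, 2.0])
y = tf.nn.leaky_relu(x)             # default alpha=0.2
z = tf.nn.leaky_relu(x, alpha=0.1)  # custom negative slope

with tf.Session() as sess:
  print(sess.run(y))  # [-0.2  0.   0.5  1.   2. ]
  print(sess.run(z))  # [-0.1  0.   0.5  1.   2. ]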