Migrate leaky_relu to nn_ops.py. Will be used for TFGAN.

PiperOrigin-RevId: 168386268
This commit is contained in:
A. Unique TensorFlower 2017-09-12 09:28:18 -07:00 committed by TensorFlower Gardener
parent f7ba16fdf3
commit 7a8c63da36
4 changed files with 52 additions and 0 deletions

View File

@@ -22,6 +22,7 @@ See the @{$python/nn} guide.
@@relu6
@@crelu
@@elu
@@leaky_relu
@@selu
@@softplus
@@softsign

View File

@@ -1361,6 +1361,27 @@ def relu6(features, name=None):
return gen_nn_ops._relu6(features, name=name)
def leaky_relu(features, alpha=0.2, name=None):
  """Compute the Leaky ReLU activation function.

  "Rectifier Nonlinearities Improve Neural Network Acoustic Models"
  AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
  http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf

  Args:
    features: A `Tensor` representing preactivation values.
    alpha: Slope of the activation function at x < 0.
    name: A name for the operation (optional).

  Returns:
    The activation value.
  """
  with ops.name_scope(name, "LeakyRelu", [features, alpha]):
    features = ops.convert_to_tensor(features, name="features")
    # Match alpha's dtype to features' dtype. Without the explicit dtype, a
    # Python-float alpha becomes float32 and the multiply below raises a
    # dtype-mismatch error for e.g. float64 or float16 features.
    alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
    # NOTE: max(alpha * x, x) implements leaky ReLU only for alpha <= 1;
    # callers are expected to pass a small slope (default 0.2).
    return math_ops.maximum(alpha * features, features)
def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keep its last dimension."""
rank = array_ops.rank(logits)

View File

@@ -834,6 +834,32 @@ class ReluTest(test_lib.TestCase):
self.assertTrue(np.isnan(z).all())
class LeakyReluTest(test_lib.TestCase):
  """Tests for nn_ops.leaky_relu."""

  def testRange(self):
    """leaky_relu preserves shape and acts as identity on inputs in [0, 1)."""
    batch_size = 3
    height, width = 4, 4
    np.random.seed(1)  # Make it reproducible.
    inputs = np.random.uniform(
        size=(batch_size, height, width, 3)).astype(np.float32)
    inputs = constant_op.constant(inputs)
    outputs = nn_ops.leaky_relu(inputs)
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(inputs.shape, outputs.shape)
    with self.test_session() as sess:
      inputs, outputs = sess.run([inputs, outputs])
    # Uniform samples lie in [0, 1), so leaky_relu leaves them unchanged.
    self.assertGreaterEqual(outputs.min(), 0.0)
    self.assertLessEqual(outputs.max(), 1.0)
    self.assertAllClose(inputs, outputs)

  def testValues(self):
    """Negative inputs are scaled by the default alpha=0.2; others pass through."""
    np_values = np.array([-1.0, 0.0, 0.5, 1.0, 2.0], dtype=np.float32)
    outputs = nn_ops.leaky_relu(constant_op.constant(np_values))
    with self.test_session() as sess:
      outputs = sess.run(outputs)
    self.assertAllClose(outputs, [-0.2, 0.0, 0.5, 1.0, 2.0])
class MomentsTest(test_lib.TestCase):
def doOutputTest(self, input_shape, moments_axes, tol=1e-4,

View File

@@ -168,6 +168,10 @@ tf_module {
name: "l2_normalize"
argspec: "args=[\'x\', \'dim\', \'epsilon\', \'name\'], varargs=None, keywords=None, defaults=[\'1e-12\', \'None\'], "
}
member_method {
name: "leaky_relu"
argspec: "args=[\'features\', \'alpha\', \'name\'], varargs=None, keywords=None, defaults=[\'0.2\', \'None\'], "
}
member_method {
name: "learned_unigram_candidate_sampler"
argspec: "args=[\'true_classes\', \'num_true\', \'num_sampled\', \'unique\', \'range_max\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "