Merge pull request #30422 from siju-samuel:deprecated_dropout_keepprob_removed
PiperOrigin-RevId: 257149096
commit 36ac0a1ac5
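Note: the change is mechanical. tf.nn.dropout's keep_prob argument is deprecated in favour of rate, where rate = 1 - keep_prob. A minimal sketch of the equivalent calls (illustrative only, not part of this diff):

    import tensorflow as tf

    x = tf.ones([4, 8])
    keep_prob = 0.8

    # Deprecated spelling:
    #   y = tf.nn.dropout(x, keep_prob)
    # Equivalent spelling with the rate argument; kept elements are still
    # scaled by 1 / keep_prob:
    y = tf.nn.dropout(x, rate=1 - keep_prob)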
@@ -103,7 +103,7 @@ def train():
   with tf.name_scope('dropout'):
     keep_prob = tf.placeholder(tf.float32)
     tf.summary.scalar('dropout_keep_probability', keep_prob)
-    dropped = tf.nn.dropout(hidden1, keep_prob)
+    dropped = tf.nn.dropout(hidden1, rate=(1 - keep_prob))
 
   # Do not apply softmax activation yet, see below.
   y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)
@@ -883,7 +883,7 @@ class HierarchicalController(Controller):
               actions.read(i - 1))
       )
       if self.hparams.keep_prob is not None:
-        signal = nn_ops.dropout(signal, self.hparams.keep_prob)
+        signal = nn_ops.dropout(signal, rate=(1 - self.hparams.keep_prob))
       next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
       query = math_ops.matmul(next_h, attn_w_2)
       query = array_ops.reshape(
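The controller hunk applies the same rewrite to dropout that is guarded by a hyperparameter. A hypothetical helper showing the rewritten form (names are illustrative, not the controller's actual API):

    import tensorflow as tf

    def maybe_dropout(signal, keep_prob=None):
      # Apply dropout only when a keep probability is configured,
      # converting it to the rate form expected by tf.nn.dropout.
      if keep_prob is not None:
        signal = tf.nn.dropout(signal, rate=1 - keep_prob)
      return signal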
@@ -313,7 +313,7 @@ class DropoutTest(test_lib.TestCase):
     num_iter = 10
     for keep_prob in [0.1, 0.5, 0.8]:
       t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
-      dropout = nn_ops.dropout(t, keep_prob)
+      dropout = nn_ops.dropout(t, rate=(1 - keep_prob))
       final_count = 0
       self.assertEqual([x_dim, y_dim], dropout.get_shape())
       for _ in xrange(0, num_iter):
@@ -340,7 +340,7 @@ class DropoutTest(test_lib.TestCase):
     num_iter = 10
     for keep_prob in [0.1, 0.5, 0.8]:
       t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
-      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
+      dropout = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
       self.assertEqual([x_dim, y_dim], dropout.get_shape())
       final_count = 0
       for _ in xrange(0, num_iter):
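The noise_shape hunks only rename the probability argument; noise_shape keeps its meaning. A size-1 entry in noise_shape is broadcast, so one keep/drop decision is shared along that axis. A small sketch assuming current tf.nn.dropout:

    import tensorflow as tf

    x = tf.ones([3, 5])
    # noise_shape=[3, 1]: one decision per row, broadcast across the columns,
    # so each row is either kept (and scaled) or zeroed as a whole.
    y = tf.nn.dropout(x, rate=0.5, noise_shape=[3, 1])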
@@ -364,7 +364,7 @@ class DropoutTest(test_lib.TestCase):
     num_iter = 10
     for keep_prob in [0.1, 0.5, 0.8]:
       t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
-      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
+      dropout = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
       self.assertEqual([x_dim, y_dim], dropout.get_shape())
       for _ in xrange(0, num_iter):
         value = self.evaluate(dropout)
@@ -409,7 +409,9 @@ class DropoutTest(test_lib.TestCase):
     keep_prob = 0.5
     x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
     dropout_x = nn_ops.dropout(
-        x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32))
+        x,
+        rate=(1 - keep_prob),
+        noise_shape=array_ops.placeholder(dtypes.int32))
     self.assertEqual(x.get_shape(), dropout_x.get_shape())
 
   def testPartialShapedDropout(self):
@@ -419,7 +421,7 @@ class DropoutTest(test_lib.TestCase):
     for keep_prob in [0.1, 0.5, 0.8]:
       t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
       # Set noise_shape=[None, 1] which means [x_dim, 1].
-      dropout = nn_ops.dropout(t, keep_prob, noise_shape=[None, 1])
+      dropout = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[None, 1])
       self.assertEqual([x_dim, y_dim], dropout.get_shape())
       final_count = 0
       for _ in xrange(0, num_iter):
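As the in-test comment notes, a None entry in noise_shape is filled in from the input's shape, so [None, 1] behaves like [x_dim, 1] above. An illustrative sketch under that assumption:

    import tensorflow as tf

    x = tf.ones([3, 5])
    # None is taken from x's shape, so this is equivalent to noise_shape=[3, 1].
    y = tf.nn.dropout(x, rate=0.5, noise_shape=[None, 1])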
@@ -478,22 +480,23 @@ class DropoutTest(test_lib.TestCase):
     keep_prob = 0.5
     t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
     with self.assertRaises(ValueError):
-      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
+      _ = nn_ops.dropout(
+          t, rate=(1 - keep_prob), noise_shape=[x_dim, y_dim + 10])
     with self.assertRaises(ValueError):
-      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
+      _ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, y_dim, 5])
     with self.assertRaises(ValueError):
-      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim + 3])
+      _ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim + 3])
     with self.assertRaises(ValueError):
-      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim])
+      _ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim])
     # test that broadcasting proceeds
-    _ = nn_ops.dropout(t, keep_prob, noise_shape=[y_dim])
-    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, y_dim])
-    _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
-    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, 1])
+    _ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[y_dim])
+    _ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[1, y_dim])
+    _ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
+    _ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[1, 1])
 
   def testNoDropoutFast(self):
     x = array_ops.zeros((5,))
-    y = nn_ops.dropout(x, keep_prob=1)
+    y = nn_ops.dropout(x, rate=0)
     self.assertTrue(x is y)
 
     y = nn_ops.dropout_v2(x, rate=0)
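The last hunk exercises the fast path: when rate is the Python constant 0, dropout returns its input unchanged, which is why the test can assert object identity. A minimal illustration, assuming current tf.nn.dropout behaviour:

    import tensorflow as tf

    x = tf.zeros([5])
    y = tf.nn.dropout(x, rate=0)  # nothing is dropped; the fast path returns the input
    print(y is x)                 # the test above expects the very same tensor back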