Remove expired forward-compatibility check on dropout

PiperOrigin-RevId: 286419581
Change-Id: I25d7f0121d1ebaf972f28e682bac25879b928be2
Authored by Yanhua Sun on 2019-12-19 10:36:39 -08:00; committed by TensorFlower Gardener
parent 54daf3c570
commit 7e1680206a
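Context for the change: tf.compat.forward_compatible(year, month, day) returns True once the library's forward-compatibility horizon has passed the given date. Code guarded this way ships both branches for a window of time; once the window expires the guard is always True, leaving the else branch dead, and this commit deletes both the dead branch and the guard. A minimal sketch of the pattern, where op_with_compat_window, new_impl, and old_impl are hypothetical placeholders rather than names from this file:

from tensorflow.python.compat import compat

def new_impl(x):  # hypothetical stand-in for the branch this commit keeps
  return x

def old_impl(x):  # hypothetical stand-in for the branch this commit deletes
  return x

def op_with_compat_window(x):
  # Builds whose forward-compatibility horizon has passed 2019-12-16
  # take the new path; older builds keep the old behavior.
  if compat.forward_compatible(2019, 12, 16):
    return new_impl(x)
  return old_impl(x)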


@@ -24,7 +24,6 @@ import os
 
 import numpy as np
 
-from tensorflow.python.compat import compat
 from tensorflow.python.eager import context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -45,7 +44,6 @@ from tensorflow.python.ops import random_ops
 from tensorflow.python.ops.gen_nn_ops import *
 # pylint: enable=wildcard-import
 from tensorflow.python.platform import device_context
-from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import deprecation
 from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.deprecation import deprecated_args
@@ -4403,100 +4401,51 @@ def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
   which is likely not what was intended.
   """
   with ops.name_scope(name, "dropout", [x]) as name:
-    # TODO(b/144930399): Remove this once the compatible window is passed.
-    if compat.forward_compatible(2019, 12, 16):
-      is_rate_number = isinstance(rate, numbers.Real)
-      if is_rate_number and (rate < 0 or rate >= 1):
-        raise ValueError("rate must be a scalar tensor or a float in the "
-                         "range [0, 1), got %g" % rate)
-      x = ops.convert_to_tensor(x, name="x")
-      x_dtype = x.dtype
-      if not x_dtype.is_floating:
-        raise ValueError("x has to be a floating point tensor since it's going "
-                         "to be scaled. Got a %s tensor instead." % x_dtype)
-      is_executing_eagerly = context.executing_eagerly()
-      if not tensor_util.is_tensor(rate):
-        if is_rate_number:
-          keep_prob = 1 - rate
-          scale = 1 / keep_prob
-          scale = ops.convert_to_tensor(scale, dtype=x_dtype)
-          ret = gen_math_ops.mul(x, scale)
-        else:
-          raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
+    is_rate_number = isinstance(rate, numbers.Real)
+    if is_rate_number and (rate < 0 or rate >= 1):
+      raise ValueError("rate must be a scalar tensor or a float in the "
+                       "range [0, 1), got %g" % rate)
+    x = ops.convert_to_tensor(x, name="x")
+    x_dtype = x.dtype
+    if not x_dtype.is_floating:
+      raise ValueError("x has to be a floating point tensor since it's going "
+                       "to be scaled. Got a %s tensor instead." % x_dtype)
+    is_executing_eagerly = context.executing_eagerly()
+    if not tensor_util.is_tensor(rate):
+      if is_rate_number:
+        keep_prob = 1 - rate
+        scale = 1 / keep_prob
+        scale = ops.convert_to_tensor(scale, dtype=x_dtype)
+        ret = gen_math_ops.mul(x, scale)
       else:
-        rate.get_shape().assert_has_rank(0)
-        rate_dtype = rate.dtype
-        if rate_dtype != x_dtype:
-          if not rate_dtype.is_compatible_with(x_dtype):
-            raise ValueError(
-                "Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
-                (x_dtype.name, rate_dtype.name, rate))
-          rate = gen_math_ops.cast(rate, x_dtype, name="rate")
-        one_tensor = constant_op.constant(1, dtype=x_dtype)
-        ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
-
-      noise_shape = _get_noise_shape(x, noise_shape)
-      # Sample a uniform distribution on [0.0, 1.0) and select values larger
-      # than rate.
-      #
-      # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
-      # and subtract 1.0.
-      random_tensor = random_ops.random_uniform(
-          noise_shape, seed=seed, dtype=x_dtype)
-      # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
-      # hence a >= comparison is used.
-      keep_mask = random_tensor >= rate
-      ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
-      if not is_executing_eagerly:
-        ret.set_shape(x.get_shape())
-      return ret
+        raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
     else:
-      x = ops.convert_to_tensor(x, name="x")
-      if not x.dtype.is_floating:
-        raise ValueError("x has to be a floating point tensor since it will "
-                         "be scaled. Got a %s tensor instead." % x.dtype)
-      if isinstance(rate, numbers.Real):
-        if not (rate >= 0 and rate < 1):
-          raise ValueError("rate must be a scalar tensor or a float in the "
-                           "range [0, 1), got %g" % rate)
-        if rate > 0.5:
-          logging.log_first_n(
-              logging.WARN, "Large dropout rate: %g (>0.5). In TensorFlow "
-              "2.x, dropout() uses dropout rate instead of keep_prob. "
-              "Please ensure that this is intended.", 5, rate)
+      rate.get_shape().assert_has_rank(0)
+      rate_dtype = rate.dtype
+      if rate_dtype != x_dtype:
+        if not rate_dtype.is_compatible_with(x_dtype):
+          raise ValueError(
+              "Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
+              (x_dtype.name, rate_dtype.name, rate))
+        rate = gen_math_ops.cast(rate, x_dtype, name="rate")
+      one_tensor = constant_op.constant(1, dtype=x_dtype)
+      ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
 
-      # Early return if nothing needs to be dropped.
-      if isinstance(rate, numbers.Real) and rate == 0:
-        return x
-      if context.executing_eagerly():
-        if isinstance(rate, ops.EagerTensor):
-          if rate.numpy() == 0:
-            return x
-      else:
-        rate = ops.convert_to_tensor(rate, dtype=x.dtype, name="rate")
-        rate.get_shape().assert_has_rank(0)
-
-        # Do nothing if we know rate == 0
-        if tensor_util.constant_value(rate) == 0:
-          return x
-
-      noise_shape = _get_noise_shape(x, noise_shape)
-      # Sample a uniform distribution on [0.0, 1.0) and select values larger
-      # than rate.
-      #
-      # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
-      # and subtract 1.0.
-      random_tensor = random_ops.random_uniform(
-          noise_shape, seed=seed, dtype=x.dtype)
-      keep_prob = 1 - rate
-      scale = 1 / keep_prob
-      # NOTE: if (1.0 + rate) - 1 is equal to rate, then that
-      # float is selected, hence we use a >= comparison.
-      keep_mask = random_tensor >= rate
-      ret = x * scale * math_ops.cast(keep_mask, x.dtype)
-      if not context.executing_eagerly():
-        ret.set_shape(x.get_shape())
-      return ret
+    noise_shape = _get_noise_shape(x, noise_shape)
+    # Sample a uniform distribution on [0.0, 1.0) and select values larger
+    # than rate.
+    #
+    # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
+    # and subtract 1.0.
+    random_tensor = random_ops.random_uniform(
+        noise_shape, seed=seed, dtype=x_dtype)
+    # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
+    # hence a >= comparison is used.
+    keep_mask = random_tensor >= rate
+    ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
+    if not is_executing_eagerly:
+      ret.set_shape(x.get_shape())
+    return ret
 
 
 @tf_export("math.top_k", "nn.top_k")
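
Both the deleted else branch and the surviving code implement the same inverted-dropout scheme: pre-scale the input by 1 / (1 - rate), then zero each element whose uniform sample falls below rate, so the output matches the input in expectation (a kept element contributes x / (1 - rate) with probability 1 - rate). A standalone NumPy sketch of that retained logic; dropout_sketch is an illustrative name, not TensorFlow API:

import numpy as np

def dropout_sketch(x, rate, seed=None):
  # Mirror the kept code path: pre-scale by 1 / (1 - rate), then zero
  # every position whose uniform sample falls below `rate`. The >=
  # comparison matches the NOTE in the diff: samples equal to `rate`
  # are kept.
  rng = np.random.default_rng(seed)
  scale = 1.0 / (1.0 - rate)
  random_tensor = rng.uniform(0.0, 1.0, size=x.shape)
  keep_mask = (random_tensor >= rate).astype(x.dtype)
  return x * scale * keep_mask

x = np.ones((2, 4), dtype=np.float32)
# About half the entries survive and are scaled to 2.0; the rest become 0.
print(dropout_sketch(x, rate=0.5, seed=0))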