Remove expired forward compatible check on dropout

PiperOrigin-RevId: 286419581
Change-Id: I25d7f0121d1ebaf972f28e682bac25879b928be2
Yanhua Sun 2019-12-19 10:36:39 -08:00 committed by TensorFlower Gardener
parent 54daf3c570
commit 7e1680206a

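Background, for readers outside the TensorFlow codebase: the deleted guard follows the forward-compatibility window pattern, where compat.forward_compatible(year, month, day) starts returning True once the compatibility horizon has moved past the given date. Below is a minimal sketch of the gating shape this commit retires; the dropout bodies are elided and the semantics of tensorflow.python.compat are assumed, so this is illustration only, not the real implementation.

# Sketch of the expired gate (not the real dropout bodies): once the
# forward-compatibility horizon passes 2019-12-16, the predicate is
# always True, so the else branch is dead code and can be deleted.
from tensorflow.python.compat import compat

def dropout_sketch(x):
  if compat.forward_compatible(2019, 12, 16):
    return x  # new implementation path (kept by this commit)
  else:
    return x  # old implementation path (removed by this commit)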

@@ -24,7 +24,6 @@ import os
 
 import numpy as np
 
-from tensorflow.python.compat import compat
 from tensorflow.python.eager import context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -45,7 +44,6 @@ from tensorflow.python.ops import random_ops
 from tensorflow.python.ops.gen_nn_ops import *
 # pylint: enable=wildcard-import
 from tensorflow.python.platform import device_context
-from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import deprecation
 from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.deprecation import deprecated_args
@@ -4403,100 +4401,51 @@ def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
     which is likely not what was intended.
   """
   with ops.name_scope(name, "dropout", [x]) as name:
-    # TODO(b/144930399): Remove this once the compatible window is passed.
-    if compat.forward_compatible(2019, 12, 16):
-      is_rate_number = isinstance(rate, numbers.Real)
-      if is_rate_number and (rate < 0 or rate >= 1):
-        raise ValueError("rate must be a scalar tensor or a float in the "
-                         "range [0, 1), got %g" % rate)
-      x = ops.convert_to_tensor(x, name="x")
-      x_dtype = x.dtype
-      if not x_dtype.is_floating:
-        raise ValueError("x has to be a floating point tensor since it's going "
-                         "to be scaled. Got a %s tensor instead." % x_dtype)
-      is_executing_eagerly = context.executing_eagerly()
-      if not tensor_util.is_tensor(rate):
-        if is_rate_number:
-          keep_prob = 1 - rate
-          scale = 1 / keep_prob
-          scale = ops.convert_to_tensor(scale, dtype=x_dtype)
-          ret = gen_math_ops.mul(x, scale)
-        else:
-          raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
+    is_rate_number = isinstance(rate, numbers.Real)
+    if is_rate_number and (rate < 0 or rate >= 1):
+      raise ValueError("rate must be a scalar tensor or a float in the "
+                       "range [0, 1), got %g" % rate)
+    x = ops.convert_to_tensor(x, name="x")
+    x_dtype = x.dtype
+    if not x_dtype.is_floating:
+      raise ValueError("x has to be a floating point tensor since it's going "
+                       "to be scaled. Got a %s tensor instead." % x_dtype)
+    is_executing_eagerly = context.executing_eagerly()
+    if not tensor_util.is_tensor(rate):
+      if is_rate_number:
+        keep_prob = 1 - rate
+        scale = 1 / keep_prob
+        scale = ops.convert_to_tensor(scale, dtype=x_dtype)
+        ret = gen_math_ops.mul(x, scale)
       else:
-        rate.get_shape().assert_has_rank(0)
-        rate_dtype = rate.dtype
-        if rate_dtype != x_dtype:
-          if not rate_dtype.is_compatible_with(x_dtype):
-            raise ValueError(
-                "Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
-                (x_dtype.name, rate_dtype.name, rate))
-          rate = gen_math_ops.cast(rate, x_dtype, name="rate")
-        one_tensor = constant_op.constant(1, dtype=x_dtype)
-        ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
-
-      noise_shape = _get_noise_shape(x, noise_shape)
-      # Sample a uniform distribution on [0.0, 1.0) and select values larger
-      # than rate.
-      #
-      # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
-      # and subtract 1.0.
-      random_tensor = random_ops.random_uniform(
-          noise_shape, seed=seed, dtype=x_dtype)
-      # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
-      # hence a >= comparison is used.
-      keep_mask = random_tensor >= rate
-      ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
-      if not is_executing_eagerly:
-        ret.set_shape(x.get_shape())
-      return ret
+        raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
     else:
-      x = ops.convert_to_tensor(x, name="x")
-      if not x.dtype.is_floating:
-        raise ValueError("x has to be a floating point tensor since it will "
-                         "be scaled. Got a %s tensor instead." % x.dtype)
-      if isinstance(rate, numbers.Real):
-        if not (rate >= 0 and rate < 1):
-          raise ValueError("rate must be a scalar tensor or a float in the "
-                           "range [0, 1), got %g" % rate)
-        if rate > 0.5:
-          logging.log_first_n(
-              logging.WARN, "Large dropout rate: %g (>0.5). In TensorFlow "
-              "2.x, dropout() uses dropout rate instead of keep_prob. "
-              "Please ensure that this is intended.", 5, rate)
-
-      # Early return if nothing needs to be dropped.
-      if isinstance(rate, numbers.Real) and rate == 0:
-        return x
-      if context.executing_eagerly():
-        if isinstance(rate, ops.EagerTensor):
-          if rate.numpy() == 0:
-            return x
-      else:
-        rate = ops.convert_to_tensor(rate, dtype=x.dtype, name="rate")
-        rate.get_shape().assert_has_rank(0)
-
-        # Do nothing if we know rate == 0
-        if tensor_util.constant_value(rate) == 0:
-          return x
-
-      noise_shape = _get_noise_shape(x, noise_shape)
-      # Sample a uniform distribution on [0.0, 1.0) and select values larger
-      # than rate.
-      #
-      # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
-      # and subtract 1.0.
-      random_tensor = random_ops.random_uniform(
-          noise_shape, seed=seed, dtype=x.dtype)
-      keep_prob = 1 - rate
-      scale = 1 / keep_prob
-      # NOTE: if (1.0 + rate) - 1 is equal to rate, then that
-      # float is selected, hence we use a >= comparison.
-      keep_mask = random_tensor >= rate
-      ret = x * scale * math_ops.cast(keep_mask, x.dtype)
-      if not context.executing_eagerly():
-        ret.set_shape(x.get_shape())
-      return ret
+      rate.get_shape().assert_has_rank(0)
+      rate_dtype = rate.dtype
+      if rate_dtype != x_dtype:
+        if not rate_dtype.is_compatible_with(x_dtype):
+          raise ValueError(
+              "Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
+              (x_dtype.name, rate_dtype.name, rate))
+        rate = gen_math_ops.cast(rate, x_dtype, name="rate")
+      one_tensor = constant_op.constant(1, dtype=x_dtype)
+      ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
+
+    noise_shape = _get_noise_shape(x, noise_shape)
+    # Sample a uniform distribution on [0.0, 1.0) and select values larger
+    # than rate.
+    #
+    # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
+    # and subtract 1.0.
+    random_tensor = random_ops.random_uniform(
+        noise_shape, seed=seed, dtype=x_dtype)
+    # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
+    # hence a >= comparison is used.
+    keep_mask = random_tensor >= rate
+    ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
+    if not is_executing_eagerly:
+      ret.set_shape(x.get_shape())
+    return ret
 
 
 @tf_export("math.top_k", "nn.top_k")
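For reference, a minimal usage sketch of the surviving code path (not part of this commit; it only illustrates the inverted scaling that the kept branch implements via tf.nn.dropout, the public wrapper around dropout_v2):

import tensorflow as tf

x = tf.ones([2, 4])
# With rate=0.5, kept elements are scaled by 1 / (1 - 0.5) = 2.0 and
# dropped elements become 0.0, so the expected value of each element
# is unchanged by dropout.
y = tf.nn.dropout(x, rate=0.5, seed=1)
print(y.numpy())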