Remove expired forward compatibility horizons

Lukas Geiger 2020-03-31 22:45:32 +01:00
parent 69ccb409a0
commit 84d354f945
4 changed files with 14 additions and 41 deletions
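
All four files delete checks against the same expired horizon: compat.forward_compatible(2020, 3, 6) returns True once that date has passed, so every guarded branch now unconditionally takes the new-op path and both the check and its fallback are dead code. A minimal sketch of the gating pattern being removed (the branch bodies are hypothetical placeholders, not code from this repository):

    from tensorflow.python.compat import compat

    def emit_new_op():      # hypothetical stand-in for the new-op path
      print("new op path")

    def emit_legacy_op():   # hypothetical stand-in for the pre-horizon path
      print("legacy path, unreachable after 2020-03-06")

    # forward_compatible(year, month, day) is True once that date has passed
    # (or when a test overrides the horizon), so this guard is now constant
    # and the else branch can never run again.
    if compat.forward_compatible(2020, 3, 6):
      emit_new_op()
    else:
      emit_legacy_op()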

View File

@@ -23,7 +23,6 @@ import numpy as np
 from tensorflow.compiler.tests import test_utils
 from tensorflow.compiler.tests import xla_test
-from tensorflow.python.compat import compat
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import gen_nn_ops
 from tensorflow.python.ops import gradient_checker
@@ -132,9 +131,6 @@ class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
 
   def _testLearning(self, use_gradient_checker, data_format,
                     exponential_avg_factor):
-    if not compat.forward_compatible(2020, 3,
-                                     6) and exponential_avg_factor != 1.0:
-      self.skipTest("running average not available.")
     channel = 3
     x_shape = [2, 2, 6, channel]
     scale_shape = [channel]

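The skip existed because exponential_avg_factor values other than 1.0 need the FusedBatchNormV3 attribute the horizon guarded; with the horizon expired, the test can always run. The factor controls how batch statistics are folded into the returned running statistics. A sketch of that documented update rule (plain Python; the helper name is ours, not TensorFlow API):

    def updated_stat(running_stat, batch_stat, exponential_avg_factor):
      # factor 1.0 returns the batch statistic unchanged (the old behavior);
      # smaller factors blend it into the existing running statistic.
      return ((1 - exponential_avg_factor) * running_stat
              + exponential_avg_factor * batch_stat)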
View File

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-from tensorflow.python.compat import compat
 from tensorflow.python.distribute import distribution_strategy_context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -547,7 +546,6 @@ class BatchNormalizationBase(Layer):
       # after fixing graph pattern matching and enabling fused_batch_norm to
       # take exponential_avg_factor as a tensor input.
       use_fused_avg_updates = (
-          compat.forward_compatible(2020, 3, 6) and
           ops.executing_eagerly_outside_functions() and
           isinstance(self.momentum, (float, int)) and
           device_context.enclosing_tpu_context() is None)

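With the compat check gone, use_fused_avg_updates depends only on running eagerly outside functions, a static momentum, and not being under a TPU context. The static-momentum requirement exists because the layer folds momentum into the op's exponential_avg_factor; conceptually (a sketch assuming the 1 - momentum correspondence used by the fused path, values illustrative):

    # Keras's moving-average update with momentum m,
    #   moving_mean = moving_mean * m + batch_mean * (1 - m),
    # is the fused update with exponential_avg_factor = 1 - m
    # (assumed mapping).
    momentum = 0.99
    exponential_avg_factor = 1.0 - momentum  # 0.01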
View File

@@ -20,7 +20,6 @@ from __future__ import print_function
 
 import numpy as np
 
-from tensorflow.python.compat import compat
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import test_util
@@ -380,13 +379,7 @@ class BatchNormalizationTest(test.TestCase):
     use_gpu_vals = [False]
     if test.is_gpu_available(cuda_only=True):
       use_gpu_vals += [True]
-    factors = [
-        1.0,
-    ]
-    if compat.forward_compatible(2020, 3, 6):
-      factors += [
-          0.6,
-      ]
+    factors = [1.0, 0.6]
     for dtype in [np.float16, np.float32]:
       for use_gpu in use_gpu_vals:
         for data_format in ['NHWC', 'NCHW']:

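Both factors are now exercised unconditionally. A worked example of what the two values mean for a running mean (pure Python, values illustrative):

    old_mean, batch_mean = 10.0, 4.0
    for factor in (1.0, 0.6):
      new_mean = (1 - factor) * old_mean + factor * batch_mean
      print(factor, round(new_mean, 6))  # 1.0 -> 4.0 (replace), 0.6 -> 6.4 (blend)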
View File

@@ -20,7 +20,6 @@ from __future__ import print_function
 
 import math
 
-from tensorflow.python.compat import compat
 from tensorflow.python.distribute import distribution_strategy_context as ds
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -1458,7 +1457,7 @@ def batch_normalization(x,
   `tf.nn.moments(..., keepdims=False)` during training, or running averages
   thereof during inference.
 
-  See equation 11 in Algorithm 2 of source:
+  See equation 11 in Algorithm 2 of source:
   [Batch Normalization: Accelerating Deep Network Training by
   Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
   (http://arxiv.org/abs/1502.03167).
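The equation this docstring cites is the normalization itself, y = scale * (x - mean) / sqrt(variance + epsilon) + offset. A reference sketch of that computation (our naming, not the fused kernel):

    def batch_norm_reference(x, mean, variance, offset, scale, epsilon):
      # Precompute the scaled inverse standard deviation, then apply the
      # affine transform: y = scale * (x - mean) / sqrt(var + eps) + offset.
      inv = scale / (variance + epsilon) ** 0.5
      return x * inv + (offset - mean * inv)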
@@ -1589,31 +1588,18 @@
   min_epsilon = 1.001e-5
   epsilon = epsilon if epsilon > min_epsilon else min_epsilon
-  if compat.forward_compatible(2020, 3, 6):
-    y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
-        x,
-        scale,
-        offset,
-        mean,
-        variance,
-        epsilon=epsilon,
-        exponential_avg_factor=exponential_avg_factor,
-        data_format=data_format,
-        is_training=is_training,
-        name=name)
-    return y, running_mean, running_var
-  else:
-    y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
-        x,
-        scale,
-        offset,
-        mean,
-        variance,
-        epsilon=epsilon,
-        data_format=data_format,
-        is_training=is_training,
-        name=name)
-    return y, running_mean, running_var
+  y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
+      x,
+      scale,
+      offset,
+      mean,
+      variance,
+      epsilon=epsilon,
+      exponential_avg_factor=exponential_avg_factor,
+      data_format=data_format,
+      is_training=is_training,
+      name=name)
+  return y, running_mean, running_var
 
 
 @tf_export(v1=["nn.batch_norm_with_global_normalization"])
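
After this change every call lowers to FusedBatchNormV3, with exponential_avg_factor always forwarded (its default of 1.0 preserves the old behavior). A usage sketch, assuming the v1 export tf.compat.v1.nn.fused_batch_norm and arbitrary example shapes:

    import tensorflow as tf

    x = tf.random.normal([2, 2, 6, 3])  # NHWC input with 3 channels
    scale = tf.ones([3])
    offset = tf.zeros([3])
    # exponential_avg_factor is left at its default of 1.0, so the returned
    # running statistics are just the batch statistics.
    y, batch_mean, batch_var = tf.compat.v1.nn.fused_batch_norm(
        x, scale, offset, is_training=True)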