From 84d354f945a85368b1dcece6c203fc5538ff2fab Mon Sep 17 00:00:00 2001
From: Lukas Geiger
Date: Tue, 31 Mar 2020 22:45:32 +0100
Subject: [PATCH] Remove expired forward compatibility horizons

---
 .../compiler/tests/fused_batchnorm_test.py    |  4 --
 .../python/keras/layers/normalization.py      |  2 -
 .../python/ops/nn_fused_batchnorm_test.py     |  9 +----
 tensorflow/python/ops/nn_impl.py              | 40 ++++++-------------
 4 files changed, 14 insertions(+), 41 deletions(-)

diff --git a/tensorflow/compiler/tests/fused_batchnorm_test.py b/tensorflow/compiler/tests/fused_batchnorm_test.py
index 6a9076e9be8..a36effe5984 100644
--- a/tensorflow/compiler/tests/fused_batchnorm_test.py
+++ b/tensorflow/compiler/tests/fused_batchnorm_test.py
@@ -23,7 +23,6 @@ import numpy as np
 from tensorflow.compiler.tests import test_utils
 from tensorflow.compiler.tests import xla_test
-from tensorflow.python.compat import compat
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import gen_nn_ops
 from tensorflow.python.ops import gradient_checker
@@ -132,9 +131,6 @@ class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):

   def _testLearning(self, use_gradient_checker, data_format,
                     exponential_avg_factor):
-    if not compat.forward_compatible(2020, 3,
-                                     6) and exponential_avg_factor != 1.0:
-      self.skipTest("running average not available.")
     channel = 3
     x_shape = [2, 2, 6, channel]
     scale_shape = [channel]
diff --git a/tensorflow/python/keras/layers/normalization.py b/tensorflow/python/keras/layers/normalization.py
index 97da2954b65..e1fb21c76ef 100644
--- a/tensorflow/python/keras/layers/normalization.py
+++ b/tensorflow/python/keras/layers/normalization.py
@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from tensorflow.python.compat import compat
 from tensorflow.python.distribute import distribution_strategy_context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -547,7 +546,6 @@ class BatchNormalizationBase(Layer):
     # after fixing graph pattern matching and enabling fused_batch_norm to
     # take exponential_avg_factor as a tensor input.
     use_fused_avg_updates = (
-        compat.forward_compatible(2020, 3, 6) and
         ops.executing_eagerly_outside_functions() and
         isinstance(self.momentum, (float, int)) and
         device_context.enclosing_tpu_context() is None)
diff --git a/tensorflow/python/ops/nn_fused_batchnorm_test.py b/tensorflow/python/ops/nn_fused_batchnorm_test.py
index a809b768833..5497325f6c0 100644
--- a/tensorflow/python/ops/nn_fused_batchnorm_test.py
+++ b/tensorflow/python/ops/nn_fused_batchnorm_test.py
@@ -20,7 +20,6 @@ from __future__ import print_function

 import numpy as np

-from tensorflow.python.compat import compat
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import test_util
@@ -380,13 +379,7 @@ class BatchNormalizationTest(test.TestCase):
     use_gpu_vals = [False]
     if test.is_gpu_available(cuda_only=True):
       use_gpu_vals += [True]
-    factors = [
-        1.0,
-    ]
-    if compat.forward_compatible(2020, 3, 6):
-      factors += [
-          0.6,
-      ]
+    factors = [1.0, 0.6]
     for dtype in [np.float16, np.float32]:
       for use_gpu in use_gpu_vals:
         for data_format in ['NHWC', 'NCHW']:
diff --git a/tensorflow/python/ops/nn_impl.py b/tensorflow/python/ops/nn_impl.py
index 8a3a620f765..e7e44a6d490 100644
--- a/tensorflow/python/ops/nn_impl.py
+++ b/tensorflow/python/ops/nn_impl.py
@@ -20,7 +20,6 @@ from __future__ import print_function

 import math

-from tensorflow.python.compat import compat
 from tensorflow.python.distribute import distribution_strategy_context as ds
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -1458,7 +1457,7 @@ def batch_normalization(x,
   `tf.nn.moments(..., keepdims=False)` during training, or running averages
   thereof during inference.

-  See equation 11 in Algorithm 2 of source: 
+  See equation 11 in Algorithm 2 of source:
   [Batch Normalization: Accelerating Deep Network Training by
   Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
   (http://arxiv.org/abs/1502.03167).
@@ -1589,31 +1588,18 @@ def fused_batch_norm(
   min_epsilon = 1.001e-5
   epsilon = epsilon if epsilon > min_epsilon else min_epsilon

-  if compat.forward_compatible(2020, 3, 6):
-    y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
-        x,
-        scale,
-        offset,
-        mean,
-        variance,
-        epsilon=epsilon,
-        exponential_avg_factor=exponential_avg_factor,
-        data_format=data_format,
-        is_training=is_training,
-        name=name)
-    return y, running_mean, running_var
-  else:
-    y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
-        x,
-        scale,
-        offset,
-        mean,
-        variance,
-        epsilon=epsilon,
-        data_format=data_format,
-        is_training=is_training,
-        name=name)
-    return y, running_mean, running_var
+  y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
+      x,
+      scale,
+      offset,
+      mean,
+      variance,
+      epsilon=epsilon,
+      exponential_avg_factor=exponential_avg_factor,
+      data_format=data_format,
+      is_training=is_training,
+      name=name)
+  return y, running_mean, running_var


 @tf_export(v1=["nn.batch_norm_with_global_normalization"])
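
Note for reviewers on the attribute this patch now passes unconditionally: per the
documented FusedBatchNormV3 semantics, exponential_avg_factor lets the kernel blend
the batch statistics into the running statistics itself, instead of returning raw
batch statistics for the caller to blend. A minimal NumPy sketch of that update rule
(an illustration of the intended math, not the op implementation; the helper name
is hypothetical):

import numpy as np

def blended_running_stats(running_mean, running_var, batch_mean, batch_var,
                          exponential_avg_factor):
  # factor == 1.0 reproduces the old behaviour: the op hands back the raw
  # batch statistics. factor < 1.0 yields an exponential moving average.
  f = exponential_avg_factor
  new_mean = (1.0 - f) * running_mean + f * batch_mean
  new_var = (1.0 - f) * running_var + f * batch_var
  return new_mean, new_var

# Example with factor 0.6, the value the updated test now always exercises:
m, v = blended_running_stats(np.zeros(3), np.ones(3),
                             np.full(3, 2.0), np.full(3, 4.0), 0.6)

This is also what use_fused_avg_updates in normalization.py relies on: when enabled,
Keras can fold its momentum into the op's factor rather than emitting separate
moving-average assign ops.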
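For context on why the guarded branches are safe to delete outright: as I understand
the compat machinery, compat.forward_compatible(year, month, day) compares the given
date against a horizon constant that is advanced with each build, so once the date has
expired the predicate always returns True in current builds (barring overrides such as
the TF_FORWARD_COMPATIBILITY_DELTA_DAYS environment variable) and the else-branches
become dead code. A sketch of the pattern being removed, with illustrative return
values:

from tensorflow.python.compat import compat

def choose_op_variant():
  # Gate new graph behaviour behind a horizon date so that GraphDef
  # consumers built before the new kernel landed never see the new
  # attribute; after the horizon, only the first branch is reachable.
  if compat.forward_compatible(2020, 3, 6):
    return "fused_batch_norm_v3 with exponential_avg_factor"
  return "fused_batch_norm_v3 without exponential_avg_factor"

# Tests can rewind the horizon to exercise the legacy branch:
with compat.forward_compatibility_horizon(2020, 3, 5):
  assert not compat.forward_compatible(2020, 3, 6)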