Remove stale forward compatibility horizons in unittests

Lukas Geiger 2019-12-29 17:55:51 +01:00
parent 3b33c87378
commit 19fb3ae6e2
5 changed files with 101 additions and 111 deletions
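
TensorFlow stages new op behavior behind `compat.forward_compatible(...)` date checks; tests opt in early by wrapping their bodies in the `compat.forward_compatibility_horizon(...)` context manager. Once the wrapped date lies in the past the guard always passes and the wrapper is a no-op, which is why each test below simply drops it and dedents. A minimal sketch of the mechanism (the date shown is one of the expired horizons removed here):

    from tensorflow.python.compat import compat

    # The default horizon sits roughly three weeks ahead of today, so a
    # check against a long-past date always succeeds on current builds.
    print(compat.forward_compatible(2019, 4, 26))  # True

    # Tests used this context manager to simulate a future horizon; once
    # real time catches up with the date, the wrapper changes nothing.
    with compat.forward_compatibility_horizon(2019, 4, 27):
      print(compat.forward_compatible(2019, 4, 26))  # still True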


@@ -23,7 +23,6 @@ import itertools
 import numpy as np
 
 from tensorflow.compiler.tests import xla_test
-from tensorflow.python.compat import compat
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import errors
 from tensorflow.python.ops import array_ops
@@ -1113,59 +1112,58 @@ class BinaryOpsTest(xla_test.XLATestCase):
   def testBatchMatMulBroadcast(self):
     """Tests broadcasting behavior of BatchMatMul."""
-    with compat.forward_compatibility_horizon(2019, 4, 26):
-      # [2, 3] @ [1, 3, 4] -> [1, 2, 4]
-      self._testBinary(
-          math_ops.matmul,
-          np.array([[10, 20, 30], [11, 21, 31]], dtype=np.float32),
-          np.array([[[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]]],
-                   dtype=np.float32),
-          expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
-                            dtype=np.float32))
-      # [1, 2, 3] @ [3, 4] -> [1, 2, 4]
-      self._testBinary(
-          math_ops.matmul,
-          np.array([[[10, 20, 30], [11, 21, 31]]], dtype=np.float32),
-          np.array([[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]],
-                   dtype=np.float32),
-          expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
-                            dtype=np.float32))
-      # [2, 1, 3] @ [3, 1] -> [2, 1, 1]
-      self._testBinary(
-          math_ops.matmul,
-          np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
-          np.array([[1], [2], [3]], dtype=np.float32),
-          expected=np.array([[[140]], [[146]]], dtype=np.float32))
-      # [2, 1, 3] @ [1, 3] -> [2, 1, 1] (adjoint_b)
-      self._testBinary(
-          lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
-          np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
-          np.array([[1, 2, 3]], dtype=np.float32),
-          expected=np.array([[[140]], [[146]]], dtype=np.float32))
-      # [2, 3, 1] @ [3, 1] -> [2, 1, 1] (adjoint_a)
-      self._testBinary(
-          lambda x, y: math_ops.matmul(x, y, adjoint_a=True),
-          np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
-          np.array([[1], [2], [3]], dtype=np.float32),
-          expected=np.array([[[140]], [[146]]], dtype=np.float32))
-      # [2, 3, 1] @ [1, 3] -> [2, 1, 1] (adjoint_a and adjoint_b)
-      self._testBinary(
-          lambda x, y: math_ops.matmul(x, y, adjoint_a=True, adjoint_b=True),
-          np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
-          np.array([[1, 2, 3]], dtype=np.float32),
-          expected=np.array([[[140]], [[146]]], dtype=np.float32))
-      # [5, 1, 2, 3] @ [1, 7, 3, 4] -> [5, 7, 2, 4]
-      self._testBinary(
-          math_ops.matmul,
-          np.ones([5, 1, 2, 3], dtype=np.float32),
-          np.ones([1, 7, 3, 4], dtype=np.float32),
-          expected=np.full([5, 7, 2, 4], 3, dtype=np.float32))
-      # [4, 5, 1, 2, 3] @ [1, 1, 3, 5] -> [4, 5, 1, 2, 5]
-      self._testBinary(
-          math_ops.matmul,
-          np.full([4, 5, 1, 2, 3], 2., dtype=np.float32),
-          np.full([1, 1, 3, 5], 3., dtype=np.float32),
-          expected=np.full([4, 5, 1, 2, 5], 18., dtype=np.float32))
+    # [2, 3] @ [1, 3, 4] -> [1, 2, 4]
+    self._testBinary(
+        math_ops.matmul,
+        np.array([[10, 20, 30], [11, 21, 31]], dtype=np.float32),
+        np.array([[[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]]],
+                 dtype=np.float32),
+        expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
+                          dtype=np.float32))
+    # [1, 2, 3] @ [3, 4] -> [1, 2, 4]
+    self._testBinary(
+        math_ops.matmul,
+        np.array([[[10, 20, 30], [11, 21, 31]]], dtype=np.float32),
+        np.array([[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]],
+                 dtype=np.float32),
+        expected=np.array([[[140, 280, 420, 560], [146, 292, 438, 584]]],
+                          dtype=np.float32))
+    # [2, 1, 3] @ [3, 1] -> [2, 1, 1]
+    self._testBinary(
+        math_ops.matmul,
+        np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
+        np.array([[1], [2], [3]], dtype=np.float32),
+        expected=np.array([[[140]], [[146]]], dtype=np.float32))
+    # [2, 1, 3] @ [1, 3] -> [2, 1, 1] (adjoint_b)
+    self._testBinary(
+        lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
+        np.array([[[10, 20, 30]], [[11, 21, 31]]], dtype=np.float32),
+        np.array([[1, 2, 3]], dtype=np.float32),
+        expected=np.array([[[140]], [[146]]], dtype=np.float32))
+    # [2, 3, 1] @ [3, 1] -> [2, 1, 1] (adjoint_a)
+    self._testBinary(
+        lambda x, y: math_ops.matmul(x, y, adjoint_a=True),
+        np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
+        np.array([[1], [2], [3]], dtype=np.float32),
+        expected=np.array([[[140]], [[146]]], dtype=np.float32))
+    # [2, 3, 1] @ [1, 3] -> [2, 1, 1] (adjoint_a and adjoint_b)
+    self._testBinary(
+        lambda x, y: math_ops.matmul(x, y, adjoint_a=True, adjoint_b=True),
+        np.array([[[10], [20], [30]], [[11], [21], [31]]], dtype=np.float32),
+        np.array([[1, 2, 3]], dtype=np.float32),
+        expected=np.array([[[140]], [[146]]], dtype=np.float32))
+    # [5, 1, 2, 3] @ [1, 7, 3, 4] -> [5, 7, 2, 4]
+    self._testBinary(
+        math_ops.matmul,
+        np.ones([5, 1, 2, 3], dtype=np.float32),
+        np.ones([1, 7, 3, 4], dtype=np.float32),
+        expected=np.full([5, 7, 2, 4], 3, dtype=np.float32))
+    # [4, 5, 1, 2, 3] @ [1, 1, 3, 5] -> [4, 5, 1, 2, 5]
+    self._testBinary(
+        math_ops.matmul,
+        np.full([4, 5, 1, 2, 3], 2., dtype=np.float32),
+        np.full([1, 1, 3, 5], 3., dtype=np.float32),
+        expected=np.full([4, 5, 1, 2, 5], 18., dtype=np.float32))
 
   def testPad(self):
     for dtype, pad_type in itertools.product(
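
The broadcasting contract exercised above matches NumPy's batched matmul, so the first expected value can be checked outside the XLA harness; a quick NumPy sketch (not part of the diff):

    import numpy as np

    # [2, 3] @ [1, 3, 4] -> [1, 2, 4]: the 2-D left operand is broadcast
    # against the size-1 batch dimension of the right operand.
    a = np.array([[10, 20, 30], [11, 21, 31]], dtype=np.float32)
    b = np.array([[[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]]],
                 dtype=np.float32)
    print(np.matmul(a, b).shape)     # (1, 2, 4)
    print(np.matmul(a, b)[0, 0, 0])  # 10*1 + 20*2 + 30*3 = 140.0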


@@ -20,7 +20,6 @@ from __future__ import print_function
 from absl.testing import parameterized
 
 from tensorflow.core.protobuf import config_pb2
-from tensorflow.python.compat import compat
 from tensorflow.python.data.experimental.ops import prefetching_ops
 from tensorflow.python.data.kernel_tests import test_base
 from tensorflow.python.data.ops import dataset_ops
@@ -420,24 +419,23 @@ class CopyToDeviceTest(test_base.DatasetTestBase, parameterized.TestCase):
     if not test_util.is_gpu_available():
       self.skipTest("No GPU available")
 
-    with compat.forward_compatibility_horizon(2018, 8, 4):
-      host_dataset = dataset_ops.Dataset.range(10)
-      device_dataset = host_dataset.apply(
-          prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
-      back_to_cpu_dataset = device_dataset.apply(
-          prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))
-
-      with ops.device("/cpu:0"):
-        iterator = dataset_ops.make_initializable_iterator(back_to_cpu_dataset)
-        next_element = iterator.get_next()
-
-      with self.cached_session(
-          config=config_pb2.ConfigProto(allow_soft_placement=False)):
-        self.evaluate(iterator.initializer)
-        for i in range(10):
-          self.assertEqual(i, self.evaluate(next_element))
-        with self.assertRaises(errors.OutOfRangeError):
-          self.evaluate(next_element)
+    host_dataset = dataset_ops.Dataset.range(10)
+    device_dataset = host_dataset.apply(
+        prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
+    back_to_cpu_dataset = device_dataset.apply(
+        prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))
+
+    with ops.device("/cpu:0"):
+      iterator = dataset_ops.make_initializable_iterator(back_to_cpu_dataset)
+      next_element = iterator.get_next()
+
+    with self.cached_session(
+        config=config_pb2.ConfigProto(allow_soft_placement=False)):
+      self.evaluate(iterator.initializer)
+      for i in range(10):
+        self.assertEqual(i, self.evaluate(next_element))
+      with self.assertRaises(errors.OutOfRangeError):
+        self.evaluate(next_element)
 
   @combinations.generate(test_base.graph_only_combinations())
   def testCopyToDeviceWithReInit(self):
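
The same round trip is available through the public API; a hedged sketch in eager mode, assuming a visible GPU:

    import tensorflow as tf

    # Host -> GPU -> host; each copy_to_device stage pins the dataset's
    # ops to its target device instead of relying on soft placement.
    ds = tf.data.Dataset.range(10)
    ds = ds.apply(
        tf.data.experimental.copy_to_device("/gpu:0", source_device="/cpu:0"))
    ds = ds.apply(
        tf.data.experimental.copy_to_device("/cpu:0", source_device="/gpu:0"))
    for x in ds:
      print(int(x))  # 0 through 9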


@@ -25,7 +25,6 @@ from tensorflow.core.protobuf import device_properties_pb2
 from tensorflow.core.protobuf import rewriter_config_pb2
 from tensorflow.core.protobuf import saver_pb2
 from tensorflow.python.client import session
-from tensorflow.python.compat import compat
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
@@ -1457,29 +1456,28 @@ class LayoutOptimizerTest(test.TestCase):
   @test_util.deprecated_graph_mode_only
   def testBinaryOpSecondPort(self):
-    with compat.forward_compatibility_horizon(2019, 6, 7):
-      if test.is_gpu_available(cuda_only=True):
-        output = _model_with_second_port()
-
-        with session.Session(config=_get_config(False)) as sess:
-          output_val_ref = self.evaluate(output)
-
-        with session.Session(config=_get_config()) as sess:
-          metadata = config_pb2.RunMetadata()
-          output_val = sess.run(output, run_metadata=metadata)
-
-        nodes = []
-        num_transposes = 0
-        for node in metadata.cost_graph.node:
-          if _is_transpose(node.name):
-            num_transposes += 1
-          nodes.append(node.name)
-
-        expected_num_transposes = 2
-        self.assertEqual(expected_num_transposes, num_transposes)
-        self._assert_trans_nhwc_to_nchw('FusedBatchNormV3-0', nodes)
-        self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
-        self.assertAllClose(output_val_ref, output_val, atol=1e-3)
+    if test.is_gpu_available(cuda_only=True):
+      output = _model_with_second_port()
+
+      with session.Session(config=_get_config(False)) as sess:
+        output_val_ref = self.evaluate(output)
+
+      with session.Session(config=_get_config()) as sess:
+        metadata = config_pb2.RunMetadata()
+        output_val = sess.run(output, run_metadata=metadata)
+
+      nodes = []
+      num_transposes = 0
+      for node in metadata.cost_graph.node:
+        if _is_transpose(node.name):
+          num_transposes += 1
+        nodes.append(node.name)
+
+      expected_num_transposes = 2
+      self.assertEqual(expected_num_transposes, num_transposes)
+      self._assert_trans_nhwc_to_nchw('FusedBatchNormV3-0', nodes)
+      self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
+      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
   @test_util.deprecated_graph_mode_only
   def testGradient(self):


@@ -23,7 +23,6 @@ import time
 import numpy as np
 
 from tensorflow.python.client import session
-from tensorflow.python.compat import compat
 from tensorflow.python.eager import context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -375,15 +374,14 @@ class GatherNdTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def testGatherNdResourceVariable(self):
-    with compat.forward_compatibility_horizon(2019, 4, 30):
-      with self.cached_session():
-        v = resource_variable_ops.ResourceVariable(
-            constant_op.constant([[1, 2], [3, 4], [5, 6]]))
-        self.evaluate(variables.global_variables_initializer())
-        gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
-        if not context.executing_eagerly():  # .op doesn't make sense in Eager
-          self.assertEqual("ResourceGatherNd", gather.op.inputs[0].op.type)
-        self.assertAllEqual([2, 5], gather)
+    with self.cached_session():
+      v = resource_variable_ops.ResourceVariable(
+          constant_op.constant([[1, 2], [3, 4], [5, 6]]))
+      self.evaluate(variables.global_variables_initializer())
+      gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
+      if not context.executing_eagerly():  # .op doesn't make sense in Eager
+        self.assertEqual("ResourceGatherNd", gather.op.inputs[0].op.type)
+      self.assertAllEqual([2, 5], gather)
 
 
 class GatherNdOpBenchmark(test.Benchmark):
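
`gather_nd` reads each row of the index argument as a full coordinate into the variable, so the expected `[2, 5]` is easy to reproduce by hand; a plain NumPy sketch of the indexing rule (not the ResourceGatherNd kernel itself):

    import numpy as np

    params = np.array([[1, 2], [3, 4], [5, 6]])
    indices = [[0, 1], [2, 0]]
    # [0, 1] -> params[0][1] == 2; [2, 0] -> params[2][0] == 5
    print([int(params[tuple(i)]) for i in indices])  # [2, 5]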


@@ -20,7 +20,6 @@ from __future__ import print_function
 
 from absl.testing import parameterized
 
-from tensorflow.python.compat import compat
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import test_util
@@ -85,16 +84,15 @@ class RegexFullMatchOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testStaticRegexFullMatchDelegation(self):
-    with compat.forward_compatibility_horizon(2018, 11, 20):
-      with self.cached_session():
-        input_tensor = constant_op.constant("foo", dtypes.string)
-        pattern = "[a-z]*"
-        op = string_ops.regex_full_match(input_tensor, pattern)
-        self.assertTrue(op.name.startswith("StaticRegexFullMatch"), op.name)
-
-        pattern_tensor = constant_op.constant("[a-z]*", dtypes.string)
-        op_vec = string_ops.regex_full_match(input_tensor, pattern_tensor)
-        self.assertTrue(op_vec.name.startswith("RegexFullMatch"), op.name)
+    with self.cached_session():
+      input_tensor = constant_op.constant("foo", dtypes.string)
+      pattern = "[a-z]*"
+      op = string_ops.regex_full_match(input_tensor, pattern)
+      self.assertTrue(op.name.startswith("StaticRegexFullMatch"), op.name)
+
+      pattern_tensor = constant_op.constant("[a-z]*", dtypes.string)
+      op_vec = string_ops.regex_full_match(input_tensor, pattern_tensor)
+      self.assertTrue(op_vec.name.startswith("RegexFullMatch"), op.name)
 
 
 if __name__ == "__main__":
   test.main()
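
The delegation verified above is purely an op-selection detail: a Python-constant pattern lets `regex_full_match` bake the regex into a `StaticRegexFullMatch` node at graph-construction time, while a tensor-valued pattern keeps the dynamic `RegexFullMatch` op; the results are identical either way. A small sketch through the public API, assuming a TF 2.x eager build:

    import tensorflow as tf

    # Full-match semantics: the entire string must match the pattern.
    print(tf.strings.regex_full_match(["foo", "Bar"], "[a-z]*"))
    # tf.Tensor([ True False], shape=(2,), dtype=bool)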