Remove expired forward compatibility horizons
This PR removes forward compatibility checks whose horizon dates have already passed. Such checks always evaluate to `True`, so the branches they guard are dead code and can be folded away.
This commit is contained in: parent 1d961d8b58, commit 503f3418ca
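For context, both helpers removed here come from `tensorflow/python/compat/compat.py`. A minimal sketch of the mechanism, assuming a simplified model of that module (the real horizon is a hard-coded date that automation keeps a few weeks ahead of head):

    import contextlib
    import datetime

    # Hypothetical stand-in for the real constant, which automation bumps
    # regularly so it stays a few weeks in the future.
    _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 3, 15)


    def forward_compatible(year, month, day):
      # True once the horizon has moved past the given date. An expired check
      # such as forward_compatible(2019, 10, 19) is therefore always True --
      # exactly the dead guards this PR deletes.
      return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)


    @contextlib.contextmanager
    def forward_compatibility_horizon(year, month, day):
      # Temporarily pins the horizon so a test can exercise a gated code path
      # before its flip date arrives.
      global _FORWARD_COMPATIBILITY_HORIZON
      old = _FORWARD_COMPATIBILITY_HORIZON
      _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
      try:
        yield
      finally:
        _FORWARD_COMPATIBILITY_HORIZON = old

Once the shipped horizon passes a gate's date, every branch on that gate resolves the same way, so the guard and its unreachable arm can be deleted, as the hunks below do.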
@@ -21,7 +21,6 @@ from __future__ import print_function
 import numpy as np
 
 from tensorflow.compiler.tests import xla_test
-from tensorflow.python.compat import compat
 from tensorflow.python.framework import dtypes
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import special_math_ops
@@ -61,13 +60,6 @@ class EinsumOpTest(xla_test.XLATestCase):
           np.array([[8]], dtype=dtype),
           expected=np.array([[-2]], dtype=dtype))
 
-      with compat.forward_compatibility_horizon(2019, 10, 19):
-        self._testBinary(
-            lambda x, y: special_math_ops.einsum('ij,jk->ik', x, y),
-            np.array([[-0.25]], dtype=dtype),
-            np.array([[8]], dtype=dtype),
-            expected=np.array([[-2]], dtype=dtype))
-
   def testImplicitForm(self):
     for dtype in self.float_types:
       self._testBinary(
@@ -76,13 +68,6 @@ class EinsumOpTest(xla_test.XLATestCase):
           np.array([[[1], [3], [2]], [[5], [6], [8]]], dtype=dtype),
           expected=np.array(128, dtype=dtype))
 
-      with compat.forward_compatibility_horizon(2019, 10, 19):
-        self._testBinary(
-            lambda x, y: special_math_ops.einsum('ijk,kji', x, y),
-            np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
-            np.array([[[1], [3], [2]], [[5], [6], [8]]], dtype=dtype),
-            expected=np.array(128, dtype=dtype))
-
   def testReducedIndices(self):
     for dtype in self.float_types:
       self._testBinary(
@@ -91,13 +76,6 @@ class EinsumOpTest(xla_test.XLATestCase):
           np.array([3, 2], dtype=dtype),
           expected=np.array(59, dtype=dtype))
 
-      with compat.forward_compatibility_horizon(2019, 10, 19):
-        self._testBinary(
-            lambda x, y: special_math_ops.einsum('ij,j->', x, y),
-            np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
-            np.array([3, 2], dtype=dtype),
-            expected=np.array(59, dtype=dtype))
-
   def testUnary(self):
     for dtype in self.float_types:
       self._testUnary(
@@ -105,12 +83,6 @@ class EinsumOpTest(xla_test.XLATestCase):
           np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
           expected=np.array([[[1], [2], [6]], [[3], [5], [8]]], dtype=dtype))
 
-      with compat.forward_compatibility_horizon(2019, 10, 19):
-        self._testUnary(
-            lambda x: special_math_ops.einsum('ijk->kji', x),
-            np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
-            expected=np.array([[[1], [2], [6]], [[3], [5], [8]]], dtype=dtype))
-
 
 if __name__ == '__main__':
   googletest.main()
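The guarded bodies deleted above were verbatim copies of the unguarded assertions right before them; pinning the horizon to 2019-10-19 only selected a different code path while the default horizon was still earlier than that date. The semantics are easy to confirm with the real API (the printed values assume a build whose default horizon has passed these dates, i.e. any recent one; the exact gate date inside the einsum lowering is not shown in the diff, so 2019-10-18 below is illustrative):

    from tensorflow.python.compat import compat

    # Expired gate: always True under the current default horizon.
    print(compat.forward_compatible(2019, 10, 18))  # True

    with compat.forward_compatibility_horizon(2019, 10, 19):
      # Pinned to 2019-10-19, gates on earlier dates still read True, so the
      # guarded test exercised the same path as the unguarded call above.
      print(compat.forward_compatible(2019, 10, 18))  # True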
@@ -51,9 +51,6 @@ def make_unroll_batch_matmul_tests(options):
           [(4, 2, 2, 3), (4, 2, 2, 3), False, True],
           [(4, 2, 2, 3),
            (4, 2, 2, 3), True, False]] + broadcast_shape_params,
-      # TODO(b/130887442): Improve the forward compatibility tests for every
-      # ops.
-      "forward_compatibility_test": [False, True],
   }]
 
   def build_graph(parameters):
@@ -73,14 +70,7 @@ def make_unroll_batch_matmul_tests(options):
           transpose_b=parameters["shape"][3])
       return [input_tensor1, input_tensor2], [out]
 
-    if parameters["forward_compatibility_test"]:
-      # This is hardcoded to the date after MatMulV2 is activated.
-      # TODO(b/130887442): Improve the forward compatibility tests for every
-      # ops, and remove the hardcoded date.
-      with tf.compat.forward_compatibility_horizon(2019, 4, 26):
-        return _build_graph()
-    else:
-      return _build_graph()
+    return _build_graph()
 
   def build_inputs(parameters, sess, inputs, outputs):
     input_value1 = create_tensor_data(
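Here an entire test axis collapses, not just a branch: once 2019-04-26 passed, the MatMulV2 gate reads True with or without the pin, so `"forward_compatibility_test": [False, True]` stopped varying anything for this op. A sketch using the public mirror of the context manager (`_build_graph` is a hypothetical stand-in for the closure in the diff):

    import tensorflow as tf


    def _build_graph():
      # Hypothetical stand-in: a small matmul like the one under test.
      return tf.constant([[1.0, 2.0]]) @ tf.constant([[3.0], [4.0]])


    # Before the date passed, this pin switched on the MatMulV2 lowering early.
    with tf.compat.forward_compatibility_horizon(2019, 4, 26):
      pinned = _build_graph()

    # Now the same lowering is on by default, so this builds the same graph.
    plain = _build_graph()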
@@ -64,9 +64,7 @@ class InjectPrefetchTest(test_base.DatasetTestBase, parameterized.TestCase):
   @combinations.generate(test_base.default_test_combinations())
   def testParallelInterleave(self):
     dataset = dataset_ops.Dataset.range(100)
-    parallel_interleave = "ParallelInterleaveV2"
-    if compat.forward_compatible(2020, 2, 20):
-      parallel_interleave = "ParallelInterleaveV3"
+    parallel_interleave = "ParallelInterleaveV3"
     if compat.forward_compatible(2020, 3, 6):
       parallel_interleave = "ParallelInterleaveV4"
     dataset = dataset.apply(
@@ -81,9 +79,7 @@ class InjectPrefetchTest(test_base.DatasetTestBase, parameterized.TestCase):
   @combinations.generate(test_base.default_test_combinations())
   def testChainedParallelDatasets(self):
     dataset = dataset_ops.Dataset.range(100)
-    parallel_interleave = "ParallelInterleaveV2"
-    if compat.forward_compatible(2020, 2, 20):
-      parallel_interleave = "ParallelInterleaveV3"
+    parallel_interleave = "ParallelInterleaveV3"
     if compat.forward_compatible(2020, 3, 6):
       parallel_interleave = "ParallelInterleaveV4"
     parallel_map = "ParallelMap"
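These two hunks show the steady-state maintenance pattern for op-version dispatch: when a horizon expires, its guarded assignment becomes the unconditional baseline, and only gates whose dates were still in the future survive. In isolation (names as in the diff):

    from tensorflow.python.compat import compat

    # The V2 -> V3 gate (2020-02-20) has expired, so V3 is the new baseline.
    parallel_interleave = "ParallelInterleaveV3"

    # The V3 -> V4 gate was still live when this landed, so it stays until it
    # expires in turn.
    if compat.forward_compatible(2020, 3, 6):
      parallel_interleave = "ParallelInterleaveV4"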
@@ -17,8 +17,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-from tensorflow.python.compat import compat
-from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
 from tensorflow.python.data.experimental.ops.distribute_options import ExternalStatePolicy
 from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.data.util import nest
@@ -49,22 +47,13 @@ class _AutoShardDataset(dataset_ops.UnaryDataset):
     self._input_dataset = input_dataset
 
     self._element_spec = input_dataset.element_spec
-    if (compat.forward_compatible(2019, 11, 25) or
-        (input_dataset.options().experimental_distribute.auto_shard_policy !=
-         AutoShardPolicy.AUTO)):
-      variant_tensor = ged_ops.auto_shard_dataset(
-          self._input_dataset._variant_tensor,  # pylint: disable=protected-access
-          num_workers=num_workers,
-          index=index,
-          auto_shard_policy=int(input_dataset.options().experimental_distribute
-                                .auto_shard_policy),
-          **self._flat_structure)
-    else:
-      variant_tensor = ged_ops.auto_shard_dataset(
-          self._input_dataset._variant_tensor,  # pylint: disable=protected-access
-          num_workers=num_workers,
-          index=index,
-          **self._flat_structure)
+    variant_tensor = ged_ops.auto_shard_dataset(
+        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
+        num_workers=num_workers,
+        index=index,
+        auto_shard_policy=int(input_dataset.options().experimental_distribute
+                              .auto_shard_policy),
+        **self._flat_structure)
     super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)
 
   @property
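This hunk and the snapshot one below collapse the same way: the expired predicate sat on the left of an `or`, so the whole condition was unconditionally true and only the `if` arm (the one passing the extra op arguments) was reachable. Schematically, with hypothetical stand-ins rather than the real ops:

    def op(x, extra=None):
      # Stand-in for ged_ops.auto_shard_dataset / ged_ops.snapshot_dataset.
      return (x, extra)


    def expired_gate():
      return True  # what any expired compat.forward_compatible(...) returns


    other_condition = False

    # Before the cleanup: the else arm can never run.
    if expired_gate() or other_condition:
      out = op("dataset", extra="auto_shard_policy")
    else:
      out = op("dataset")

    # After: collapsed to the only reachable arm.
    out = op("dataset", extra="auto_shard_policy")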
@@ -17,7 +17,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-from tensorflow.python.compat import compat
 from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
@@ -78,42 +77,24 @@ class _SnapshotDataset(dataset_ops.UnaryUnchangedStructureDataset):
     self._input_dataset = input_dataset
     self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")
 
-    if compat.forward_compatible(2020, 1, 10) or mode or snapshot_name:
-      variant_tensor = ged_ops.snapshot_dataset(
-          self._input_dataset._variant_tensor,  # pylint: disable=protected-access
-          path=self._path,
-          compression=self._compression,
-          reader_path_prefix=self._reader_path_prefix,
-          writer_path_prefix=self._writer_path_prefix,
-          shard_size_bytes=self._shard_size_bytes,
-          pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
-          num_reader_threads=self._num_reader_threads,
-          reader_buffer_size=self._reader_buffer_size,
-          num_writer_threads=self._num_writer_threads,
-          writer_buffer_size=self._writer_buffer_size,
-          shuffle_on_read=self._shuffle_on_read,
-          seed=self._seed,
-          seed2=self._seed2,
-          mode=self._mode,
-          snapshot_name=self._snapshot_name,
-          **self._flat_structure)
-    else:
-      variant_tensor = ged_ops.snapshot_dataset(
-          self._input_dataset._variant_tensor,  # pylint: disable=protected-access
-          path=self._path,
-          compression=self._compression,
-          reader_path_prefix=self._reader_path_prefix,
-          writer_path_prefix=self._writer_path_prefix,
-          shard_size_bytes=self._shard_size_bytes,
-          pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
-          num_reader_threads=self._num_reader_threads,
-          reader_buffer_size=self._reader_buffer_size,
-          num_writer_threads=self._num_writer_threads,
-          writer_buffer_size=self._writer_buffer_size,
-          shuffle_on_read=self._shuffle_on_read,
-          seed=self._seed,
-          seed2=self._seed2,
-          **self._flat_structure)
+    variant_tensor = ged_ops.snapshot_dataset(
+        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
+        path=self._path,
+        compression=self._compression,
+        reader_path_prefix=self._reader_path_prefix,
+        writer_path_prefix=self._writer_path_prefix,
+        shard_size_bytes=self._shard_size_bytes,
+        pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
+        num_reader_threads=self._num_reader_threads,
+        reader_buffer_size=self._reader_buffer_size,
+        num_writer_threads=self._num_writer_threads,
+        writer_buffer_size=self._writer_buffer_size,
+        shuffle_on_read=self._shuffle_on_read,
+        seed=self._seed,
+        seed2=self._seed2,
+        mode=self._mode,
+        snapshot_name=self._snapshot_name,
+        **self._flat_structure)
 
     super(_SnapshotDataset, self).__init__(input_dataset, variant_tensor)
@@ -1374,7 +1374,7 @@ class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
     Raises:
       InvalidArgumentError: if `num_shards` or `index` are illegal values.
 
 
     Note: error checking is done on a best-effort basis, and errors aren't
     guaranteed to be caught upon dataset creation. (e.g. providing in a
     placeholder tensor bypasses the early checking, and will instead result
@@ -4269,7 +4269,7 @@ class ParallelInterleaveDataset(UnaryDataset):
           f=self._map_func.function,
           deterministic=deterministic_string,
           **self._flat_structure)
-    elif deterministic is not None or compat.forward_compatible(2020, 2, 20):
+    else:
       variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v3(
           input_dataset._variant_tensor,  # pylint: disable=protected-access
           self._map_func.function.captured_inputs,  # pylint: disable=protected-access
@@ -4279,15 +4279,6 @@ class ParallelInterleaveDataset(UnaryDataset):
           f=self._map_func.function,
           deterministic=deterministic_string,
           **self._flat_structure)
-    else:
-      variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2(
-          input_dataset._variant_tensor,  # pylint: disable=protected-access
-          self._map_func.function.captured_inputs,  # pylint: disable=protected-access
-          self._cycle_length,
-          self._block_length,
-          self._num_parallel_calls,
-          f=self._map_func.function,
-          **self._flat_structure)
     super(ParallelInterleaveDataset, self).__init__(input_dataset,
                                                     variant_tensor)
 