[tf.data] Graduating tf.data.experimental.AUTOTUNE to core API.

PiperOrigin-RevId: 330845881
Change-Id: I0480031d39753f5115e9dae5f5a6ae5b42e19bb0
Jiri Simsa 2020-09-09 19:01:28 -07:00 committed by TensorFlower Gardener
parent 8fc390aa96
commit 8eb7bb0a92
7 changed files with 22 additions and 9 deletions


@@ -107,6 +107,8 @@
* `tf.data.Dataset.from_generator` now supports Ragged and Sparse tensors
with a new `output_signature` argument, which allows `from_generator` to
produce any type describable by a `tf.TypeSpec`.
+* `tf.data.experimental.AUTOTUNE` is now available in the core API as
+`tf.data.AUTOTUNE`.
* `tf.image`:
* Added deterministic `tf.image.stateless_random_*` functions for each
`tf.image.random_*` function. Added a new op

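Illustrative only, not part of the diff: a minimal sketch (assuming a TensorFlow build that includes this change) showing that the new core symbol aliases the existing experimental constant, so either spelling works:

import tensorflow as tf

# Both names export the same sentinel value (-1, see dataset_ops.py below).
assert tf.data.AUTOTUNE == tf.data.experimental.AUTOTUNE == -1

# New code can reference the core symbol directly.
dataset = tf.data.Dataset.range(10)
dataset = dataset.map(lambda x: x * 2, num_parallel_calls=tf.data.AUTOTUNE)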

@@ -23,6 +23,7 @@ from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data import experimental
+from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.ops.dataset_ops import INFINITE as INFINITE_CARDINALITY
from tensorflow.python.data.ops.dataset_ops import make_initializable_iterator


@@ -176,7 +176,7 @@ def map_and_batch_with_legacy_function(map_func,
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
-in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then
+in parallel. If the value `tf.data.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
@@ -237,7 +237,7 @@ def map_and_batch(map_func,
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
-in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then
+in parallel. If the value `tf.data.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:

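Illustrative only, not part of the diff: a sketch of how the `map_and_batch` transformation documented above is typically applied, with `tf.data.AUTOTUNE` selecting the number of parallel calls; the dataset and map function are arbitrary placeholders:

import tensorflow as tf

# map_and_batch returns a transformation that is applied via Dataset.apply().
dataset = tf.data.Dataset.range(100)
dataset = dataset.apply(
    tf.data.experimental.map_and_batch(
        map_func=lambda x: x + 1,
        batch_size=8,
        num_parallel_calls=tf.data.AUTOTUNE))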

@@ -37,7 +37,7 @@ from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, "
"num_parallel_calls=tf.data.experimental.AUTOTUNE)` instead. If sloppy "
"num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy "
"execution is desired, use `tf.data.Options.experimental_deterministic`.")
@tf_export("data.experimental.parallel_interleave")
def parallel_interleave(map_func,

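Illustrative only, not part of the diff: a sketch of the replacement suggested by the updated deprecation message; the file names are placeholders:

import tensorflow as tf

filenames = ["/var/data/file1.tfrecord", "/var/data/file2.tfrecord"]
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.interleave(
    tf.data.TFRecordDataset,
    cycle_length=4,
    block_length=16,
    num_parallel_calls=tf.data.AUTOTUNE)

# If sloppy (non-deterministic) execution is desired, as the message notes:
options = tf.data.Options()
options.experimental_deterministic = False
dataset = dataset.with_options(options)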

@@ -94,6 +94,8 @@ ops.NotDifferentiable("ReduceDataset")
# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
# TODO(b/168128531): Deprecate and remove this symbol.
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
# Constants representing infinite and unknown cardinalities.
@@ -1700,7 +1702,7 @@ name=None))
>>> dataset = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
>>> dataset = dataset.map(lambda x: x + 1,
-... num_parallel_calls=tf.data.experimental.AUTOTUNE,
+... num_parallel_calls=tf.data.AUTOTUNE,
... deterministic=False)
Args:
@@ -1708,7 +1710,7 @@ name=None))
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
-`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
+`tf.data.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
deterministic: (Optional.) A boolean controlling whether determinism
should be traded for performance by allowing elements to be produced out
@@ -1821,7 +1823,7 @@ name=None))
... "/var/data/file3.txt", "/var/data/file4.txt"]
>>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
>>> dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x),
-... cycle_length=4, num_parallel_calls=tf.data.experimental.AUTOTUNE,
+... cycle_length=4, num_parallel_calls=tf.data.AUTOTUNE,
... deterministic=False)
Args:
@@ -1829,7 +1831,7 @@ name=None))
cycle_length: (Optional.) The number of input elements that will be
processed concurrently. If not set, the tf.data runtime decides what it
should be based on available CPU. If `num_parallel_calls` is set to
-`tf.data.experimental.AUTOTUNE`, the `cycle_length` argument identifies
+`tf.data.AUTOTUNE`, the `cycle_length` argument identifies
the maximum degree of parallelism.
block_length: (Optional.) The number of consecutive elements to produce
from each input element before cycling to another input element. If not
@@ -1838,7 +1840,7 @@ name=None))
threadpool, which is used to fetch inputs from cycle elements
asynchronously and in parallel. The default behavior is to fetch inputs
from cycle elements synchronously with no parallelism. If the value
-`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
+`tf.data.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
deterministic: (Optional.) A boolean controlling whether determinism
should be traded for performance by allowing elements to be produced out
@@ -2574,7 +2576,7 @@ class DatasetV1(DatasetV2):
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
-`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
+`tf.data.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
deterministic: (Optional.) A boolean controlling whether determinism
should be traded for performance by allowing elements to be produced out

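Illustrative only, not part of the diff: a runnable variant (assuming eager execution) of the `Dataset.map` doctest fragment updated above:

import tensorflow as tf

dataset = tf.data.Dataset.range(1, 6)  # ==> [1, 2, 3, 4, 5]
# deterministic=False permits out-of-order results while AUTOTUNE picks the
# degree of parallelism based on available CPU.
dataset = dataset.map(
    lambda x: x + 1,
    num_parallel_calls=tf.data.AUTOTUNE,
    deterministic=False)
print(sorted(int(x) for x in dataset))  # ==> [2, 3, 4, 5, 6]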

@@ -1,5 +1,9 @@
path: "tensorflow.data"
tf_module {
member {
name: "AUTOTUNE"
mtype: "<type \'int\'>"
}
member {
name: "Dataset"
mtype: "<type \'type\'>"


@@ -1,5 +1,9 @@
path: "tensorflow.data"
tf_module {
member {
name: "AUTOTUNE"
mtype: "<type \'int\'>"
}
member {
name: "Dataset"
mtype: "<type \'type\'>"