[tf.data] Add ParallelBatchDataset to the list of datasets whose buffer sizes will be autotuned if the autotune_buffer_sizes option is ON.

PiperOrigin-RevId: 356275496
Change-Id: Ia3d848c02a6ca7b61138e5fed8f81a4182286cec
Jay Shi 2021-02-08 08:52:11 -08:00 committed by TensorFlower Gardener
parent 20da5546cb
commit b97a34c5f9
2 changed files with 10 additions and 6 deletions
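
For context, the rewrite is opt-in from the Python API. A minimal sketch, assuming a build that includes this change and a tf.data version where `Dataset.batch` accepts `num_parallel_calls` (which is what produces a ParallelBatchDataset node in the dataset graph):

    import tensorflow as tf

    dataset = tf.data.Dataset.range(1000)
    # Parallel batching creates a ParallelBatchDataset node; with this commit
    # it is treated as an asynchronous op, so its buffer size participates in
    # the autotuning described below.
    dataset = dataset.batch(32, num_parallel_calls=tf.data.AUTOTUNE)

    options = tf.data.Options()
    # Turn on the (experimental) autotune_buffer_sizes optimization.
    options.experimental_optimization.autotune_buffer_sizes = True
    dataset = dataset.with_options(options)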


@@ -34,10 +34,14 @@ constexpr char kLegacyAutotune[] = "legacy_autotune";
 constexpr char kBufferSizeMin[] = "buffer_size_min";
 constexpr char kPrefetchDataset[] = "PrefetchDataset";
-constexpr std::array<const char*, 7> kAsyncDatasetOps = {
-    "ExperimentalMapAndBatchDataset", "MapAndBatchDataset",
-    "ParallelInterleaveDatasetV2", "ParallelInterleaveDatasetV3",
-    "ParallelInterleaveDatasetV4", "ParallelMapDataset",
+constexpr std::array<const char*, 8> kAsyncDatasetOps = {
+    "ExperimentalMapAndBatchDataset",
+    "MapAndBatchDataset",
+    "ParallelBatchDataset",
+    "ParallelInterleaveDatasetV2",
+    "ParallelInterleaveDatasetV3",
+    "ParallelInterleaveDatasetV4",
+    "ParallelMapDataset",
     "ParallelMapDatasetV2",
 };
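
For reference, a rough mapping from user-facing tf.data calls to the graph ops named in this list (an illustration only; op versions vary by release):

    ds.map(f, num_parallel_calls=4)         # ParallelMapDatasetV2
    ds.interleave(g, num_parallel_calls=4)  # ParallelInterleaveDatasetV4
    ds.batch(32, num_parallel_calls=4)      # ParallelBatchDataset (newly covered)
    ds.apply(tf.data.experimental.map_and_batch(f, 32))  # MapAndBatchDataset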


@@ -27,8 +27,8 @@ constexpr char kAutotune[] = "autotune";
 // This optimization does the following:
 //
 // 1. Adds `prefetch(AUTOTUNE)` after all asynchronous tf.data transformations
-// (e.g. parallel map, parallel interleave, and map + batch) if they are not
-// followed by a `prefetch` yet.
+// (e.g. parallel batch, parallel map, parallel interleave, and map + batch) if
+// they are not followed by a `prefetch` yet.
 //
 // 2. If there exists any `prefetch(buffer_size=N)` for `N>=0`, it will replace
 // the transformation with autotunable version of `prefetch` which uses N as
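
To make the two documented behaviors concrete, a Python-level sketch of what the graph rewrite amounts to (the actual changes are applied to the serialized dataset graph, not by editing user code):

    import tensorflow as tf

    ds = tf.data.Dataset.range(1000)

    # Behavior 1: an asynchronous transformation with no `prefetch` after it
    # gets an autotuned `prefetch` injected, roughly as if the user had
    # appended `.prefetch(tf.data.AUTOTUNE)`.
    ds = ds.map(lambda x: x + 1, num_parallel_calls=4)

    # Behavior 2: an explicit `prefetch(buffer_size=N)` with N >= 0 is
    # rewritten into an autotunable prefetch that uses N as the lower bound
    # (buffer_size_min) of the autotuning search.
    ds = ds.prefetch(16)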