[tf.data] Add ParallelBatchDataset to the list of datasets whose buffer sizes will be autotuned when the autotune_buffer_sizes option is ON.
PiperOrigin-RevId: 356275496 Change-Id: Ia3d848c02a6ca7b61138e5fed8f81a4182286cec
This commit is contained in:
parent
20da5546cb
commit
b97a34c5f9
@@ -34,10 +34,14 @@ constexpr char kLegacyAutotune[] = "legacy_autotune";
 constexpr char kBufferSizeMin[] = "buffer_size_min";
 constexpr char kPrefetchDataset[] = "PrefetchDataset";
 
-constexpr std::array<const char*, 7> kAsyncDatasetOps = {
-    "ExperimentalMapAndBatchDataset", "MapAndBatchDataset",
-    "ParallelInterleaveDatasetV2", "ParallelInterleaveDatasetV3",
-    "ParallelInterleaveDatasetV4", "ParallelMapDataset",
-    "ParallelMapDatasetV2",
-};
+constexpr std::array<const char*, 8> kAsyncDatasetOps = {
+    "ExperimentalMapAndBatchDataset",
+    "MapAndBatchDataset",
+    "ParallelBatchDataset",
+    "ParallelInterleaveDatasetV2",
+    "ParallelInterleaveDatasetV3",
+    "ParallelInterleaveDatasetV4",
+    "ParallelMapDataset",
+    "ParallelMapDatasetV2",
+};
 
@@ -27,8 +27,8 @@ constexpr char kAutotune[] = "autotune";
 // This optimization does the following:
 //
 // 1. Adds `prefetch(AUTOTUNE)` after all asynchronous tf.data transformations
-// (e.g. parallel map, parallel interleave, and map + batch) if they are not
-// followed by a `prefetch` yet.
+// (e.g. parallel batch, parallel map, parallel interleave, and map + batch) if
+// they are not followed by a `prefetch` yet.
 //
 // 2. If there exists any `prefetch(buffer_size=N)` for `N>=0`, it will replace
 // the transformation with autotunable version of `prefetch` which uses N as
Loading…
x
Reference in New Issue
Block a user