Model Dataset iterator op resource side effects.

- A DatasetIterator side-effect resource is added, along with TF_DatasetIteratorRead, TF_DatasetIteratorWrite, TF_DatasetIteratorAlloc, and TF_DatasetIteratorFree memory effects.
- tf.AnonymousIterator, tf.AnonymousIteratorV2, tf.AnonymousMultiDeviceIterator, tf.DeleteIterator, tf.DeleteMultiDeviceIterator, tf.DeserializeIterator, tf.Iterator, tf.IteratorFromStringHandle, tf.IteratorFromStringHandleV2, tf.IteratorGetNextAsOptional, tf.IteratorGetNextSync, tf.IteratorToStringHandle, tf.IteratorV2, tf.MakeIterator, tf.MultiDeviceIterator, tf.MultiDeviceIteratorFromStringHandle, tf.MultiDeviceIteratorGetNextFromShard, tf.MultiDeviceIteratorInit, tf.MultiDeviceIteratorToStringHandle, tf.OneShotIterator and tf.SerializeIterator are added to TensorFlow MLIR ODS.
- DatasetIterator resource memory effects are annotated on resource arguments and results (see the usage sketch below).

PiperOrigin-RevId: 335639743
Change-Id: I4976f2247f7b044a17944f34840bc9a0e9ca9514
Andy Ly 2020-10-06 08:00:37 -07:00 committed by TensorFlower Gardener
parent e4384a8302
commit afde52b7c0
3 changed files with 353 additions and 1 deletion
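
As an illustration of how these annotations can be consumed downstream (a hedged sketch, not part of this change), a pass could walk an op's declared effects through MLIR's MemoryEffectOpInterface; the helper name classifyIteratorEffects and the printing are made up for the example.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"

// Hypothetical helper: print the memory effects `op` declares. With this
// change, tf.Iterator reports Allocate, tf.MakeIterator reports Write,
// tf.IteratorGetNext reports Read and Write, and tf.DeleteIterator reports
// Free on the DatasetIterator resource, rather than being opaque to
// side-effect analysis.
static void classifyIteratorEffects(mlir::Operation *op) {
  auto iface = llvm::dyn_cast<mlir::MemoryEffectOpInterface>(op);
  if (!iface) return;  // Op declares no effect interface; stay conservative.
  llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> effects;
  iface.getEffects(effects);
  for (auto &effect : effects) {
    const char *kind =
        llvm::isa<mlir::MemoryEffects::Allocate>(effect.getEffect()) ? "alloc"
        : llvm::isa<mlir::MemoryEffects::Free>(effect.getEffect())   ? "free"
        : llvm::isa<mlir::MemoryEffects::Write>(effect.getEffect())  ? "write"
                                                                     : "read";
    llvm::outs() << kind << " on " << effect.getResource()->getName() << "\n";
  }
}

The per-op behavior noted in the comment mirrors the annotations in the diff below, e.g. tf.IteratorGetNext carries both TF_DatasetIteratorRead and TF_DatasetIteratorWrite.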

@@ -297,6 +297,33 @@ Equivalent to np.angle.
TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
}
def TF_AnonymousIteratorOp : TF_Op<"AnonymousIterator", []> {
let summary = "A container for an iterator resource.";
let arguments = (ins
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
);
}
def TF_AnonymousIteratorV2Op : TF_Op<"AnonymousIteratorV2", []> {
let summary = "A container for an iterator resource.";
let arguments = (ins
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle,
TF_VariantTensor:$deleter
);
}
def TF_AnonymousMemoryCacheOp : TF_Op<"AnonymousMemoryCache", []> {
let summary = "";
@@ -308,6 +335,21 @@ def TF_AnonymousMemoryCacheOp : TF_Op<"AnonymousMemoryCache", []> {
);
}
def TF_AnonymousMultiDeviceIteratorOp : TF_Op<"AnonymousMultiDeviceIterator", []> {
let summary = "A container for a multi device iterator resource.";
let arguments = (ins
Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle,
TF_VariantTensor:$deleter
);
}
def TF_AnonymousRandomSeedGeneratorOp : TF_Op<"AnonymousRandomSeedGenerator", []> {
let summary = "";
@@ -2485,6 +2527,17 @@ is the same, though it is cleaner to use `tf.io.decode_image`.
TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}
def TF_DeleteIteratorOp : TF_Op<"DeleteIterator", []> {
let summary = "A container for an iterator resource.";
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorFree]>:$handle,
TF_VariantTensor:$deleter
);
let results = (outs);
}
def TF_DeleteMemoryCacheOp : TF_Op<"DeleteMemoryCache", []> {
let summary = "";
@@ -2496,6 +2549,20 @@ def TF_DeleteMemoryCacheOp : TF_Op<"DeleteMemoryCache", []> {
let results = (outs);
}
def TF_DeleteMultiDeviceIteratorOp : TF_Op<"DeleteMultiDeviceIterator", []> {
let summary = "A container for an iterator resource.";
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorFree]>:$multi_device_iterator,
Arg<Variadic<TF_ResourceTensor>, "", [TF_DatasetIteratorRead]>:$iterators,
TF_VariantTensor:$deleter
);
let results = (outs);
TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
}
def TF_DeleteRandomSeedGeneratorOp : TF_Op<"DeleteRandomSeedGenerator", []> {
let summary = "";
@@ -2719,6 +2786,19 @@ Computes the gradients of depthwise convolution with respect to the input.
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}
def TF_DeserializeIteratorOp : TF_Op<"DeserializeIterator", []> {
let summary = [{
Converts the given variant tensor to an iterator and stores it in the given resource.
}];
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$resource_handle,
TF_VariantTensor:$serialized
);
let results = (outs);
}
def TF_DeviceIndexOp : TF_Op<"DeviceIndex", [NoSideEffect]> {
let summary = "Return the index of device the op runs.";
@@ -4965,11 +5045,58 @@ tf.math.is_nan(x) ==> [False, True, False, True, False]
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
def TF_IteratorOp : TF_Op<"Iterator", []> {
let summary = "A container for an iterator resource.";
let arguments = (ins
StrAttr:$shared_name,
StrAttr:$container,
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
);
}
def TF_IteratorFromStringHandleOp : TF_Op<"IteratorFromStringHandle", []> {
let summary = [{
Converts the given string representing a handle to an iterator to a resource.
}];
let arguments = (ins
TF_StrTensor:$string_handle,
DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$resource_handle
);
}
def TF_IteratorFromStringHandleV2Op : TF_Op<"IteratorFromStringHandleV2", []> {
let summary = "";
let arguments = (ins
TF_StrTensor:$string_handle,
DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$resource_handle
);
}
def TF_IteratorGetNextOp : TF_Op<"IteratorGetNext", []> {
let summary = "Gets the next output from the given iterator .";
let arguments = (ins
TF_ResourceTensor:$iterator
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
);
let results = (outs
@@ -4980,6 +5107,74 @@ def TF_IteratorGetNextOp : TF_Op<"IteratorGetNext", []> {
TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
}
def TF_IteratorGetNextAsOptionalOp : TF_Op<"IteratorGetNextAsOptional", []> {
let summary = [{
Gets the next output from the given iterator as an Optional variant.
}];
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator,
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
);
let results = (outs
TF_VariantTensor:$optional
);
}
def TF_IteratorGetNextSyncOp : TF_Op<"IteratorGetNextSync", []> {
let summary = "Gets the next output from the given iterator.";
let description = [{
This operation is a synchronous version of IteratorGetNext. It should only be used
in situations where the iterator does not block the calling thread, or where
the calling thread is not a member of the thread pool used to execute parallel
operations (e.g. in eager mode).
}];
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
);
let results = (outs
Variadic<TF_Tensor>:$components
);
TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
}
def TF_IteratorToStringHandleOp : TF_Op<"IteratorToStringHandle", []> {
let summary = [{
Converts the given `resource_handle` representing an iterator to a string.
}];
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead]>:$resource_handle
);
let results = (outs
TF_StrTensor:$string_handle
);
}
def TF_IteratorV2Op : TF_Op<"IteratorV2", []> {
let summary = "";
let arguments = (ins
StrAttr:$shared_name,
StrAttr:$container,
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
);
}
def TF_L2LossOp : TF_Op<"L2Loss", [NoSideEffect]> {
let summary = "L2 Loss.";
@@ -5586,6 +5781,24 @@ A 2-D example:
TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
}
def TF_MakeIteratorOp : TF_Op<"MakeIterator", []> {
let summary = [{
Makes a new iterator from the given `dataset` and stores it in `iterator`.
}];
let description = [{
This operation may be executed multiple times. Each execution will reset the
iterator in `iterator` to the first element of `dataset`.
}];
let arguments = (ins
TF_VariantTensor:$dataset,
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$iterator
);
let results = (outs);
}
def TF_MatMulOp : TF_Op<"MatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
let summary = [{
Multiply the matrix "a" by the matrix "b".
@@ -6909,6 +7122,82 @@ Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
def TF_MultiDeviceIteratorOp : TF_Op<"MultiDeviceIterator", []> {
let summary = "Creates a MultiDeviceIterator resource.";
let arguments = (ins
Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
StrAttr:$shared_name,
StrAttr:$container,
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
);
}
def TF_MultiDeviceIteratorFromStringHandleOp : TF_Op<"MultiDeviceIteratorFromStringHandle", []> {
let summary = [{
Generates a MultiDeviceIterator resource from its provided string handle.
}];
let arguments = (ins
TF_StrTensor:$string_handle,
DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$multi_device_iterator
);
}
def TF_MultiDeviceIteratorGetNextFromShardOp : TF_Op<"MultiDeviceIteratorGetNextFromShard", []> {
let summary = "Gets next element for the provided shard number.";
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$multi_device_iterator,
TF_Int32Tensor:$shard_num,
TF_Int64Tensor:$incarnation_id
);
let results = (outs
Variadic<TF_Tensor>:$components
);
TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
}
def TF_MultiDeviceIteratorInitOp : TF_Op<"MultiDeviceIteratorInit", []> {
let summary = "Initializes the multi device iterator with the given dataset.";
let arguments = (ins
TF_VariantTensor:$dataset,
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$multi_device_iterator,
TF_Int64Tensor:$max_buffer_size
);
let results = (outs
TF_Int64Tensor:$incarnation_id
);
}
def TF_MultiDeviceIteratorToStringHandleOp : TF_Op<"MultiDeviceIteratorToStringHandle", []> {
let summary = "Produces a string handle for the given MultiDeviceIterator.";
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead]>:$multi_device_iterator
);
let results = (outs
TF_StrTensor:$string_handle
);
}
def TF_MultinomialOp : TF_Op<"Multinomial", [TF_CannotDuplicate]> {
let summary = "Draws samples from a multinomial distribution.";
@@ -7363,6 +7652,44 @@ output =
}];
}
def TF_OneShotIteratorOp : TF_Op<"OneShotIterator", []> {
let summary = [{
Makes a "one-shot" iterator that can be iterated only once.
}];
let description = [{
A one-shot iterator bundles the logic for defining the dataset and
the state of the iterator in a single op, which allows simple input
pipelines to be defined without an additional initialization
("MakeIterator") step.
One-shot iterators have the following limitations:
* They do not support parameterization: all logic for creating the underlying
dataset must be bundled in the `dataset_factory` function.
* They are not resettable. Once a one-shot iterator reaches the end of its
underlying dataset, subsequent "IteratorGetNext" operations on that
iterator will always produce an `OutOfRange` error.
For greater flexibility, use "Iterator" and "MakeIterator" to define
an iterator using an arbitrary subgraph, which may capture tensors
(including fed values) as parameters, and which may be reset multiple
times by rerunning "MakeIterator".
}];
let arguments = (ins
SymbolRefAttr:$dataset_factory,
Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
StrAttr:$container,
StrAttr:$shared_name
);
let results = (outs
Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
);
}
def TF_OutfeedEnqueueTupleOp : TF_Op<"OutfeedEnqueueTuple", []> {
let summary = "Enqueue multiple Tensor values on the computation outfeed.";
@@ -10353,6 +10680,22 @@ Computes gradients for the scaled exponential linear (Selu) operation.
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
def TF_SerializeIteratorOp : TF_Op<"SerializeIterator", []> {
let summary = [{
Converts the given `resource_handle` representing an iterator to a variant tensor.
}];
let arguments = (ins
Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead]>:$resource_handle,
DefaultValuedAttr<I64Attr, "0">:$external_state_policy
);
let results = (outs
TF_VariantTensor:$serialized
);
}
def TF_ShapeOp : TF_Op<"Shape", [NoSideEffect]> {
let summary = "Returns the shape of a tensor.";

@@ -115,6 +115,7 @@ def TF_SummaryResource : TF_ResourceBase<"Summary">;
def TF_LookupTableResource : TF_ResourceBase<"LookupTable">;
def TF_DatasetSeedGeneratorResource : TF_ResourceBase<"DatasetSeedGenerator">;
def TF_DatasetMemoryCacheResource : TF_ResourceBase<"DatasetMemoryCache">;
def TF_DatasetIteratorResource : TF_ResourceBase<"DatasetIterator">;
def TF_VariableRead : MemRead<TF_VariableResource>;
def TF_StackRead : MemRead<TF_StackResource>;
@@ -122,6 +123,7 @@ def TF_TensorArrayRead : MemRead<TF_TensorArrayResource>;
def TF_LookupTableRead : MemRead<TF_LookupTableResource>;
def TF_DatasetSeedGeneratorRead : MemRead<TF_DatasetSeedGeneratorResource>;
def TF_DatasetMemoryCacheRead : MemRead<TF_DatasetMemoryCacheResource>;
def TF_DatasetIteratorRead : MemRead<TF_DatasetIteratorResource>;
def TF_VariableWrite : MemWrite<TF_VariableResource>;
def TF_StackWrite : MemWrite<TF_StackResource>;
@@ -130,6 +132,7 @@ def TF_SummaryWrite : MemWrite<TF_SummaryResource>;
def TF_LookupTableWrite : MemWrite<TF_LookupTableResource>;
def TF_DatasetSeedGeneratorWrite : MemWrite<TF_DatasetSeedGeneratorResource>;
def TF_DatasetMemoryCacheWrite : MemWrite<TF_DatasetMemoryCacheResource>;
def TF_DatasetIteratorWrite : MemWrite<TF_DatasetIteratorResource>;
def TF_VariableAlloc : MemAlloc<TF_VariableResource>;
def TF_StackAlloc : MemAlloc<TF_StackResource>;
@@ -138,12 +141,14 @@ def TF_SummaryAlloc : MemAlloc<TF_SummaryResource>;
def TF_LookupTableAlloc : MemAlloc<TF_LookupTableResource>;
def TF_DatasetSeedGeneratorAlloc : MemAlloc<TF_DatasetSeedGeneratorResource>;
def TF_DatasetMemoryCacheAlloc : MemAlloc<TF_DatasetMemoryCacheResource>;
def TF_DatasetIteratorAlloc : MemAlloc<TF_DatasetIteratorResource>;
def TF_StackFree : MemFree<TF_StackResource>;
def TF_TensorArrayFree : MemFree<TF_TensorArrayResource>;
def TF_SummaryFree : MemFree<TF_SummaryResource>;
def TF_DatasetSeedGeneratorFree : MemFree<TF_DatasetSeedGeneratorResource>;
def TF_DatasetMemoryCacheFree : MemFree<TF_DatasetMemoryCacheResource>;
def TF_DatasetIteratorFree : MemFree<TF_DatasetIteratorResource>;
//===----------------------------------------------------------------------===//
// TensorFlow op definitions

@@ -53,6 +53,10 @@ struct DatasetMemoryCache
StringRef getName() final { return "DatasetMemoryCache"; }
};
struct DatasetIterator : ::mlir::SideEffects::Resource::Base<DatasetIterator> {
StringRef getName() final { return "DatasetIterator"; }
};
} // namespace ResourceEffects
} // namespace TF
} // namespace mlir
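
A minimal follow-up sketch (assumed for illustration, not part of this change): because SideEffects::Resource::Base hands out a per-process singleton, analyses can recognize iterator effects by pointer comparison against DatasetIterator::get(); the helper name isDatasetIteratorEffect is hypothetical.

// Illustrative only. Assumes the tf_side_effects header shown above is
// included alongside MLIR's side-effect interfaces.
#include "mlir/Interfaces/SideEffectInterfaces.h"

static bool isDatasetIteratorEffect(
    const mlir::MemoryEffects::EffectInstance &effect) {
  // Resource::Base<T>::get() returns a singleton, so pointer equality
  // identifies the DatasetIterator resource defined above.
  return effect.getResource() ==
         mlir::TF::ResourceEffects::DatasetIterator::get();
}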