diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
index f2287219d76..153ac5346b9 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
@@ -3681,31 +3681,6 @@ retained with length 1.
   TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
 }
 
-def TF_TPUReplicateMetadataOp : TF_Op<"TPUReplicateMetadata", [NoSideEffect]> {
-  let summary = [{
-Metadata indicating how the TPU computation should be replicated.
-  }];
-
-  let description = [{
-This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
-  }];
-
-  let arguments = (ins
-    Confined<I64Attr, [IntMinValue<0>]>:$num_replicas,
-    DefaultValuedAttr<I64Attr, "1">:$num_cores_per_replica,
-    StrAttr:$topology,
-    DefaultValuedAttr<BoolAttr, "true">:$use_tpu,
-    DefaultValuedAttr<I64ArrayAttr, "{}">:$device_assignment,
-    DefaultValuedAttr<I64ArrayAttr, "{}">:$computation_shape,
-    DefaultValuedAttr<StrArrayAttr, "{}">:$host_compute_core,
-    DefaultValuedAttr<StrArrayAttr, "{}">:$padding_map,
-    DefaultValuedAttr<StrAttr, "\"STEP_MARK_AT_ENTRY\"">:$step_marker_location,
-    DefaultValuedAttr<BoolAttr, "false">:$allow_soft_placement
-  );
-
-  let results = (outs);
-}
-
 def TF_TanhOp : TF_Op<"Tanh", [NoSideEffect, SameOperandsAndResultType]> {
   let summary = "Computes hyperbolic tangent of `x` element-wise.";
 
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
index 8919168f4d7..f014cac0868 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
@@ -302,4 +302,32 @@ element_dtype: the desired type of elements in the list.
   );
 }
 
+// When auto-generated, this operation is marked NoSideEffect because it isn't
+// stateful in TensorFlow. However, it is kept alive only through control
+// dependencies and has no output, so when placed in an island nothing would
+// reference it and the canonicalizer would always fold it away.
+def TF_TPUReplicateMetadataOp : TF_Op<"TPUReplicateMetadata", []> {
+  let summary = [{
+Metadata indicating how the TPU computation should be replicated.
+  }];
+
+  let description = [{
+This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
+  }];
+
+  let arguments = (ins
+    Confined<I64Attr, [IntMinValue<0>]>:$num_replicas,
+    DefaultValuedAttr<I64Attr, "1">:$num_cores_per_replica,
+    StrAttr:$topology,
+    DefaultValuedAttr<BoolAttr, "true">:$use_tpu,
+    DefaultValuedAttr<I64ArrayAttr, "{}">:$device_assignment,
+    DefaultValuedAttr<I64ArrayAttr, "{}">:$computation_shape,
+    DefaultValuedAttr<StrArrayAttr, "{}">:$host_compute_core,
+    DefaultValuedAttr<StrArrayAttr, "{}">:$padding_map,
+    DefaultValuedAttr<StrAttr, "\"STEP_MARK_AT_ENTRY\"">:$step_marker_location,
+    DefaultValuedAttr<BoolAttr, "false">:$allow_soft_placement
+  );
+
+  let results = (outs);
+}
 #endif // TF_OPS
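
Note (not part of the patch): below is a rough tf_executor sketch of the situation the new comment describes; the function name, `_tpu_replicate` value, and attribute values are assumptions for illustration only. `tf.TPUReplicateMetadata` produces no data results, so nothing inside its island references it; if it still carried NoSideEffect, canonicalization would treat it as trivially dead and erase it, even though the island's control token is meant to anchor it for downstream control dependencies. Dropping the trait is what keeps it alive.

    func @tpu_cluster() {
      tf_executor.graph {
        // The metadata op has no results; nothing inside this island uses it.
        // If it were marked NoSideEffect, it would be trivially dead and folded away.
        %ctl = tf_executor.island {
          "tf.TPUReplicateMetadata"() {_tpu_replicate = "cluster", num_replicas = 2 : i64, topology = ""} : () -> ()
          tf_executor.yield
        }
        // The control token is the only edge keeping the metadata island alive.
        %out, %ctl2 = tf_executor.island(%ctl) {
          %cst = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
          tf_executor.yield %cst : tensor<i32>
        }
        tf_executor.fetch
      }
      return
    }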