diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
index cce5f71651a..dc32e705df1 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
@@ -139,12 +139,12 @@ channel and then adjusts each component of each pixel to
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Float16, TF_Float32]>:$images,
-    TF_Float32Tensor:$contrast_factor
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
+    Arg<TF_Float32Tensor, [{A float multiplier for adjusting contrast.}]>:$contrast_factor
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The contrast-adjusted image or images.}]>:$output
   );
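+  // Illustrative sketch (assumed literal values): each pixel x in a channel is
+  // mapped to (x - mean) * contrast_factor + mean, so for a 2x2 single-channel
+  // image whose mean is 2.5:
+  //   tf.image.adjust_contrast([[[[1.], [2.]], [[3.], [4.]]]], 2.0)
+  //     ==> [[[[-0.5], [1.5]], [[3.5], [5.5]]]]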
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -163,12 +163,12 @@ and then remapped back to RGB colorspace.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Float16, TF_Float32]>:$images,
-    TF_Float32Tensor:$delta
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
+    Arg<TF_Float32Tensor, [{A float delta to add to the hue.}]>:$delta
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The hue-adjusted image or images.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -187,12 +187,12 @@ values, and then remapped back to RGB colorspace.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Float16, TF_Float32]>:$images,
-    TF_Float32Tensor:$scale
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
+    Arg<TF_Float32Tensor, [{A float factor by which to scale the saturation.}]>:$scale
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The saturation-adjusted image or images.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -211,14 +211,15 @@ retained with length 1.
   }];
 
   let arguments = (ins
-    TF_BoolTensor:$input,
-    TF_I32OrI64Tensor:$reduction_indices,
+    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims
   );
 
   let results = (outs
-    TF_BoolTensor:$output
+    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
   );
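+  // Illustrative sketch (assumed literal values): a logical-AND reduction along
+  // the given axis, e.g.
+  //   tf.reduce_all([[True, False], [True, True]], axis=1)  ==>  [False, True]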
 
   TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
@@ -249,8 +250,10 @@ replica 1's output: `[[B], [D]]`
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_Int32Tensor:$group_assignment,
+    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The local input to the sum.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor with shape
+[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
+replica ids in the ith subgroup.}]>:$group_assignment,
 
     I64Attr:$concat_dimension,
     I64Attr:$split_dimension,
@@ -258,7 +261,7 @@ replica 1's output: `[[B], [D]]`
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The exchanged result.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -308,7 +311,10 @@ def TF_AnonymousIteratorOp : TF_Op<"AnonymousIterator", []> {
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
+    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
+"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
+resource sharing by name, and does not keep a reference to the resource
+container.}], [TF_DatasetIteratorAlloc]>:$handle
   );
 }
 
@@ -321,8 +327,11 @@ def TF_AnonymousIteratorV2Op : TF_Op<"AnonymousIteratorV2", []> {
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle,
-    TF_VariantTensor:$deleter
+    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
+"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
+resource sharing by name, and does not keep a reference to the resource
+container.}], [TF_DatasetIteratorAlloc]>:$handle,
+    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
   );
 }
 
@@ -347,8 +356,11 @@ def TF_AnonymousMultiDeviceIteratorOp : TF_Op<"AnonymousMultiDeviceIterator", []
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle,
-    TF_VariantTensor:$deleter
+    Res<TF_ResourceTensor, [{A handle to a multi device iterator that can be passed to a
+"MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
+AnonymousMultiDeviceIterator prevents resource sharing by name, and does not keep a
+reference to the resource container.}], [TF_DatasetIteratorAlloc]>:$handle,
+    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
   );
 }
 
@@ -394,14 +406,15 @@ retained with length 1.
   }];
 
   let arguments = (ins
-    TF_BoolTensor:$input,
-    TF_I32OrI64Tensor:$reduction_indices,
+    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims
   );
 
   let results = (outs
-    TF_BoolTensor:$output
+    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
@@ -447,7 +460,9 @@ Usage:
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_I32OrI64Tensor:$dimension
+    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
+Describes which dimension of the input Tensor to reduce across. For vectors,
+use dimension = 0.}]>:$dimension
   );
 
   let results = (outs
@@ -480,7 +495,9 @@ Usage:
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_I32OrI64Tensor:$dimension
+    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
+Describes which dimension of the input Tensor to reduce across. For vectors,
+use dimension = 0.}]>:$dimension
   );
 
   let results = (outs
@@ -593,8 +610,8 @@ If `condition` evaluates to false, print the list of tensors in `data`.
   }];
 
   let arguments = (ins
-    TF_BoolTensor:$condition,
-    Variadic<TF_Tensor>:$data,
+    Arg<TF_BoolTensor, [{The condition to evaluate.}]>:$condition,
+    Arg<Variadic<TF_Tensor>, [{The tensors to print out when condition is false.}]>:$data,
 
     DefaultValuedAttr<I64Attr, "3">:$summarize
   );
@@ -615,8 +632,8 @@ see the incremented value or a subsequent newer one.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_Tensor:$value
+    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_Tensor, [{the value by which the variable will be incremented.}]>:$value
   );
 
   let results = (outs);
@@ -633,8 +650,8 @@ see the decremented value or a subsequent newer one.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_Tensor:$value
+    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_Tensor, [{the value by which the variable will be decremented.}]>:$value
   );
 
   let results = (outs);
@@ -651,8 +668,8 @@ this value or a subsequent newer value of the variable.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableWrite]>:$resource,
-    TF_Tensor:$value
+    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableWrite]>:$resource,
+    Arg<TF_Tensor, [{the value to set the new tensor to use.}]>:$value
   );
 
   let results = (outs);
@@ -754,7 +771,7 @@ window in `value`.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$value,
+    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$value,
 
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
@@ -763,7 +780,7 @@ window in `value`.
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -778,7 +795,7 @@ Each entry in `output` is the mean of the corresponding size `ksize` window in
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$input,
+    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
@@ -787,7 +804,7 @@ Each entry in `output` is the mean of the corresponding size `ksize` window in
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -797,8 +814,8 @@ def TF_AvgPool3DGradOp : TF_Op<"AvgPool3DGrad", [NoSideEffect]> {
   let summary = "Computes gradients of average pooling function.";
 
   let arguments = (ins
-    TF_Int32Tensor:$orig_input_shape,
-    TF_FloatTensor:$grad,
+    Arg<TF_Int32Tensor, [{The original input dimensions.}]>:$orig_input_shape,
+    Arg<TF_FloatTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
@@ -807,7 +824,7 @@ def TF_AvgPool3DGradOp : TF_Op<"AvgPool3DGrad", [NoSideEffect]> {
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{The backprop for input.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -817,8 +834,9 @@ def TF_AvgPoolGradOp : TF_Op<"AvgPoolGrad", [NoSideEffect]> {
   let summary = "Computes gradients of the average pooling function.";
 
   let arguments = (ins
-    TF_Int32Tensor:$orig_input_shape,
-    TF_FloatTensor:$grad,
+    Arg<TF_Int32Tensor, [{1-D.  Shape of the original input to `avg_pool`.}]>:$orig_input_shape,
+    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
+the output of `avg_pool`.}]>:$grad,
 
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
@@ -827,7 +845,7 @@ def TF_AvgPoolGradOp : TF_Op<"AvgPoolGrad", [NoSideEffect]> {
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{4-D.  Gradients w.r.t. the input of `avg_pool`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -858,15 +876,15 @@ It is computed as:
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$y,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,
 
     DefaultValuedAttr<BoolAttr, "false">:$adj_x,
     DefaultValuedAttr<BoolAttr, "false">:$adj_y
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{3-D or higher with shape `[..., r_o, c_o]`}]>:$output
   );
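+  // Illustrative sketch (assumed shapes): the matmul is applied per batch entry,
+  //   x: [2, 3, 4], y: [2, 4, 5], adj_x = adj_y = false  ==>  output: [2, 3, 5]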
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -907,15 +925,15 @@ about broadcasting
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>:$y,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,
 
     DefaultValuedAttr<BoolAttr, "false">:$adj_x,
     DefaultValuedAttr<BoolAttr, "false">:$adj_y
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{3-D or higher with shape `[..., r_o, c_o]`}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -935,11 +953,18 @@ This op is deprecated. Prefer `tf.nn.batch_normalization`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$t,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$m,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$v,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$gamma,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 4D input Tensor.}]>:$t,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D mean Tensor with size matching the last dimension of t.
+This is the first output from tf.nn.moments,
+or a saved moving average thereof.}]>:$m,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D variance Tensor with size matching the last dimension of t.
+This is the second output from tf.nn.moments,
+or a saved moving average thereof.}]>:$v,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D beta Tensor with size matching the last dimension of t.
+An offset to be added to the normalized tensor.}]>:$beta,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D gamma Tensor with size matching the last dimension of t.
+If "scale_after_normalization" is true, this tensor will be multiplied
+with the normalized tensor.}]>:$gamma,
 
     F32Attr:$variance_epsilon,
     BoolAttr:$scale_after_normalization
@@ -966,14 +991,87 @@ followed by cropping along the `height` and `width` dimensions.
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$crops,
+    Arg<TF_Tensor, [{4-D tensor with shape
+`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
+  depth]`. Note that the batch size of the input tensor must be divisible by
+`block_size * block_size`.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
+how many elements to crop from the intermediate result across the spatial
+dimensions as follows:
+
+    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]}]>:$crops,
 
     Confined<I64Attr, [IntMinValue<2>]>:$block_size
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`, where:
+
+      height = height_pad - crop_top - crop_bottom
+      width = width_pad - crop_left - crop_right
+
+The attr `block_size` must be greater than one. It indicates the block size.
+
+Some examples:
+
+(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+The output tensor has shape `[1, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
+
+```
+[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
+```
+
+The output tensor has shape `[1, 2, 2, 3]` and value:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+      [[7, 8, 9], [10, 11, 12]]]]
+```
+
+(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+     [[[2], [4]], [[10], [12]]],
+     [[[5], [7]], [[13], [15]]],
+     [[[6], [8]], [[14], [16]]]]
+```
+
+The output tensor has shape `[1, 4, 4, 1]` and value:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]],
+      [[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```
+
+(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
+
+```
+x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
+     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+```
+
+The output tensor has shape `[2, 2, 4, 1]` and value:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]]],
+     [[[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -999,9 +1097,118 @@ reverse of SpaceToBatch.  See below for a precise description.
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$block_shape,
-    TF_I32OrI64Tensor:$crops
+    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
+where spatial_shape has M dimensions.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
+    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
+  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
+  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
+  required that
+  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
+
+This operation is equivalent to the following steps:
+
+1. Reshape `input` to `reshaped` of shape:
+     [block_shape[0], ..., block_shape[M-1],
+      batch / prod(block_shape),
+      input_shape[1], ..., input_shape[N-1]]
+
+2. Permute dimensions of `reshaped` to produce `permuted` of shape
+     [batch / prod(block_shape),
+
+      input_shape[1], block_shape[0],
+      ...,
+      input_shape[M], block_shape[M-1],
+
+      input_shape[M+1], ..., input_shape[N-1]]
+
+3. Reshape `permuted` to produce `reshaped_permuted` of shape
+     [batch / prod(block_shape),
+
+      input_shape[1] * block_shape[0],
+      ...,
+      input_shape[M] * block_shape[M-1],
+
+      input_shape[M+1],
+      ...,
+      input_shape[N-1]]
+
+4. Crop the start and end of dimensions `[1, ..., M]` of
+   `reshaped_permuted` according to `crops` to produce the output of shape:
+     [batch / prod(block_shape),
+
+      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
+      ...,
+      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
+
+      input_shape[M+1], ..., input_shape[N-1]]
+
+Some examples:
+
+(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
+    `crops = [[0, 0], [0, 0]]`:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+The output tensor has shape `[1, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
+    `crops = [[0, 0], [0, 0]]`:
+
+```
+[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
+```
+
+The output tensor has shape `[1, 2, 2, 3]` and value:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+      [[7, 8, 9], [10, 11, 12]]]]
+```
+
+(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
+    `crops = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+     [[[2], [4]], [[10], [12]]],
+     [[[5], [7]], [[13], [15]]],
+     [[[6], [8]], [[14], [16]]]]
+```
+
+The output tensor has shape `[1, 4, 4, 1]` and value:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]],
+      [[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```
+
+(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
+    `crops = [[0, 0], [2, 0]]`:
+
+```
+x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
+     [[[0], [2], [4]]], [[[0], [10], [12]]],
+     [[[0], [5], [7]]], [[[0], [13], [15]]],
+     [[[0], [6], [8]]], [[[0], [14], [16]]]]
+```
+
+The output tensor has shape `[2, 2, 4, 1]` and value:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]]],
+     [[[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```}]>:$crops
   );
 
   let results = (outs
@@ -1060,14 +1267,14 @@ Broadcasting is supported, so `value` may have any number of dimensions.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$value,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$bias,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias,
 
     DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
   );
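+  // Illustrative sketch (assumed literal values): `bias` is broadcast along the
+  // last dimension of `value`, e.g.
+  //   tf.nn.bias_add([[1., 2., 3.], [4., 5., 6.]], [10., 20., 30.])
+  //     ==> [[11., 22., 33.], [14., 25., 36.]]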
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1099,13 +1306,13 @@ the feature dimension is the third-to-last.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$out_backprop,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$out_backprop,
 
     DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the feature dimension of `out_backprop`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1126,12 +1333,12 @@ Broadcasting is supported, so `value` may have any number of dimensions.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$value,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$bias
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1155,13 +1362,16 @@ Values in `arr` outside of the range [0, size) are ignored.
   }];
 
   let arguments = (ins
-    TF_Int32Tensor:$arr,
-    TF_Int32Tensor:$size,
-    TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$weights
+    Arg<TF_Int32Tensor, [{int32 `Tensor`.}]>:$arr,
+    Arg<TF_Int32Tensor, [{non-negative int32 scalar `Tensor`.}]>:$size,
+    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{is an int32, int64, float32, or float64 `Tensor` with the same
+shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
+equal to 1.}]>:$weights
   );
 
   let results = (outs
-    TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$bins
+    Res<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{1D `Tensor` with length equal to `size`. The counts or summed weights for
+each value in the range [0, size).}]>:$bins
   );
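+  // Illustrative sketch (assumed literal values): with empty `weights` the op is
+  // a plain histogram over [0, size), e.g.
+  //   arr = [1, 1, 2, 3, 3, 3], size = 5, weights = []  ==>  bins = [0, 2, 1, 3, 0]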
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
@@ -1362,12 +1572,13 @@ bucketized values for a single feature.
   }];
 
   let arguments = (ins
-    Variadic<TF_Float32Tensor>:$float_values,
-    Variadic<TF_Float32Tensor>:$bucket_boundaries
+    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors, each containing float values for a single feature.}]>:$float_values,
+    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors each containing the bucket boundaries for a single
+feature.}]>:$bucket_boundaries
   );
 
   let results = (outs
-    Variadic<TF_Int32Tensor>:$buckets
+    Res<Variadic<TF_Int32Tensor>, [{int; List of Rank 1 Tensors each containing the bucketized values for a single feature.}]>:$buckets
   );
 
   TF_DerivedOperandSizeAttr num_features = TF_DerivedOperandSizeAttr<0>;
@@ -1455,12 +1666,12 @@ subsequent operation and then be optimized away, however.)
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$shape
+    Arg<TF_Tensor, [{A Tensor to broadcast.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{A 1-D `int` Tensor. The shape of the desired output.}]>:$shape
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A Tensor.}]>:$output
   );
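+  // Illustrative sketch (assumed literal values):
+  //   tf.broadcast_to([1, 2, 3], [3, 3])
+  //     ==> [[1, 2, 3], [1, 2, 3], [1, 2, 3]]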
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1489,13 +1700,17 @@ then the output will be
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
+    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A Tensor of any shape containing int or float values.}]>:$input,
 
     F32ArrayAttr:$boundaries
   );
 
   let results = (outs
-    TF_Int32Tensor:$output
+    Res<TF_Int32Tensor, [{Same shape with 'input', each value of input replaced with bucket index.
+
+@compatibility(numpy)
+Equivalent to np.digitize.
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1577,11 +1792,11 @@ case it might be faster to use the CPU.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$input
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1598,13 +1813,15 @@ greater than `clip_value_max` are set to `clip_value_max`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$t,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$clip_value_min,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$clip_value_max
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`.}]>:$t,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
+as `t`. The minimum value to clip by.}]>:$clip_value_min,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
+as `t`. The maximum value to clip by.}]>:$clip_value_max
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A clipped `Tensor` with the same shape as input 't'.}]>:$output
   );
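+  // Illustrative sketch (assumed literal values): an element-wise clamp to
+  // [clip_value_min, clip_value_max], e.g.
+  //   tf.clip_by_value([-1.0, 0.5, 2.0], 0.0, 1.0)  ==>  [0.0, 0.5, 1.0]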
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1784,12 +2001,16 @@ def TF_ConcatOp : TF_Op<"Concat", [NoSideEffect]> {
   let summary = "Concatenates tensors along one dimension.";
 
   let arguments = (ins
-    TF_Int32Tensor:$concat_dim,
-    Variadic<TF_Tensor>:$values
+    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
+range [0, rank(values)).}]>:$concat_dim,
+    Arg<Variadic<TF_Tensor>, [{The `N` Tensors to concatenate. Their ranks and types must match,
+and their sizes must match in all dimensions except `concat_dim`.}]>:$values
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
+`concat_dim` dimension.  This tensor's shape matches that of `values` except
+in `concat_dim` where it has the sum of the sizes.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -1819,12 +2040,13 @@ This is typically used by gradient computations for a concat operation.
   }];
 
   let arguments = (ins
-    TF_Int32Tensor:$concat_dim,
-    Variadic<TF_Int32Tensor>:$shape
+    Arg<TF_Int32Tensor, [{The dimension along which to concatenate.}]>:$concat_dim,
+    Arg<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing shape of tensors being concatenated.}]>:$shape
   );
 
   let results = (outs
-    Variadic<TF_Int32Tensor>:$offset
+    Res<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing the starting offset
+of input tensors within the concatenated output.}]>:$offset
   );
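+  // Illustrative sketch (assumed literal shapes): concatenating along dimension 1,
+  //   concat_dim = 1, shape = [[2, 2, 7], [2, 3, 7], [2, 5, 7]]
+  //     ==> offset = [[0, 0, 0], [0, 2, 0], [0, 5, 0]]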
 
   TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
@@ -1840,12 +2062,16 @@ def TF_ConcatV2Op : TF_Op<"ConcatV2", [NoSideEffect]> {
   let summary = "Concatenates tensors along one dimension.";
 
   let arguments = (ins
-    Variadic<TF_Tensor>:$values,
-    TF_I32OrI64Tensor:$axis
+    Arg<Variadic<TF_Tensor>, [{List of `N` Tensors to concatenate. Their ranks and types must match,
+and their sizes must match in all dimensions except `concat_dim`.}]>:$values,
+    Arg<TF_I32OrI64Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
+range [-rank(values), rank(values)).}]>:$axis
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
+`concat_dim` dimension.  This tensor's shape matches that of `values` except
+in `concat_dim` where it has the sum of the sizes.}]>:$output
   );
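+  // Illustrative sketch (assumed literal values): concatenating along axis 0,
+  //   tf.concat([[[1, 2], [3, 4]], [[5, 6]]], axis=0)
+  //     ==> [[1, 2], [3, 4], [5, 6]]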
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -1873,7 +2099,8 @@ Sets up the centralized structures for a distributed TPU system.
   );
 
   let results = (outs
-    TF_StrTensor:$topology
+    Res<TF_StrTensor, [{A serialized tensorflow.tpu.TopologyProto that describes the TPU
+topology.}]>:$topology
   );
 }
 
@@ -1971,8 +2198,10 @@ horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>:$input,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>:$filter,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is interpreted according to the value
+of `data_format`, see below for details.}]>:$input,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor of shape
+`[filter_height, filter_width, in_channels, out_channels]`}]>:$filter,
 
     I64ArrayAttr:$strides,
     DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
@@ -1983,7 +2212,8 @@ horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is determined by the value of
+`data_format`, see below for details.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -2011,9 +2241,12 @@ Computes the gradients of convolution with respect to the filter.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$input,
-    TF_Int32Tensor:$filter_sizes,
-    TF_FloatTensor:$out_backprop,
+    Arg<TF_FloatTensor, [{4-D with shape `[batch, in_height, in_width, in_channels]`.}]>:$input,
+    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
+where `filter` is a 4-D
+`[filter_height, filter_width, in_channels, out_channels]` tensor.}]>:$filter_sizes,
+    Arg<TF_FloatTensor, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
 
     I64ArrayAttr:$strides,
     DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
@@ -2024,7 +2257,9 @@ Computes the gradients of convolution with respect to the filter.
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
+the `filter` input of the convolution.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -2044,9 +2279,12 @@ Computes the gradients of convolution with respect to the input.
   }];
 
   let arguments = (ins
-    TF_Int32Tensor:$input_sizes,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>:$filter,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>:$out_backprop,
+    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`,
+where `input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`.}]>:$filter,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
 
     I64ArrayAttr:$strides,
     DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
@@ -2057,7 +2295,8 @@ Computes the gradients of convolution with respect to the input.
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
+w.r.t. the input of the convolution.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -2089,8 +2328,9 @@ Our Conv3D implements a form of cross-correlation.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$input,
-    TF_FloatTensor:$filter,
+    Arg<TF_FloatTensor, [{Shape `[batch, in_depth, in_height, in_width, in_channels]`.}]>:$input,
+    Arg<TF_FloatTensor, [{Shape `[filter_depth, filter_height, filter_width, in_channels,
+out_channels]`. `in_channels` must match between `input` and `filter`.}]>:$filter,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
     TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
@@ -2123,9 +2363,13 @@ Computes the gradients of 3-D convolution with respect to the filter.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$input,
-    TF_Int32Tensor:$filter_sizes,
-    TF_FloatTensor:$out_backprop,
+    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, in_channels]`.}]>:$input,
+    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
+where `filter` is a 5-D
+`[filter_depth, filter_height, filter_width, in_channels, out_channels]`
+tensor.}]>:$filter_sizes,
+    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+out_channels]`.}]>:$out_backprop,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
     TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
@@ -2146,9 +2390,13 @@ Computes the gradients of 3-D convolution with respect to the input.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$input_sizes,
-    TF_FloatTensor:$filter,
-    TF_FloatTensor:$out_backprop,
+    Arg<TF_I32OrI64Tensor, [{An integer vector representing the tensor shape of `input`,
+where `input` is a 5-D
+`[batch, depth, rows, cols, in_channels]` tensor.}]>:$input_sizes,
+    Arg<TF_FloatTensor, [{Shape `[depth, rows, cols, in_channels, out_channels]`.
+`in_channels` must match between `input` and `filter`.}]>:$filter,
+    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+out_channels]`.}]>:$out_backprop,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
     TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
@@ -2225,12 +2473,12 @@ of corresponding 3-element vectors is cross-multiplied independently.
   }];
 
   let arguments = (ins
-    TF_IntOrFpTensor:$a,
-    TF_IntOrFpTensor:$b
+    Arg<TF_IntOrFpTensor, [{A tensor containing 3-element vectors.}]>:$a,
+    Arg<TF_IntOrFpTensor, [{Another tensor, of same type and shape as `a`.}]>:$b
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$product
+    Res<TF_IntOrFpTensor, [{Pairwise cross product of the vectors in `a` and `b`.}]>:$product
   );
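+  // Illustrative sketch (assumed literal values): the right-handed cross product,
+  //   tf.linalg.cross([1., 0., 0.], [0., 1., 0.])  ==>  [0., 0., 1.]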
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -2249,12 +2497,14 @@ and `B, D, F, H` as group 1. Thus we get the outputs:
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>:$input,
-    TF_Int32Tensor:$group_assignment
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>, [{The local input to the sum.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor with shape
+[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
+replica ids in the ith subgroup.}]>:$group_assignment
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>, [{The sum of all the distributed inputs.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -2297,8 +2547,11 @@ tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
-    TF_I32OrI64Tensor:$axis,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
+`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
+    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
+`[-rank(x), rank(x))`.}]>:$axis,
 
     DefaultValuedAttr<BoolAttr, "false">:$exclusive,
     DefaultValuedAttr<BoolAttr, "false">:$reverse
@@ -2351,8 +2604,11 @@ tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
-    TF_I32OrI64Tensor:$axis,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
+`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
+    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
+`[-rank(x), rank(x))`.}]>:$axis,
 
     DefaultValuedAttr<BoolAttr, "false">:$exclusive,
     DefaultValuedAttr<BoolAttr, "false">:$reverse
@@ -2380,14 +2636,15 @@ the source data format.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$x,
+    Arg<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in source data format.
+Must be in the range [-4, 4).}]>:$x,
 
     DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
     DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$y
+    Res<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in destination data format.}]>:$y
   );
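+  // Illustrative sketch (assumed literal values): with src_format = "NHWC" and
+  // dst_format = "NCHW",
+  //   x = [0, 1, 2, 3]  ==>  y = [0, 2, 3, 1]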
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -2420,14 +2677,14 @@ and
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$x,
+    Arg<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in source data format.}]>:$x,
 
     DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
     DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$y
+    Res<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in destination data format.}]>:$y
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -2451,7 +2708,7 @@ computes summary information about one or more tensors.
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
+    Arg<TF_Tensor, [{Input tensor, non-Reference type}]>:$input,
 
     StrAttr:$tfdbg_context_id,
     StrAttr:$op_name,
@@ -2495,8 +2752,8 @@ decoding partial jpeg image.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$contents,
-    TF_Int32Tensor:$crop_window,
+    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,
+    Arg<TF_Int32Tensor, [{1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].}]>:$crop_window,
 
     DefaultValuedAttr<I64Attr, "0">:$channels,
     DefaultValuedAttr<I64Attr, "1">:$ratio,
@@ -2507,7 +2764,7 @@ decoding partial jpeg image.
   );
 
   let results = (outs
-    TF_Uint8Tensor:$image
+    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
   );
 }
 
@@ -2526,11 +2783,11 @@ This op also supports decoding JPEGs and PNGs, though it is cleaner to use
   }];
 
   let arguments = (ins
-    TF_StrTensor:$contents
+    Arg<TF_StrTensor, [{0-D.  The GIF-encoded image.}]>:$contents
   );
 
   let results = (outs
-    TF_Uint8Tensor:$image
+    Res<TF_Uint8Tensor, [{4-D with shape `[num_frames, height, width, 3]`. RGB channel order.}]>:$image
   );
 }
 
@@ -2560,7 +2817,7 @@ the same, though it is cleaner to use `tf.io.decode_image`.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$contents,
+    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,
 
     DefaultValuedAttr<I64Attr, "0">:$channels,
     DefaultValuedAttr<I64Attr, "1">:$ratio,
@@ -2571,7 +2828,7 @@ the same, though it is cleaner to use `tf.io.decode_image`.
   );
 
   let results = (outs
-    TF_Uint8Tensor:$image
+    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
   );
 }
 
@@ -2597,13 +2854,13 @@ is the same, though it is cleaner to use `tf.io.decode_image`.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$contents,
+    Arg<TF_StrTensor, [{0-D.  The PNG-encoded image.}]>:$contents,
 
     DefaultValuedAttr<I64Attr, "0">:$channels
   );
 
   let results = (outs
-    TensorOf<[TF_Uint16, TF_Uint8]>:$image
+    Res<TensorOf<[TF_Uint16, TF_Uint8]>, [{3-D with shape `[height, width, channels]`.}]>:$image
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -2613,8 +2870,8 @@ def TF_DeleteIteratorOp : TF_Op<"DeleteIterator", []> {
   let summary = "A container for an iterator resource.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorFree]>:$handle,
-    TF_VariantTensor:$deleter
+    Arg<TF_ResourceTensor, [{A handle to the iterator to delete.}], [TF_DatasetIteratorFree]>:$handle,
+    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
   );
 
   let results = (outs);
@@ -2635,9 +2892,9 @@ def TF_DeleteMultiDeviceIteratorOp : TF_Op<"DeleteMultiDeviceIterator", []> {
   let summary = "A container for an iterator resource.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorFree]>:$multi_device_iterator,
-    Arg<Variadic<TF_ResourceTensor>, "", [TF_DatasetIteratorRead]>:$iterators,
-    TF_VariantTensor:$deleter
+    Arg<TF_ResourceTensor, [{A handle to the multi device iterator to delete.}], [TF_DatasetIteratorFree]>:$multi_device_iterator,
+    Arg<Variadic<TF_ResourceTensor>, [{A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.}], [TF_DatasetIteratorRead]>:$iterators,
+    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
   );
 
   let results = (outs);
@@ -2826,9 +3083,16 @@ Computes the gradients of depthwise convolution with respect to the filter.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$input,
-    TF_Int32Tensor:$filter_sizes,
-    TF_FloatTensor:$out_backprop,
+    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.  For example, if
+`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
+in_width, in_channels]` tensor.}]>:$input,
+    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
+where `filter` is a 4-D
+`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.}]>:$filter_sizes,
+    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.
+For example, if `data_format` is 'NHWC' then
+out_backprop shape is `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
 
     I64ArrayAttr:$strides,
     TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
@@ -2838,7 +3102,9 @@ Computes the gradients of depthwise convolution with respect to the filter.
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
+the `filter` input of the convolution.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -2850,9 +3116,15 @@ Computes the gradients of depthwise convolution with respect to the input.
   }];
 
   let arguments = (ins
-    TF_Int32Tensor:$input_sizes,
-    TF_FloatTensor:$filter,
-    TF_FloatTensor:$out_backprop,
+    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`, based
+on `data_format`.  For example, if `data_format` is 'NHWC' then
+ `input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
+    Arg<TF_FloatTensor, [{4-D with shape
+`[filter_height, filter_width, in_channels, depthwise_multiplier]`.}]>:$filter,
+    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.
+For example, if `data_format` is 'NHWC' then
+out_backprop shape is `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
 
     I64ArrayAttr:$strides,
     TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
@@ -2862,7 +3134,10 @@ Computes the gradients of depthwise convolution with respect to the input.
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{4-D with shape according to `data_format`.  For example, if
+`data_format` is 'NHWC', output shape is `[batch, in_height,
+in_width, in_channels]`.  Gradient w.r.t. the input of the
+convolution.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -2930,8 +3205,8 @@ and `QuantizeV2`, using the following algorithm:
 
   let arguments = (ins
     TensorOf<[TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8]>:$input,
-    TF_Float32Tensor:$min_range,
-    TF_Float32Tensor:$max_range,
+    Arg<TF_Float32Tensor, [{The minimum scalar value possibly produced for the input.}]>:$min_range,
+    Arg<TF_Float32Tensor, [{The maximum scalar value possibly produced for the input.}]>:$max_range,
 
     DefaultValuedAttr<TF_AnyStrAttrOf<["MIN_COMBINED", "MIN_FIRST", "SCALED"]>, "MIN_COMBINED">:$mode,
     DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
@@ -2952,8 +3227,9 @@ Converts the given variant tensor to an iterator and stores it in the given reso
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$resource_handle,
-    TF_VariantTensor:$serialized
+    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorWrite]>:$resource_handle,
+    Arg<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
+resource.}]>:$serialized
   );
 
   let results = (outs);
@@ -3007,7 +3283,8 @@ then the final deserialized `SparseTensor` will be:
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Str, TF_Variant]>:$serialized_sparse
+    Arg<TensorOf<[TF_Str, TF_Variant]>, [{The serialized `SparseTensor` objects. The last dimension
+must have 3 columns.}]>:$serialized_sparse
   );
 
   let results = (outs
@@ -3029,7 +3306,7 @@ error status.
   }];
 
   let arguments = (ins
-    TF_ResourceTensor:$resource,
+    Arg<TF_ResourceTensor, [{handle to the resource to delete.}]>:$resource,
 
     DefaultValuedAttr<BoolAttr, "true">:$ignore_lookup_error
   );
@@ -3080,7 +3357,7 @@ tf.diag(diagonal) ==> [[1, 0, 0, 0]
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$diagonal
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is at most 1.}]>:$diagonal
   );
 
   let results = (outs
@@ -3115,11 +3392,11 @@ tf.diag_part(input) ==> [1, 2, 3, 4]
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is even and not zero.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$diagonal
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The extracted diagonal.}]>:$diagonal
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -3361,13 +3638,13 @@ Comparison with `numpy.einsum`:
   }];
 
   let arguments = (ins
-    Variadic<TF_Tensor>:$inputs,
+    Arg<Variadic<TF_Tensor>, [{List of 1 or 2 Tensors.}]>:$inputs,
 
     StrAttr:$equation
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Output Tensor with shape depending upon `equation`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
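A short illustration of the Einsum contract above (a list of 1 or 2 tensors plus an `equation` attribute), through the standard `tf.einsum` entry point:

```python
import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[5.0, 6.0], [7.0, 8.0]])
# Matrix product expressed as an equation over two inputs.
c = tf.einsum('ij,jk->ik', a, b)   # shape [2, 2]
# Transpose expressed as an equation over a single input.
t = tf.einsum('ij->ji', a)         # shape [2, 2]
```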
@@ -3405,12 +3682,13 @@ Computes gradients for the exponential linear (Elu) operation.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$gradients,
-    TF_FloatTensor:$outputs
+    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Elu operation.}]>:$gradients,
+    Arg<TF_FloatTensor, [{The outputs of the corresponding Elu operation.}]>:$outputs
   );
 
   let results = (outs
-    TF_FloatTensor:$backprops
+    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + 1)` if outputs < 0,
+`gradients` otherwise.}]>:$backprops
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -3424,13 +3702,13 @@ This operation creates a tensor of `shape` and `dtype`.
   }];
 
   let arguments = (ins
-    TF_Int32Tensor:$shape,
+    Arg<TF_Int32Tensor, [{1-D. Represents the shape of the output tensor.}]>:$shape,
 
     DefaultValuedAttr<BoolAttr, "false">:$init
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A `Tensor` of type `T`.}]>:$output
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -3444,8 +3722,12 @@ An op that enqueues a list of input batch tensors to TPUEmbedding.
   }];
 
   let arguments = (ins
-    Variadic<TF_Int32Tensor>:$batch,
-    TF_StrTensor:$mode_override,
+    Arg<Variadic<TF_Int32Tensor>, [{A list of 1D tensors, one for each embedding table, containing the
+indices into the tables.}]>:$batch,
+    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
+TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
+'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
+in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
 
     DefaultValuedAttr<I64Attr, "-1">:$device_ordinal
   );
@@ -3470,10 +3752,20 @@ the corresponding feature.
   }];
 
   let arguments = (ins
-    Variadic<TF_I32OrI64Tensor>:$sample_splits,
-    Variadic<TF_I32OrI64Tensor>:$embedding_indices,
-    Variadic<TF_F32OrF64Tensor>:$aggregation_weights,
-    TF_StrTensor:$mode_override,
+    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the break points for splitting
+embedding_indices and aggregation_weights into rows.
+It corresponds to ids.row_splits in embedding_lookup(), when ids is a
+RaggedTensor.}]>:$sample_splits,
+    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
+It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.}]>:$embedding_indices,
+    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
+aggregation weights. It corresponds to the values field of a RaggedTensor
+with the same row_splits as ids in embedding_lookup(), when ids is a
+RaggedTensor.}]>:$aggregation_weights,
+    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
+TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
+'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
+in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
 
     DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
     DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
@@ -3508,10 +3800,18 @@ number of lookups into the table described by the corresponding table_id.
   }];
 
   let arguments = (ins
-    Variadic<TF_I32OrI64Tensor>:$sample_indices,
-    Variadic<TF_I32OrI64Tensor>:$embedding_indices,
-    Variadic<TF_F32OrF64Tensor>:$aggregation_weights,
-    TF_StrTensor:$mode_override,
+    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example and
+feature to which the corresponding embedding_indices and aggregation_weights
+values belong. sample_indices[i] must equal b * nf + f, where nf is the
+number of features from the corresponding table, f is in [0, nf), and
+b is in [0, batch size).}]>:$sample_indices,
+    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.}]>:$embedding_indices,
+    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per sample -- i.e. per
+(training example, feature) -- aggregation weights.}]>:$aggregation_weights,
+    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
+TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
+'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
+in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
 
     DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
     DefaultValuedAttr<StrArrayAttr, "{}">:$combiners
@@ -3542,10 +3842,18 @@ the corresponding feature.
   }];
 
   let arguments = (ins
-    Variadic<TF_I32OrI64Tensor>:$sample_indices,
-    Variadic<TF_I32OrI64Tensor>:$embedding_indices,
-    Variadic<TF_F32OrF64Tensor>:$aggregation_weights,
-    TF_StrTensor:$mode_override,
+    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example to
+which the corresponding embedding_indices and aggregation_weights values
+belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().}]>:$sample_indices,
+    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
+It corresponds to sp_ids.values in embedding_lookup_sparse().}]>:$embedding_indices,
+    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
+aggregation weights. It corresponds to sp_weights.values in
+embedding_lookup_sparse().}]>:$aggregation_weights,
+    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
+TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
+'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
+in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
 
     DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
     DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
@@ -3571,13 +3879,13 @@ Returns the input tensor otherwise.
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
+    Arg<TF_Tensor, [{A tensor, whose shape is to be validated.}]>:$input,
 
     TF_ShapeAttr:$shape
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A tensor with the same shape and contents as the input tensor or value.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -3751,11 +4059,14 @@ size 1.
 
   let arguments = (ins
     TF_Tensor:$input,
-    TF_I32OrI64Tensor:$dim
+    Arg<TF_I32OrI64Tensor, [{0-D (scalar). Specifies the dimension index at which to
+expand the shape of `input`. Must be in the range
+`[-rank(input) - 1, rank(input)]`.}]>:$dim
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Contains the same data as `input`, but its shape has an additional
+dimension of size 1 added.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
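The `dim` range documented above can be exercised with `tf.expand_dims`; a small sketch:

```python
import tensorflow as tf

x = tf.constant([[1, 2], [3, 4]])     # shape [2, 2]
a = tf.expand_dims(x, axis=0)         # shape [1, 2, 2]
b = tf.expand_dims(x, axis=-1)        # shape [2, 2, 1]; negative dims are allowed
```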
@@ -3802,7 +4113,7 @@ Extract `patches` from `images` and put them in the "depth" output dimension.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$images,
+    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.}]>:$images,
 
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksizes,
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
@@ -3811,7 +4122,10 @@ Extract `patches` from `images` and put them in the "depth" output dimension.
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$patches
+    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
+ksize_cols * depth]` containing image patches with size
+`ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
+`out_rows` and `out_cols` are the dimensions of the output patches.}]>:$patches
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -3826,11 +4140,16 @@ dimension of `input`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
+  dimension of `input` is replaced with its 1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.fft
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
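As a usage sketch for the FFT family above (complex-to-complex, over the inner-most dimension), via `tf.signal.fft`:

```python
import tensorflow as tf

signal = tf.constant([1.0, 2.0, 3.0, 4.0])
# FFT operates on complex tensors, so cast the real signal first.
spectrum = tf.signal.fft(tf.cast(signal, tf.complex64))
roundtrip = tf.signal.ifft(spectrum)   # complex-valued reconstruction
```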
@@ -3845,11 +4164,16 @@ Computes the 2-dimensional discrete Fourier transform over the inner-most
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
+  dimensions of `input` are replaced with their 2D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.fft2
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
@@ -3864,11 +4188,16 @@ dimensions of `input`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
+  dimensions of `input` are replaced with their 3D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.fftn with 3 dimensions.
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
@@ -3887,7 +4216,7 @@ def TF_FakeParamOp : TF_Op<"FakeParam", [NoSideEffect, TF_NoConstantFold]> {
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{"Fake" output value. This should not be consumed by another op.}]>:$output
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -3943,8 +4272,8 @@ def TF_FakeQuantWithMinMaxArgsGradientOp : TF_Op<"FakeQuantWithMinMaxArgsGradien
   let summary = "Compute gradients for a FakeQuantWithMinMaxArgs operation.";
 
   let arguments = (ins
-    TF_Float32Tensor:$gradients,
-    TF_Float32Tensor:$inputs,
+    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.}]>:$gradients,
+    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxArgs operation.}]>:$inputs,
 
     DefaultValuedAttr<F32Attr, "-6.0f">:$min,
     DefaultValuedAttr<F32Attr, "6.0f">:$max,
@@ -3953,7 +4282,8 @@ def TF_FakeQuantWithMinMaxArgsGradientOp : TF_Op<"FakeQuantWithMinMaxArgsGradien
   );
 
   let results = (outs
-    TF_Float32Tensor:$backprops
+    Res<TF_Float32Tensor, [{Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
+`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops
   );
 }
 
@@ -4011,8 +4341,9 @@ def TF_FakeQuantWithMinMaxVarsGradientOp : TF_Op<"FakeQuantWithMinMaxVarsGradien
   let summary = "Compute gradients for a FakeQuantWithMinMaxVars operation.";
 
   let arguments = (ins
-    TF_Float32Tensor:$gradients,
-    TF_Float32Tensor:$inputs,
+    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxVars operation.}]>:$gradients,
+    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxVars operation.
+min, max: Quantization interval, scalar floats.}]>:$inputs,
     TF_Float32Tensor:$min,
     TF_Float32Tensor:$max,
 
@@ -4021,9 +4352,12 @@ def TF_FakeQuantWithMinMaxVarsGradientOp : TF_Op<"FakeQuantWithMinMaxVarsGradien
   );
 
   let results = (outs
-    TF_Float32Tensor:$backprops_wrt_input,
-    TF_Float32Tensor:$backprop_wrt_min,
-    TF_Float32Tensor:$backprop_wrt_max
+    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. inputs:
+`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops_wrt_input,
+    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. min parameter:
+`sum(gradients * (inputs < min))`.}]>:$backprop_wrt_min,
+    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. max parameter:
+`sum(gradients * (inputs > max))`.}]>:$backprop_wrt_max
   );
 }
 
@@ -4104,8 +4438,12 @@ fill([2, 3], 9) ==> [[9, 9, 9]
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$dims,
-    TF_Tensor:$value
+    Arg<TF_I32OrI64Tensor, [{1-D. Represents the shape of the output tensor.}]>:$dims,
+    Arg<TF_Tensor, [{0-D (scalar). Value to fill the returned tensor.
+
+@compatibility(numpy)
+Equivalent to np.full
+@end_compatibility}]>:$value
   );
 
   let results = (outs
@@ -4196,11 +4534,13 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$x,
-    TF_Float32Tensor:$scale,
-    TF_Float32Tensor:$offset,
-    TF_Float32Tensor:$mean,
-    TF_Float32Tensor:$variance,
+    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
+must be empty for training.}]>:$mean,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
+must be empty for training.}]>:$variance,
 
     DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
     DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
@@ -4209,11 +4549,15 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   );
 
   let results = (outs
-    TF_Float32Tensor:$y,
-    TF_Float32Tensor:$batch_mean,
-    TF_Float32Tensor:$batch_variance,
-    TF_Float32Tensor:$reserve_space_1,
-    TF_Float32Tensor:$reserve_space_2
+    Res<TF_Float32Tensor, [{A 4D Tensor for output data.}]>:$y,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
+to compute the running mean.}]>:$batch_mean,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
+TensorFlow to compute the running variance.}]>:$batch_variance,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
+in the gradient computation.}]>:$reserve_space_1,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
+in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -4234,11 +4578,18 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$y_backprop,
-    TF_Float32Tensor:$x,
-    TF_Float32Tensor:$scale,
-    TF_Float32Tensor:$reserve_space_1,
-    TF_Float32Tensor:$reserve_space_2,
+    Arg<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
+    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
+    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
+mean to be reused in gradient computation. When is_training is
+False, a 1D Tensor for the population mean to be reused in both
+1st and 2nd order gradient computation.}]>:$reserve_space_1,
+    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
+variance (inverted variance in the cuDNN case) to be reused in
+gradient computation. When is_training is False, a 1D Tensor
+for the population variance to be reused in both 1st and 2nd
+order gradient computation.}]>:$reserve_space_2,
 
     DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
     DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
@@ -4246,11 +4597,12 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   );
 
   let results = (outs
-    TF_Float32Tensor:$x_backprop,
-    TF_Float32Tensor:$scale_backprop,
-    TF_Float32Tensor:$offset_backprop,
-    TF_Float32Tensor:$reserve_space_3,
-    TF_Float32Tensor:$reserve_space_4
+    Res<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
+    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
+    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
+in FusedBatchNorm.}]>:$reserve_space_4
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -4265,11 +4617,18 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y_backprop,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
-    TF_Float32Tensor:$scale,
-    TF_Float32Tensor:$reserve_space_1,
-    TF_Float32Tensor:$reserve_space_2,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
+    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
+mean to be reused in gradient computation. When is_training is
+False, a 1D Tensor for the population mean to be reused in both
+1st and 2nd order gradient computation.}]>:$reserve_space_1,
+    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
+variance (inverted variance in the cuDNN case) to be reused in
+gradient computation. When is_training is False, a 1D Tensor
+for the population variance to be reused in both 1st and 2nd
+order gradient computation.}]>:$reserve_space_2,
 
     DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
     DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
@@ -4277,11 +4636,12 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x_backprop,
-    TF_Float32Tensor:$scale_backprop,
-    TF_Float32Tensor:$offset_backprop,
-    TF_Float32Tensor:$reserve_space_3,
-    TF_Float32Tensor:$reserve_space_4
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
+    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
+    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
+in FusedBatchNorm.}]>:$reserve_space_4
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -4297,12 +4657,21 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y_backprop,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
-    TF_Float32Tensor:$scale,
-    TF_Float32Tensor:$reserve_space_1,
-    TF_Float32Tensor:$reserve_space_2,
-    TF_Float32Tensor:$reserve_space_3,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
+    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
+mean to be reused in gradient computation. When is_training is
+False, a 1D Tensor for the population mean to be reused in both
+1st and 2nd order gradient computation.}]>:$reserve_space_1,
+    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
+variance (inverted variance in the cuDNN case) to be reused in
+gradient computation. When is_training is False, a 1D Tensor
+for the population variance to be reused in both 1st and 2nd
+order gradient computation.}]>:$reserve_space_2,
+    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for some intermediate results to be reused
+in gradient computation. When is_training is False, a dummy empty Tensor will be
+created.}]>:$reserve_space_3,
 
     DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
     DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NDHWC", "NCDHW"]>, "NHWC">:$data_format,
@@ -4310,11 +4679,12 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x_backprop,
-    TF_Float32Tensor:$scale_backprop,
-    TF_Float32Tensor:$offset_backprop,
-    TF_Float32Tensor:$reserve_space_4,
-    TF_Float32Tensor:$reserve_space_5
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
+    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_4,
+    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
+in FusedBatchNorm.}]>:$reserve_space_5
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -4338,11 +4708,13 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
-    TF_Float32Tensor:$scale,
-    TF_Float32Tensor:$offset,
-    TF_Float32Tensor:$mean,
-    TF_Float32Tensor:$variance,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
+must be empty for training.}]>:$mean,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
+must be empty for training.}]>:$variance,
 
     DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
     DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
@@ -4351,11 +4723,15 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y,
-    TF_Float32Tensor:$batch_mean,
-    TF_Float32Tensor:$batch_variance,
-    TF_Float32Tensor:$reserve_space_1,
-    TF_Float32Tensor:$reserve_space_2
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
+to compute the running mean.}]>:$batch_mean,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
+TensorFlow to compute the running variance.}]>:$batch_variance,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
+in the gradient computation.}]>:$reserve_space_1,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
+in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -4382,11 +4758,13 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
-    TF_Float32Tensor:$scale,
-    TF_Float32Tensor:$offset,
-    TF_Float32Tensor:$mean,
-    TF_Float32Tensor:$variance,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
+must be empty for training.}]>:$mean,
+    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
+must be empty for training.}]>:$variance,
 
     DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
     DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
@@ -4395,12 +4773,17 @@ The size of 1D Tensors matches the dimension C of the 4D Tensors.
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y,
-    TF_Float32Tensor:$batch_mean,
-    TF_Float32Tensor:$batch_variance,
-    TF_Float32Tensor:$reserve_space_1,
-    TF_Float32Tensor:$reserve_space_2,
-    TF_Float32Tensor:$reserve_space_3
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
+to compute the running mean.}]>:$batch_mean,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
+TensorFlow to compute the running variance.}]>:$batch_variance,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
+in the gradient computation.}]>:$reserve_space_1,
+    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
+in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2,
+    Res<TF_Float32Tensor, [{A 1D Tensor for some intermediate results, to be reused in the gradient
+computation for better efficiency.}]>:$reserve_space_3
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -4577,12 +4960,13 @@ See also `tf.gather` and `tf.batch_gather`.
   }];
 
   let arguments = (ins
-    TF_Tensor:$params,
-    TF_I32OrI64Tensor:$indices
+    Arg<TF_Tensor, [{The tensor from which to gather values.}]>:$params,
+    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
+shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
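A brief sketch of the GatherNd shape rule stated above, through `tf.gather_nd`:

```python
import tensorflow as tf

params = tf.constant([[1, 2], [3, 4], [5, 6]])
indices = tf.constant([[0, 1], [2, 0]])
# The last dimension of `indices` indexes into `params`, so the result
# has shape indices.shape[:-1] + params.shape[indices.shape[-1]:] = [2].
out = tf.gather_nd(params, indices)    # [2, 5]
```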
@@ -4625,15 +5009,18 @@ See also `tf.batch_gather` and `tf.gather_nd`.
   }];
 
   let arguments = (ins
-    TF_Tensor:$params,
-    TF_I32OrI64Tensor:$indices,
-    TF_I32OrI64Tensor:$axis,
+    Arg<TF_Tensor, [{The tensor from which to gather values. Must be at least rank
+`axis + 1`.}]>:$params,
+    Arg<TF_I32OrI64Tensor, [{Index tensor. Must be in range `[0, params.shape[axis])`.}]>:$indices,
+    Arg<TF_I32OrI64Tensor, [{The axis in `params` to gather `indices` from. Defaults to the first
+dimension. Supports negative indexes.}]>:$axis,
 
     DefaultValuedAttr<I64Attr, "0">:$batch_dims
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
+shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
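And the GatherV2 `axis` variant, through `tf.gather`; the comment spells out the output-shape rule from the result description above:

```python
import tensorflow as tf

params = tf.constant([[10, 11, 12],
                      [20, 21, 22]])
# Gather columns 2 and 0 along axis 1; output shape is
# params.shape[:axis] + indices.shape + params.shape[axis + 1:] = [2, 2].
out = tf.gather(params, indices=[2, 0], axis=1)   # [[12, 10], [22, 20]]
```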
@@ -4723,11 +5110,11 @@ See `rgb_to_hsv` for a description of the HSV encoding.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$images
+    Arg<TF_FloatTensor, [{1-D or higher rank. HSV data to convert. Last dimension must be size 3.}]>:$images
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{`images` converted to RGB.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -4751,7 +5138,7 @@ table will be immutable.
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_LookupTableAlloc]>:$table_handle
+    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
   );
 }
 
@@ -4764,11 +5151,16 @@ inner-most dimension of `input`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
+  dimension of `input` is replaced with its inverse 1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.ifft
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
@@ -4783,11 +5175,16 @@ inner-most 2 dimensions of `input`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
+  dimensions of `input` are replaced with their inverse 2D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.ifft2
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
@@ -4802,11 +5199,16 @@ inner-most 3 dimensions of `input`.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
+  dimensions of `input` are replaced with their inverse 3D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.ifftn with 3 dimensions.
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
@@ -4832,12 +5234,18 @@ larger, the dimension is padded with zeros.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input,
-    TF_Int32Tensor:$fft_length
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
   );
 
   let results = (outs
-    TF_F32OrF64Tensor:$output
+    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most
+  dimension of `input` is replaced with the `fft_length` samples of its inverse
+  1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.irfft
+@end_compatibility}]>:$output
   );
 
   TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
@@ -4865,12 +5273,18 @@ the dimension is padded with zeros.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input,
-    TF_Int32Tensor:$fft_length
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
   );
 
   let results = (outs
-    TF_F32OrF64Tensor:$output
+    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 2
+  dimensions of `input` are replaced with the `fft_length` samples of their
+  inverse 2D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.irfft2
+@end_compatibility}]>:$output
   );
 
   TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
@@ -4898,12 +5312,18 @@ the dimension is padded with zeros.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64]>:$input,
-    TF_Int32Tensor:$fft_length
+    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
   );
 
   let results = (outs
-    TF_F32OrF64Tensor:$output
+    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 3
+  dimensions of `input` are replaced with the `fft_length` samples of their
+  inverse 3D real Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.irfftn with 3 dimensions.
+@end_compatibility}]>:$output
   );
 
   TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
@@ -5077,13 +5497,13 @@ $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$predictions,
-    TF_I32OrI64Tensor:$targets,
-    TF_I32OrI64Tensor:$k
+    Arg<TF_Float32Tensor, [{A `batch_size` x `classes` tensor.}]>:$predictions,
+    Arg<TF_I32OrI64Tensor, [{A `batch_size` vector of class ids.}]>:$targets,
+    Arg<TF_I32OrI64Tensor, [{Number of top elements to look at for computing precision.}]>:$k
   );
 
   let results = (outs
-    TF_BoolTensor:$precision
+    Res<TF_BoolTensor, [{Computed precision at `k` as a `bool Tensor`.}]>:$precision
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
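A small sketch of the InTopKV2 semantics above via `tf.math.in_top_k` (the TF2 wrapper takes `targets` before `predictions`):

```python
import tensorflow as tf

predictions = tf.constant([[0.1, 0.8, 0.1],
                           [0.3, 0.3, 0.4]])
targets = tf.constant([1, 0])
# True where the target class is among the top-k entries of its row
# (ties included, as described above).
hits = tf.math.in_top_k(targets, predictions, k=2)   # [True, True]
```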
@@ -5099,7 +5519,7 @@ A placeholder op for a value that will be fed into the computation.
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A tensor that will be provided using the infeed mechanism.}]>:$output
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -5111,9 +5531,9 @@ Table initializer that takes two tensors for keys and values respectively.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_LookupTableWrite]>:$table_handle,
-    TF_Tensor:$keys,
-    TF_Tensor:$values
+    Arg<TF_ResourceTensor, [{Handle to a table which will be initialized.}], [TF_LookupTableWrite]>:$table_handle,
+    Arg<TF_Tensor, [{Keys of type Tkey.}]>:$keys,
+    Arg<TF_Tensor, [{Values of type Tval.}]>:$values
   );
 
   let results = (outs);
@@ -5130,13 +5550,13 @@ Computes y = x; y[i, :] += v; return y.
   }];
 
   let arguments = (ins
-    TF_Tensor:$x,
-    TF_Int32Tensor:$i,
-    TF_Tensor:$v
+    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$x,
+    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
+    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
   );
 
   let results = (outs
-    TF_Tensor:$y
+    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5153,13 +5573,13 @@ operation create / operate on a copy of `x`.
   }];
 
   let arguments = (ins
-    TF_Tensor:$x,
-    TF_Int32Tensor:$i,
-    TF_Tensor:$v
+    Arg<TF_Tensor, [{A tensor of type `T`.}]>:$x,
+    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
+    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
   );
 
   let results = (outs
-    TF_Tensor:$y
+    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5263,11 +5683,11 @@ invert_permutation(x) ==> [2, 4, 3, 0, 1]
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$x
+    Arg<TF_I32OrI64Tensor, [{1-D.}]>:$x
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$y
+    Res<TF_I32OrI64Tensor, [{1-D.}]>:$y
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
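The invariant `y[x[i]] = i` from the example above, checked through `tf.math.invert_permutation`:

```python
import tensorflow as tf

x = tf.constant([3, 4, 0, 2, 1])
# y satisfies y[x[i]] = i, so it undoes the permutation x.
y = tf.math.invert_permutation(x)      # [2, 4, 3, 0, 1]
```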
@@ -5369,7 +5789,8 @@ def TF_IteratorOp : TF_Op<"Iterator", []> {
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
+    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator"
+or "IteratorGetNext" op.}], [TF_DatasetIteratorAlloc]>:$handle
   );
 }
 
@@ -5379,14 +5800,14 @@ Converts the given string representing a handle to an iterator to a resource.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$string_handle,
+    Arg<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle,
 
     DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
     DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$resource_handle
+    Res<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorAlloc]>:$resource_handle
   );
 }
 
@@ -5465,11 +5886,11 @@ Converts the given `resource_handle` representing an iterator to a string.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead]>:$resource_handle
+    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle
   );
 
   let results = (outs
-    TF_StrTensor:$string_handle
+    Res<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle
   );
 }
 
@@ -5529,11 +5950,11 @@ Computes half the L2 norm of a tensor without the `sqrt`:
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$t
+    Arg<TF_FloatTensor, [{Typically 2-D, but may have any dimensions.}]>:$t
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{0-D.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5557,7 +5978,7 @@ convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imag
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$input,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D.}]>:$input,
 
     DefaultValuedAttr<I64Attr, "5">:$depth_radius,
     DefaultValuedAttr<F32Attr, "1.0f">:$bias,
@@ -5576,9 +5997,9 @@ def TF_LRNGradOp : TF_Op<"LRNGrad", [NoSideEffect]> {
   let summary = "Gradients for Local Response Normalization.";
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$input_grads,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$input_image,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output_image,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_grads,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_image,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$output_image,
 
     DefaultValuedAttr<I64Attr, "5">:$depth_radius,
     DefaultValuedAttr<F32Attr, "1.0f">:$bias,
@@ -5587,7 +6008,7 @@ def TF_LRNGradOp : TF_Op<"LRNGrad", [NoSideEffect]> {
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The gradients for LRN.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5622,14 +6043,15 @@ Computes rectified linear gradients for a LeakyRelu operation.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$gradients,
-    TF_FloatTensor:$features,
+    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding LeakyRelu operation.}]>:$gradients,
+    Arg<TF_FloatTensor, [{The features passed as input to the corresponding LeakyRelu operation,
+OR the outputs of that operation (both work equivalently).}]>:$features,
 
     DefaultValuedAttr<F32Attr, "0.2f">:$alpha
   );
 
   let results = (outs
-    TF_FloatTensor:$backprops
+    Res<TF_FloatTensor, [{`gradients * (features > 0) + alpha * gradients * (features <= 0)`.}]>:$backprops
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5794,13 +6216,13 @@ tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$start,
-    TF_FloatTensor:$stop,
-    TF_I32OrI64Tensor:$num
+    Arg<TF_FloatTensor, [{0-D tensor. First entry in the range.}]>:$start,
+    Arg<TF_FloatTensor, [{0-D tensor. Last entry in the range.}]>:$stop,
+    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of values to generate.}]>:$num
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{1-D. The generated values.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5837,13 +6259,13 @@ idx ==> [1, 3, 5]
   }];
 
   let arguments = (ins
-    TF_Tensor:$x,
-    TF_Tensor:$y
+    Arg<TF_Tensor, [{1-D. Values to keep.}]>:$x,
+    Arg<TF_Tensor, [{1-D. Values to remove.}]>:$y
   );
 
   let results = (outs
-    TF_Tensor:$out,
-    TF_I32OrI64Tensor:$idx
+    Res<TF_Tensor, [{1-D. Values present in `x` but not in `y`.}]>:$out,
+    Res<TF_I32OrI64Tensor, [{1-D. Positions of `x` values preserved in `out`.}]>:$idx
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5862,9 +6284,9 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta,
-    TF_Float32Tensor:$velocities,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the ADAM optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of momenta used in the ADAM optimization algorithm.}]>:$momenta,
+    Arg<TF_Float32Tensor, [{Value of velocities used in the ADAM optimization algorithm.}]>:$velocities,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -5888,10 +6310,10 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta,
-    TF_Float32Tensor:$velocities,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the ADAM optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of momenta used in the ADAM optimization algorithm.}]>:$momenta,
+    Arg<TF_Float32Tensor, [{Value of velocities used in the ADAM optimization algorithm.}]>:$velocities,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the ADAM optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -5915,9 +6337,9 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$updates,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the Adadelta optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adadelta optimization algorithm.}]>:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of updates used in the Adadelta optimization algorithm.}]>:$updates,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -5941,10 +6363,10 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$updates,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the Adadelta optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adadelta optimization algorithm.}]>:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of updates used in the Adadelta optimization algorithm.}]>:$updates,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Adadelta optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -5968,8 +6390,8 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the Adagrad optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adagrad optimization algorithm.}]>:$accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -5993,9 +6415,9 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the Adagrad optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adagrad optimization algorithm.}]>:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Adagrad optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6019,10 +6441,10 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$ms,
-    TF_Float32Tensor:$mom,
-    TF_Float32Tensor:$mg,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the centered RMSProp optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of ms used in the centered RMSProp optimization algorithm.}]>:$ms,
+    Arg<TF_Float32Tensor, [{Value of mom used in the centered RMSProp optimization algorithm.}]>:$mom,
+    Arg<TF_Float32Tensor, [{Value of mg used in the centered RMSProp optimization algorithm.}]>:$mg,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6046,9 +6468,9 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$linears,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the FTRL optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the FTRL optimization algorithm.}]>:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of linears used in the FTRL optimization algorithm.}]>:$linears,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6072,10 +6494,10 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$linears,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the FTRL optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the FTRL optimization algorithm.}]>:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of linears used in the FTRL optimization algorithm.}]>:$linears,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the FTRL optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6099,10 +6521,10 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$weights,
-    TF_Float32Tensor:$benefits,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the MDL Adagrad Light optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of weights used in the MDL Adagrad Light optimization algorithm.}]>:$weights,
+    Arg<TF_Float32Tensor, [{Value of benefits used in the MDL Adagrad Light optimization algorithm.}]>:$benefits,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6126,8 +6548,8 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the Momentum optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of momenta used in the Momentum optimization algorithm.}]>:$momenta,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6151,9 +6573,9 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the Momentum optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of momenta used in the Momentum optimization algorithm.}]>:$momenta,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Momentum optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6177,8 +6599,8 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the proximal Adagrad optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the proximal Adagrad optimization algorithm.}]>:$accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6204,9 +6626,9 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the proximal Adagrad optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of accumulators used in the proximal Adagrad optimization algorithm.}]>:$accumulators,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the proximal Adagrad optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6267,9 +6689,9 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$ms,
-    TF_Float32Tensor:$mom,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the RMSProp optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of ms used in the RMSProp optimization algorithm.}]>:$ms,
+    Arg<TF_Float32Tensor, [{Value of mom used in the RMSProp optimization algorithm.}]>:$mom,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6293,10 +6715,10 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$ms,
-    TF_Float32Tensor:$mom,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the RMSProp optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of ms used in the RMSProp optimization algorithm.}]>:$ms,
+    Arg<TF_Float32Tensor, [{Value of mom used in the RMSProp optimization algorithm.}]>:$mom,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the RMSProp optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6320,7 +6742,7 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the stochastic gradient descent optimization algorithm.}]>:$parameters,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6344,8 +6766,8 @@ executed.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$gradient_accumulators,
+    Arg<TF_Float32Tensor, [{Value of parameters used in the stochastic gradient descent optimization algorithm.}]>:$parameters,
+    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the stochastic gradient descent optimization algorithm.}]>:$gradient_accumulators,
 
     DefaultValuedAttr<I64Attr, "-1">:$table_id,
     StrAttr:$table_name,
@@ -6419,11 +6841,11 @@ For each batch `i` and class `j` we have
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$logits
+    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
   );
 
   let results = (outs
-    TF_FloatTensor:$logsoftmax
+    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$logsoftmax
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
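A one-line usage sketch of LogSoftmax through `tf.nn.log_softmax`:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
# logsoftmax[i, j] = logits[i, j] - log(sum_j exp(logits[i, j])).
out = tf.nn.log_softmax(logits)        # same shape as `logits`
```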
@@ -6452,11 +6874,11 @@ def TF_LogicalNotOp : TF_Op<"LogicalNot", [Involution, NoSideEffect, SameOperand
   let summary = "Returns the truth value of `NOT x` element-wise.";
 
   let arguments = (ins
-    TF_BoolTensor:$x
+    Arg<TF_BoolTensor, [{A `Tensor` of type `bool`.}]>:$x
   );
 
   let results = (outs
-    TF_BoolTensor:$y
+    Res<TF_BoolTensor, [{A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.}]>:$y
   );
 
   let hasCanonicalizer = 1;
@@ -6485,12 +6907,12 @@ def TF_LookupTableExportV2Op : TF_Op<"LookupTableExportV2", []> {
   let summary = "Outputs all keys and values in the table.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_LookupTableRead]>:$table_handle
+    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle
   );
 
   let results = (outs
-    TF_Tensor:$keys,
-    TF_Tensor:$values
+    Res<TF_Tensor, [{Vector of all keys present in the table.}]>:$keys,
+    Res<TF_Tensor, [{Tensor of all values in the table. Indexed in parallel with `keys`.}]>:$values
   );
 
   TF_DerivedResultTypeAttr Tkeys = TF_DerivedResultTypeAttr<0>;
@@ -6509,13 +6931,14 @@ table. It must also be of the same type as the table values.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_LookupTableRead]>:$table_handle,
-    TF_Tensor:$keys,
+    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle,
+    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
     TF_Tensor:$default_value
   );
 
   let results = (outs
-    TF_Tensor:$values
+    Res<TF_Tensor, [{Same shape as `keys`.  Values found in the table, or `default_value`
+for missing keys.}]>:$values
   );
 
   TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
@@ -6533,9 +6956,9 @@ The tensor `values` must be of the type of the table values.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_LookupTableWrite]>:$table_handle,
-    TF_Tensor:$keys,
-    TF_Tensor:$values
+    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
+    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
+    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
   );
 
   let results = (outs);
@@ -6553,9 +6976,9 @@ The tensor `values` must be of the type of the table values.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_LookupTableWrite]>:$table_handle,
-    TF_Tensor:$keys,
-    TF_Tensor:$values
+    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
+    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
+    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
   );
 
   let results = (outs);
@@ -6573,8 +6996,8 @@ already in the table are silently ignored.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_LookupTableWrite]>:$table_handle,
-    TF_Tensor:$keys
+    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
+    Arg<TF_Tensor, [{Any shape.  Keys of the elements to remove.}]>:$keys
   );
 
   let results = (outs);
@@ -6586,11 +7009,11 @@ def TF_LookupTableSizeV2Op : TF_Op<"LookupTableSizeV2", []> {
   let summary = "Computes the number of elements in the given table.";
 
   let arguments = (ins
-    TF_ResourceTensor:$table_handle
+    Arg<TF_ResourceTensor, [{Handle to the table.}]>:$table_handle
   );
 
   let results = (outs
-    TF_Int64Tensor:$size
+    Res<TF_Int64Tensor, [{Scalar that contains number of elements in the table.}]>:$size
   );
 }
 
@@ -6620,12 +7043,15 @@ A 2-D example:
   }];
 
   let arguments = (ins
-    TF_Tensor:$sorted_inputs,
-    TF_Tensor:$values
+    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
+    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
+the values that will be searched for in `sorted_inputs`.}]>:$values
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$output
+    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the first scalar index
+into the last dimension where values can be inserted without changing the
+ordered property.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
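The row-wise search described above is exposed in Python as tf.searchsorted; a minimal, illustrative sketch (not part of this patch):

import tensorflow as tf

sorted_inputs = tf.constant([[0., 3., 8., 9., 10.],
                             [1., 2., 3., 4., 5.]])
values = tf.constant([[9.8, 2.1], [4.1, 0.1]])

# First insertion index in each row that keeps the row ordered.
print(tf.searchsorted(sorted_inputs, values, side="left"))  # [[4, 1], [4, 0]]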
@@ -6747,13 +7173,15 @@ Useful special cases:
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$num_lower,
-    TF_I32OrI64Tensor:$num_upper
+    Arg<TF_Tensor, [{Rank `k` tensor.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of subdiagonals to keep. If negative, keep entire
+lower triangle.}]>:$num_lower,
+    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of superdiagonals to keep. If negative, keep
+entire upper triangle.}]>:$num_upper
   );
 
   let results = (outs
-    TF_Tensor:$band
+    Res<TF_Tensor, [{Rank `k` tensor of the same shape as input. The extracted banded tensor.}]>:$band
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
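A small sketch of the `num_lower`/`num_upper` semantics documented above, via the public tf.linalg.band_part wrapper (illustrative only):

import tensorflow as tf

x = tf.reshape(tf.range(16, dtype=tf.float32), [4, 4])

# Keep one subdiagonal and all superdiagonals (-1 means "keep everything").
band = tf.linalg.band_part(x, num_lower=1, num_upper=-1)
print(band)  # entries more than one row below the main diagonal are zeroed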
@@ -6799,11 +7227,11 @@ which has shape (2, 4, 4)
   }];
 
   let arguments = (ins
-    TF_Tensor:$diagonal
+    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -6915,15 +7343,19 @@ tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_Int32Tensor:$k,
-    TF_Tensor:$padding_value,
+    Arg<TF_Tensor, [{Rank `r` tensor where `r >= 2`.}]>:$input,
+    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
+diagonal, and negative value means subdiagonals. `k` can be a single integer
+(for a single diagonal) or a pair of integers specifying the low and high ends
+of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
+    Arg<TF_Tensor, [{The value to fill the area outside the specified diagonal band with.
+Default is 0.}]>:$padding_value,
 
     DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
   );
 
   let results = (outs
-    TF_Tensor:$diagonal
+    Res<TF_Tensor, [{The extracted diagonal(s).}]>:$diagonal
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
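The `k` and `padding_value` operands above correspond to the keyword arguments of tf.linalg.diag_part; a hedged sketch of the band-extraction behavior:

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])

# Extract the band from the first subdiagonal (k = -1) up to the main
# diagonal (k = 0), padding the shorter diagonal with 0.
print(tf.linalg.diag_part(x, k=(-1, 0), padding_value=0))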
@@ -7025,15 +7457,23 @@ tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
   }];
 
   let arguments = (ins
-    TF_Tensor:$diagonal,
-    TF_Int32Tensor:$k,
-    TF_Int32Tensor:$num_rows,
-    TF_Int32Tensor:$num_cols,
-    TF_Tensor:$padding_value
+    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`.}]>:$diagonal,
+    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
+diagonal, and negative value means subdiagonals. `k` can be a single integer
+(for a single diagonal) or a pair of integers specifying the low and high ends
+of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
+    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
+the output matrix is a square matrix and infers the matrix size from k and the
+innermost dimension of `diagonal`.}]>:$num_rows,
+    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
+assumes the output matrix is a square matrix and infers the matrix size from
+k and the innermost dimension of `diagonal`.}]>:$num_cols,
+    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
+Default is 0.}]>:$padding_value
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7164,17 +7604,25 @@ tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
   }];
 
   let arguments = (ins
-    TF_Tensor:$diagonal,
-    TF_Int32Tensor:$k,
-    TF_Int32Tensor:$num_rows,
-    TF_Int32Tensor:$num_cols,
-    TF_Tensor:$padding_value,
+    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`.}]>:$diagonal,
+    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
+diagonal, and negative value means subdiagonals. `k` can be a single integer
+(for a single diagonal) or a pair of integers specifying the low and high ends
+of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
+    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
+the output matrix is a square matrix and infers the matrix size from k and the
+innermost dimension of `diagonal`.}]>:$num_rows,
+    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
+assumes the output matrix is a square matrix and infers the matrix size from
+k and the innermost dimension of `diagonal`.}]>:$num_cols,
+    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
+Default is 0.}]>:$padding_value,
 
     DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7198,13 +7646,17 @@ garbage result.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$input,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input,
 
     DefaultValuedAttr<BoolAttr, "false">:$adjoint
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.
+
+@compatibility(numpy)
+Equivalent to np.linalg.inv
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7231,12 +7683,12 @@ tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_Tensor:$diagonal
+    Arg<TF_Tensor, [{Rank `k+1`, where `k >= 1`.}]>:$input,
+    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = input.shape`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7324,13 +7776,17 @@ tf.matrix_set_diag(diagonals, k = (-1, 0))
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_Tensor:$diagonal,
-    TF_Int32Tensor:$k
+    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
+    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
+`r >= 1`.}]>:$diagonal,
+    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
+diagonal, and negative value means subdiagonals. `k` can be a single integer
+(for a single diagonal) or a pair of integers specifying the low and high ends
+of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7451,15 +7907,19 @@ tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_Tensor:$diagonal,
-    TF_Int32Tensor:$k,
+    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
+    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
+`r >= 1`.}]>:$diagonal,
+    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
+diagonal, and negative value means subdiagonals. `k` can be a single integer
+(for a single diagonal) or a pair of integers specifying the low and high ends
+of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
 
     DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7478,14 +7938,14 @@ If `adjoint` is `True` then each output matrix satisfies
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$matrix,
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$rhs,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
 
     DefaultValuedAttr<BoolAttr, "false">:$adjoint
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7545,15 +8005,15 @@ tf.matmul(a, x)
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$matrix,
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$rhs,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
 
     DefaultValuedAttr<BoolAttr, "true">:$lower,
     DefaultValuedAttr<BoolAttr, "false">:$adjoint
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7572,14 +8032,15 @@ retained with length 1.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_I32OrI64Tensor:$reduction_indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
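The `reduction_indices`/`keep_dims` pattern documented for the Max reduction above (and repeated for Mean, Min, and Prod below) is the same one the tf.reduce_* wrappers expose; a short sketch:

import tensorflow as tf

x = tf.constant([[1., 5., 3.],
                 [4., 2., 6.]])

# Reduce along axis 1; keepdims retains the reduced dimension with length 1.
print(tf.reduce_max(x, axis=1, keepdims=True))  # shape [2, 1] -> [[5.], [6.]]
print(tf.reduce_max(x, axis=[0, 1]))            # scalar 6.0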
@@ -7595,7 +8056,7 @@ def TF_MaxPoolOp : TF_Op<"MaxPool", [NoSideEffect, TF_FoldOperandsTransposeInter
   let summary = "Performs max pooling on the input.";
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>:$input,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
 
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
@@ -7605,7 +8066,7 @@ def TF_MaxPoolOp : TF_Op<"MaxPool", [NoSideEffect, TF_FoldOperandsTransposeInter
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7625,7 +8086,7 @@ def TF_MaxPool3DOp : TF_Op<"MaxPool3D", [NoSideEffect]> {
   let summary = "Performs 3D max pooling on the input.";
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$input,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
@@ -7634,7 +8095,7 @@ def TF_MaxPool3DOp : TF_Op<"MaxPool3D", [NoSideEffect]> {
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The max pooled output tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7644,9 +8105,9 @@ def TF_MaxPool3DGradOp : TF_Op<"MaxPool3DGrad", [NoSideEffect]> {
   let summary = "Computes gradients of 3D max pooling function.";
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$orig_input,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$orig_output,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$grad,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original input tensor.}]>:$orig_input,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original output tensor.}]>:$orig_output,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
@@ -7666,9 +8127,9 @@ def TF_MaxPool3DGradGradOp : TF_Op<"MaxPool3DGradGrad", [NoSideEffect]> {
   let summary = "Computes second-order gradients of the maxpooling function.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$orig_input,
-    TF_IntOrFpTensor:$orig_output,
-    TF_IntOrFpTensor:$grad,
+    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
+    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
+    Arg<TF_IntOrFpTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
 
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
@@ -7677,7 +8138,7 @@ def TF_MaxPool3DGradGradOp : TF_Op<"MaxPool3DGradGrad", [NoSideEffect]> {
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7687,9 +8148,9 @@ def TF_MaxPoolGradOp : TF_Op<"MaxPoolGrad", [NoSideEffect]> {
   let summary = "Computes gradients of the maxpooling function.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$orig_input,
-    TF_IntOrFpTensor:$orig_output,
-    TF_IntOrFpTensor:$grad,
+    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
+    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
+    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
 
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
@@ -7699,7 +8160,7 @@ def TF_MaxPoolGradOp : TF_Op<"MaxPoolGrad", [NoSideEffect]> {
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7713,9 +8174,9 @@ def TF_MaxPoolGradGradOp : TF_Op<"MaxPoolGradGrad", [NoSideEffect]> {
   let summary = "Computes second-order gradients of the maxpooling function.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$orig_input,
-    TF_IntOrFpTensor:$orig_output,
-    TF_IntOrFpTensor:$grad,
+    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
+    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
+    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
 
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
     Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
@@ -7724,7 +8185,7 @@ def TF_MaxPoolGradGradOp : TF_Op<"MaxPoolGradGrad", [NoSideEffect]> {
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7734,18 +8195,19 @@ def TF_MaxPoolGradGradV2Op : TF_Op<"MaxPoolGradGradV2", [NoSideEffect]> {
   let summary = "Computes second-order gradients of the maxpooling function.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$orig_input,
-    TF_IntOrFpTensor:$orig_output,
-    TF_IntOrFpTensor:$grad,
-    TF_Int32Tensor:$ksize,
-    TF_Int32Tensor:$strides,
+    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
+    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
+    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
+    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
+    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
+input tensor.}]>:$strides,
 
     TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
     DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7755,18 +8217,19 @@ def TF_MaxPoolGradV2Op : TF_Op<"MaxPoolGradV2", [NoSideEffect]> {
   let summary = "Computes gradients of the maxpooling function.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$orig_input,
-    TF_IntOrFpTensor:$orig_output,
-    TF_IntOrFpTensor:$grad,
-    TF_Int32Tensor:$ksize,
-    TF_Int32Tensor:$strides,
+    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
+    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
+    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
+    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
+    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
+input tensor.}]>:$strides,
 
     TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
     DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7776,16 +8239,17 @@ def TF_MaxPoolV2Op : TF_Op<"MaxPoolV2", [NoSideEffect]> {
   let summary = "Performs max pooling on the input.";
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>:$input,
-    TF_Int32Tensor:$ksize,
-    TF_Int32Tensor:$strides,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
+    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
+    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
+input tensor.}]>:$strides,
 
     TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
     DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7802,14 +8266,15 @@ retained with length 1.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_I32OrI64Tensor:$reduction_indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7837,11 +8302,12 @@ in the summaries to merge use the same tag.
   }];
 
   let arguments = (ins
-    Variadic<TF_StrTensor>:$inputs
+    Arg<Variadic<TF_StrTensor>, [{Can be of any shape.  Each must contain serialized `Summary` protocol
+buffers.}]>:$inputs
   );
 
   let results = (outs
-    TF_StrTensor:$summary
+    Res<TF_StrTensor, [{Scalar. Serialized `Summary` protocol buffer.}]>:$summary
   );
 
   TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
@@ -7864,8 +8330,9 @@ user-facing temporary locations.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$checkpoint_prefixes,
-    TF_StrTensor:$destination_prefix,
+    Arg<TF_StrTensor, [{prefixes of V2 checkpoints to merge.}]>:$checkpoint_prefixes,
+    Arg<TF_StrTensor, [{scalar.  The desired final prefix.  Allowed to be the same
+as one of the checkpoint_prefixes.}]>:$destination_prefix,
 
     DefaultValuedAttr<BoolAttr, "true">:$delete_old_dirs
   );
@@ -7886,14 +8353,15 @@ retained with length 1.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_I32OrI64Tensor:$reduction_indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -7953,14 +8421,15 @@ pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$paddings,
+    Arg<TF_Tensor, [{The input tensor to be padded.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
+rows must be the same as the rank of `input`.}]>:$paddings,
 
     TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{The padded tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
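A hedged Python sketch of the two-column `paddings` matrix described above, via tf.pad with a mirrored mode (illustrative, not part of this patch):

import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])

# One row of `paddings` per input dimension: [pad_before, pad_after].
paddings = tf.constant([[1, 1],
                        [2, 2]])
print(tf.pad(t, paddings, mode="REFLECT"))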
@@ -7994,14 +8463,15 @@ pad(t, paddings) ==> [[ 1,  5]
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$paddings,
+    Arg<TF_Tensor, [{The input tensor to be folded.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
+rows must be the same as the rank of `input`.}]>:$paddings,
 
     TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{The folded tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -8158,7 +8628,7 @@ def TF_MultiDeviceIteratorOp : TF_Op<"MultiDeviceIterator", []> {
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
+    Res<TF_ResourceTensor, [{Handle to the resource created.}], [TF_DatasetIteratorAlloc]>:$handle
   );
 }
 
@@ -8168,14 +8638,14 @@ Generates a MultiDeviceIterator resource from its provided string handle.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$string_handle,
+    Arg<TF_StrTensor, [{String representing the resource.}]>:$string_handle,
 
     DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
     DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$multi_device_iterator
+    Res<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorAlloc]>:$multi_device_iterator
   );
 }
 
@@ -8183,13 +8653,13 @@ def TF_MultiDeviceIteratorGetNextFromShardOp : TF_Op<"MultiDeviceIteratorGetNext
   let summary = "Gets next element for the provided shard number.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$multi_device_iterator,
-    TF_Int32Tensor:$shard_num,
-    TF_Int64Tensor:$incarnation_id
+    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$multi_device_iterator,
+    Arg<TF_Int32Tensor, [{Integer representing which shard to fetch data for.}]>:$shard_num,
+    Arg<TF_Int64Tensor, [{Which incarnation of the MultiDeviceIterator is running.}]>:$incarnation_id
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$components
+    Res<Variadic<TF_Tensor>, [{Result of the get_next on the dataset.}]>:$components
   );
 
   TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
@@ -8200,13 +8670,14 @@ def TF_MultiDeviceIteratorInitOp : TF_Op<"MultiDeviceIteratorInit", []> {
   let summary = "Initializes the multi device iterator with the given dataset.";
 
   let arguments = (ins
-    TF_VariantTensor:$dataset,
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$multi_device_iterator,
-    TF_Int64Tensor:$max_buffer_size
+    Arg<TF_VariantTensor, [{Dataset to be iterated upon.}]>:$dataset,
+    Arg<TF_ResourceTensor, [{A MultiDeviceIteratorResource.}], [TF_DatasetIteratorWrite]>:$multi_device_iterator,
+    Arg<TF_Int64Tensor, [{The maximum size of the host side per device buffer to keep.}]>:$max_buffer_size
   );
 
   let results = (outs
-    TF_Int64Tensor:$incarnation_id
+    Res<TF_Int64Tensor, [{An int64 indicating which incarnation of the MultiDeviceIterator
+is running.}]>:$incarnation_id
   );
 }
 
@@ -8214,11 +8685,11 @@ def TF_MultiDeviceIteratorToStringHandleOp : TF_Op<"MultiDeviceIteratorToStringH
   let summary = "Produces a string handle for the given MultiDeviceIterator.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead]>:$multi_device_iterator
+    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead]>:$multi_device_iterator
   );
 
   let results = (outs
-    TF_StrTensor:$string_handle
+    Res<TF_StrTensor, [{A string representing the resource.}]>:$string_handle
   );
 }
 
@@ -8226,15 +8697,17 @@ def TF_MultinomialOp : TF_Op<"Multinomial", [TF_CannotDuplicate]> {
   let summary = "Draws samples from a multinomial distribution.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$logits,
-    TF_Int32Tensor:$num_samples,
+    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
+represents the unnormalized log probabilities for all classes.}]>:$logits,
+    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$output
+    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
+contains the drawn class labels with range `[0, num_classes)`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -8256,7 +8729,8 @@ the insert operations. It does not support the initialization operation.
   }];
 
   let arguments = (ins
-    TF_Tensor:$empty_key,
+    Arg<TF_Tensor, [{The key used to represent empty key buckets internally. Must not
+be used in insert or lookup operations.}]>:$empty_key,
     TF_Tensor:$deleted_key,
 
     StrAttr:$container,
@@ -8269,7 +8743,7 @@ the insert operations. It does not support the initialization operation.
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_LookupTableAlloc]>:$table_handle
+    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
   );
 
   TF_DerivedOperandTypeAttr key_dtype = TF_DerivedOperandTypeAttr<0>;
@@ -8294,7 +8768,7 @@ the insert operations. It does not support the initialization operation.
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_LookupTableAlloc]>:$table_handle
+    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
   );
 }
 
@@ -8316,7 +8790,7 @@ the insert operations. It does not support the initialization operation.
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_LookupTableAlloc]>:$table_handle
+    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
   );
 }
 
@@ -8414,15 +8888,20 @@ using the `tf.gather operation`.  For example:
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Float16, TF_Float32]>:$boxes,
-    TensorOf<[TF_Float16, TF_Float32]>:$scores,
-    TF_Int32Tensor:$max_output_size,
-    TensorOf<[TF_Float16, TF_Float32]>:$iou_threshold,
-    TensorOf<[TF_Float16, TF_Float32]>:$score_threshold
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
+score corresponding to each box (each row of boxes).}]>:$scores,
+    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
+boxes to be selected by non max suppression.}]>:$max_output_size,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
+boxes overlap too much with respect to IOU.}]>:$iou_threshold,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
+boxes based on score.}]>:$score_threshold
   );
 
   let results = (outs
-    TF_Int32Tensor:$selected_indices
+    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
+indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices
   );
 
   TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
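A minimal sketch of the boxes/scores/thresholds contract documented above, assuming the tf.image.non_max_suppression wrapper (not part of this patch):

import tensorflow as tf

boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                     [0.0, 0.1, 1.0, 1.1],
                     [0.0, 2.0, 1.0, 3.0]])
scores = tf.constant([0.9, 0.8, 0.6])

# Greedily keep high-scoring boxes whose IOU with already-selected boxes
# stays below iou_threshold.
selected = tf.image.non_max_suppression(
    boxes, scores, max_output_size=2, iou_threshold=0.5, score_threshold=0.0)
print(selected)  # indices into `boxes`: [0, 2]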
@@ -8457,18 +8936,24 @@ using the `tf.gather operation`.  For example:
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Float16, TF_Float32]>:$boxes,
-    TensorOf<[TF_Float16, TF_Float32]>:$scores,
-    TF_Int32Tensor:$max_output_size,
-    TensorOf<[TF_Float16, TF_Float32]>:$iou_threshold,
-    TensorOf<[TF_Float16, TF_Float32]>:$score_threshold,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
+score corresponding to each box (each row of boxes).}]>:$scores,
+    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
+boxes to be selected by non max suppression.}]>:$max_output_size,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
+boxes overlap too much with respect to IOU.}]>:$iou_threshold,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
+boxes based on score.}]>:$score_threshold,
 
     DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
   );
 
   let results = (outs
-    TF_Int32Tensor:$selected_indices,
-    TF_Int32Tensor:$valid_outputs
+    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
+indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
+    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
+`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
   );
 
   TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
@@ -8506,20 +8991,31 @@ larger than 0.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Float16, TF_Float32]>:$boxes,
-    TensorOf<[TF_Float16, TF_Float32]>:$scores,
-    TF_Int32Tensor:$max_output_size,
-    TensorOf<[TF_Float16, TF_Float32]>:$iou_threshold,
-    TensorOf<[TF_Float16, TF_Float32]>:$score_threshold,
-    TensorOf<[TF_Float16, TF_Float32]>:$soft_nms_sigma,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
+score corresponding to each box (each row of boxes).}]>:$scores,
+    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
+boxes to be selected by non max suppression.}]>:$max_output_size,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
+boxes overlap too much with respect to IOU.}]>:$iou_threshold,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
+boxes based on score.}]>:$score_threshold,
+    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
+al. (cf. https://arxiv.org/abs/1704.04503).  When `soft_nms_sigma=0.0` (which
+is the default), we fall back to standard (hard) NMS.}]>:$soft_nms_sigma,
 
     DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
   );
 
   let results = (outs
-    TF_Int32Tensor:$selected_indices,
-    TensorOf<[TF_Float16, TF_Float32]>:$selected_scores,
-    TF_Int32Tensor:$valid_outputs
+    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
+indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
+    Res<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[M]` representing the corresponding
+scores for each selected box, where `M <= max_output_size`.  Scores only differ
+from corresponding input scores when using Soft NMS (i.e. when
+`soft_nms_sigma>0`).}]>:$selected_scores,
+    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
+`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -8650,16 +9146,16 @@ output =
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Int32, TF_Int64, TF_Uint8]>:$indices,
-    TF_Int32Tensor:$depth,
-    TF_Tensor:$on_value,
-    TF_Tensor:$off_value,
+    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint8]>, [{A tensor of indices.}]>:$indices,
+    Arg<TF_Int32Tensor, [{A scalar defining the depth of the one hot dimension.}]>:$depth,
+    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] = i`.}]>:$on_value,
+    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] != i`.}]>:$off_value,
 
     DefaultValuedAttr<I64Attr, "-1">:$axis
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{The one-hot tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
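The `depth`, `on_value`, `off_value`, and `axis` operands above correspond directly to tf.one_hot; a short illustrative sketch:

import tensorflow as tf

indices = tf.constant([0, 2, -1])

# depth=3 with explicit fill values; index -1 falls outside [0, depth), so the
# corresponding row is filled entirely with off_value.
print(tf.one_hot(indices, depth=3, on_value=5.0, off_value=0.0, axis=-1))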
@@ -8709,7 +9205,8 @@ times by rerunning "MakeIterator".
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
+    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to an "IteratorGetNext"
+op.}], [TF_DatasetIteratorAlloc]>:$handle
   );
 }
 
@@ -8717,11 +9214,11 @@ def TF_OnesLikeOp : TF_Op<"OnesLike", [Idempotent, NoSideEffect, SameOperandsAnd
   let summary = "Returns a tensor of ones with the same shape and type as x.";
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$x
+    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of type T.}]>:$x
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$y
+    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of the same shape and type as x but filled with ones.}]>:$y
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -8762,7 +9259,8 @@ def TF_OutfeedEnqueueTupleOp : TF_Op<"OutfeedEnqueueTuple", []> {
   let summary = "Enqueue multiple Tensor values on the computation outfeed.";
 
   let arguments = (ins
-    Variadic<TF_Tensor>:$inputs
+    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be inserted into the outfeed queue as an
+XLA tuple.}]>:$inputs
   );
 
   let results = (outs);
@@ -8798,13 +9296,13 @@ This is the opposite of `unpack`.
   }];
 
   let arguments = (ins
-    Variadic<TF_Tensor>:$values,
+    Arg<Variadic<TF_Tensor>, [{Must be of same shape and type.}]>:$values,
 
     DefaultValuedAttr<I64Attr, "0">:$axis
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{The packed tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9002,18 +9500,20 @@ stores the parameters for each batch.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_FloatTensor:$means,
-    TF_FloatTensor:$stdevs,
-    TF_FloatTensor:$minvals,
-    TF_FloatTensor:$maxvals,
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor. Batches are indexed by the 0th dimension.}]>:$shape,
+    Arg<TF_FloatTensor, [{The mean parameter of each batch.}]>:$means,
+    Arg<TF_FloatTensor, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stdevs,
+    Arg<TF_FloatTensor, [{The minimum cutoff. May be -infinity.}]>:$minvals,
+    Arg<TF_FloatTensor, [{The maximum cutoff. May be +infinity, and must be more than the minval
+for each batch.}]>:$maxvals,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{A matrix of shape num_batches x samples_per_batch, filled with random
+truncated normal values using the parameters for each row.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9118,13 +9618,13 @@ gradients in some corner cases.
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
+    Arg<TF_Tensor, [{any tensor.}]>:$input,
 
     StrAttr:$message
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{the same input tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9138,7 +9638,7 @@ Prints a string scalar to the desired output_stream.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$input,
+    Arg<TF_StrTensor, [{The string scalar to print.}]>:$input,
 
     DefaultValuedAttr<StrAttr, "stderr">:$output_stream,
     DefaultValuedAttr<StrAttr, "\n">:$end
@@ -9160,14 +9660,15 @@ retained with length 1.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_I32OrI64Tensor:$reduction_indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9195,14 +9696,18 @@ q_full, r_full = qr(a, full_matrices=True)
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$input,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,
 
     DefaultValuedAttr<BoolAttr, "false">:$full_matrices
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$q,
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$r
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Orthonormal basis for range of `a`. If `full_matrices` is `False` then
+shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
+`[..., M, M]`.}]>:$q,
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Triangular factor. If `full_matrices` is `False` then shape is
+`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.}]>:$r
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9290,9 +9795,13 @@ The above round function rounds the value based on the given round_mode.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$input,
-    TF_FloatTensor:$input_min,
-    TF_FloatTensor:$input_max,
+    Arg<TF_FloatTensor, [{Tensor to quantize and then dequantize.}]>:$input,
+    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the minimum input value that needs to
+be represented, otherwise it is determined from the min value of the `input`
+tensor.}]>:$input_min,
+    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the maximum input value that needs to
+be represented, otherwise it is determined from the max value of the `input`
+tensor.}]>:$input_max,
 
     DefaultValuedAttr<BoolAttr, "true">:$signed_input,
     DefaultValuedAttr<I64Attr, "8">:$num_bits,
@@ -9349,13 +9858,13 @@ has been dequeued (or 'timeout_ms' elapses, if specified).
   }];
 
   let arguments = (ins
-    TF_ResourceTensor:$handle,
+    Arg<TF_ResourceTensor, [{The handle to a queue.}]>:$handle,
 
     DefaultValuedAttr<I64Attr, "-1">:$timeout_ms
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$components
+    Res<Variadic<TF_Tensor>, [{One or more tensors that were dequeued as a tuple.}]>:$components
   );
 
   TF_DerivedResultTypeListAttr component_types = TF_DerivedResultTypeListAttr<0>;
@@ -9378,12 +9887,18 @@ the dimension is padded with zeros.
   }];
 
   let arguments = (ins
-    TF_F32OrF64Tensor:$input,
-    TF_Int32Tensor:$fft_length
+    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most
+  dimension of `input` is replaced with the `fft_length / 2 + 1` unique
+  frequency components of its 1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.rfft
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
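A hedged sketch of the RFFT contract above (real input, `fft_length / 2 + 1` unique complex outputs), via tf.signal.rfft:

import tensorflow as tf

signal = tf.random.normal([8])

# With fft_length = 8, the output holds 8 // 2 + 1 = 5 unique complex
# frequency components along the innermost dimension.
spectrum = tf.signal.rfft(signal, fft_length=[8])
print(spectrum.shape)  # [5]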
@@ -9408,12 +9923,19 @@ the dimension is padded with zeros.
   }];
 
   let arguments = (ins
-    TF_F32OrF64Tensor:$input,
-    TF_Int32Tensor:$fft_length
+    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 2
+  dimensions of `input` are replaced with their 2D Fourier transform. The
+  inner-most dimension contains `fft_length / 2 + 1` unique frequency
+  components.
+
+@compatibility(numpy)
+Equivalent to np.fft.rfft2
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
@@ -9438,12 +9960,19 @@ the dimension is padded with zeros.
   }];
 
   let arguments = (ins
-    TF_F32OrF64Tensor:$input,
-    TF_Int32Tensor:$fft_length
+    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
+    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 3
+  dimensions of `input` are replaced with their 3D Fourier transform. The
+  inner-most dimension contains `fft_length / 2 + 1` unique frequency
+  components.
+
+@compatibility(numpy)
+Equivalent to np.fft.rfftn with 3 dimensions.
+@end_compatibility}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
@@ -9475,11 +10004,11 @@ array([0.6666667, 1. , 1. ], dtype=float32)
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$images
+    Arg<TF_FloatTensor, [{1-D or higher rank. RGB data to convert. Last dimension must be size 3.}]>:$images
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{`images` converted to HSV.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9516,14 +10045,19 @@ where
   }];
 
   let arguments = (ins
-    Variadic<TF_I32OrI64Tensor>:$params_nested_splits,
-    TF_Tensor:$params_dense_values,
-    TF_I32OrI64Tensor:$indices
+    Arg<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
+`params` RaggedTensor input.}]>:$params_nested_splits,
+    Arg<TF_Tensor, [{The `flat_values` for the `params` RaggedTensor. There was a terminology change
+at the python level from dense_values to flat_values, so dense_values is the
+deprecated name.}]>:$params_dense_values,
+    Arg<TF_I32OrI64Tensor, [{Indices in the outermost dimension of `params` of the values that should be
+gathered.}]>:$indices
   );
 
   let results = (outs
-    Variadic<TF_I32OrI64Tensor>:$output_nested_splits,
-    TF_Tensor:$output_dense_values
+    Res<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
+returned RaggedTensor.}]>:$output_nested_splits,
+    Res<TF_Tensor, [{The `flat_values` for the returned RaggedTensor.}]>:$output_dense_values
   );
 
   TF_DerivedOperandTypeAttr Tsplits = TF_DerivedOperandTypeAttr<0>;
@@ -9557,14 +10091,14 @@ to match the size of the vector inputs.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$starts,
-    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$limits,
-    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$deltas
+    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The starts of each range.}]>:$starts,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The limits of each range.}]>:$limits,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The deltas of each range.}]>:$deltas
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$rt_nested_splits,
-    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$rt_dense_values
+    Res<TF_I32OrI64Tensor, [{The `row_splits` for the returned `RaggedTensor`.}]>:$rt_nested_splits,
+    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The `flat_values` for the returned `RaggedTensor`.}]>:$rt_dense_values
   );
 
   TF_DerivedResultTypeAttr Tsplits = TF_DerivedResultTypeAttr<0>;
@@ -9583,15 +10117,19 @@ See http://dl.acm.org/citation.cfm?id=358414
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$alpha,
+    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
+distribution described by the shape parameters given in alpha.}]>:$shape,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor in which each scalar is a "shape" parameter describing the
+associated gamma distribution.}]>:$alpha,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor with shape `shape + shape(alpha)`. Each slice
+`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
+`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
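The shape contract documented above (output shape is `shape + shape(alpha)`) matches the tf.random.gamma wrapper; a small sketch:

import tensorflow as tf

# Draw 10 independent samples from each of the two gamma distributions
# described by alpha; the result has shape [10, 2].
samples = tf.random.gamma(shape=[10], alpha=[0.5, 1.5], seed=1)
print(samples.shape)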
@@ -9653,15 +10191,19 @@ Programming, Volume 2. Addison Wesley
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$rate,
+    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
+distribution described by the shape parameters given in rate.}]>:$shape,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor in which each scalar is a "rate" parameter describing the
+associated poisson distribution.}]>:$rate,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor with shape `shape + shape(rate)`. Each slice
+`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
+`rate[i0, i1, ...iN]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr R = TF_DerivedOperandTypeAttr<1>;
@@ -9685,14 +10227,15 @@ The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
   }];
 
   let arguments = (ins
-    TF_Tensor:$value,
+    Arg<TF_Tensor, [{The tensor to be shuffled.}]>:$value,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A tensor of same shape and type as `value`, shuffled along its first
+dimension.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9706,14 +10249,14 @@ The generated values will have mean 0 and standard deviation 1.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random normal values.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9729,14 +10272,14 @@ lower bound 0 is included in the range, while the upper bound 1 is excluded.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{A tensor of the specified shape filled with uniform random values.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9761,16 +10304,16 @@ smaller than the range of the output (either `2^32` or `2^64`).
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$minval,
-    TF_I32OrI64Tensor:$maxval,
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{0-D.  Inclusive lower bound on the generated integers.}]>:$minval,
+    Arg<TF_I32OrI64Tensor, [{0-D.  Exclusive upper bound on the generated integers.}]>:$maxval,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$output
+    Res<TF_I32OrI64Tensor, [{A tensor of the specified shape filled with uniform random integers.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9795,13 +10338,13 @@ tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$start,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$limit,
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$delta
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). First entry in the sequence.}]>:$start,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). Upper limit of sequence, exclusive.}]>:$limit,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). Optional. Default is 1. Number that increments `start`.}]>:$delta
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{1-D.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<0>;
@@ -9817,9 +10360,9 @@ Creates a dataset with a range of values. Corresponds to python's xrange.
   }];
 
   let arguments = (ins
-    TF_Int64Tensor:$start,
-    TF_Int64Tensor:$stop,
-    TF_Int64Tensor:$step,
+    Arg<TF_Int64Tensor, [{Corresponds to start in python's xrange().}]>:$start,
+    Arg<TF_Int64Tensor, [{Corresponds to stop in python's xrange().}]>:$stop,
+    Arg<TF_Int64Tensor, [{Corresponds to step in python's xrange().}]>:$step,
 
     Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
     Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
@@ -9879,7 +10422,7 @@ operation.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$resource
+    Arg<TF_ResourceTensor, [{Handle to the resource in which to store the variable.}], [TF_VariableRead]>:$resource
   );
 
   let results = (outs
@@ -9970,7 +10513,7 @@ def TF_RecvOp : TF_Op<"Recv", []> {
   );
 
   let results = (outs
-    TF_Tensor:$tensor
+    Res<TF_Tensor, [{The tensor to receive.}]>:$tensor
   );
 
   TF_DerivedResultTypeAttr tensor_type = TF_DerivedResultTypeAttr<0>;
@@ -9993,7 +10536,8 @@ most one RecvTPUEmbeddingActivations op in the TPU graph.
   );
 
   let results = (outs
-    Variadic<TF_Float32Tensor>:$outputs
+    Res<Variadic<TF_Float32Tensor>, [{A TensorList of embedding activations containing one Tensor per
+embedding table in the model.}]>:$outputs
   );
 
   TF_DerivedResultSizeAttr num_outputs = TF_DerivedResultSizeAttr<0>;
@@ -10029,15 +10573,18 @@ tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
   }];
 
   let arguments = (ins
-    TF_StrTensor:$inputs,
-    TF_Int32Tensor:$reduction_indices,
+    Arg<TF_StrTensor, [{The input to be joined.  All reduced indices must have non-zero size.}]>:$inputs,
+    Arg<TF_Int32Tensor, [{The dimensions to reduce over.  Dimensions are reduced in the
+order specified.  Omitting `reduction_indices` is equivalent to passing
+`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims,
     StrAttr:$separator
   );
 
   let results = (outs
-    TF_StrTensor:$output
+    Res<TF_StrTensor, [{Has shape equal to that of the input with reduced dimensions removed or
+set to `1` depending on `keep_dims`.}]>:$output
   );
 }
 
@@ -10085,12 +10632,14 @@ def TF_Relu6GradOp : TF_Op<"Relu6Grad", [NoSideEffect, TF_SameOperandsAndResultT
   let summary = "Computes rectified linear 6 gradients for a Relu6 operation.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$gradients,
-    TF_IntOrFpTensor:$features
+    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu6 operation.}]>:$gradients,
+    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu6 operation, or
+its output; using either one produces the same result.}]>:$features
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$backprops
+    Res<TF_IntOrFpTensor, [{The gradients:
+`gradients * (features > 0) * (features < 6)`.}]>:$backprops
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -10100,12 +10649,13 @@ def TF_ReluGradOp : TF_Op<"ReluGrad", [NoSideEffect, TF_SameOperandsAndResultTyp
   let summary = "Computes rectified linear gradients for a Relu operation.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$gradients,
-    TF_IntOrFpTensor:$features
+    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu operation.}]>:$gradients,
+    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu operation, OR
+the outputs of that operation (both work equivalently).}]>:$features
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$backprops
+    Res<TF_IntOrFpTensor, [{`gradients * (features > 0)`.}]>:$backprops
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -10115,14 +10665,14 @@ def TF_RemoteCallOp : TF_Op<"RemoteCall", []> {
   let summary = "Runs function `f` on a remote device indicated by `target`.";
 
   let arguments = (ins
-    TF_StrTensor:$target,
-    Variadic<TF_Tensor>:$args,
+    Arg<TF_StrTensor, [{A fully specified device name where we want to run the function.}]>:$target,
+    Arg<Variadic<TF_Tensor>, [{A list of arguments for the function.}]>:$args,
 
     SymbolRefAttr:$f
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$output
+    Res<Variadic<TF_Tensor>, [{A list of return values.}]>:$output
   );
 
   TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<1>;
@@ -10196,7 +10746,7 @@ reshape(t, []) ==> 7
 
   let arguments = (ins
     TF_Tensor:$tensor,
-    TF_I32OrI64Tensor:$shape
+    Arg<TF_I32OrI64Tensor, [{Defines the shape of the output tensor.}]>:$shape
   );
 
   let results = (outs
@@ -10226,15 +10776,17 @@ Input images can be of different types but output images are always float.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$images,
-    TF_Int32Tensor:$size,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
+    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
+new size for the images.}]>:$size,
 
     DefaultValuedAttr<BoolAttr, "false">:$align_corners,
     DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
   );
 
   let results = (outs
-    TF_Float32Tensor:$resized_images
+    Res<TF_Float32Tensor, [{4-D with shape
+`[batch, new_height, new_width, channels]`.}]>:$resized_images
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -10244,15 +10796,18 @@ def TF_ResizeBilinearGradOp : TF_Op<"ResizeBilinearGrad", [NoSideEffect]> {
   let summary = "Computes the gradient of bilinear interpolation.";
 
   let arguments = (ins
-    TF_Float32Tensor:$grads,
-    TF_FloatTensor:$original_image,
+    Arg<TF_Float32Tensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
+    Arg<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`.
+The image tensor that was resized.}]>:$original_image,
 
     DefaultValuedAttr<BoolAttr, "false">:$align_corners,
     DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`.
+Gradients with respect to the input image. Input image must have been
+float or double.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -10264,15 +10819,17 @@ Resize `images` to `size` using nearest neighbor interpolation.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$images,
-    TF_Int32Tensor:$size,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
+    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
+new size for the images.}]>:$size,
 
     DefaultValuedAttr<BoolAttr, "false">:$align_corners,
     DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$resized_images
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape
+`[batch, new_height, new_width, channels]`.}]>:$resized_images
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -10282,15 +10839,17 @@ def TF_ResizeNearestNeighborGradOp : TF_Op<"ResizeNearestNeighborGrad", [NoSideE
   let summary = "Computes the gradient of nearest neighbor interpolation.";
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>:$grads,
-    TF_Int32Tensor:$size,
+    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
+    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
+original input size.}]>:$size,
 
     DefaultValuedAttr<BoolAttr, "false">:$align_corners,
     DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
+with respect to the input image.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -10306,15 +10865,15 @@ variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$m,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$v,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta1_power,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta1,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta2,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$epsilon,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10335,13 +10894,13 @@ var -= update;
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum_update,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$rho,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$epsilon,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum_update,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay factor. Must be a scalar.}]>:$rho,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10360,10 +10919,10 @@ var -= lr * grad * (1 / sqrt(accum))
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking,
     DefaultValuedAttr<BoolAttr, "true">:$update_slots
@@ -10378,14 +10937,14 @@ def TF_ResourceApplyAdagradDAOp : TF_Op<"ResourceApplyAdagradDA", []> {
   let summary = "Update '*var' according to the proximal adagrad scheme.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$gradient_accumulator,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$gradient_squared_accumulator,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l1,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l2,
-    TF_Int64Tensor:$global_step,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_accumulator,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_squared_accumulator,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
+    Arg<TF_Int64Tensor, [{Training step number. Must be a scalar.}]>:$global_step,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10404,11 +10963,11 @@ var -= lr * grad * (1 / (sqrt(accum) + epsilon))
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$epsilon,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking,
     DefaultValuedAttr<BoolAttr, "true">:$update_slots
@@ -10430,16 +10989,16 @@ $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilo
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$m,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$v,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta1_power,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta2_power,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta1,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta2,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$epsilon,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta2_power,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking,
     DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
@@ -10460,13 +11019,13 @@ variable <- variable - lr_t * update
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$m,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$alpha,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$sign_decay,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$alpha,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10501,15 +11060,15 @@ var <- var - mom
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$mg,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ms,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$mom,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$rho,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$momentum,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$epsilon,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mg,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum scale. Must be a scalar.}]>:$momentum,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10531,14 +11090,14 @@ accum = accum_new
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$linear,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l1,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l2,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr_power,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking,
     DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
@@ -10563,15 +11122,15 @@ accum = accum_new
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$linear,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l1,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l2,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 shrinkage regularization. Must be a scalar.}]>:$l2,
     TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l2_shrinkage,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr_power,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking,
     DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
@@ -10586,9 +11145,9 @@ def TF_ResourceApplyGradientDescentOp : TF_Op<"ResourceApplyGradientDescent", []
   let summary = "Update '*var' by subtracting 'alpha' * 'delta' from it.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$alpha,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$delta,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10609,11 +11168,11 @@ var += accum
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$momentum,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking,
     DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
@@ -10635,11 +11194,11 @@ var -= lr * accum
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$momentum,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking,
     DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
@@ -10660,13 +11219,13 @@ variable <- variable - lr_t * update
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$m,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$logbase,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$sign_decay,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$beta,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$logbase,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10688,12 +11247,12 @@ var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$accum,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l1,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l2,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10712,11 +11271,11 @@ var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$alpha,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l1,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l2,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$delta,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10743,14 +11302,14 @@ var <- var - mom
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$var,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ms,
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$mom,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$lr,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$rho,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
+    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
     TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$momentum,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$epsilon,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$grad,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
 
     DefaultValuedAttr<BoolAttr, "false">:$use_locking
   );
@@ -10823,9 +11382,9 @@ Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_I32OrI64Tensor:$indices,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$updates
+    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
   );
 
   let results = (outs);
@@ -10862,9 +11421,9 @@ Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_I32OrI64Tensor:$indices,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$updates
+    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
   );
 
   let results = (outs);
@@ -10901,9 +11460,9 @@ Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_I32OrI64Tensor:$indices,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$updates
+    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
   );
 
   let results = (outs);
@@ -10940,9 +11499,9 @@ Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_I32OrI64Tensor:$indices,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$updates
+    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
   );
 
   let results = (outs);
@@ -10979,9 +11538,9 @@ Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_I32OrI64Tensor:$indices,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$updates
+    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
   );
 
   let results = (outs);
@@ -11032,9 +11591,11 @@ slices.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ref,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates,
+    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
+    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
+A tensor of indices into ref.}]>:$indices,
+    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
+values to add to ref.}]>:$updates,
 
     DefaultValuedAttr<BoolAttr, "true">:$use_locking
   );
@@ -11087,9 +11648,11 @@ slices.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ref,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates,
+    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
+    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
+A tensor of indices into ref.}]>:$indices,
+    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
+values to add to ref.}]>:$updates,
 
     DefaultValuedAttr<BoolAttr, "true">:$use_locking
   );
@@ -11144,9 +11707,11 @@ slices.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ref,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates,
+    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
+    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
+A tensor of indices into ref.}]>:$indices,
+    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of updated
+values to add to ref.}]>:$updates,
 
     DefaultValuedAttr<BoolAttr, "true">:$use_locking
   );
@@ -11185,9 +11750,9 @@ Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_I32OrI64Tensor:$indices,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$updates
+    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
   );
 
   let results = (outs);
@@ -11215,9 +11780,9 @@ This operation computes
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$resource,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates
+    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
+    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
+    Arg<TF_Tensor, [{A tensor of updated values to add to `ref`.}]>:$updates
   );
 
   let results = (outs);
@@ -11278,13 +11843,15 @@ Callers must ensure all the named tensors are indeed stored in the checkpoint.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$prefix,
-    TF_StrTensor:$tensor_names,
-    TF_StrTensor:$shape_and_slices
+    Arg<TF_StrTensor, [{Must have a single element.  The prefix of a V2 checkpoint.}]>:$prefix,
+    Arg<TF_StrTensor, [{shape {N}.  The names of the tensors to be restored.}]>:$tensor_names,
+    Arg<TF_StrTensor, [{shape {N}.  The slice specs of the tensors to be restored.
+Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$tensors
+    Res<Variadic<TF_Tensor>, [{shape {N}.  The restored tensors, whose shapes are read from the
+checkpoint directly.}]>:$tensors
   );
 
   TF_DerivedResultTypeListAttr dtypes = TF_DerivedResultTypeListAttr<0>;
@@ -11309,9 +11876,9 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta,
-    TF_Float32Tensor:$velocities
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the ADAM optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter momenta updated by the ADAM optimization algorithm.}]>:$momenta,
+    Res<TF_Float32Tensor, [{Parameter velocities updated by the ADAM optimization algorithm.}]>:$velocities
   );
 }
 
@@ -11334,10 +11901,10 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta,
-    TF_Float32Tensor:$velocities,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the ADAM optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter momenta updated by the ADAM optimization algorithm.}]>:$momenta,
+    Res<TF_Float32Tensor, [{Parameter velocities updated by the ADAM optimization algorithm.}]>:$velocities,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the ADAM optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11360,9 +11927,9 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$updates
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adadelta optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adadelta optimization algorithm.}]>:$accumulators,
+    Res<TF_Float32Tensor, [{Parameter updates updated by the Adadelta optimization algorithm.}]>:$updates
   );
 }
 
@@ -11385,10 +11952,10 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$updates,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adadelta optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adadelta optimization algorithm.}]>:$accumulators,
+    Res<TF_Float32Tensor, [{Parameter updates updated by the Adadelta optimization algorithm.}]>:$updates,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Adadelta optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11411,8 +11978,8 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adagrad optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adagrad optimization algorithm.}]>:$accumulators
   );
 }
 
@@ -11435,9 +12002,9 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adagrad optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adagrad optimization algorithm.}]>:$accumulators,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Adagrad optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11460,10 +12027,10 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$ms,
-    TF_Float32Tensor:$mom,
-    TF_Float32Tensor:$mg
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the centered RMSProp optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter ms updated by the centered RMSProp optimization algorithm.}]>:$ms,
+    Res<TF_Float32Tensor, [{Parameter mom updated by the centered RMSProp optimization algorithm.}]>:$mom,
+    Res<TF_Float32Tensor, [{Parameter mg updated by the centered RMSProp optimization algorithm.}]>:$mg
   );
 }
 
@@ -11486,9 +12053,9 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$linears
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the FTRL optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the FTRL optimization algorithm.}]>:$accumulators,
+    Res<TF_Float32Tensor, [{Parameter linears updated by the FTRL optimization algorithm.}]>:$linears
   );
 }
 
@@ -11511,10 +12078,10 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$linears,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the FTRL optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the FTRL optimization algorithm.}]>:$accumulators,
+    Res<TF_Float32Tensor, [{Parameter linears updated by the FTRL optimization algorithm.}]>:$linears,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the FTRL optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11537,10 +12104,10 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$weights,
-    TF_Float32Tensor:$benefits
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the MDL Adagrad Light optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
+    Res<TF_Float32Tensor, [{Parameter weights updated by the MDL Adagrad Light optimization algorithm.}]>:$weights,
+    Res<TF_Float32Tensor, [{Parameter benefits updated by the MDL Adagrad Light optimization algorithm.}]>:$benefits
   );
 }
 
@@ -11563,8 +12130,8 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the Momentum optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter momenta updated by the Momentum optimization algorithm.}]>:$momenta
   );
 }
 
@@ -11587,9 +12154,9 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$momenta,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the Momentum optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter momenta updated by the Momentum optimization algorithm.}]>:$momenta,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Momentum optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11612,8 +12179,8 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the proximal Adagrad optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the proximal Adagrad optimization algorithm.}]>:$accumulators
   );
 }
 
@@ -11638,9 +12205,9 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$accumulators,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the proximal Adagrad optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter accumulators updated by the proximal Adagrad optimization algorithm.}]>:$accumulators,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11700,9 +12267,9 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$ms,
-    TF_Float32Tensor:$mom
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the RMSProp optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter ms updated by the RMSProp optimization algorithm.}]>:$ms,
+    Res<TF_Float32Tensor, [{Parameter mom updated by the RMSProp optimization algorithm.}]>:$mom
   );
 }
 
@@ -11725,10 +12292,10 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$ms,
-    TF_Float32Tensor:$mom,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the RMSProp optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter ms updated by the RMSProp optimization algorithm.}]>:$ms,
+    Res<TF_Float32Tensor, [{Parameter mom updated by the RMSProp optimization algorithm.}]>:$mom,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the RMSProp optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11751,7 +12318,7 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the stochastic gradient descent optimization algorithm.}]>:$parameters
   );
 }
 
@@ -11774,8 +12341,8 @@ used to retrieve updated parameters before saving a checkpoint.
   );
 
   let results = (outs
-    TF_Float32Tensor:$parameters,
-    TF_Float32Tensor:$gradient_accumulators
+    Res<TF_Float32Tensor, [{Parameter parameters updated by the stochastic gradient descent optimization algorithm.}]>:$parameters,
+    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the stochastic gradient descent optimization algorithm.}]>:$gradient_accumulators
   );
 }
 
@@ -11840,15 +12407,16 @@ output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$seq_lengths,
+    Arg<TF_Tensor, [{The input to reverse.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{1-D with length `input.dims(batch_dim)` and
+`max(seq_lengths) <= input.dims(seq_dim)`}]>:$seq_lengths,
 
     I64Attr:$seq_dim,
     DefaultValuedAttr<I64Attr, "0">:$batch_dim
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{The partially reversed input. It has the same shape as `input`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
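(Editorial note: for `ReverseSequence`, `seq_lengths[b]` gives how many leading elements of batch entry `b` are reversed along `seq_dim`. A small sketch via the public `tf.reverse_sequence` wrapper; shapes and values are illustrative only.)

```
import tensorflow as tf

x = tf.constant([[1, 2, 3, 0],
                 [4, 5, 0, 0]])
# Reverse the first 3 elements of row 0 and the first 2 elements of row 1.
tf.reverse_sequence(x, seq_lengths=[3, 2], seq_axis=1, batch_axis=0)
# -> [[3, 2, 1, 0],
#     [5, 4, 0, 0]]
```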
@@ -11908,12 +12476,13 @@ reverse(t, dims) ==> [[[[8, 9, 10, 11],
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>:$tensor,
-    TF_I32OrI64Tensor:$axis
+    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>, [{Up to 8-D.}]>:$tensor,
+    Arg<TF_I32OrI64Tensor, [{1-D. The indices of the dimensions to reverse. Must be in the range
+`[-rank(tensor), rank(tensor))`.}]>:$axis
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>, [{The same shape as `tensor`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
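(Editorial note: `ReverseV2` reverses whole dimensions named in `axis`; negative values count from the end, per the range constraint above. A short sketch with `tf.reverse`.)

```
import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
tf.reverse(t, axis=[1])   # -> [[3, 2, 1], [6, 5, 4]]
tf.reverse(t, axis=[-1])  # same result: -1 refers to the last dimension
```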
@@ -12067,12 +12636,20 @@ roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
 
   let arguments = (ins
     TF_Tensor:$input,
-    TF_I32OrI64Tensor:$shift,
-    TF_I32OrI64Tensor:$axis
+    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
+elements are shifted positively (towards larger indices) along the dimension
+specified by `axis[i]`. Negative shifts will roll the elements in the opposite
+direction.}]>:$shift,
+    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension along which the shift
+`shift[i]` occurs. If the same axis is referenced more than once, the
+total shift for that axis will be the sum of all the shifts that belong to that
+axis.}]>:$axis
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Has the same shape and size as the input. The elements are shifted
+positively (towards larger indices) by the offsets of `shift` along the
+dimensions of `axis`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tshift = TF_DerivedOperandTypeAttr<1>;
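(Editorial note: the `Roll` doc states that shifts referencing the same axis are summed. The example quoted in the hunk header above, reproduced as a runnable sketch with `tf.roll`.)

```
import tensorflow as tf

t = tf.constant([[0, 1, 2, 3, 4],
                 [5, 6, 7, 8, 9]])
# Shifts on the same axis accumulate: 2 + (-3) = -1.
tf.roll(t, shift=[2, -3], axis=[1, 1])
# -> [[1, 2, 3, 4, 0],
#     [6, 7, 8, 9, 5]]
```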
@@ -12149,10 +12726,12 @@ and correspondingly well-formed.
   }];
 
   let arguments = (ins
-    TF_StrTensor:$prefix,
-    TF_StrTensor:$tensor_names,
-    TF_StrTensor:$shape_and_slices,
-    Variadic<TF_Tensor>:$tensors
+    Arg<TF_StrTensor, [{Must have a single element. The prefix of the V2 checkpoint to which we
+write the tensors.}]>:$prefix,
+    Arg<TF_StrTensor, [{shape {N}. The names of the tensors to be saved.}]>:$tensor_names,
+    Arg<TF_StrTensor, [{shape {N}.  The slice specs of the tensors to be saved.
+Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices,
+    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$tensors
   );
 
   let results = (outs);
@@ -12247,13 +12826,14 @@ On GPU, if an out of bound index is found, the index is ignored.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates,
-    TF_I32OrI64Tensor:$shape
+    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
+    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates,
+    Arg<TF_I32OrI64Tensor, [{1-D. The shape of the resulting tensor.}]>:$shape
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
+to the indices.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
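(Editorial note: `ScatterNd` builds a zero-initialized tensor of the requested `shape` and writes `updates` at `indices`. A minimal sketch with `tf.scatter_nd`; the values are illustrative.)

```
import tensorflow as tf

indices = tf.constant([[0], [2]])   # positions along dimension 0
updates = tf.constant([5, 10])
tf.scatter_nd(indices, updates, shape=[4])
# -> [5, 0, 10, 0]
```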
@@ -12290,11 +12870,13 @@ tf.segment_max(c, tf.constant([0, 0, 1]))
 
   let arguments = (ins
     TF_IntOrFpTensor:$data,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
+first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -12332,11 +12914,13 @@ tf.segment_mean(c, tf.constant([0, 0, 1]))
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
+first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -12373,11 +12957,13 @@ tf.segment_min(c, tf.constant([0, 0, 1]))
 
   let arguments = (ins
     TF_IntOrFpTensor:$data,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
+first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -12414,11 +13000,13 @@ tf.segment_prod(c, tf.constant([0, 0, 1]))
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
+first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -12455,11 +13043,13 @@ tf.segment_sum(c, tf.constant([0, 0, 1]))
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
+first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
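(Editorial note: the segment ops above — max, mean, min, prod, sum — all reduce `data` along dimension 0 into `k` segments defined by the sorted `segment_ids`. A sketch of the sum variant, mirroring the `tf.segment_sum` example cited in the hunk headers, here via `tf.math.segment_sum`.)

```
import tensorflow as tf

c = tf.constant([[1, 2, 3, 4],
                 [4, 3, 2, 1],
                 [5, 6, 7, 8]])
# Rows 0 and 1 fall into segment 0, row 2 into segment 1.
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
# -> [[5, 5, 5, 5],
#     [5, 6, 7, 8]]
```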
@@ -12512,12 +13102,14 @@ select(condition, t, e) ==> [[1, 2],
 
   let arguments = (ins
     TF_BoolTensor:$condition,
-    TF_Tensor:$t,
-    TF_Tensor:$e
+    Arg<TF_Tensor, [{A `Tensor` which may have the same shape as `condition`.
+If `condition` is rank 1, `t` may have higher rank,
+but its first dimension must match the size of `condition`.}]>:$t,
+    Arg<TF_Tensor, [{A `Tensor` with the same type and shape as `t`.}]>:$e
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A `Tensor` with the same type and shape as `t` and `e`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
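(Editorial note: `Select` picks elements from `t` where `condition` is true and from `e` otherwise. An illustrative sketch using `tf.where`, which in TF 2.x lowers to the closely related `SelectV2`; its broadcasting rules differ slightly from the rank-1 rule described above.)

```
import tensorflow as tf

cond = tf.constant([True, False, True])
t = tf.constant([1, 2, 3])
e = tf.constant([10, 20, 30])
tf.where(cond, t, e)  # -> [1, 20, 3]
```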
@@ -12569,14 +13161,14 @@ e = self_adjoint_eig(a, compute_v=False)
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$input,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{`Tensor` input of shape `[N, N]`.}]>:$input,
 
     DefaultValuedAttr<BoolAttr, "true">:$compute_v
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$e,
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$v
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvalues. Shape is `[N]`.}]>:$e,
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvectors. Shape is `[N, N]`.}]>:$v
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -12614,12 +13206,13 @@ Computes gradients for the scaled exponential linear (Selu) operation.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$gradients,
-    TF_FloatTensor:$outputs
+    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Selu operation.}]>:$gradients,
+    Arg<TF_FloatTensor, [{The outputs of the corresponding Selu operation.}]>:$outputs
   );
 
   let results = (outs
-    TF_FloatTensor:$backprops
+    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + scale * alpha)`
+if outputs < 0, `scale * gradients` otherwise.}]>:$backprops
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -12629,7 +13222,7 @@ def TF_SendOp : TF_Op<"Send", []> {
   let summary = "Sends the named tensor from send_device to recv_device.";
 
   let arguments = (ins
-    TF_Tensor:$tensor,
+    Arg<TF_Tensor, [{The tensor to send.}]>:$tensor,
 
     StrAttr:$tensor_name,
     StrAttr:$send_device,
@@ -12649,13 +13242,14 @@ Converts the given `resource_handle` representing an iterator to a variant tenso
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead]>:$resource_handle,
+    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle,
 
     DefaultValuedAttr<I64Attr, "0">:$external_state_policy
   );
 
   let results = (outs
-    TF_VariantTensor:$serialized
+    Res<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
+resource.}]>:$serialized
   );
 }
 
@@ -12663,9 +13257,9 @@ def TF_SerializeSparseOp : TF_Op<"SerializeSparse", [NoSideEffect]> {
   let summary = "Serialize a `SparseTensor` into a `[3]` `Tensor` object.";
 
   let arguments = (ins
-    TF_Int64Tensor:$sparse_indices,
-    TF_Tensor:$sparse_values,
-    TF_Int64Tensor:$sparse_shape
+    Arg<TF_Int64Tensor, [{2-D.  The `indices` of the `SparseTensor`.}]>:$sparse_indices,
+    Arg<TF_Tensor, [{1-D.  The `values` of the `SparseTensor`.}]>:$sparse_values,
+    Arg<TF_Int64Tensor, [{1-D.  The `shape` of the `SparseTensor`.}]>:$sparse_shape
   );
 
   let results = (outs
@@ -12987,8 +13581,12 @@ whose values are extracted from 'input' starting at the offsets in
 
   let arguments = (ins
     TF_Tensor:$input,
-    TF_I32OrI64Tensor:$begin,
-    TF_I32OrI64Tensor:$size
+    Arg<TF_I32OrI64Tensor, [{begin[i] specifies the offset into the 'i'th dimension of
+'input' to slice from.}]>:$begin,
+    Arg<TF_I32OrI64Tensor, [{size[i] specifies the number of elements of the 'i'th dimension
+of 'input' to slice. If size[i] is -1, all remaining elements in dimension
+i are included in the slice (i.e. this is equivalent to setting
+size[i] = input.dim_size(i) - begin[i]).}]>:$size
   );
 
   let results = (outs
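(Editorial note: for `Slice`, `size[i] = -1` means take everything from `begin[i]` to the end of dimension `i`. A short sketch with `tf.slice`; the tensor values are illustrative.)

```
import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])
# Start at row 1, take 2 rows; -1 keeps all remaining columns.
tf.slice(t, begin=[1, 0], size=[2, -1])
# -> [[4, 5, 6],
#     [7, 8, 9]]
```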
@@ -13027,11 +13625,11 @@ For each batch `i` and class `j` we have
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$logits
+    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
   );
 
   let results = (outs
-    TF_FloatTensor:$softmax
+    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$softmax
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13051,13 +13649,15 @@ Inputs are the logits, not probabilities.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$features,
-    TF_FloatTensor:$labels
+    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
+    Arg<TF_FloatTensor, [{batch_size x num_classes matrix
+The caller must ensure that each batch of labels represents a valid
+probability distribution.}]>:$labels
   );
 
   let results = (outs
-    TF_FloatTensor:$loss,
-    TF_FloatTensor:$backprop
+    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
+    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13085,12 +13685,12 @@ def TF_SoftplusGradOp : TF_Op<"SoftplusGrad", [NoSideEffect, TF_SameOperandsAndR
   let summary = "Computes softplus gradients for a softplus operation.";
 
   let arguments = (ins
-    TF_FloatTensor:$gradients,
-    TF_FloatTensor:$features
+    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softplus operation.}]>:$gradients,
+    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softplus operation.}]>:$features
   );
 
   let results = (outs
-    TF_FloatTensor:$backprops
+    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + exp(-features))`.}]>:$backprops
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13114,12 +13714,12 @@ def TF_SoftsignGradOp : TF_Op<"SoftsignGrad", [NoSideEffect, TF_SameOperandsAndR
   let summary = "Computes softsign gradients for a softsign operation.";
 
   let arguments = (ins
-    TF_FloatTensor:$gradients,
-    TF_FloatTensor:$features
+    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softsign operation.}]>:$gradients,
+    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softsign operation.}]>:$features
   );
 
   let results = (outs
-    TF_FloatTensor:$backprops
+    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + abs(features)) ** 2`.}]>:$backprops
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13139,8 +13739,92 @@ block size.
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$paddings,
+    Arg<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
+  the padding of the input with zeros across the spatial dimensions as follows:
+
+      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
+
+  The effective spatial dimensions of the zero-padded input tensor will be:
+
+      height_pad = pad_top + height + pad_bottom
+      width_pad = pad_left + width + pad_right
+
+The attr `block_size` must be greater than one. It indicates the block size.
+
+  * Non-overlapping blocks of size `block_size x block_size` in the height and
+    width dimensions are rearranged into the batch dimension at each location.
+  * The batch of the output tensor is `batch * block_size * block_size`.
+  * Both height_pad and width_pad must be divisible by block_size.
+
+The shape of the output will be:
+
+    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
+     depth]
+
+Some examples:
+
+(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 1]` and value:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+      [[7, 8, 9], [10, 11, 12]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 3]` and value:
+
+```
+[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
+```
+
+(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]],
+      [[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```
+
+The output tensor has shape `[4, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+     [[[2], [4]], [[10], [12]]],
+     [[[5], [7]], [[13], [15]]],
+     [[[6], [8]], [[14], [16]]]]
+```
+
+(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]]],
+     [[[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```
+
+The output tensor has shape `[8, 1, 2, 1]` and value:
+
+```
+x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
+     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+```
+
+Among others, this operation is useful for reducing atrous convolution into
+regular convolution.}]>:$paddings,
 
     Confined<I64Attr, [IntMinValue<2>]>:$block_size
   );
@@ -13168,9 +13852,117 @@ precise description.
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$block_shape,
-    TF_I32OrI64Tensor:$paddings
+    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
+where spatial_shape has `M` dimensions.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
+    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
+  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
+  `i + 1`, which corresponds to spatial dimension `i`.  It is required that
+  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
+
+This operation is equivalent to the following steps:
+
+1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
+   input according to `paddings` to produce `padded` of shape `padded_shape`.
+
+2. Reshape `padded` to `reshaped_padded` of shape:
+
+     [batch] +
+     [padded_shape[1] / block_shape[0],
+       block_shape[0],
+      ...,
+      padded_shape[M] / block_shape[M-1],
+      block_shape[M-1]] +
+     remaining_shape
+
+3. Permute dimensions of `reshaped_padded` to produce
+   `permuted_reshaped_padded` of shape:
+
+     block_shape +
+     [batch] +
+     [padded_shape[1] / block_shape[0],
+      ...,
+      padded_shape[M] / block_shape[M-1]] +
+     remaining_shape
+
+4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
+   dimension, producing an output tensor of shape:
+
+     [batch * prod(block_shape)] +
+     [padded_shape[1] / block_shape[0],
+      ...,
+      padded_shape[M] / block_shape[M-1]] +
+     remaining_shape
+
+Some examples:
+
+(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
+    `paddings = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 1]` and value:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
+    `paddings = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+      [[7, 8, 9], [10, 11, 12]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 3]` and value:
+
+```
+[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
+```
+
+(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
+    `paddings = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]],
+      [[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```
+
+The output tensor has shape `[4, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+     [[[2], [4]], [[10], [12]]],
+     [[[5], [7]], [[13], [15]]],
+     [[[6], [8]], [[14], [16]]]]
+```
+
+(4) For the following input of shape `[2, 2, 4, 1]`, `block_shape = [2, 2]`, and
+    `paddings = [[0, 0], [2, 0]]`:
+
+```
+x = [[[[1],   [2],  [3],  [4]],
+      [[5],   [6],  [7],  [8]]],
+     [[[9],  [10], [11],  [12]],
+      [[13], [14], [15],  [16]]]]
+```
+
+The output tensor has shape `[8, 1, 3, 1]` and value:
+
+```
+x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
+     [[[0], [2], [4]]], [[[0], [10], [12]]],
+     [[[0], [5], [7]]], [[[0], [13], [15]]],
+     [[[0], [6], [8]]], [[[0], [14], [16]]]]
+```
+
+Among others, this operation is useful for reducing atrous convolution into
+regular convolution.}]>:$paddings
   );
 
   let results = (outs
@@ -13338,17 +14130,20 @@ backpropagation,
   }];
 
   let arguments = (ins
-    TF_Int64Tensor:$indices,
-    TF_Tensor:$values,
-    TF_Int64Tensor:$dense_shape,
-    TF_Tensor:$default_value
+    Arg<TF_Int64Tensor, [{2-D. the indices of the sparse tensor.}]>:$indices,
+    Arg<TF_Tensor, [{1-D. the values of the sparse tensor.}]>:$values,
+    Arg<TF_Int64Tensor, [{1-D. the shape of the sparse tensor.}]>:$dense_shape,
+    Arg<TF_Tensor, [{0-D. default value to insert into location `[row, 0, ..., 0]`
+  for rows missing from the input sparse tensor.
+output indices: 2-D. the indices of the filled sparse tensor.}]>:$default_value
   );
 
   let results = (outs
     TF_Int64Tensor:$output_indices,
-    TF_Tensor:$output_values,
-    TF_BoolTensor:$empty_row_indicator,
-    TF_Int64Tensor:$reverse_index_map
+    Res<TF_Tensor, [{1-D. the values of the filled sparse tensor.}]>:$output_values,
+    Res<TF_BoolTensor, [{1-D. whether the dense row was missing in the
+input sparse tensor.}]>:$empty_row_indicator,
+    Res<TF_Int64Tensor, [{1-D. a map from the input indices to the output indices.}]>:$reverse_index_map
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -13413,14 +14208,18 @@ has length `R_out`, then `input_indices` has shape `[N, R_in]`,
   }];
 
   let arguments = (ins
-    TF_Int64Tensor:$input_indices,
-    TF_Int64Tensor:$input_shape,
-    TF_Int64Tensor:$new_shape
+    Arg<TF_Int64Tensor, [{2-D.  `N x R_in` matrix with the indices of non-empty values in a
+SparseTensor.}]>:$input_indices,
+    Arg<TF_Int64Tensor, [{1-D.  `R_in` vector with the input SparseTensor's dense shape.}]>:$input_shape,
+    Arg<TF_Int64Tensor, [{1-D.  `R_out` vector with the requested new dense shape.}]>:$new_shape
   );
 
   let results = (outs
-    TF_Int64Tensor:$output_indices,
-    TF_Int64Tensor:$output_shape
+    Res<TF_Int64Tensor, [{2-D.  `N x R_out` matrix with the updated indices of non-empty
+values in the output SparseTensor.}]>:$output_indices,
+    Res<TF_Int64Tensor, [{1-D.  `R_out` vector with the full dense shape of the output
+SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
+filled in.}]>:$output_shape
   );
 }
 
@@ -13436,12 +14235,13 @@ dimension, selecting a subset of dimension 0, specified by `indices`.
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$data,
-    TF_I32OrI64Tensor:$indices,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13462,12 +14262,13 @@ See `tf.sparse.segment_sum` for usage examples.
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$data,
-    TF_I32OrI64Tensor:$indices,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13512,12 +14313,13 @@ tf.segment_sum(c, tf.constant([0, 0, 1]))
 
   let arguments = (ins
     TF_IntOrFpTensor:$data,
-    TF_I32OrI64Tensor:$indices,
-    TF_I32OrI64Tensor:$segment_ids
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
+    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13540,13 +14342,14 @@ Inputs are the logits, not probabilities.
   }];
 
   let arguments = (ins
-    TF_FloatTensor:$features,
-    TF_I32OrI64Tensor:$labels
+    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
+    Arg<TF_I32OrI64Tensor, [{batch_size vector with values in [0, num_classes).
+This is the label for the given minibatch entry.}]>:$labels
   );
 
   let results = (outs
-    TF_FloatTensor:$loss,
-    TF_FloatTensor:$backprop
+    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
+    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
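(Editorial note: here `labels` carries a single class index per minibatch entry, unlike the dense-probability `labels` of `SoftmaxCrossEntropyWithLogits` earlier. A sketch via the public wrapper; logits and labels are illustrative.)

```
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.1, 0.2, 3.0]])
labels = tf.constant([0, 2])  # one index in [0, num_classes) per example
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
# -> per-example loss, shape [batch_size]
```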
@@ -13581,16 +14384,19 @@ are checked during execution.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$sparse_indices,
-    TF_I32OrI64Tensor:$output_shape,
-    TF_Tensor:$sparse_values,
-    TF_Tensor:$default_value,
+    Arg<TF_I32OrI64Tensor, [{0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
+index where `sparse_values[i]` will be placed.}]>:$sparse_indices,
+    Arg<TF_I32OrI64Tensor, [{1-D.  Shape of the dense output tensor.}]>:$output_shape,
+    Arg<TF_Tensor, [{1-D.  Values corresponding to each row of `sparse_indices`,
+or a scalar value to be used for all sparse indices.}]>:$sparse_values,
+    Arg<TF_Tensor, [{Scalar value to set for indices not specified in
+`sparse_indices`.}]>:$default_value,
 
     DefaultValuedAttr<BoolAttr, "true">:$validate_indices
   );
 
   let results = (outs
-    TF_Tensor:$dense
+    Res<TF_Tensor, [{Dense output tensor of shape `output_shape`.}]>:$dense
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
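(Editorial note: `SparseToDense` scatters `sparse_values` into a dense tensor of shape `output_shape`, filling everything else with `default_value`. An equivalent sketch in terms of `tf.sparse.to_dense`; indices, values, and shape are illustrative.)

```
import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                            values=[1, 2],
                            dense_shape=[3, 4])
tf.sparse.to_dense(sp, default_value=0)
# -> [[1, 0, 0, 0],
#     [0, 0, 2, 0],
#     [0, 0, 0, 0]]
```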
@@ -13601,12 +14407,15 @@ def TF_SplitOp : TF_Op<"Split", [NoSideEffect]> {
   let summary = "Splits a tensor into `num_split` tensors along one dimension.";
 
   let arguments = (ins
-    TF_Int32Tensor:$split_dim,
-    TF_Tensor:$value
+    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
+`[-rank(value), rank(value))`.}]>:$split_dim,
+    Arg<TF_Tensor, [{The tensor to split.}]>:$value
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$output
+    Res<Variadic<TF_Tensor>, [{They are identically shaped tensors, whose shape matches that of `value`
+except along `split_dim`, where their sizes are
+`value.shape[split_dim] / num_split`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -13619,13 +14428,18 @@ def TF_SplitVOp : TF_Op<"SplitV", [NoSideEffect]> {
   let summary = "Splits a tensor into `num_split` tensors along one dimension.";
 
   let arguments = (ins
-    TF_Tensor:$value,
-    TF_I32OrI64Tensor:$size_splits,
-    TF_Int32Tensor:$split_dim
+    Arg<TF_Tensor, [{The tensor to split.}]>:$value,
+    Arg<TF_I32OrI64Tensor, [{list containing the sizes of each output tensor along the split
+dimension. Must sum to the dimension of value along split_dim.
+Can contain one -1 indicating that dimension is to be inferred.}]>:$size_splits,
+    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
+`[-rank(value), rank(value))`.}]>:$split_dim
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$output
+    Res<Variadic<TF_Tensor>, [{Tensors whose shape matches that of `value`
+except along `split_dim`, where their sizes are
+`size_splits[i]`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
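(Editorial note: `Split` divides `value` into equal pieces, while `SplitV` takes explicit `size_splits`, one entry of which may be -1 and is inferred. A sketch covering both behaviors through `tf.split`, which dispatches to these raw ops.)

```
import tensorflow as tf

x = tf.range(9)
a, b, c = tf.split(x, num_or_size_splits=3, axis=0)           # equal sizes: 3, 3, 3
d, e, f = tf.split(x, num_or_size_splits=[2, -1, 4], axis=0)  # sizes: 2, 3 (inferred), 4
```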
@@ -13739,13 +14553,14 @@ shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
+    Arg<TF_Tensor, [{The `input` to squeeze.}]>:$input,
 
     DefaultValuedAttr<I64ArrayAttr, "{}">:$squeeze_dims
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{Contains the same data as `input`, but has one or more dimensions of
+size 1 removed.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
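(Editorial note: `Squeeze` drops size-1 dimensions — all of them by default, or only those listed in `squeeze_dims`. A sketch matching the shapes quoted in the hunk header below.)

```
import tensorflow as tf

t = tf.zeros([1, 2, 1, 3, 1, 1])
tf.squeeze(t).shape               # -> TensorShape([2, 3])
tf.squeeze(t, axis=[2, 4]).shape  # -> TensorShape([1, 2, 3, 1])
```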
@@ -13755,7 +14570,7 @@ def TF_StackCloseV2Op : TF_Op<"StackCloseV2", []> {
   let summary = "Delete the stack from its resource container.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_StackFree]>:$handle
+    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackFree]>:$handle
   );
 
   let results = (outs);
@@ -13765,11 +14580,11 @@ def TF_StackPopV2Op : TF_Op<"StackPopV2", []> {
   let summary = "Pop the element at the top of the stack.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_StackRead, TF_StackWrite]>:$handle
+    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle
   );
 
   let results = (outs
-    TF_Tensor:$elem
+    Res<TF_Tensor, [{The tensor that is popped from the top of the stack.}]>:$elem
   );
 
   TF_DerivedResultTypeAttr elem_type = TF_DerivedResultTypeAttr<0>;
@@ -13779,14 +14594,14 @@ def TF_StackPushV2Op : TF_Op<"StackPushV2", []> {
   let summary = "Push an element onto the stack.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_StackRead, TF_StackWrite]>:$handle,
-    TF_Tensor:$elem,
+    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle,
+    Arg<TF_Tensor, [{The tensor to be pushed onto the stack.}]>:$elem,
 
     DefaultValuedAttr<BoolAttr, "false">:$swap_memory
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{The same tensor as the input 'elem'.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -13796,14 +14611,15 @@ def TF_StackV2Op : TF_Op<"StackV2", []> {
   let summary = "A stack that produces elements in first-in last-out order.";
 
   let arguments = (ins
-    TF_Int32Tensor:$max_size,
+    Arg<TF_Int32Tensor, [{The maximum size of the stack if non-negative. If negative, the stack
+size is unlimited.}]>:$max_size,
 
     TypeAttr:$elem_type,
     StrAttr:$stack_name
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_StackAlloc]>:$handle
+    Res<TF_ResourceTensor, [{The handle to the stack.}], [TF_StackAlloc]>:$handle
   );
 }
 
@@ -13811,13 +14627,15 @@ def TF_StatelessMultinomialOp : TF_Op<"StatelessMultinomial", [NoSideEffect, TF_
   let summary = "Draws samples from a multinomial distribution.";
 
   let arguments = (ins
-    TF_IntOrFpTensor:$logits,
-    TF_Int32Tensor:$num_samples,
-    TF_I32OrI64Tensor:$seed
+    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
+represents the unnormalized log probabilities for all classes.}]>:$logits,
+    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$output
+    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
+contains the drawn class labels with range `[0, num_classes)`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13829,16 +14647,18 @@ def TF_StatelessParameterizedTruncatedNormalOp : TF_Op<"StatelessParameterizedTr
   let summary = "";
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$means,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$stddevs,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$minvals,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$maxvals
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The mean parameter of each batch.}]>:$means,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stddevs,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The minimum cutoff. May be -infinity.}]>:$minvals,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The maximum cutoff. May be +infinity, and must be more than the minval
+for each batch.}]>:$maxvals
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The outputs are truncated normal samples and are a deterministic function of
+`shape`, `seed`, `minvals`, `maxvals`, `means` and `stddevs`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
@@ -13858,14 +14678,16 @@ The outputs are a deterministic function of `shape`, `seed`, `counts`, and `prob
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$counts,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$probs
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The counts of the binomial distribution. Must be broadcastable with `probs`,
+and broadcastable with the rightmost dimensions of `shape`.}]>:$counts,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The probability of success for the binomial distribution. Must be broadcastable
+with `counts` and broadcastable with the rightmost dimensions of `shape`.}]>:$probs
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
@@ -13886,13 +14708,14 @@ The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$alpha
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The concentration of the gamma distribution. Shape must match the rightmost
+dimensions of `shape`.}]>:$alpha
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13910,7 +14733,7 @@ This op picks the best counter-based RNG algorithm based on device.
   let arguments = (ins);
 
   let results = (outs
-    TF_Int32Tensor:$alg
+    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
   );
 }
 
@@ -13924,12 +14747,12 @@ This op scrambles a shape-[2] seed into a key and a counter, both needed by coun
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$seed
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
   );
 
   let results = (outs
-    TF_Uint64Tensor:$key,
-    TF_Uint64Tensor:$counter
+    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
+    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter
   );
 
   TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
@@ -13945,13 +14768,13 @@ This op picks the best counter-based RNG algorithm based on device, and scramble
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$seed
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
   );
 
   let results = (outs
-    TF_Uint64Tensor:$key,
-    TF_Uint64Tensor:$counter,
-    TF_Int32Tensor:$alg
+    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
+    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter,
+    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
   );
 
   TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
@@ -13969,12 +14792,12 @@ The outputs are a deterministic function of `shape` and `seed`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -13994,14 +14817,14 @@ The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_Uint64Tensor:$key,
-    TF_Uint64Tensor:$counter,
-    TF_Int32Tensor:$alg
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
+    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
+    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
@@ -14020,13 +14843,14 @@ The outputs are a deterministic function of `shape`, `seed`, and `lam`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed,
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$lam
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
+    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The rate of the Poisson distribution. Shape must match the rightmost dimensions
+of `shape`.}]>:$lam
   );
 
   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$output
+    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -14048,12 +14872,12 @@ The outputs are a deterministic function of `shape` and `seed`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -14073,12 +14897,12 @@ The outputs are a deterministic function of `shape` and `seed`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$seed
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{2 seeds (shape [2]).}]>:$seed
   );
 
   let results = (outs
-    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$output
+    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -14098,14 +14922,14 @@ The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxv
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed,
-    TF_I32OrI64Tensor:$minval,
-    TF_I32OrI64Tensor:$maxval
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
+    Arg<TF_I32OrI64Tensor, [{Minimum value (inclusive, scalar).}]>:$minval,
+    Arg<TF_I32OrI64Tensor, [{Maximum value (exclusive, scalar).}]>:$maxval
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$output
+    Res<TF_I32OrI64Tensor, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
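
A small sketch of the shape/seed/minval/maxval contract through the public `tf.random.stateless_uniform` wrapper (assumed here to exercise the stateless uniform-int path for integer dtypes): identical inputs always reproduce identical outputs.

```python
import tensorflow as tf

x = tf.random.stateless_uniform(
    shape=[4], seed=[7, 17], minval=0, maxval=10, dtype=tf.int32)
y = tf.random.stateless_uniform(
    shape=[4], seed=[7, 17], minval=0, maxval=10, dtype=tf.int32)

# The output is a deterministic function of (shape, seed, minval, maxval).
assert bool(tf.reduce_all(tf.equal(x, y)))
```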
@@ -14125,16 +14949,16 @@ The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `m
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_Uint64Tensor:$key,
-    TF_Uint64Tensor:$counter,
-    TF_Int32Tensor:$alg,
-    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$minval,
-    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$maxval
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
+    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
+    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg,
+    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Minimum value (inclusive, scalar).}]>:$minval,
+    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Maximum value (exclusive, scalar).}]>:$maxval
   );
 
   let results = (outs
-    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$output
+    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
@@ -14154,14 +14978,14 @@ The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_Uint64Tensor:$key,
-    TF_Uint64Tensor:$counter,
-    TF_Int32Tensor:$alg
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
+    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
+    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
@@ -14182,12 +15006,12 @@ The outputs are a deterministic function of `shape` and `seed`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_I32OrI64Tensor:$seed
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -14209,14 +15033,14 @@ The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
-    TF_Uint64Tensor:$key,
-    TF_Uint64Tensor:$counter,
-    TF_Int32Tensor:$alg
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
+    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
+    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
+    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
@@ -14394,9 +15218,18 @@ receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
 
   let arguments = (ins
     TF_Tensor:$input,
-    TF_I32OrI64Tensor:$begin,
-    TF_I32OrI64Tensor:$end,
-    TF_I32OrI64Tensor:$strides,
+    Arg<TF_I32OrI64Tensor, [{`begin[k]` specifies the offset into the `k`th range specification.
+The exact dimension this corresponds to will be determined by context.
+Out-of-bounds values will be silently clamped. If the `k`th bit of
+`begin_mask` is set then `begin[k]` is ignored and the full range of the
+appropriate dimension is used instead. Negative values cause indexing
+to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.}]>:$begin,
+    Arg<TF_I32OrI64Tensor, [{`end[i]` is like `begin` with the exception that `end_mask` is
+used to determine full ranges.}]>:$end,
+    Arg<TF_I32OrI64Tensor, [{`strides[i]` specifies the increment in the `i`th specification
+after extracting a given element. Negative indices will reverse
+the original order. Out-of-range values are
+clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`.}]>:$strides,
 
     DefaultValuedAttr<I64Attr, "0">:$begin_mask,
     DefaultValuedAttr<I64Attr, "0">:$end_mask,
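
To make the `begin`/`end`/`strides` wording above concrete, here is a short sketch through the public `tf.strided_slice` wrapper; the values in the comments are what standard Python slicing would produce.

```python
import tensorflow as tf

x = tf.constant([1, 2, 3, 4, 5])

# Equivalent to x[1:4:2]: start at 1, stop before 4, step 2.
print(tf.strided_slice(x, begin=[1], end=[4], strides=[2]).numpy())     # [2 4]

# Negative indices count from the end; a negative stride reverses order,
# like x[-1:-4:-1].
print(tf.strided_slice(x, begin=[-1], end=[-4], strides=[-1]).numpy())  # [5 4 3]
```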
@@ -14491,7 +15324,9 @@ Examples:
   }];
 
   let arguments = (ins
-    Variadic<TF_StrTensor>:$inputs,
+    Arg<Variadic<TF_StrTensor>, [{A list of string tensors.  The tensors must all have the same shape,
+or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
+of non-scalar inputs.}]>:$inputs,
 
     StrAttr:$separator
   );
@@ -14523,13 +15358,13 @@ array([0, 2, 2])
   }];
 
   let arguments = (ins
-    TF_StrTensor:$input,
+    Arg<TF_StrTensor, [{The strings to assign a hash bucket.}]>:$input,
 
     Confined<I64Attr, [IntMinValue<1>]>:$num_buckets
   );
 
   let results = (outs
-    TF_Int64Tensor:$output
+    Res<TF_Int64Tensor, [{A Tensor of the same shape as the input `string_tensor`.}]>:$output
   );
 }
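
A quick sketch of the hash-bucket contract via the public `tf.strings.to_hash_bucket_fast` wrapper (assuming that is the variant documented here), reproducing the `array([0, 2, 2])` example quoted in the hunk above.

```python
import tensorflow as tf

# Output is int64 and has the same shape as the input string tensor.
buckets = tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], num_buckets=3)
print(buckets.numpy())   # [0 2 2]
```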
 
@@ -14569,14 +15404,15 @@ retained with length 1.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
-    TF_I32OrI64Tensor:$reduction_indices,
+    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.}]>:$reduction_indices,
 
     DefaultValuedAttr<BoolAttr, "false">:$keep_dims
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -14610,16 +15446,21 @@ s, _, _ = svd(a, compute_uv=False)
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$input,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,
 
     DefaultValuedAttr<BoolAttr, "true">:$compute_uv,
     DefaultValuedAttr<BoolAttr, "false">:$full_matrices
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$s,
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$u,
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$v
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Singular values. Shape is `[..., P]`.}]>:$s,
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Left singular vectors. If `full_matrices` is `False` then shape is
+`[..., M, P]`; if `full_matrices` is `True` then shape is
+`[..., M, M]`. Undefined if `compute_uv` is `False`.}]>:$u,
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Right singular vectors. If `full_matrices` is `False` then shape is
+`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
+Undefined if `compute_uv` is `False`.}]>:$v
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
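
The result shapes spelled out above can be checked with the public `tf.linalg.svd` wrapper; this sketch assumes the defaults `compute_uv=True` and `full_matrices=False`, so `P = min(M, N)`.

```python
import tensorflow as tf

a = tf.random.normal([2, 5, 3])        # batch of 5x3 matrices, so P = 3
s, u, v = tf.linalg.svd(a)             # defaults: compute_uv=True, full_matrices=False

print(s.shape)                         # (2, 3)     -> [..., P]
print(u.shape)                         # (2, 5, 3)  -> [..., M, P]
print(v.shape)                         # (2, 3, 3)  -> [..., N, P]
```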
@@ -14631,13 +15472,13 @@ Computes the gradient function for function f via backpropagation.
   }];
 
   let arguments = (ins
-    Variadic<TF_Tensor>:$input,
+    Arg<Variadic<TF_Tensor>, [{A list of input tensors of size N + M.}]>:$input,
 
     SymbolRefAttr:$f
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$output
+    Res<Variadic<TF_Tensor>, [{A list of output tensors of size N.}]>:$output
   );
 
   TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
@@ -14709,8 +15550,8 @@ libraries.
   }];
 
   let arguments = (ins
-    TF_Float32Tensor:$embedding_variable,
-    TF_Float32Tensor:$sliced_activations,
+    Arg<TF_Float32Tensor, [{A trainable variable, enabling optimizers to find this op.}]>:$embedding_variable,
+    Arg<TF_Float32Tensor, [{The embedding activations Tensor to return.}]>:$sliced_activations,
 
     Confined<I64Attr, [IntMinValue<0>]>:$table_id,
     Confined<I64Attr, [IntMinValue<0>]>:$lookup_id
@@ -14807,7 +15648,7 @@ consumed by TPUPartitionedCall.
   let arguments = (ins);
 
   let results = (outs
-    TF_Int32Tensor:$device_ordinals
+    Res<TF_Int32Tensor, [{A vector of 1 or more TPU cores.}]>:$device_ordinals
   );
 }
 
@@ -14975,7 +15816,7 @@ of a step/run.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayFree]>:$handle
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayFree]>:$handle
   );
 
   let results = (outs);
@@ -14999,15 +15840,18 @@ All elements must have the same shape (excepting the first dimension).
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead]>:$handle,
-    TF_Float32Tensor:$flow_in,
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,
 
     DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape_except0
   );
 
   let results = (outs
-    TF_Tensor:$value,
-    TF_Int64Tensor:$lengths
+    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along the first
+axis.}]>:$value,
+    Res<TF_Int64Tensor, [{A vector of the row sizes of the original T elements in the
+value output.  In the example above, this would be the values:
+`(n1, n2, ..., n(T-1))`.}]>:$lengths
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -15023,15 +15867,16 @@ All elements selected by `indices` must have the same shape.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead]>:$handle,
-    TF_Int32Tensor:$indices,
-    TF_Float32Tensor:$flow_in,
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
+    Arg<TF_Int32Tensor, [{The locations in the TensorArray from which to read tensor elements.}]>:$indices,
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,
 
     DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape
   );
 
   let results = (outs
-    TF_Tensor:$value
+    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along a new
+axis (the new dimension 0).}]>:$value
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -15082,8 +15927,8 @@ calculation gets its own TensorArray accumulator.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
-    TF_Float32Tensor:$flow_in,
+    Arg<TF_ResourceTensor, [{The handle to the forward TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,
 
     StrAttr:$source
   );
@@ -15098,13 +15943,13 @@ def TF_TensorArrayReadV3Op : TF_Op<"TensorArrayReadV3", []> {
   let summary = "Read an element from the TensorArray into output `value`.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead]>:$handle,
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
     TF_Int32Tensor:$index,
-    TF_Float32Tensor:$flow_in
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
   );
 
   let results = (outs
-    TF_Tensor:$value
+    Res<TF_Tensor, [{The tensor that is read from the TensorArray.}]>:$value
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -15120,14 +15965,14 @@ Scatter the data from the input value into specific TensorArray elements.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
-    TF_Int32Tensor:$indices,
-    TF_Tensor:$value,
-    TF_Float32Tensor:$flow_in
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
+    Arg<TF_Int32Tensor, [{The locations at which to write the tensor elements.}]>:$indices,
+    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
   );
 
   let results = (outs
-    TF_Float32Tensor:$flow_out
+    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
@@ -15137,12 +15982,12 @@ def TF_TensorArraySizeV3Op : TF_Op<"TensorArraySizeV3", []> {
   let summary = "Get the current size of the TensorArray.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead]>:$handle,
-    TF_Float32Tensor:$flow_in
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayRead]>:$handle,
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
   );
 
   let results = (outs
-    TF_Int32Tensor:$size
+    Res<TF_Int32Tensor, [{The current size of the TensorArray.}]>:$size
   );
 }
 
@@ -15172,14 +16017,15 @@ and having size
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
-    TF_Tensor:$value,
-    TF_Int64Tensor:$lengths,
-    TF_Float32Tensor:$flow_in
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
+    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
+    Arg<TF_Int64Tensor, [{The vector of lengths specifying how to split the rows of value into the
+TensorArray.}]>:$lengths,
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
   );
 
   let results = (outs
-    TF_Float32Tensor:$flow_out
+    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
@@ -15193,7 +16039,7 @@ Write data via Write and read via Read or Pack.
   }];
 
   let arguments = (ins
-    TF_Int32Tensor:$size,
+    Arg<TF_Int32Tensor, [{The size of the array.}]>:$size,
 
     TypeAttr:$dtype,
     DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape,
@@ -15204,8 +16050,8 @@ Write data via Write and read via Read or Pack.
   );
 
   let results = (outs
-    Res<TF_ResourceTensor, "", [TF_TensorArrayAlloc]>:$handle,
-    TF_Float32Tensor:$flow
+    Res<TF_ResourceTensor, [{The handle to the TensorArray.}], [TF_TensorArrayAlloc]>:$handle,
+    Res<TF_Float32Tensor, [{A scalar used to control gradient flow.}]>:$flow
   );
 }
 
@@ -15213,14 +16059,14 @@ def TF_TensorArrayWriteV3Op : TF_Op<"TensorArrayWriteV3", []> {
   let summary = "Push an element onto the tensor_array.";
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
-    TF_Int32Tensor:$index,
-    TF_Tensor:$value,
-    TF_Float32Tensor:$flow_in
+    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
+    Arg<TF_Int32Tensor, [{The position to write to inside the TensorArray.}]>:$index,
+    Arg<TF_Tensor, [{The tensor to write to the TensorArray.}]>:$value,
+    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
   );
 
   let results = (outs
-    TF_Float32Tensor:$flow_out
+    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
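
The TensorArray read/write/size ops above are normally driven through the public `tf.TensorArray` wrapper, which threads the resource handle and the `flow` scalar automatically; a minimal sketch:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=3, clear_after_read=False)
ta = ta.write(0, 1.0)    # each write consumes/produces a flow value internally
ta = ta.write(1, 2.0)
ta = ta.write(2, 3.0)

print(ta.read(1).numpy())   # 2.0
print(ta.size().numpy())    # 3
print(ta.stack().numpy())   # [1. 2. 3.]
```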
@@ -15575,13 +16421,13 @@ On GPU, if an out of bound index is found, the index is ignored.
   }];
 
   let arguments = (ins
-    TF_Tensor:$tensor,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates
+    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
+    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
+    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A new tensor copied from tensor and updates added according to the indices.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -15592,13 +16438,13 @@ def TF_TensorScatterMaxOp : TF_Op<"TensorScatterMax", [NoSideEffect]> {
   let summary = "";
 
   let arguments = (ins
-    TF_Tensor:$tensor,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates
+    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
+    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
+    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -15609,13 +16455,13 @@ def TF_TensorScatterMinOp : TF_Op<"TensorScatterMin", [NoSideEffect]> {
   let summary = "";
 
   let arguments = (ins
-    TF_Tensor:$tensor,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates
+    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
+    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
+    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise minimum between tensor and updates according to the indices.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -15693,13 +16539,13 @@ On GPU, if an out of bound index is found, the index is ignored.
   }];
 
   let arguments = (ins
-    TF_Tensor:$tensor,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates
+    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
+    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
+    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A new tensor copied from tensor and updates subtracted according to the indices.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -15750,13 +16596,14 @@ https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function
   }];
 
   let arguments = (ins
-    TF_Tensor:$tensor,
-    TF_I32OrI64Tensor:$indices,
-    TF_Tensor:$updates
+    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
+    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
+    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
   );
 
   let results = (outs
-    TF_Tensor:$output
+    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
+to the indices.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
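
A short sketch of the scatter semantics described above, using the public `tf.tensor_scatter_nd_update` and `tf.tensor_scatter_nd_add` wrappers:

```python
import tensorflow as tf

tensor  = tf.zeros([8], dtype=tf.int32)        # tensor to copy/update
indices = tf.constant([[1], [3], [4]])         # index tensor
updates = tf.constant([9, 10, 11])             # updates to scatter into output

print(tf.tensor_scatter_nd_update(tensor, indices, updates).numpy())
# [ 0  9  0 10 11  0  0  0]
print(tf.tensor_scatter_nd_add(tensor, indices, updates).numpy())
# same result here, because the destination is all zeros
```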
@@ -15837,8 +16684,8 @@ array([[1, 2, 3, 1, 2, 3],
   }];
 
   let arguments = (ins
-    TF_Tensor:$input,
-    TF_I32OrI64Tensor:$multiples
+    Arg<TF_Tensor, [{1-D or higher.}]>:$input,
+    Arg<TF_I32OrI64Tensor, [{1-D. Length must be the same as the number of dimensions in `input`.}]>:$multiples
   );
 
   let results = (outs
@@ -15902,15 +16749,16 @@ If two elements are equal, the lower-index element appears first.
   }];
 
   let arguments = (ins
-    TF_IntOrFpTensor:$input,
-    TF_Int32Tensor:$k,
+    Arg<TF_IntOrFpTensor, [{1-D or higher with last dimension at least `k`.}]>:$input,
+    Arg<TF_Int32Tensor, [{0-D.  Number of top elements to look for along the last dimension (along each
+row for matrices).}]>:$k,
 
     DefaultValuedAttr<BoolAttr, "true">:$sorted
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$values,
-    TF_Int32Tensor:$indices
+    Res<TF_IntOrFpTensor, [{The `k` largest elements along each last dimensional slice.}]>:$values,
+    Res<TF_Int32Tensor, [{The indices of `values` within the last dimension of `input`.}]>:$indices
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
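
For illustration, the values/indices contract above through the public `tf.math.top_k` wrapper:

```python
import tensorflow as tf

values, indices = tf.math.top_k(tf.constant([1, 5, 3, 4]), k=2)
print(values.numpy())    # [5 4]  -- the k largest elements, sorted
print(indices.numpy())   # [1 3]  -- their positions in the last dimension
```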
@@ -15986,14 +16834,18 @@ Solves tridiagonal systems of equations.
   }];
 
   let arguments = (ins
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>:$diagonals,
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>:$rhs,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
+tridiagonal matrices with three rows being the superdiagonal, diagonals, and
+subdiagonals, in order. The last element of the superdiagonal and the first
+element of the subdiagonal are ignored.}]>:$diagonals,
+    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]`, representing K right-hand sides per each
+left-hand side.}]>:$rhs,
 
     DefaultValuedAttr<BoolAttr, "true">:$partial_pivoting
   );
 
   let results = (outs
-    TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>:$output
+    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]` containing the solutions.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
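
A sketch of the compact `[..., 3, M]` diagonals layout (superdiagonal, main diagonal, subdiagonal) via `tf.linalg.tridiagonal_solve`, which is assumed to lower to this op; the zero entries below are the ignored corner elements.

```python
import tensorflow as tf

diagonals = tf.constant([[1., 1., 0.],   # superdiagonal (last element ignored)
                         [2., 2., 2.],   # main diagonal
                         [0., 1., 1.]])  # subdiagonal (first element ignored)
rhs = tf.constant([[1.], [2.], [3.]])    # K = 1 right-hand side

x = tf.linalg.tridiagonal_solve(diagonals, rhs, diagonals_format='compact')
print(x.numpy().ravel())                 # [0.5 0.  1.5]
```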
@@ -16063,14 +16915,15 @@ deviations from the mean are dropped and re-picked.
   }];
 
   let arguments = (ins
-    TF_I32OrI64Tensor:$shape,
+    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
 
     DefaultValuedAttr<I64Attr, "0">:$seed,
     DefaultValuedAttr<I64Attr, "0">:$seed2
   );
 
   let results = (outs
-    TF_FloatTensor:$output
+    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random truncated normal
+values.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -16121,12 +16974,12 @@ idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
   }];
 
   let arguments = (ins
-    TF_Tensor:$x
+    Arg<TF_Tensor, [{1-D.}]>:$x
   );
 
   let results = (outs
-    TF_Tensor:$y,
-    TF_I32OrI64Tensor:$idx
+    Res<TF_Tensor, [{1-D.}]>:$y,
+    Res<TF_I32OrI64Tensor, [{1-D.}]>:$idx
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
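
The `y`/`idx` outputs described above, sketched with the public `tf.unique` wrapper on the same input quoted in the hunk:

```python
import tensorflow as tf

y, idx = tf.unique(tf.constant([4, 5, 1, 2, 3, 3, 4, 5]))
print(y.numpy())     # [4 5 1 2 3]        -- distinct values, in order of appearance
print(idx.numpy())   # [0 1 2 3 4 4 0 1]  -- maps each input element back into y
```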
@@ -16154,13 +17007,13 @@ This is the opposite of `pack`.
   }];
 
   let arguments = (ins
-    TF_Tensor:$value,
+    Arg<TF_Tensor, [{1-D or higher, with `axis` dimension size equal to `num`.}]>:$value,
 
     DefaultValuedAttr<I64Attr, "0">:$axis
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$output
+    Res<Variadic<TF_Tensor>, [{The list of tensors unpacked from `value`.}]>:$output
   );
 
   TF_DerivedResultSizeAttr num = TF_DerivedResultSizeAttr<0>;
@@ -16209,12 +17062,14 @@ tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
 
   let arguments = (ins
     TF_IntOrFpTensor:$data,
-    TF_I32OrI64Tensor:$segment_ids,
+    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
     TF_I32OrI64Tensor:$num_segments
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
+dimensions, which are replaced with a single dimension which has size
+`num_segments`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -16258,12 +17113,14 @@ dropped, and will not be included in the result.
 
   let arguments = (ins
     TF_IntOrFpTensor:$data,
-    TF_I32OrI64Tensor:$segment_ids,
+    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
     TF_I32OrI64Tensor:$num_segments
   );
 
   let results = (outs
-    TF_IntOrFpTensor:$output
+    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
+dimensions, which are replaced with a single dimension which has size
+`num_segments`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -16306,12 +17163,14 @@ dropped, and will not be included in the result.
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
-    TF_I32OrI64Tensor:$segment_ids,
+    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
     TF_I32OrI64Tensor:$num_segments
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
+dimensions, which are replaced with a single dimension which has size
+`num_segments`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
@@ -16355,12 +17214,14 @@ tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
 
   let arguments = (ins
     TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
-    TF_I32OrI64Tensor:$segment_ids,
+    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
     TF_I32OrI64Tensor:$num_segments
   );
 
   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
+dimensions, which are replaced with a single dimension which has size
+`num_segments`.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
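
A small sketch of the segment-reduction shape contract via `tf.math.unsorted_segment_sum`: `segment_ids` covers a prefix of `data.shape` and those leading dimensions collapse to `num_segments`.

```python
import tensorflow as tf

data        = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]])
segment_ids = tf.constant([0, 1, 0])    # shape [3], a prefix of data.shape

out = tf.math.unsorted_segment_sum(data, segment_ids, num_segments=2)
print(out.numpy())
# [[5 5 5 5]     <- rows 0 and 2 summed
#  [5 6 7 8]]    <- row 1 alone
```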
@@ -16396,12 +17257,15 @@ A 2-D example:
   }];
 
   let arguments = (ins
-    TF_Tensor:$sorted_inputs,
-    TF_Tensor:$values
+    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
+    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
+the values that will be searched for in `sorted_inputs`.}]>:$values
   );
 
   let results = (outs
-    TF_I32OrI64Tensor:$output
+    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the last scalar index
+into the last dimension where values can be inserted without changing the
+ordered property.}]>:$output
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
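
The per-row search above can be sketched with the public `tf.searchsorted` wrapper, which with `side='right'` is assumed to map onto this upper-bound op:

```python
import tensorflow as tf

sorted_inputs = tf.constant([[0, 3, 9, 9, 10],
                             [1, 2, 3, 4, 5]])
values        = tf.constant([[2, 4, 9],
                             [0, 2, 6]])

print(tf.searchsorted(sorted_inputs, values, side='right').numpy())
# [[1 2 4]
#  [0 2 5]]   -- last index at which each value could be inserted per row
```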
@@ -16414,11 +17278,12 @@ Checks whether a resource handle-based variable has been initialized.
   }];
 
   let arguments = (ins
-    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$resource
+    Arg<TF_ResourceTensor, [{The input resource handle.}], [TF_VariableRead]>:$resource
   );
 
   let results = (outs
-    TF_BoolTensor:$is_initialized
+    Res<TF_BoolTensor, [{A scalar boolean which is true if the variable has been
+initialized.}]>:$is_initialized
   );
 
   let hasCanonicalizer = 1;
@@ -16491,7 +17356,7 @@ about sharing states in tensorflow.
   );
 
   let results = (outs
-    TF_Tensor:$ref
+    Res<TF_Tensor, [{A reference to the variable tensor.}]>:$ref
   );
 
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
@@ -16805,7 +17670,7 @@ A pseudo-op to represent host-side computation in an XLA program.
   }];
 
   let arguments = (ins
-    Variadic<TF_Tensor>:$inputs,
+    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the host.}]>:$inputs,
 
     StrArrayAttr:$ancestors,
     TF_ShapeAttrArray:$shapes,
@@ -16816,7 +17681,7 @@ A pseudo-op to represent host-side computation in an XLA program.
   );
 
   let results = (outs
-    Variadic<TF_Tensor>:$outputs
+    Res<Variadic<TF_Tensor>, [{A list of tensors that will be returned to the device.}]>:$outputs
   );
 
   TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
@@ -17221,11 +18086,11 @@ def TF_ZerosLikeOp : TF_Op<"ZerosLike", [Idempotent, NoSideEffect, SameOperandsA
   let summary = "Returns a tensor of zeros with the same shape and type as x.";
 
   let arguments = (ins
-    TF_Tensor:$x
+    Arg<TF_Tensor, [{A tensor of type T.}]>:$x
   );
 
   let results = (outs
-    TF_Tensor:$y
+    Res<TF_Tensor, [{A tensor of the same shape and type as x but filled with zeros.}]>:$y
   );
 
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;