Updated auto-generated TF ops

- Updated the following generated TF ops from the TF registry (improved
  comments, new parameters, new traits, etc.):
  TF_DivOp, TF_FloorDivOp, TF_MulOp, TF_PowOp, TF_SubOp, TF_TruncateDivOp,
  TF_EnqueueTPUEmbeddingRaggedTensorBatchOp,
  TF_EnqueueTPUEmbeddingSparseTensorBatchOp, TF_InplaceAddOp, TF_StopGradientOp,
  TF__FusedBatchNormExOp, TF__FusedMatMulOp
- Adapted the call of `EnqueueTPUEmbeddingSparseTensorBatchOp`, which has a new
  default parameter that is not optional in the generated MLIR builder and thus
  has to be specified at call sites (see the sketch below)
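
A minimal sketch of what such a call-site adaptation might look like, assuming
the generated C++ builder takes operands and attributes in ODS order; the
helper name, variable names, and attribute values here are hypothetical
illustrations, not the actual TensorFlow call site:

```c++
#include "mlir/IR/Builders.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

// Hypothetical helper: emits an EnqueueTPUEmbeddingSparseTensorBatchOp,
// passing the new num_features attribute explicitly because the generated
// MLIR builder (unlike the TF op registry) provides no default for it.
void emitEnqueue(mlir::OpBuilder &builder, mlir::Location loc,
                 mlir::ValueRange sample_indices,
                 mlir::ValueRange embedding_indices,
                 mlir::ValueRange aggregation_weights,
                 mlir::Value mode_override,
                 llvm::ArrayRef<int64_t> table_ids) {
  builder.create<mlir::TF::EnqueueTPUEmbeddingSparseTensorBatchOp>(
      loc, sample_indices, embedding_indices, aggregation_weights,
      mode_override,
      /*device_ordinal=*/builder.getI64IntegerAttr(-1),
      /*combiners=*/builder.getStrArrayAttr({}),
      /*table_ids=*/builder.getI64ArrayAttr(table_ids),
      /*max_sequence_lengths=*/builder.getI64ArrayAttr({}),
      // New in this change: must now be spelled out at every call site.
      /*num_features=*/builder.getI64ArrayAttr({}));
}
```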

PiperOrigin-RevId: 355470188
Change-Id: I564f22ad8a8df6c58a01d7ee546ef5199f5f66e2
Author: Michael Gester, 2021-02-03 13:31:19 -08:00
Committed by: TensorFlower Gardener
Parent: 66dd98a106
Commit: a5e16cb4d1


@@ -3155,12 +3155,12 @@ def TF_DivOp : TF_Op<"Div", [NoSideEffect, ResultsBroadcastableShape, TF_SameOpe
   }];

   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$y
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
   );

   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$z
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
   );

   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -3478,7 +3478,8 @@ the corresponding feature.
     DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
     DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
     I64ArrayAttr:$table_ids,
-    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths
+    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
+    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
   );

   let results = (outs);
@@ -3549,7 +3550,8 @@ the corresponding feature.
     DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
     DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
     I64ArrayAttr:$table_ids,
-    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths
+    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
+    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
   );

   let results = (outs);
@@ -4148,12 +4150,12 @@ def TF_FloorDivOp : TF_Op<"FloorDiv", [NoSideEffect, ResultsBroadcastableShape]>
   }];

   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$y
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
   );

   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$z
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
   );

   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -5120,7 +5122,7 @@ Table initializer that takes two tensors for keys and values respectively.
   TF_DerivedOperandTypeAttr Tkey = TF_DerivedOperandTypeAttr<1>;
 }

-def TF_InplaceAddOp : TF_Op<"InplaceAdd", [AllTypesMatch<["x", "y"]>, NoSideEffect]> {
+def TF_InplaceAddOp : TF_Op<"InplaceAdd", [NoSideEffect, TF_AllTypesMatch<["x", "y"]>]> {
   let summary = "Adds v into specified rows of x.";

   let description = [{
@@ -8108,12 +8110,12 @@ def TF_MulOp : TF_Op<"Mul", [Commutative, NoSideEffect, ResultsBroadcastableShap
   }];

   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint64, TF_Uint32, TF_Uint16, TF_Uint8]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint64, TF_Uint32, TF_Uint16, TF_Uint8]>:$y
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
   );

   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint64, TF_Uint32, TF_Uint16, TF_Uint8]>:$z
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
   );

   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -9087,12 +9089,12 @@ tf.pow(x, y) ==> [[256, 65536], [9, 27]]
   }];

   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int8, TF_Int16, TF_Int32, TF_Int64]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int8, TF_Int16, TF_Int32, TF_Int64]>:$y
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x,
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
   );

   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int8, TF_Int16, TF_Int32, TF_Int64]>:$z
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$z
   );

   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -14235,7 +14237,45 @@ in the graph its inputs are masked from the gradient generator. They are not
 taken into account for computing gradients.

 This is useful any time you want to compute a value with TensorFlow but need
-to pretend that the value was a constant. Some examples include:
+to pretend that the value was a constant. For example, the softmax function
+for a vector x can be written as
+
+```python
+
+  def softmax(x):
+    numerator = tf.exp(x)
+    denominator = tf.reduce_sum(numerator)
+    return numerator / denominator
+```
+
+This however is susceptible to overflow if the values in x are large. An
+alternative, more stable way is to subtract the maximum of x from each of the
+values.
+
+```python
+
+  def stable_softmax(x):
+    z = x - tf.reduce_max(x)
+    numerator = tf.exp(z)
+    denominator = tf.reduce_sum(numerator)
+    return numerator / denominator
+```
+
+However, when we backprop through the softmax to x, we don't want to backprop
+through the `tf.reduce_max(x)` calculation (if the max values are not unique
+then the gradient could flow to the wrong input); we want to treat it as a
+constant. Therefore, we should write this out as
+
+```python
+
+  def stable_softmax(x):
+    z = x - tf.stop_gradient(tf.reduce_max(x))
+    numerator = tf.exp(z)
+    denominator = tf.reduce_sum(numerator)
+    return numerator / denominator
+```
+
+Some other examples include:

 *  The *EM* algorithm where the *M-step* should not involve backpropagation
    through the output of the *E-step*.
@@ -14503,12 +14543,12 @@ def TF_SubOp : TF_Op<"Sub", [NoSideEffect, ResultsBroadcastableShape, TF_CwiseBi
   }];

   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint64, TF_Uint32, TF_Uint8]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint64, TF_Uint32, TF_Uint8]>:$y
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
   );

   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint64, TF_Uint32, TF_Uint8]>:$z
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
   );

   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -15974,12 +16014,12 @@ Python Semantics.
   }];

   let arguments = (ins
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$x,
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$y
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
   );

   let results = (outs
-    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>:$z
+    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
   );

   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
@@ -16818,9 +16858,13 @@ https://www.tensorflow.org/performance/xla/operation_semantics#pad
   let arguments = (ins
     Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
     Arg<TF_Tensor, [{A scalar `Tensor` of type T.}]>:$padding_value,
-    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start of each input dimensions}]>:$padding_low,
-    Arg<TF_I32OrI64Tensor, [{the padding to apply at the end of each input dimension.}]>:$padding_high,
-    Arg<TF_I32OrI64Tensor, [{the padding to apply between each input element.}]>:$padding_interior
+    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start of each input dimension. Must
+be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_low,
+    Arg<TF_I32OrI64Tensor, [{the padding to apply at the end of each input dimension. Must
+be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_high,
+    Arg<TF_I32OrI64Tensor, [{the padding to apply between each input element. Must
+be a compile-time constant 1D tensor of length equal to rank of input,
+containing only non-negative values.}]>:$padding_interior
   );

   let results = (outs
@@ -17237,12 +17281,12 @@ expected to create these operators.
   }];

   let arguments = (ins
-    TensorOf<[TF_Float16, TF_Float32]>:$x,
+    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
     TF_Float32Tensor:$scale,
     TF_Float32Tensor:$offset,
     TF_Float32Tensor:$mean,
     TF_Float32Tensor:$variance,
-    Variadic<TensorOf<[TF_Float16, TF_Float32]>>:$side_input,
+    Variadic<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>>:$side_input,

     DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
     DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
@@ -17252,7 +17296,7 @@ expected to create these operators.
   );

   let results = (outs
-    TensorOf<[TF_Float16, TF_Float32]>:$y,
+    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y,
     TF_Float32Tensor:$batch_mean,
     TF_Float32Tensor:$batch_variance,
     TF_Float32Tensor:$reserve_space_1,
@@ -17347,7 +17391,8 @@ expected to create these operators.
     DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
     DefaultValuedAttr<BoolAttr, "false">:$transpose_b,
     DefaultValuedAttr<StrArrayAttr, "{}">:$fused_ops,
-    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon
+    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
+    DefaultValuedAttr<F32Attr, "0.2f">:$leakyrelu_alpha
   );

   let results = (outs