Add op sanity checks to the following TFLite ops:

QuantizeOp
RangeOp
RankOp
ReduceAnyOp
ReduceMaxOp
ReduceMinOp
ReduceProdOp
Relu6Op
ReshapeOp
ResizeBilinearOp
ResizeNearestNeighborOp
ReverseSequenceOp
ReverseV2Op
RoundOp
RsqrtOp
SVDFOp
SegmentSumOp
SelectOp
SelectV2Op
ShapeOp
SliceOp
SoftmaxOp
SpaceToBatchNdOp

PiperOrigin-RevId: 312599980
Change-Id: I93588c30156f8c94e589dbd6768911d9cbc9e60a
Author: Jaesung Chung, 2020-05-20 19:09:51 -07:00 (committed by TensorFlower Gardener)
Parent: 5af64a19a8
Commit: bdd61926a6
3 changed files with 174 additions and 99 deletions
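As a concrete illustration of what these sanity checks catch, consider a minimal MLIR sketch (hypothetical IR, not one of this commit's test cases): tfl.relu now carries an "x and y must have same element type" trait, so a float-to-integer relu fails verification instead of reaching the converter.

func @bad_relu(%arg0: tensor<4xf32>) -> tensor<4xi32> {
  // Fails the new same-element-type check: f32 input vs. i32 output.
  %0 = "tfl.relu"(%arg0) : (tensor<4xf32>) -> tensor<4xi32>
  return %0 : tensor<4xi32>
}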

File 1 of 3: TFLite MLIR op definitions (TableGen)

@@ -293,6 +293,10 @@ def TFL_BoolFalse: AttrConstraint<
CPred<"!$_self.cast<BoolAttr>().getValue()">,
"whose value is false">;
class TFL_StringEqualsTo<string value> : AttrConstraint<
CPred<"$_self.cast<StringAttr>().getValue() == \"" # value # "\"">,
"whose value equals to '" # value # "'">;
// This is a quantization-aware version of TCresVTEtIsSameAsOp
class TFL_TCresVTEtIsSameAsOp<int i, int j> : And<[
TCOpResIsShapedTypePred<i, j>,
@@ -1892,7 +1896,10 @@ def TFL_OneHotOp : TFL_Op<"one_hot", [NoSideEffect]> {
let hasOptions = 1;
}
def TFL_RoundOp: TFL_Op<"round", [NoSideEffect, SameOperandsAndResultType]> {
def TFL_RoundOp: TFL_Op<"round", [
NoSideEffect,
SameOperandsAndResultShape,
SameOperandsAndResultType]> {
let summary = "Round operator";
let description = [{
@@ -1909,7 +1916,14 @@ Rounds the values of a tensor to the nearest integer, element-wise.
}
def TFL_SliceOp : TFL_Op<"slice", [
NoSideEffect, SameOperandsAndResultsScale, TFL_GpuTargetOp]> {
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
SameOperandsAndResultsScale,
TFL_OperandHasRankAtMost<0, 4>,
TFL_OperandHasRankAtMost<1, 1>,
TFL_OperandHasRankAtMost<2, 1>,
TFL_GpuTargetOp]> {
let summary = "Return a slice from 'input'.";
let description = [{
@@ -1927,13 +1941,13 @@ equivalent to setting:
}];
let arguments = (ins
AnyTensor:$input,
TFL_TensorOf<[F32, I32, I64, I8, UI8, I1, TFL_Str, QI8, QUI8, TFL_Quint8]>:$input,
TFL_I32OrI64Tensor:$begin,
TFL_I32OrI64Tensor:$size
);
let results = (outs
AnyTensor:$output
TFL_TensorOf<[F32, I32, I64, I8, UI8, I1, TFL_Str, QI8, QUI8, TFL_Quint8]>:$output
);
let verifier = [{ return Verify(*this); }];
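A hedged sketch of what the new rank bounds buy (hypothetical IR, not from the test file): with TFL_OperandHasRankAtMost<0, 4>, a 5-D input to tfl.slice is now rejected at verification time, and begin/size must be at most 1-D.

func @bad_slice(%arg0: tensor<1x2x3x4x5xf32>, %begin: tensor<5xi32>, %size: tensor<5xi32>) -> tensor<1x2x3x4x5xf32> {
  // The rank-5 input violates TFL_OperandHasRankAtMost<0, 4>.
  %0 = "tfl.slice"(%arg0, %begin, %size) : (tensor<1x2x3x4x5xf32>, tensor<5xi32>, tensor<5xi32>) -> tensor<1x2x3x4x5xf32>
  return %0 : tensor<1x2x3x4x5xf32>
}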
@@ -1961,7 +1975,10 @@ def TFL_SumOp: TFL_Op<"sum", [NoSideEffect]> {
}
def TFL_ReduceMinOp: TFL_Op<"reduce_min", [
NoSideEffect, SameOperandsAndResultsScale]> {
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
SameOperandsAndResultsScale]> {
let summary = "Min-reduction operator";
let description = [{
@@ -1969,19 +1986,23 @@ def TFL_ReduceMinOp: TFL_Op<"reduce_min", [
}];
let arguments = (ins
AnyTensor:$input,
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
TFL_I32Tensor:$axes,
BoolAttr:$keep_dims
);
let results = (outs AnyTensor);
let results = (outs
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output);
let hasOptions = 1;
let customOption = "ReducerOptions";
}
def TFL_ReduceMaxOp: TFL_Op<"reduce_max", [
NoSideEffect, SameOperandsAndResultsScale]> {
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
SameOperandsAndResultsScale]> {
let summary = "Max-reduction operator";
let description = [{
@@ -1989,18 +2010,22 @@ def TFL_ReduceMaxOp: TFL_Op<"reduce_max", [
}];
let arguments = (ins
AnyTensor:$input,
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
TFL_I32Tensor:$axes,
BoolAttr:$keep_dims
);
let results = (outs AnyTensor);
let results = (outs
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output);
let hasOptions = 1;
let customOption = "ReducerOptions";
}
def TFL_ReduceProdOp: TFL_Op<"reduce_prod", [NoSideEffect]> {
def TFL_ReduceProdOp: TFL_Op<"reduce_prod", [
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect]> {
let summary = "Prod-reduction operator";
let description = [{
@@ -2008,12 +2033,13 @@ def TFL_ReduceProdOp: TFL_Op<"reduce_prod", [NoSideEffect]> {
}];
let arguments = (ins
TFL_TensorOf<[F32, I8, I32, I64]>:$input,
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
TFL_I32Tensor:$axes,
BoolAttr:$keep_dims
);
let results = (outs AnyTensor);
let results = (outs
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output);
let hasOptions = 1;
let customOption = "ReducerOptions";
@@ -2308,7 +2334,10 @@ def TFL_RankOp: TFL_Op<"rank", [NoSideEffect]> {
let hasFolder = 1;
}
def TFL_ReluOp: TFL_Op<"relu", [NoSideEffect,
def TFL_ReluOp: TFL_Op<"relu", [
PredOpTrait<"x and y must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
SameOperandsAndResultShape,
SameOperandsAndResultsScale,
TFL_GpuTargetOp]> {
@@ -2319,9 +2348,9 @@ def TFL_ReluOp: TFL_Op<"relu", [NoSideEffect,
x -> max(0, x)
}];
let arguments = (ins TFL_TensorOf<[F32, QUI8, I8]>:$x);
let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$x);
let results = (outs TFL_TensorOf<[F32, QUI8, I8]>:$y);
let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$y);
// This builder doesn't work with quantized type, so it can only be used by
// non-quantization tablegen patterns. Currently, it is used by the
@@ -2335,7 +2364,10 @@ def TFL_ReluOp: TFL_Op<"relu", [NoSideEffect,
];
}
def TFL_Relu6Op: TFL_Op<"relu6", [NoSideEffect,
def TFL_Relu6Op: TFL_Op<"relu6", [
PredOpTrait<"x and y must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
SameOperandsAndResultShape,
SameOperandsAndResultsScale,
TFL_GpuTargetOp]> {
@@ -2346,9 +2378,9 @@ def TFL_Relu6Op: TFL_Op<"relu6", [NoSideEffect,
x -> max(0, min(6, x))
}];
let arguments = (ins TFL_TensorOf<[F32, QUI8, I8]>:$x);
let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$x);
let results = (outs TFL_TensorOf<[F32, QUI8, I8]>:$y);
let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$y);
// This builder doesn't work with quantized type, so it can only be used by
// non-quantization tablegen patterns. Currently, it is used by the
@@ -2362,7 +2394,10 @@ def TFL_Relu6Op: TFL_Op<"relu6", [NoSideEffect,
];
}
def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [NoSideEffect,
def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [
PredOpTrait<"x and y must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
SameOperandsAndResultShape,
SameOperandsAndResultsScale]> {
let summary = "Relu1 operator";
@@ -2372,9 +2407,9 @@ def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [NoSideEffect,
x -> max(-1, min(1, x))
}];
let arguments = (ins TFL_TensorOf<[F32, QUI8, I8]>:$x);
let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$x);
let results = (outs TFL_TensorOf<[F32, QUI8, I8]>:$y);
let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$y);
// This builder doesn't work with quantized type, so it can only be used by
// non-quantization tablegen patterns. Currently, it is used by the
@@ -2406,7 +2441,11 @@ def TFL_ReshapeOp: TFL_Op<"reshape", [
let hasFolder = 1;
}
def TFL_ReverseSequenceOp : TFL_Op<"reverse_sequence", [NoSideEffect]> {
def TFL_ReverseSequenceOp : TFL_Op<"reverse_sequence", [
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
TFL_OperandHasRank<1, 1>]> {
let summary = "Reverses variable length slices.";
let description = [{
@@ -2423,15 +2462,15 @@ slice `i`, with the first `seq_lengths[i]` slices along dimension
}];
let arguments = (ins
TFL_TensorOf<[F32, I16, I32, I64, TFL_Uint8]>:$input,
TFL_TensorOf<[F32, I32, I64, QI16, QUI8, TFL_Quint8]>:$input,
TFL_I32OrI64Tensor:$seq_lengths,
I32Attr:$seq_dim,
I32Attr:$batch_dim
Confined<I32Attr, [IntNonNegative]>:$seq_dim,
Confined<I32Attr, [IntNonNegative]>:$batch_dim
);
let results = (outs
TFL_TensorOf<[F32, I16, I32, I64, TFL_Uint8]>:$output
TFL_TensorOf<[F32, I32, I64, QI16, QUI8, TFL_Quint8]>:$output
);
let hasOptions = 1;
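The Confined<I32Attr, [IntNonNegative]> wrappers mean negative axis attributes are now caught by the verifier. An illustrative (hypothetical) failing case:

func @bad_reverse_sequence(%arg0: tensor<4x8xf32>, %lens: tensor<4xi32>) -> tensor<4x8xf32> {
  // seq_dim = -1 violates the new IntNonNegative constraint on $seq_dim.
  %0 = "tfl.reverse_sequence"(%arg0, %lens) {seq_dim = -1 : i32, batch_dim = 0 : i32} : (tensor<4x8xf32>, tensor<4xi32>) -> tensor<4x8xf32>
  return %0 : tensor<4x8xf32>
}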
@@ -2439,6 +2478,7 @@ slice `i`, with the first `seq_lengths[i]` slices along dimension
def TFL_RsqrtOp: TFL_Op<"rsqrt", [NoSideEffect,
SameOperandsAndResultType,
SameOperandsAndResultShape,
NoQuantizableResult,
TFL_GpuTargetOp]> {
let summary = "Reciprocal of square root operator";
@@ -2463,7 +2503,7 @@ def TFL_ShapeOp: TFL_Op<"shape", [NoSideEffect]> {
let arguments = (ins AnyTensor:$input);
let results = (outs AnyTensor:$output);
let results = (outs TFL_TensorOf<[I32, I64]>:$output);
DerivedTypeAttr out_type = DerivedTypeAttr<[{
return getResult().getType().cast<TensorType>().getElementType();
@@ -2472,9 +2512,11 @@ def TFL_ShapeOp: TFL_Op<"shape", [NoSideEffect]> {
let hasOptions = 1;
}
// TODO(jpienaar): Flesh this out.
def TFL_RangeOp: TFL_Op<"range", [NoSideEffect, TFL_OperandHasRank<0, 0>,
TFL_OperandHasRank<1, 0>, TFL_OperandHasRank<2, 0>,
def TFL_RangeOp: TFL_Op<"range", [
NoSideEffect,
TFL_OperandHasRank<0, 0>,
TFL_OperandHasRank<1, 0>,
TFL_OperandHasRank<2, 0>,
PredOpTrait<"operands and output must have same element type",
And<[TCresVTEtIsSameAsOp<0, 0>, TCresVTEtIsSameAsOp<0, 1>,
TCresVTEtIsSameAsOp<0, 2>]>>]> {
@@ -2486,17 +2528,20 @@ def TFL_RangeOp: TFL_Op<"range", [NoSideEffect, TFL_OperandHasRank<0, 0>,
}];
let arguments = (ins
AnyTensor:$start,
AnyTensor:$limit,
AnyTensor:$delta);
TFL_TensorOf<[I32, F32]>:$start,
TFL_TensorOf<[I32, F32]>:$limit,
TFL_TensorOf<[I32, F32]>:$delta);
let results = (outs AnyTensor:$result);
let results = (outs TFL_TensorOf<[I32, F32]>:$result);
let hasFolder = 1;
}
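For reference, a well-formed tfl.range under the tightened signature (a sketch; operand names are illustrative): all three operands must be rank-0 I32/F32 tensors sharing the result's element type.

func @range_ok(%start: tensor<i32>, %limit: tensor<i32>, %delta: tensor<i32>) -> tensor<?xi32> {
  // Scalar (rank-0) operands with matching i32 element types throughout.
  %0 = "tfl.range"(%start, %limit, %delta) : (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<?xi32>
  return %0 : tensor<?xi32>
}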
def TFL_ReverseV2Op: TFL_Op<"reverse_v2",
[NoSideEffect, TFL_OperandHasRank<1,1>]> {
def TFL_ReverseV2Op: TFL_Op<"reverse_v2", [
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
NoSideEffect,
TFL_OperandHasRank<1, 1>]> {
let summary = "ReverseV2 Operator";
let description = [{
@@ -2518,18 +2563,18 @@ def TFL_ReverseV2Op: TFL_Op<"reverse_v2",
let arguments = (
ins
TFL_TensorOf<[F32, I16, I32, I64, TFL_Uint8, I1]>:$input,
TFL_TensorOf<[I32, I64]>:$axis
TFL_TensorOf<[F32, UI8, I16, I32, I64, QI16, QUI8, TFL_Quint8, I1]>:$input,
TFL_I32Tensor:$axis
);
let results = (outs
TFL_TensorOf<[F32, I16, I32, I64, TFL_Uint8, I1]>:$output
);
TFL_TensorOf<[F32, UI8, I16, I32, I64, QI16, QUI8, TFL_Quint8, I1]>:$output);
}
// Select has many instances in TF models where one or more of its operands
// are unranked. Therefore, we skip adding shape constraints here.
def TFL_SelectOp : TFL_Op<"select", [NoSideEffect,
def TFL_SelectOp : TFL_Op<"select", [
NoSideEffect,
PredOpTrait<"operands have same element type", TCopVTEtIsSameAs<1, 2>>,
PredOpTrait<"operands and result have same element type",
TCresVTEtIsSameAsOp<0, 1>>]> {
@@ -2545,9 +2590,11 @@ def TFL_SelectOp : TFL_Op<"select", [NoSideEffect,
let arguments = (ins
TFL_BoolTensor:$condition,
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, TFL_Uint8]>:$x,
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, TFL_Uint8]>:$y);
let results = (outs AnyTensor:$output);
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$x,
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$y);
let results = (outs
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);
// TODO(jpienaar): autogenerate this.
let builders = [OpBuilder<"OpBuilder &builder, OperationState &result, "
@@ -2561,7 +2608,12 @@ def TFL_SelectOp : TFL_Op<"select", [NoSideEffect,
let hasOptions = 1;
}
def TFL_SelectV2Op : TFL_Op<"select_v2", [NoSideEffect]> {
def TFL_SelectV2Op : TFL_Op<"select_v2", [
NoSideEffect,
TFL_BinaryOperandsHaveSameShapesOrBroadcastableShape<1, 2, 4>,
PredOpTrait<"operands have same element type", TCopVTEtIsSameAs<1, 2>>,
PredOpTrait<"operands and result have same element type",
TCresVTEtIsSameAsOp<0, 1>>]> {
let summary = "SelectV2 operator";
let description = [{
@@ -2574,9 +2626,11 @@ def TFL_SelectV2Op : TFL_Op<"select_v2", [NoSideEffect]> {
let arguments = (ins
TFL_BoolTensor:$condition,
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, TFL_Uint8]>:$x,
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, TFL_Uint8]>:$y);
let results = (outs AnyTensor:$output);
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$x,
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$y);
let results = (outs
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);
let builders = [OpBuilder<"OpBuilder &builder, OperationState &result, "
"Value cond, Value x, Value y",
@@ -2605,9 +2659,11 @@ def TFL_SinOp: TFL_Op<"sin", [
let hasFolder = 1;
}
// TODO(b/130643170): Adds some constraint for the input/output element types.
def TFL_SoftmaxOp : TFL_Op<"softmax", [
NoSideEffect,
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
TFL_OperandHasRankRange<0, 1, 4>,
SameOperandsAndResultShape,
// zero_point = 0
// scale = 1. / (max_value + 1)
@@ -2623,11 +2679,11 @@ def TFL_SoftmaxOp : TFL_Op<"softmax", [
}];
let arguments = (
ins AnyTensor:$input,
ins TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$input,
F32Attr:$beta
);
let results = (outs AnyTensor:$output);
let results = (outs TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$output);
let hasOptions = 1;
}
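TFL_OperandHasRankRange<0, 1, 4> bounds tfl.softmax inputs to 1-D through 4-D; a sketch (hypothetical IR, not from the test file) of an input the verifier now rejects:

func @bad_softmax(%arg0: tensor<1x1x1x1x8xf32>) -> tensor<1x1x1x1x8xf32> {
  // A 5-D input falls outside the new 1..4 rank range.
  %0 = "tfl.softmax"(%arg0) {beta = 1.0 : f32} : (tensor<1x1x1x1x8xf32>) -> tensor<1x1x1x1x8xf32>
  return %0 : tensor<1x1x1x1x8xf32>
}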
@@ -2914,6 +2970,7 @@ def TFL_BatchToSpaceNdOp: TFL_Op<"batch_to_space_nd", [
def TFL_SpaceToBatchNdOp: TFL_Op<"space_to_batch_nd", [
NoSideEffect,
SameOperandsAndResultsScale,
TFL_OperandHasRankRange<0, 3, 4>,
PredOpTrait<"input and output must have same element type",
TCresVTEtIsSameAsOp<0, 0>>
]> {
@@ -2924,13 +2981,13 @@ def TFL_SpaceToBatchNdOp: TFL_Op<"space_to_batch_nd", [
}];
let arguments = (ins
TFL_TensorOf<[F32, I8, I32, I64, QI8, QUI8]>:$input,
TFL_TensorOf<[I32]>:$block_shape,
TFL_TensorOf<[I32]>:$paddings
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
TFL_I32Tensor:$block_shape,
TFL_I32Tensor:$paddings
);
let results = (outs
TFL_TensorOf<[F32, I16, I32, I64, QI8, QUI8]>:$output
TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output
);
}
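TFL_OperandHasRankRange<0, 3, 4> restricts tfl.space_to_batch_nd inputs to 3-D or 4-D, matching the kernel (see the Python test change below); an illustrative well-formed case (hypothetical, with zero paddings assumed):

func @space_to_batch_nd(%arg0: tensor<1x4x4x3xf32>, %block: tensor<2xi32>, %pad: tensor<2x2xi32>) -> tensor<4x2x2x3xf32> {
  // A 4-D input satisfies the new 3..4 rank range; block_shape and paddings are i32.
  %0 = "tfl.space_to_batch_nd"(%arg0, %block, %pad) : (tensor<1x4x4x3xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<4x2x2x3xf32>
  return %0 : tensor<4x2x2x3xf32>
}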
@@ -3045,7 +3102,12 @@ def TFL_SplitVOp : TFL_Op<"split_v", [NoSideEffect, SameOperandsAndResultsScale]
}
def TFL_ResizeBilinearOp: TFL_Op<"resize_bilinear", [
NoSideEffect, SameOperandsAndResultsScale]> {
NoSideEffect,
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
TFL_OperandHasRank<0, 4>,
TFL_OperandHasRank<1, 1>,
SameOperandsAndResultsScale]> {
let summary = "ResizeBilinear Op";
let description = [{
@@ -3053,22 +3115,25 @@ def TFL_ResizeBilinearOp: TFL_Op<"resize_bilinear", [
}];
let arguments = (ins
// TODO(ycling): Support quantized types.
TFL_TensorOf<[F32, I32, QI8, QUI8]>:$input,
TFL_TensorOf<[I32]>:$size,
TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$input,
TFL_I32Tensor:$size,
BoolAttr:$align_corners,
DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
);
let results = (outs
TFL_TensorOf<[F32, QI8, QUI8]>:$output
TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$output
);
let hasOptions = 1;
}
def TFL_ResizeNearestNeighborOp : TFL_Op<"resize_nearest_neighbor",
[NoSideEffect,
def TFL_ResizeNearestNeighborOp : TFL_Op<"resize_nearest_neighbor", [
NoSideEffect,
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>,
TFL_OperandHasRank<0, 4>,
TFL_OperandHasRank<1, 1>,
SameOperandsAndResultsScale]> {
let summary = "ResizeNearestNeighbor Op";
@@ -3077,14 +3142,14 @@ def TFL_ResizeNearestNeighborOp : TFL_Op<"resize_nearest_neighbor",
}];
let arguments = (ins
TFL_TensorOf<[F32, I8, TFL_Uint8, QUI8, QI8]>:$input,
TFL_TensorOf<[I32]>:$size,
TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8]>:$input,
TFL_I32Tensor:$size,
BoolAttr:$align_corners,
DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
);
let results = (outs
TFL_TensorOf<[F32, I8, TFL_Uint8, QUI8, QI8]>:$output
TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8]>:$output
);
let hasOptions = 1;
@@ -3349,7 +3414,9 @@ def TFL_SparseQConstOp : Op<TFL_Dialect, "pseudo_sparse_qconst", [
}
def TFL_QuantizeOp: TFL_Op<"quantize", [
FirstAttrDerivedResultType, NoQuantizableResult]> {
FirstAttrDerivedResultType,
SameOperandsAndResultShape,
NoQuantizableResult]> {
let summary = "Quantize operator";
let description = [{
@@ -3358,11 +3425,11 @@ def TFL_QuantizeOp: TFL_Op<"quantize", [
}];
let arguments = (
ins AnyTensor:$input,
ins TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$input,
TensorTypeAttr:$qtype
);
let results = (outs AnyTensor:$output);
let results = (outs TFL_TensorOf<[QI8, QUI8, QI16, TFL_Quint8]>:$output);
}
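With SameOperandsAndResultShape on tfl.quantize, the float input and quantized output must now agree in shape; a minimal well-formed sketch (hypothetical, with an illustrative scale):

func @quantize(%arg0: tensor<1x8xf32>) -> tensor<1x8x!quant.uniform<i8:f32, 0.1>> {
  // Shapes match (1x8); only the element type changes, as carried by $qtype.
  %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x8x!quant.uniform<i8:f32, 0.1>>} : (tensor<1x8xf32>) -> tensor<1x8x!quant.uniform<i8:f32, 0.1>>
  return %0 : tensor<1x8x!quant.uniform<i8:f32, 0.1>>
}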
def TFL_DensifyOp: TFL_Op<"densify", [
@@ -3941,14 +4008,12 @@ def TFL_NumericVerifyOp : Op<TFL_Dialect, "NumericVerify", [
let results = (outs);
}
def SVDFResultConstraint: PredOpTrait<
"the input and result tensor elemental types must be same",
TCresVTEtIsSameAsOp<0, 0>>;
// SVDF op.
def TFL_SVDFOp :
TFL_Op<"svdf",
[SVDFResultConstraint, TFL_StatefulOp]> {
TFL_Op<"svdf", [
PredOpTrait<"the input and result tensor elemental types must be same",
TCresVTEtIsSameAsOp<0, 0>>,
TFL_StatefulOp]> {
let summary = "Single value decomposition filter operator";
@@ -3960,13 +4025,13 @@ def TFL_SVDFOp :
}];
let arguments = (
ins TFL_TensorOf<[F32, I8]>:$input,
ins TFL_TensorOf<[F32, QI8]>:$input,
// Feature Weights.
TFL_TensorOf<[F32, I8]>:$feature_weights,
TFL_TensorOf<[F32, QI8, QUI8]>:$feature_weights,
// Time weights
TFL_TensorOf<[F32, I8]>:$time_weights,
TFL_TensorOf<[F32, QI8]>:$time_weights,
// Bias
TFL_TensorOfOrNone<[F32]>:$input_gate_bias,
@@ -3975,11 +4040,11 @@ def TFL_SVDFOp :
TFL_StatefulTensor:$activation_state,
// Attributes
I32Attr:$rank,
Confined<I32Attr, [IntPositive]>:$rank,
TFL_AFAttr:$fused_activation_function
);
let results = (outs TFL_TensorOf<[F32, I8]>:$output);
let results = (outs TFL_TensorOf<[F32, QI8]>:$output);
let hasOptions = 1;
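Confined<I32Attr, [IntPositive]> means rank = 0 (or a negative value) on tfl.svdf is now a verification error; a sketch mirroring the test file's usage (operand names illustrative):

func @svdf_ok(%in: tensor<?xf32>, %fw: tensor<?xf32>, %tw: tensor<?xf32>, %bias: tensor<?xf32>, %state: tensor<?xf32>) -> tensor<?xf32> {
  // rank must be a positive i32 attribute under the new IntPositive constraint.
  %0 = "tfl.svdf"(%in, %fw, %tw, %bias, %state) {fused_activation_function = "NONE", rank = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}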
@@ -3991,7 +4056,10 @@ def TFL_SVDFOp :
}];
}
def TFL_SegmentSumOp: TFL_Op<"segment_sum", [NoSideEffect]> {
def TFL_SegmentSumOp: TFL_Op<"segment_sum", [
NoSideEffect,
PredOpTrait<"input and output must have same element type",
TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
let summary = "SegmentSum operator";
let description = [{
@@ -3999,7 +4067,7 @@ def TFL_SegmentSumOp: TFL_Op<"segment_sum", [NoSideEffect]> {
}];
let arguments = (ins
TFL_TensorOf<[F32, I32]>:$data,
TFL_TensorOf<[F32, I32]>:$input,
TFL_I32Tensor:$segment_ids
);
let results = (outs TFL_TensorOf<[F32, I32]>:$output);

File 2 of 3: MLIR op verification tests

@@ -190,9 +190,9 @@ func @testSquare(tensor<? x f32>) -> tensor<? x f32> {
return %0 : tensor<? x f32>
}
func @testQuantizedResizeNearestNeighbor(tensor<? x !quant.uniform<u8:f32, 0.1>>, tensor<? x i32>) -> tensor<? x !quant.uniform<u8:f32, 0.1>> {
^bb0(%arg0: tensor<? x !quant.uniform<u8:f32, 0.1>>, %arg1: tensor<? x i32>):
%0 = "tfl.resize_nearest_neighbor"(%arg0, %arg1) { align_corners = false, half_pixel_centers = false } : (tensor<? x !quant.uniform<u8:f32, 0.1>>, tensor<? x i32>) -> tensor<? x !quant.uniform<u8:f32, 0.1>>
func @testQuantizedResizeNearestNeighbor(tensor<? x ? x ? x ? x !quant.uniform<u8:f32, 0.1>>, tensor<? x i32>) -> tensor<? x !quant.uniform<u8:f32, 0.1>> {
^bb0(%arg0: tensor<? x ? x ? x ? x !quant.uniform<u8:f32, 0.1>>, %arg1: tensor<? x i32>):
%0 = "tfl.resize_nearest_neighbor"(%arg0, %arg1) { align_corners = false, half_pixel_centers = false } : (tensor<? x ? x ? x ? x !quant.uniform<u8:f32, 0.1>>, tensor<? x i32>) -> tensor<? x !quant.uniform<u8:f32, 0.1>>
return %0 : tensor<? x !quant.uniform<u8:f32, 0.1>>
}
@@ -1201,7 +1201,7 @@ func @testResizeBilinear(%arg0 : tensor<1x100x100x3xf32>, %arg1 : tensor<4xi32>)
// -----
func @testResizeBilinearInvalidOutputType(%arg0 : tensor<1x100x100x3xf32>, %arg1 : tensor<4xi32>) -> tensor<?xi32> {
// expected-error @+1 {{'tfl.resize_bilinear' op result #0 must be tensor of 32-bit float or QI8 type or QUI8 type values}}
// expected-error @+1 {{'tfl.resize_bilinear' op failed to verify that input and output must have same element type}}
%0 = "tfl.resize_bilinear"(%arg0, %arg1) {align_corners = false} : (tensor<1x100x100x3xf32>, tensor<4xi32>) -> tensor<?xi32>
return %0 : tensor<?xi32>
}
@@ -1499,8 +1499,8 @@ func @testWrongQuantizedLocalResponseNormalization(%arg0 : tensor<1x56x56x192x!q
// CHECK-LABEL: testSvdf
func @testSvdf(%arg0: tensor<? x f32>, %arg1: tensor<? x f32>, %arg2: tensor<? x f32>, %arg3: tensor<? x f32>, %arg4: tensor<? x f32>) -> tensor<? x f32> {
// CHECK: "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %arg4) {fused_activation_function = "NONE", rank = 2 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%0 = "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %arg4) {fused_activation_function = "NONE", rank = 2 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
// CHECK: "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %arg4) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%0 = "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %arg4) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}

File 3 of 3: TFLite converter op test generator (Python)

@@ -105,6 +105,13 @@ def make_space_to_batch_nd_tests(options):
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
if options.use_experimental_converter:
# Remove unsupported dimension cases. Currently, kernel supports 3 and 4-D
# inputs.
test_parameters = [
test_parameters[0], test_parameters[1], test_parameters[3]
]
make_zip_of_tests(
options,
test_parameters,