Add custom printer and parser for HLO Const op

There are two forms: a short form and a long form. The short form is used when the type of the 'value' attribute exactly matches the result type; it uses the same format as the standard constant op. The long form prints the full attribute dictionary and shortens the op signature.

Short form example:
xla_hlo.constant dense<0> : tensor<i32>

Long form example:
xla_hlo.constant {value = dense<0> : tensor<i32>} : tensor<*xi32>
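
In both forms, attributes other than 'value' are printed in the attribute dictionary, as the tests added in this change exercise; for example, the short form with an extra attribute:

xla_hlo.constant {extra_attr = 3 : i32} dense<0> : tensor<i32>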

PiperOrigin-RevId: 272763262
Author: Smit Hinsu, 2019-10-03 16:10:31 -07:00 (committed by TensorFlower Gardener)
parent 3c30766e1b
commit 11b69dada6
9 changed files with 132 additions and 71 deletions


@@ -43,6 +43,7 @@ limitations under the License.
#include "mlir/IR/TypeUtilities.h" // TF:local_config_mlir
#include "mlir/IR/Types.h" // TF:local_config_mlir
#include "mlir/IR/Value.h" // TF:local_config_mlir
#include "mlir/Support/LogicalResult.h" // TF:local_config_mlir
#include "tensorflow/compiler/mlir/xla/ir/hlo_ops.h.inc"
namespace mlir {
@@ -83,6 +84,45 @@ static LogicalResult Verify(T op) {
// ConstOp
//===----------------------------------------------------------------------===//
static void Print(ConstOp op, OpAsmPrinter* printer) {
  // Use short form only if the result type matches type of attribute 'value'.
  bool use_short_form = op.value().getType() == op.getType();

  // Print op name.
  *printer << op.getOperationName();

  // If short form, elide attribute value while printing the attribute
  // dictionary.
  SmallVector<StringRef, 1> elided_attrs;
  if (use_short_form) elided_attrs.push_back("value");
  printer->printOptionalAttrDict(op.getAttrs(), elided_attrs);

  if (use_short_form) {
    *printer << ' ' << op.value();
  } else {
    *printer << " : " << op.getType();
  }
}
static ParseResult ParseConstOp(OpAsmParser* parser, OperationState* result) {
  if (parser->parseOptionalAttributeDict(result->attributes)) return failure();

  // If colon is not present after attribute dictionary, it should be short
  // form and attribute 'value' is outside the dictionary.
  if (failed(parser->parseOptionalColon())) {
    Attribute value;
    if (parser->parseAttribute(value, "value", result->attributes))
      return failure();
    return parser->addTypeToList(value.getType(), result->types);
  }

  // Long form should have type of the result after colon.
  Type ty;
  if (parser->parseType(ty)) return failure();
  result->types.push_back(ty);
  return success();
}
OpFoldResult ConstOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.empty() && "constant has no operands");
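
Note that the generic op form remains accepted by MLIR independent of this custom parser; the updated tests below continue to use it as input, e.g.:

"xla_hlo.constant"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>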


@@ -82,6 +82,9 @@ def HLO_ConstOp : BASE_HLO_ConstOp, HLO_Op<"constant", [NoSideEffect]> {
"Builder *builder, OperationState &result, Attribute value"
>];
let printer = [{ return Print(*this, &p); }];
let parser = [{ return ParseConstOp(&parser, &result); }];
let hasFolder = 1;
// Constant has special conversion logic to HLO.
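
For context, the `printer` and `parser` bodies above are spliced into ODS-generated methods, with `p`, `parser`, and `result` supplied by the generated code; roughly, as a sketch rather than the exact generated signatures:

// Generated printer hook; forwards to the static Print() defined above.
void ConstOp::print(OpAsmPrinter &p) { return Print(*this, &p); }

// Generated parser hook; forwards to ParseConstOp() defined above.
ParseResult ConstOp::parse(OpAsmParser &parser, OperationState &result) {
  return ParseConstOp(&parser, &result);
}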


@@ -70,8 +70,8 @@ func @high_rank_tensor(%arg: tensor<2x3xi32>) -> tensor<2x3xf32> {
// CHECK-LABEL: func @const_same_type
func @const_same_type() -> tensor<i32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<i32>} : () -> tensor<i32>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<i32>} : () -> tensor<i32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<i32>
%cst = xla_hlo.constant dense<42> : tensor<i32>
%0 = "xla_hlo.convert"(%cst) : (tensor<i32>) -> tensor<i32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i32>
@@ -81,8 +81,8 @@ func @const_same_type() -> tensor<i32> {
// CHECK-LABEL: func @const_float_int
func @const_float_int() -> tensor<i32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<i32>} : () -> tensor<i32>
%cst = "xla_hlo.constant"() {value = dense<42.0> : tensor<f32>} : () -> tensor<f32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<i32>
%cst = xla_hlo.constant dense<42.0> : tensor<f32>
%0 = "xla_hlo.convert"(%cst) : (tensor<f32>) -> tensor<i32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i32>
@@ -92,8 +92,8 @@ func @const_float_int() -> tensor<i32> {
// CHECK-LABEL: func @const_int_float
func @const_int_float() -> tensor<f32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<4.{{0*}}e+00> : tensor<f32>} : () -> tensor<f32>
%cst = "xla_hlo.constant"() {value = dense<4> : tensor<i32>} : () -> tensor<i32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.{{0*}}e+00> : tensor<f32>
%cst = xla_hlo.constant dense<4> : tensor<i32>
%0 = "xla_hlo.convert"(%cst) : (tensor<i32>) -> tensor<f32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<f32>
@@ -103,8 +103,8 @@ func @const_int_float() -> tensor<f32> {
// CHECK-LABEL: func @const_negative_int_float
func @const_negative_int_float() -> tensor<f32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<-4.{{0*}}e+00> : tensor<f32>} : () -> tensor<f32>
%cst = "xla_hlo.constant"() {value = dense<-4> : tensor<i32>} : () -> tensor<i32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<-4.{{0*}}e+00> : tensor<f32>
%cst = xla_hlo.constant dense<-4> : tensor<i32>
%0 = "xla_hlo.convert"(%cst) : (tensor<i32>) -> tensor<f32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<f32>
@@ -114,8 +114,8 @@ func @const_negative_int_float() -> tensor<f32> {
// CHECK-LABEL: func @const_int_bf16
func @const_int_bf16() -> tensor<bf16> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<4.{{0*}}e+00> : tensor<bf16>} : () -> tensor<bf16>
%cst = "xla_hlo.constant"() {value = dense<4> : tensor<i32>} : () -> tensor<i32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.{{0*}}e+00> : tensor<bf16>
%cst = xla_hlo.constant dense<4> : tensor<i32>
%0 = "xla_hlo.convert"(%cst) : (tensor<i32>) -> tensor<bf16>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<bf16>
@@ -125,8 +125,8 @@ func @const_int_bf16() -> tensor<bf16> {
// CHECK-LABEL: func @const_bf16_int
func @const_bf16_int() -> tensor<i16> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<i16>} : () -> tensor<i16>
%cst = "xla_hlo.constant"() {value = dense<42.0> : tensor<bf16>} : () -> tensor<bf16>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<i16>
%cst = xla_hlo.constant dense<42.0> : tensor<bf16>
%0 = "xla_hlo.convert"(%cst) : (tensor<bf16>) -> tensor<i16>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i16>
@@ -136,8 +136,8 @@ func @const_bf16_int() -> tensor<i16> {
// CHECK-LABEL: func @const_int_narrowing
func @const_int_narrowing() -> tensor<i32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<i32>} : () -> tensor<i32>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<i64>} : () -> tensor<i64>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<i32>
%cst = xla_hlo.constant dense<42> : tensor<i64>
%0 = "xla_hlo.convert"(%cst) : (tensor<i64>) -> tensor<i32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i32>
@@ -147,8 +147,8 @@ func @const_int_narrowing() -> tensor<i32> {
// CHECK-LABEL: func @const_int_widening
func @const_int_widening() -> tensor<i64> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<i64>} : () -> tensor<i64>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<i32>} : () -> tensor<i32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<i64>
%cst = xla_hlo.constant dense<42> : tensor<i32>
%0 = "xla_hlo.convert"(%cst) : (tensor<i32>) -> tensor<i64>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i64>
@@ -158,8 +158,8 @@ func @const_int_widening() -> tensor<i64> {
// CHECK-LABEL: func @const_negative_int_widening
func @const_negative_int_widening() -> tensor<i64> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<-42> : tensor<i64>} : () -> tensor<i64>
%cst = "xla_hlo.constant"() {value = dense<-42> : tensor<i32>} : () -> tensor<i32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<-42> : tensor<i64>
%cst = xla_hlo.constant dense<-42> : tensor<i32>
%0 = "xla_hlo.convert"(%cst) : (tensor<i32>) -> tensor<i64>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i64>
@@ -169,8 +169,8 @@ func @const_negative_int_widening() -> tensor<i64> {
// CHECK-LABEL: func @const_float_narrowing
func @const_float_narrowing() -> tensor<f32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<4.2{{0*}}e+00> : tensor<f32>} : () -> tensor<f32>
%cst = "xla_hlo.constant"() {value = dense<4.2> : tensor<f64>} : () -> tensor<f64>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.2{{0*}}e+00> : tensor<f32>
%cst = xla_hlo.constant dense<4.2> : tensor<f64>
%0 = "xla_hlo.convert"(%cst) : (tensor<f64>) -> tensor<f32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<f32>
@@ -180,8 +180,8 @@ func @const_float_narrowing() -> tensor<f32> {
// CHECK-LABEL: func @const_f32_bf16
func @const_f32_bf16() -> tensor<bf16> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<4.2{{0*}}e+01> : tensor<bf16>} : () -> tensor<bf16>
%cst = "xla_hlo.constant"() {value = dense<42.0> : tensor<f32>} : () -> tensor<f32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.2{{0*}}e+01> : tensor<bf16>
%cst = xla_hlo.constant dense<42.0> : tensor<f32>
%0 = "xla_hlo.convert"(%cst) : (tensor<f32>) -> tensor<bf16>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<bf16>
@@ -191,8 +191,8 @@ func @const_f32_bf16() -> tensor<bf16> {
// CHECK-LABEL: func @const_bf16_f64
func @const_bf16_f64() -> tensor<f64> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<4.2{{0*}}e+00> : tensor<f64>} : () -> tensor<f64>
%cst = "xla_hlo.constant"() {value = dense<4.2> : tensor<bf16>} : () -> tensor<bf16>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.2{{0*}}e+00> : tensor<f64>
%cst = xla_hlo.constant dense<4.2> : tensor<bf16>
%0 = "xla_hlo.convert"(%cst) : (tensor<bf16>) -> tensor<f64>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<f64>
@@ -202,8 +202,8 @@ func @const_bf16_f64() -> tensor<f64> {
// CHECK-LABEL: func @const_bf16_int
func @const_bf16_int() -> tensor<i64> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<i64>} : () -> tensor<i64>
%cst = "xla_hlo.constant"() {value = dense<42.0> : tensor<bf16>} : () -> tensor<bf16>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<i64>
%cst = xla_hlo.constant dense<42.0> : tensor<bf16>
%0 = "xla_hlo.convert"(%cst) : (tensor<bf16>) -> tensor<i64>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i64>
@@ -214,10 +214,10 @@ func @const_bf16_int() -> tensor<i64> {
// CHECK-LABEL: func @const_high_rank_tensor
func @const_high_rank_tensor() -> tensor<2x3xi32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<[
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<[
// CHECK-SAME: [1, 2, 3], [4, 5, 6]
// CHECK-SAME: ]> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
%cst = "xla_hlo.constant"() {value = dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
// CHECK-SAME: ]> : tensor<2x3xi32>
%cst = xla_hlo.constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32>
%0 = "xla_hlo.convert"(%cst) : (tensor<2x3xf32>) -> tensor<2x3xi32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<2x3xi32>


@@ -4,7 +4,7 @@
// CHECK-LABEL: func @iota.const.1() -> tensor<4xi32> {
func @iota.const.1() -> tensor<4xi32> {
// CHECK-NEXT: %[[CST:.*]] = "xla_hlo.constant"() {value = dense<[0, 1, 2, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
// CHECK-NEXT: %[[CST:.*]] = xla_hlo.constant dense<[0, 1, 2, 3]> : tensor<4xi32>
%0 = "xla_hlo.iota"() {iota_dimension = 0 : i64} : () -> tensor<4xi32>
// CHECK-NEXT: return %[[CST]] : tensor<4xi32>
return %0 : tensor<4xi32>
@@ -14,7 +14,7 @@ func @iota.const.1() -> tensor<4xi32> {
// CHECK-LABEL: func @iota.const.2() -> tensor<2x4xi32> {
func @iota.const.2() -> tensor<2x4xi32> {
// CHECK-NEXT: %[[CST:.*]] = "xla_hlo.constant"() {value = dense<{{\[\[}}0, 0, 0, 0], [1, 1, 1, 1]]> : tensor<2x4xi32>} : () -> tensor<2x4xi32>
// CHECK-NEXT: %[[CST:.*]] = xla_hlo.constant dense<{{\[\[}}0, 0, 0, 0], [1, 1, 1, 1]]> : tensor<2x4xi32>
%0 = "xla_hlo.iota"() {iota_dimension = 0 : i64} : () -> tensor<2x4xi32>
// CHECK-NEXT: return %[[CST]] : tensor<2x4xi32>
return %0 : tensor<2x4xi32>
@@ -24,7 +24,7 @@ func @iota.const.2() -> tensor<2x4xi32> {
// CHECK-LABEL: func @iota.const.3() -> tensor<2x4xi32> {
func @iota.const.3() -> tensor<2x4xi32> {
// CHECK-NEXT: %[[CST:.*]] = "xla_hlo.constant"() {value = dense<{{\[\[}}0, 1, 2, 3], [0, 1, 2, 3]]> : tensor<2x4xi32>} : () -> tensor<2x4xi32>
// CHECK-NEXT: %[[CST:.*]] = xla_hlo.constant dense<{{\[\[}}0, 1, 2, 3], [0, 1, 2, 3]]> : tensor<2x4xi32>
%0 = "xla_hlo.iota"() {iota_dimension = 1 : i64} : () -> tensor<2x4xi32>
// CHECK-NEXT: return %[[CST]] : tensor<2x4xi32>
return %0 : tensor<2x4xi32>
@@ -34,7 +34,7 @@ func @iota.const.3() -> tensor<2x4xi32> {
// CHECK-LABEL: func @iota.const.4() -> tensor<2x3x4xi32> {
func @iota.const.4() -> tensor<2x3x4xi32> {
// CHECK-NEXT: %[[CST:.*]] = "xla_hlo.constant"() {value = dense<{{\[\[\[}}0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0{{\]\]}}, {{\[\[}}1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]> : tensor<2x3x4xi32>} : () -> tensor<2x3x4xi32>
// CHECK-NEXT: %[[CST:.*]] = xla_hlo.constant dense<{{\[\[\[}}0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0{{\]\]}}, {{\[\[}}1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]> : tensor<2x3x4xi32>
%0 = "xla_hlo.iota"() {iota_dimension = 0 : i64} : () -> tensor<2x3x4xi32>
// CHECK-NEXT: return %[[CST]] : tensor<2x3x4xi32>
return %0 : tensor<2x3x4xi32>
@@ -44,7 +44,7 @@ func @iota.const.4() -> tensor<2x3x4xi32> {
// CHECK-LABEL: func @iota.const.5() -> tensor<2x3x4xi32> {
func @iota.const.5() -> tensor<2x3x4xi32> {
// CHECK-NEXT: %[[CST:.*]] = "xla_hlo.constant"() {value = dense<{{\[\[\[}}0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2{{\]\]}}, {{\[\[}}0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]]> : tensor<2x3x4xi32>} : () -> tensor<2x3x4xi32>
// CHECK-NEXT: %[[CST:.*]] = xla_hlo.constant dense<{{\[\[\[}}0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2{{\]\]}}, {{\[\[}}0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]]> : tensor<2x3x4xi32>
%0 = "xla_hlo.iota"() {iota_dimension = 1 : i64} : () -> tensor<2x3x4xi32>
// CHECK-NEXT: return %[[CST]] : tensor<2x3x4xi32>
return %0 : tensor<2x3x4xi32>
@@ -54,7 +54,7 @@ func @iota.const.5() -> tensor<2x3x4xi32> {
// CHECK-LABEL: func @iota.const.6() -> tensor<2x3x4xi32> {
func @iota.const.6() -> tensor<2x3x4xi32> {
// CHECK-NEXT: %[[CST:.*]] = "xla_hlo.constant"() {value = dense<{{\[\[\[}}0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3{{\]\]}}, {{\[\[}}0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]]> : tensor<2x3x4xi32>} : () -> tensor<2x3x4xi32>
// CHECK-NEXT: %[[CST:.*]] = xla_hlo.constant dense<{{\[\[\[}}0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3{{\]\]}}, {{\[\[}}0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]]> : tensor<2x3x4xi32>
%0 = "xla_hlo.iota"() {iota_dimension = 2 : i64} : () -> tensor<2x3x4xi32>
// CHECK-NEXT: return %[[CST]] : tensor<2x3x4xi32>
return %0 : tensor<2x3x4xi32>


@@ -263,7 +263,7 @@ func @identity(%arg0: tensor<1xi32>) -> tensor<1xi32> {
// CHECK-LABEL: @const
func @const() -> tensor<2xi32> {
// CHECK-NEXT: "xla_hlo.constant"() {value = dense<0> : tensor<2xi32>}
// CHECK-NEXT: xla_hlo.constant dense<0> : tensor<2xi32>
%0 = "tf.Const"() {device = "", name = "", dtype = "tfdtype$DT_INT32", value = dense<0> : tensor<2xi32>} : () -> (tensor<2xi32>)
return %0: tensor<2xi32>
}
@@ -287,7 +287,7 @@ func @matmul_notranspose(%arg0: tensor<5x7xf32>, %arg1: tensor<7x11xf32>) -> ten
// CHECK-LABEL: maxpool_valid_padding
// CHECK-SAME: %[[ARG:.*]]: tensor
func @maxpool_valid_padding(%arg0: tensor<2x12x20x7xi32>) -> tensor<2x3x5x7xi32> {
// CHECK: %[[INIT:.*]] = "xla_hlo.constant"() {value = dense<-2147483648> : tensor<i32>}
// CHECK: %[[INIT:.*]] = xla_hlo.constant dense<-2147483648> : tensor<i32>
// CHECK: "xla_hlo.reduce_window"(%[[ARG]], %[[INIT]])
// CHECK: xla_hlo.max
// CHECK: xla_hlo.return
@@ -317,7 +317,7 @@ func @pack(%arg0: tensor<2xi32>, %arg1: tensor<2xi32>) -> tensor<2x2xi32> {
// CHECK-LABEL: func @relu
func @relu(%arg0: tensor<1xi32>) -> tensor<1xi32> {
// CHECK-NEXT: %[[ZERO:.*]] = "xla_hlo.constant"() {value = dense<0> : tensor<1xi32>}
// CHECK-NEXT: %[[ZERO:.*]] = xla_hlo.constant dense<0> : tensor<1xi32>
// CHECK-NEXT: xla_hlo.max %[[ZERO]], %arg0 : tensor<1xi32>
%0 = "tf.Relu"(%arg0) : (tensor<1xi32>) -> tensor<1xi32>
return %0: tensor<1xi32>
@@ -332,8 +332,8 @@ func @relu_non_static_input(%arg0: tensor<?xi32>) -> tensor<?xi32> {
// CHECK-LABEL: func @relu6
func @relu6(%arg0: tensor<1xi32>) -> tensor<1xi32> {
// CHECK-NEXT: %[[ZERO:.*]] = "xla_hlo.constant"() {value = dense<0> : tensor<1xi32>}
// CHECK-NEXT: %[[SIX:.*]] = "xla_hlo.constant"() {value = dense<6> : tensor<1xi32>}
// CHECK-NEXT: %[[ZERO:.*]] = xla_hlo.constant dense<0> : tensor<1xi32>
// CHECK-NEXT: %[[SIX:.*]] = xla_hlo.constant dense<6> : tensor<1xi32>
// CHECK-NEXT: "xla_hlo.clamp"(%[[ZERO]], %arg0, %[[SIX]]) : (tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32>
%0 = "tf.Relu6"(%arg0) : (tensor<1xi32>) -> tensor<1xi32>
return %0: tensor<1xi32>
@@ -373,7 +373,7 @@ func @select_multidimensional(%arg0: tensor<3x2xi1>, %arg1: tensor<3x2xi32>, %ar
func @simple_softmax(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
// Verify reduce op for max computation and its body.
// CHECK: %[[NEG_INF:.*]] = "xla_hlo.constant"() {value = dense<0xFF800000> : tensor<f32>}
// CHECK: %[[NEG_INF:.*]] = xla_hlo.constant dense<0xFF800000> : tensor<f32>
// CHECK: %[[MAX:.*]] = "xla_hlo.reduce"(%[[ARG0]], %[[NEG_INF]])
// CHECK: xla_hlo.max
// CHECK: "xla_hlo.return"
@@ -384,7 +384,7 @@ func @simple_softmax(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
// CHECK: %[[CASTED_EXP:.*]] = "xla_hlo.convert"(%[[EXP]]) : (tensor<2x3xf32>) -> tensor<2x3xf32>
// Verify reduce op for summation and its body.
// CHECK: %[[ZERO:.*]] = "xla_hlo.constant"() {value = dense<0.000000e+00> : tensor<f32>}
// CHECK: %[[ZERO:.*]] = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
// CHECK: %[[SUM:.*]] = "xla_hlo.reduce"(%[[CASTED_EXP]], %[[ZERO]])
// CHECK: xla_hlo.add
// CHECK: "xla_hlo.return"
@@ -433,7 +433,7 @@ func @rank4_softmax(%arg0: tensor<2x3x4x5xf16>) -> tensor<2x3x4x5xf16> {
func @simple_logsoftmax(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
// Verify reduce op for max computation and its body.
// CHECK: %[[NEG_INF:.*]] = "xla_hlo.constant"() {value = dense<0xFF800000> : tensor<f32>}
// CHECK: %[[NEG_INF:.*]] = xla_hlo.constant dense<0xFF800000> : tensor<f32>
// CHECK: %[[MAX:.*]] = "xla_hlo.reduce"(%[[ARG0]], %[[NEG_INF]])
// CHECK: xla_hlo.max
// CHECK: "xla_hlo.return"
@@ -444,7 +444,7 @@ func @simple_logsoftmax(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
// CHECK: %[[CASTED_EXP:.*]] = "xla_hlo.convert"(%[[EXP]]) : (tensor<2x3xf32>) -> tensor<2x3xf32>
// Verify reduce op for summation and its body.
// CHECK: %[[ZERO:.*]] = "xla_hlo.constant"() {value = dense<0.000000e+00> : tensor<f32>}
// CHECK: %[[ZERO:.*]] = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
// CHECK: %[[SUM:.*]] = "xla_hlo.reduce"(%[[CASTED_EXP]], %[[ZERO]])
// CHECK: xla_hlo.add
// CHECK: "xla_hlo.return"
@@ -657,7 +657,7 @@ func @neg_rankless(%arg0: tensor<*xf32>) -> tensor<*xf32> {
// CHECK-LABEL: @sigmoid
func @sigmoid(%arg0: tensor<2xf32>) -> tensor<2xf32> {
// CHECK-DAG: [[R0:%.+]] = "xla_hlo.constant"() {value = dense<5.000000e-01> : tensor<f32>} : () -> tensor<f32>
// CHECK-DAG: [[R0:%.+]] = xla_hlo.constant dense<5.000000e-01> : tensor<f32>
// CHECK-DAG: [[R1:%.+]] = "xla_hlo.broadcast"([[R0]]) {broadcast_sizes = dense<2> : tensor<1xi64>} : (tensor<f32>) -> tensor<2xf32>
// CHECK-DAG: [[R2:%.+]] = xla_hlo.mul %arg0, [[R1]] : tensor<2xf32>
// CHECK-DAG: [[R3:%.+]] = "xla_hlo.tanh"([[R2]]) : (tensor<2xf32>) -> tensor<2xf32>
@@ -826,13 +826,13 @@ func @strided_slice_shrink_axis(%input: tensor<4x8xf32>) -> tensor<f32> {
// CHECK-LABEL: func @mean
func @mean(%arg0: tensor<4x8xf16>) -> tensor<4x1xf16> {
// CHECK: %[[CAST:.*]] = "xla_hlo.convert"(%arg0) : (tensor<4x8xf16>) -> tensor<4x8xf32>
// CHECK: %[[INITIAL:.*]] = "xla_hlo.constant"() {value = dense<0.000000e+00> : tensor<f32>} : () -> tensor<f32>
// CHECK: %[[INITIAL:.*]] = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
// CHECK: %[[REDUCED:.*]] = "xla_hlo.reduce"(%[[CAST]], %[[INITIAL]]) ( {
// CHECK: ^bb0(%[[ARGA:.*]]: tensor<f32>, %[[ARGB:.*]]: tensor<f32>):
// CHECK: %[[REDUCE_BODY_RESULT:.*]] = xla_hlo.add %[[ARGA]], %[[ARGB]] : tensor<f32>
// CHECK: "xla_hlo.return"(%[[REDUCE_BODY_RESULT]]) : (tensor<f32>) -> ()
// CHECK: }) {dimensions = dense<1> : tensor<1xi64>} : (tensor<4x8xf32>, tensor<f32>) -> tensor<4xf32>
// CHECK: %[[DIVISOR:.*]] = "xla_hlo.constant"() {value = dense<8.000000e+00> : tensor<f32>} : () -> tensor<f32>
// CHECK: %[[DIVISOR:.*]] = xla_hlo.constant dense<8.000000e+00> : tensor<f32>
// CHECK: %[[MEAN:.*]] = "xla_hlo.div"(%[[REDUCED]], %[[DIVISOR]]) : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
// CHECK: %[[CAST_BACK:.*]] = "xla_hlo.convert"(%[[MEAN]]) : (tensor<4xf32>) -> tensor<4xf16>
// CHECK: %[[RESULT:.*]] = "xla_hlo.reshape"(%[[CAST_BACK]]) : (tensor<4xf16>) -> tensor<4x1xf16>
@@ -845,7 +845,7 @@ func @mean(%arg0: tensor<4x8xf16>) -> tensor<4x1xf16> {
// CHECK-LABEL: func @sum
func @sum(%arg0: tensor<4x8xf16>) -> tensor<4x1xf16> {
// CHECK: %[[CAST:.*]] = "xla_hlo.convert"(%arg0) : (tensor<4x8xf16>) -> tensor<4x8xf32>
// CHECK: %[[INITIAL:.*]] = "xla_hlo.constant"() {value = dense<0.000000e+00> : tensor<f32>} : () -> tensor<f32>
// CHECK: %[[INITIAL:.*]] = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
// CHECK: %[[REDUCED:.*]] = "xla_hlo.reduce"(%[[CAST]], %[[INITIAL]]) ( {
// CHECK: ^bb0(%[[ARGA:.*]]: tensor<f32>, %[[ARGB:.*]]: tensor<f32>):
// CHECK: %[[REDUCE_BODY_RESULT:.*]] = xla_hlo.add %[[ARGA]], %[[ARGB]] : tensor<f32>
@@ -862,7 +862,7 @@ func @sum(%arg0: tensor<4x8xf16>) -> tensor<4x1xf16> {
// CHECK-LABEL: func @max
func @max(%arg0: tensor<4x8xf16>) -> tensor<4x1xf16> {
// CHECK: %[[CAST:.*]] = "xla_hlo.convert"(%arg0) : (tensor<4x8xf16>) -> tensor<4x8xf16>
// CHECK: %[[INITIAL:.*]] = "xla_hlo.constant"() {value = dense<0xFC00> : tensor<f16>} : () -> tensor<f16>
// CHECK: %[[INITIAL:.*]] = xla_hlo.constant dense<0xFC00> : tensor<f16>
// CHECK: %[[REDUCED:.*]] = "xla_hlo.reduce"(%[[CAST]], %[[INITIAL]]) ( {
// CHECK: ^bb0(%[[ARGA:.*]]: tensor<f16>, %[[ARGB:.*]]: tensor<f16>):
// CHECK: %[[REDUCE_BODY_RESULT:.*]] = xla_hlo.max %[[ARGA]], %[[ARGB]] : tensor<f16>


@@ -1,4 +1,4 @@
// RUN: tf-opt %s -verify-diagnostics -split-input-file | FileCheck %s
// RUN: tf-opt %s -verify-diagnostics -split-input-file | tf-opt | FileCheck %s
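// The second tf-opt invocation re-parses the printed output, so any op
// printed in custom form must round-trip through its printer and parser.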
// -----
@@ -490,7 +490,7 @@ func @get_tuple_element_index_out_of_bounds(%arg0: tuple<tensor<f32>, tensor<i32
// CHECK-LABEL: func @reduce_window
func @reduce_window(%arg0: tensor<4x4xi32>) -> tensor<2x2xi32> {
%cst = constant dense<0> : tensor<i32>
%cst = xla_hlo.constant dense<0> : tensor<i32>
%0 = "xla_hlo.reduce_window"(%arg0, %cst) ( {
^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>): // no predecessors
%6 = "xla_hlo.max"(%arg1, %arg2) : (tensor<i32>, tensor<i32>) -> tensor<i32>
@@ -498,3 +498,22 @@ func @reduce_window(%arg0: tensor<4x4xi32>) -> tensor<2x2xi32> {
}) {window_dimensions = dense<[2, 2]> : tensor<2xi64>, window_strides = dense<[2, 2]> : tensor<2xi64>, padding = dense<[2, 2]> : tensor<2xi64>} : (tensor<4x4xi32>, tensor<i32>) -> tensor<2x2xi32>
return %0 : tensor<2x2xi32>
}
// -----
// Verifies HLO constant op custom printing and parsing.
// CHECK-LABEL: func @constants
func @constants() -> () {
// CHECK: xla_hlo.constant dense<0> : tensor<i32>
%0 = "xla_hlo.constant"() {value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
// CHECK: xla_hlo.constant {extra_attr = 3 : i32} dense<0> : tensor<i32>
%1 = "xla_hlo.constant"() {extra_attr = 3 : i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
// CHECK: xla_hlo.constant {value = dense<0> : tensor<i32>} : tensor<*xi32>
%2 = "xla_hlo.constant"() {value = dense<0> : tensor<i32>} : () -> (tensor<*xi32>)
// CHECK: xla_hlo.constant {extra_attr = 3 : i32, value = dense<0> : tensor<i32>} : tensor<*xi32>
%3 = "xla_hlo.constant"() {extra_attr = 3 : i32, value = dense<0> : tensor<i32>} : () -> (tensor<*xi32>)
return
}


@@ -4,7 +4,7 @@
// CHECK-SAME: (%[[ARG0:.*]]: tensor<4x8xf32>)
// CHECK: return %[[ARG0]]
func @noop(%arg0: tensor<4x8xf32>) -> tensor<4x8xf32> {
%0 = "xla_hlo.constant"() {value = dense<0.000000e+00> : tensor<f32>} : () -> tensor<f32>
%0 = xla_hlo.constant dense<0.000000e+00> : tensor<f32>
%2 = "xla_hlo.reduce"(%arg0, %0) ( {
^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>):
%4 = xla_hlo.add %arg1, %arg2 : tensor<f32>


@@ -2,8 +2,8 @@
// CHECK-LABEL: func @const_fold_collapse_to_scalar
func @const_fold_collapse_to_scalar() -> tensor<i32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<i32>} : () -> tensor<i32>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<1x1xi32>} : () -> tensor<1x1xi32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<i32>
%cst = xla_hlo.constant dense<42> : tensor<1x1xi32>
%0 = "xla_hlo.reshape"(%cst) : (tensor<1x1xi32>) -> tensor<i32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<i32>
@@ -13,8 +13,8 @@ func @const_fold_collapse_to_scalar() -> tensor<i32> {
// CHECK-LABEL: func @const_fold_collapse_to_tensor
func @const_fold_collapse_to_tensor() -> tensor<2xi32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<2xi32>} : () -> tensor<2xi32>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<1x2xi32>} : () -> tensor<1x2xi32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<2xi32>
%cst = xla_hlo.constant dense<42> : tensor<1x2xi32>
%0 = "xla_hlo.reshape"(%cst) : (tensor<1x2xi32>) -> tensor<2xi32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<2xi32>
@@ -24,8 +24,8 @@ func @const_fold_collapse_to_tensor() -> tensor<2xi32> {
// CHECK-LABEL: func @const_fold_expand
func @const_fold_expand() -> tensor<1xi32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<1xi32>} : () -> tensor<1xi32>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<i32>} : () -> tensor<i32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<1xi32>
%cst = xla_hlo.constant dense<42> : tensor<i32>
%0 = "xla_hlo.reshape"(%cst) : (tensor<i32>) -> tensor<1xi32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<1xi32>
@@ -35,8 +35,8 @@ func @const_fold_expand() -> tensor<1xi32> {
// CHECK-LABEL: func @const_fold_nontrivial
func @const_fold_nontrivial() -> tensor<16xi64> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<16xi64>} : () -> tensor<16xi64>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<4x4xi64>} : () -> tensor<4x4xi64>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<16xi64>
%cst = xla_hlo.constant dense<42> : tensor<4x4xi64>
%0 = "xla_hlo.reshape"(%cst) : (tensor<4x4xi64>) -> tensor<16xi64>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<16xi64>
@@ -46,8 +46,8 @@ func @const_fold_nontrivial() -> tensor<16xi64> {
// CHECK-LABEL: func @const_fold_flatten
func @const_fold_flatten() -> tensor<16xi64> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<42> : tensor<16xi64>} : () -> tensor<16xi64>
%cst = "xla_hlo.constant"() {value = dense<42> : tensor<4x4xi64>} : () -> tensor<4x4xi64>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<42> : tensor<16xi64>
%cst = xla_hlo.constant dense<42> : tensor<4x4xi64>
%0 = "xla_hlo.reshape"(%cst) : (tensor<4x4xi64>) -> tensor<16xi64>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<16xi64>
@@ -57,8 +57,8 @@ func @const_fold_flatten() -> tensor<16xi64> {
// CHECK-LABEL: func @const_fold_6
func @const_fold_6() -> tensor<6xi32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<[1, 2, 3, 4, 5, 6]> : tensor<6xi32>} : () -> tensor<6xi32>
%cst = "xla_hlo.constant"() {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<[1, 2, 3, 4, 5, 6]> : tensor<6xi32>
%cst = xla_hlo.constant dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>
%0 = "xla_hlo.reshape"(%cst) : (tensor<3x2xi32>) -> tensor<6xi32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<6xi32>
@@ -68,10 +68,10 @@ func @const_fold_6() -> tensor<6xi32> {
// CHECK-LABEL: func @const_fold_same_shape
func @const_fold_same_shape() -> tensor<2x3xi32> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<[
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<[
// CHECK-SAME: [1, 2, 3], [4, 5, 6]
// CHECK-SAME: ]> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
%cst = "xla_hlo.constant"() {value = dense<[1, 2, 3, 4, 5, 6]> : tensor<6xi32>} : () -> tensor<6xi32>
// CHECK-SAME: ]> : tensor<2x3xi32>
%cst = xla_hlo.constant dense<[1, 2, 3, 4, 5, 6]> : tensor<6xi32>
%0 = "xla_hlo.reshape"(%cst) : (tensor<6xi32>) -> tensor<2x3xi32>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<2x3xi32>
@@ -81,8 +81,8 @@ func @const_fold_same_shape() -> tensor<2x3xi32> {
// CHECK-LABEL: func @const_fold_float
func @const_fold_float() -> tensor<16xf64> {
// CHECK-NEXT: [[CST:%.+]] = "xla_hlo.constant"() {value = dense<4.2{{0*}}e+00> : tensor<16xf64>} : () -> tensor<16xf64>
%cst = "xla_hlo.constant"() {value = dense<4.2> : tensor<4x4xf64>} : () -> tensor<4x4xf64>
// CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.2{{0*}}e+00> : tensor<16xf64>
%cst = xla_hlo.constant dense<4.2> : tensor<4x4xf64>
%0 = "xla_hlo.reshape"(%cst) : (tensor<4x4xf64>) -> tensor<16xf64>
// CHECK-NEXT: return [[CST]]
return %0 : tensor<16xf64>


@@ -295,8 +295,7 @@ class ConvertMaxPoolOp : public OpRewritePattern<TF::MaxPoolOp> {
// Sample result with 2-d f16 inputs with B batches of N elements each.
//
// // Create an array of 0.5 the shape of the input array.
// %half = "xla_hlo.constant"() {value = dense<5.000000e-01>
// : tensor<f32>} : () -> tensor<f32>
// %half = xla_hlo.constant dense<5.000000e-01> : tensor<f32>
// %half_array = "xla_hlo.broadcast"(half)
// {broadcast_sizes = dense<2> : tensor<1xi64>}
// : (tensor<f32>) -> tensor<2xf32>