From 503babe90b37a89da9ef71c2b3952884d0041022 Mon Sep 17 00:00:00 2001
From: Benjamin Kramer
Date: Thu, 4 Mar 2021 05:21:32 -0800
Subject: [PATCH] Integrate LLVM at llvm/llvm-project@c907681b077c

Updates LLVM usage to match
[c907681b077c](https://github.com/llvm/llvm-project/commit/c907681b077c)

PiperOrigin-RevId: 360891677
Change-Id: Iaaf934b70f5b064ad3796d08abd887c53e3b1d41
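Most of the churn below is mechanical fallout from two upstream MLIR renames:
the ODS builder class OpBuilderDAG is now spelled OpBuilder, and type lists in
op-interface signatures move from ArrayRef<Type> to the TypeRange view. As an
illustrative sketch only (a hypothetical op, not a hunk from this patch), a
downstream dialect migrates like so:

    // Before this integrate:
    //   let builders = [OpBuilderDAG<(ins "Value":$input)>];
    //   static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
    // After this integrate:
    //   let builders = [OpBuilder<(ins "Value":$input)>];
    //   static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);

The monolithic TargetLLVMIR target is also split into per-dialect
*ToLLVMIRTranslation libraries, so clients now register only the translations
they need (see the kernel_gen and BUILD hunks below).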
"Type":$result_element_ty)>]; + OpBuilder<(ins "Value":$operand, "Type":$result_element_ty)>]; let hasFolder = 1; @@ -624,7 +624,7 @@ def HLO_ReduceOp: HLO_Op<"reduce", [ let results = (outs Variadic); let builders = [ - OpBuilderDAG<(ins "ValueRange":$operands, "ValueRange":$init_values, + OpBuilder<(ins "ValueRange":$operands, "ValueRange":$init_values, "DenseIntElementsAttr":$dimensions)>]; let extraClassDeclaration = [{ @@ -662,7 +662,7 @@ def HLO_GetTupleElementOp: HLO_Op<"get_tuple_element", [NoSideEffect]>, BASE_HLO let hasFolder = 1; let builders = [ - OpBuilderDAG<(ins "Value":$value, "int32_t":$index)>]; + OpBuilder<(ins "Value":$value, "int32_t":$index)>]; } def HLO_TupleOp : HLO_Op<"tuple", [NoSideEffect]>, BASE_HLO_TupleOp { @@ -670,7 +670,7 @@ def HLO_TupleOp : HLO_Op<"tuple", [NoSideEffect]>, BASE_HLO_TupleOp { let results = (outs HLO_Tuple); let builders = [ - OpBuilderDAG<(ins "ValueRange":$values)>]; + OpBuilder<(ins "ValueRange":$values)>]; let hasCanonicalizer = 1; } @@ -690,7 +690,7 @@ def HLO_CompareOp: HLO_Op<"compare", [NoSideEffect, SameTypeOperands, let hasFolder = 1; let builders = [ - OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs, + OpBuilder<(ins "Value":$lhs, "Value":$rhs, "StringAttr":$comparison_direction, CArg<"StringAttr", "{}">:$compare_type)>, ]; @@ -889,7 +889,7 @@ def HLO_ConcatenateOp : HLO_Op<"concatenate", let hasFolder = 1; let extraClassDeclaration = [{ - static bool isCompatibleReturnTypes(ArrayRef l, ArrayRef r) { + static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) { return succeeded(mlir::verifyCompatibleShapes(l, r)); } }]; @@ -1170,7 +1170,7 @@ def HLO_SortOp : HLO_Op<"sort", [RecursiveSideEffects, let regions = (region SizedRegion<1>:$comparator); let builders = [ - OpBuilderDAG<(ins "ValueRange":$operands, CArg<"int64_t", "-1">:$dimension, + OpBuilder<(ins "ValueRange":$operands, CArg<"int64_t", "-1">:$dimension, CArg<"bool", "false">:$is_stable)>]; // TODO(b/129422361): SortOp has special conversion logic to HLO. diff --git a/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/lhlo_ops.td b/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/lhlo_ops.td index e2f835ae6e2..64bc6a2c6b9 100644 --- a/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/lhlo_ops.td +++ b/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/lhlo_ops.td @@ -691,7 +691,7 @@ def FusionOp : LHLO_Op<"fusion", [SingleBlockImplicitTerminator<"TerminatorOp">] let skipDefaultBuilders = 1; let builders = [ - OpBuilderDAG<(ins CArg<"ArrayRef", "{}">:$attributes)> + OpBuilder<(ins CArg<"ArrayRef", "{}">:$attributes)> ]; let extraClassDeclaration = [{ @@ -740,7 +740,7 @@ def TerminatorOp : Terminator operation for the LHLO dialect. 
   }];
   let builders = [
-    OpBuilderDAG<(ins "ValueRange":$operands),
+    OpBuilder<(ins "ValueRange":$operands),
     [{ build($_builder, $_state, llvm::None, operands, llvm::None); }]>];
 }
diff --git a/tensorflow/compiler/mlir/lite/ir/tfl_ops.cc b/tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
index 55ad1c62429..c5d989914c7 100644
--- a/tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
+++ b/tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
@@ -1045,7 +1045,7 @@ LogicalResult Conv2DOp::inferReturnTypes(
   return success();
 }

-bool Conv2DOp::isCompatibleReturnTypes(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
+bool Conv2DOp::isCompatibleReturnTypes(TypeRange lhs, TypeRange rhs) {
   if (lhs.size() != rhs.size() || lhs.size() != 1) return false;
   if (failed(mlir::verifyCompatibleShape(lhs[0], rhs[0]))) return false;
   return true;
@@ -1917,7 +1917,7 @@ LogicalResult UnpackOp::inferReturnTypes(
   return success();
 }

-bool UnpackOp::isCompatibleReturnTypes(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
+bool UnpackOp::isCompatibleReturnTypes(TypeRange lhs, TypeRange rhs) {
   if (lhs.size() != rhs.size()) return false;
   for (auto pair : llvm::zip(lhs, rhs)) {
     if (failed(
@@ -2267,8 +2267,8 @@ LogicalResult UnidirectionalSequenceLSTMOp::inferReturnTypes(
   return success();
 }

-bool UnidirectionalSequenceLSTMOp::isCompatibleReturnTypes(ArrayRef<Type> lhs,
-                                                           ArrayRef<Type> rhs) {
+bool UnidirectionalSequenceLSTMOp::isCompatibleReturnTypes(TypeRange lhs,
+                                                           TypeRange rhs) {
   if (lhs.size() != rhs.size() || lhs.size() != 1) return false;
   if (failed(mlir::verifyCompatibleShape(lhs[0], rhs[0]))) return false;
   return true;
diff --git a/tensorflow/compiler/mlir/lite/ir/tfl_ops.td b/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
index 04dc41d61ba..74d2a66cc0b 100644
--- a/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
+++ b/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
@@ -428,7 +428,7 @@ def ComparisonOpSameElementTypeConstraint :
 //===----------------------------------------------------------------------===//

 def TFL_BroadcastableBinaryBuilder :
-  OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
+  OpBuilder<(ins "Value":$lhs, "Value":$rhs),
   [{
     auto resultType =
       OpTrait::util::getBroadcastedType(lhs.getType(), rhs.getType());
@@ -439,7 +439,7 @@ def TFL_BroadcastableBinaryBuilder :
   }]>;

 def TFL_FusedBroadcastableBinaryBuilder :
-  OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs,
+  OpBuilder<(ins "Value":$lhs, "Value":$rhs,
       "StringAttr":$fusedActivationFunction),
   [{
     buildFusedBroadcastableBinOp(
@@ -447,7 +447,7 @@ def TFL_FusedBroadcastableBinaryBuilder :
   }]>;

 def TFL_ComparisonBinaryBuilder :
-  OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
+  OpBuilder<(ins "Value":$lhs, "Value":$rhs),
   [{
     buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
   }]>;
@@ -818,7 +818,7 @@ def TFL_ConstOp : Op<
     std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }

     // Returns whether the return types are compatible.
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
   }];
 }
@@ -1062,7 +1062,7 @@ def TFL_GatherOp : TFL_Op<"gather", [

   let builders = [
-    OpBuilderDAG<(ins "Value":$params, "Value":$indices, "IntegerAttr":$axis),
+    OpBuilder<(ins "Value":$params, "Value":$indices, "IntegerAttr":$axis),
     [{ BuildGatherOp(&$_builder, $_state, params, indices, axis); }]>
   ];
@@ -1399,7 +1399,7 @@ def TFL_NotEqualOp : TFL_Op<"not_equal", [

   let builders = [
-    OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
+    OpBuilder<(ins "Value":$lhs, "Value":$rhs),
     [{
       buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
     }]>
@@ -1901,7 +1901,7 @@ def TFL_LogisticOp: TFL_Op<"logistic", [
   // non-quantization tablegen patterns. Currently, it is used by the
   // elementwise-move reordering pattern in the optimize_patterns.td
   let builders = [
-    OpBuilderDAG<(ins "Value":$input),
+    OpBuilder<(ins "Value":$input),
     [{
       $_state.addOperands({input});
       $_state.addTypes(input.getType());
@@ -2554,7 +2554,7 @@ def TFL_ReluOp: TFL_Op<"relu", [
   // non-quantization tablegen patterns. Currently, it is used by the
   // elementwise-move reordering pattern in the optimize_patterns.td
   let builders = [
-    OpBuilderDAG<(ins "Value":$input),
+    OpBuilder<(ins "Value":$input),
     [{
       $_state.addOperands({input});
       $_state.addTypes(input.getType());
@@ -2582,7 +2582,7 @@ def TFL_Relu6Op: TFL_Op<"relu6", [
   // non-quantization tablegen patterns. Currently, it is used by the
   // elementwise-move reordering pattern in the optimize_patterns.td
   let builders = [
-    OpBuilderDAG<(ins "Value":$input),
+    OpBuilder<(ins "Value":$input),
     [{
       $_state.addOperands({input});
       $_state.addTypes(input.getType());
@@ -2610,7 +2610,7 @@ def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [
   // non-quantization tablegen patterns. Currently, it is used by the
   // elementwise-move reordering pattern in the optimize_patterns.td
   let builders = [
-    OpBuilderDAG<(ins "Value":$input),
+    OpBuilder<(ins "Value":$input),
     [{
       $_state.addOperands({input});
       $_state.addTypes(input.getType());
@@ -2795,7 +2795,7 @@ def TFL_SelectOp : TFL_Op<"select", [

   // TODO(jpienaar): autogenerate this.
   let builders = [
-    OpBuilderDAG<(ins "Value":$condition, "Value":$x, "Value":$y),
+    OpBuilder<(ins "Value":$condition, "Value":$x, "Value":$y),
     [{
       auto resultType = x.getType();
       $_state.addOperands({condition, x, y});
@@ -2832,7 +2832,7 @@ def TFL_SelectV2Op : TFL_Op<"select_v2", [
       TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);

   let builders = [
-    OpBuilderDAG<(ins "Value":$cond, "Value":$x, "Value":$y),
+    OpBuilder<(ins "Value":$cond, "Value":$x, "Value":$y),
     [{ BuildSelectV2Op(&$_builder, $_state, cond, x, y); }]>];
@@ -3009,7 +3009,7 @@ def TFL_TanhOp: TFL_Op<"tanh", [
   // non-quantization tablegen patterns. Currently, it is used by the
   // elementwise-move reordering pattern in the optimize_patterns.td
   let builders = [
-    OpBuilderDAG<(ins "Value":$input),
+    OpBuilder<(ins "Value":$input),
     [{
       $_state.addOperands({input});
       $_state.addTypes(input.getType());
@@ -3083,7 +3083,7 @@ def TFL_TopKV2Op: TFL_Op<"topk_v2", [
       TFL_I32Tensor:$indices);

   let builders = [
-    OpBuilderDAG<(ins "Value":$input, "Value":$k),
+    OpBuilder<(ins "Value":$input, "Value":$k),
     [{ BuildTopKOp(&$_builder, $_state, input, k); }]>];

   let hasOptions = 1;
@@ -3152,7 +3152,7 @@ def TFL_UnpackOp : TFL_Op<"unpack", [
   );

   let extraClassDeclaration = [{
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
   }];

   let hasOptions = 1;
@@ -3628,7 +3628,7 @@ def TFL_QConstOp : Op:$output);

   let builders = [
-    OpBuilderDAG<(ins "TypeAttr":$qtype, "Attribute":$value),
+    OpBuilder<(ins "TypeAttr":$qtype, "Attribute":$value),
     [{
       $_state.addAttribute("qtype", qtype);
       $_state.addAttribute("value", value);
@@ -3657,7 +3657,7 @@ def TFL_SparseQConstOp : Op:$output);

   let builders = [
-    OpBuilderDAG<(ins "TypeAttr":$qtype, "Attribute":$value,
+    OpBuilder<(ins "TypeAttr":$qtype, "Attribute":$value,
       "SparsityParameterAttr":$s_param, "Attribute":$compressed_data),
     [{
       $_state.addTypes(qtype.getValue());
@@ -4046,7 +4046,7 @@ def TFL_UnidirectionalSequenceLSTMOp :
     std::vector<int> GetStatefulOperands() { return {18, 19}; }

     // Compatible return types check
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
   }];
 }
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc b/tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc
index 68d1ae2bc83..3cd3b55326e 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc
@@ -179,8 +179,7 @@ LogicalResult Verify(ParallelExecuteOp op) {
 // static
 void ParallelExecuteOp::build(OpBuilder& builder, OperationState& state,
-                              int num_regions,
-                              llvm::ArrayRef<Type> output_types) {
+                              int num_regions, TypeRange output_types) {
   DCHECK_GE(num_regions, 2);
   for (int i = 0; i < num_regions; ++i) {
     Region* region = state.addRegion();
@@ -203,10 +202,7 @@ Operation::result_range ParallelExecuteOp::GetRegionOutputs(
     return_value_offset +=
         GetRegionBlockWithIndex(region_id).getTerminator()->getNumOperands();

-  Operation::result_range region_results(getOperation(),
-                                         /*startIndex=*/return_value_offset,
-                                         /*count=*/num_region_results);
-  return region_results;
+  return getResults().slice(return_value_offset, num_region_results);
 }

 bool ParallelExecuteOp::RegionWrapsSingleOp(unsigned index) {
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td
index 742a9c42736..ee9ec0fe2a1 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td
@@ -76,7 +76,7 @@ This op captures all needed live-in values.
   }];

   let builders = [
-    OpBuilderDAG<(ins "StringAttr":$device, "ArrayRef<Type>":$result_types),
+    OpBuilder<(ins "StringAttr":$device, "TypeRange":$result_types),
     [{
       $_state.addAttribute("device", device);
       $_state.addTypes(result_types);
@@ -98,7 +98,7 @@ The `tf_device.return` operation terminates and returns values from a
   );

   let builders = [
-    OpBuilderDAG<(ins),
+    OpBuilder<(ins),
     [{
       build($_builder, $_state, {});
     }]>
@@ -169,7 +169,7 @@ def TfDevice_ParallelExecuteOp : TfDevice_Op<"parallel_execute",
   }];

   let builders = [
-    OpBuilderDAG<(ins "int":$num_regions, "llvm::ArrayRef<Type>":$output_types)>,
+    OpBuilder<(ins "int":$num_regions, "TypeRange":$output_types)>,
   ];

   let verifier = [{ return Verify(*this); }];
@@ -294,11 +294,11 @@ For example:
   }];

   let builders = [
-    OpBuilderDAG<(ins "int":$n,
+    OpBuilder<(ins "int":$n,
      "const llvm::SmallDenseMap<StringRef, llvm::SmallVector<StringRef, 4>>&":$devices,
      "llvm::ArrayRef<std::pair<ValueRange, Type>>":$replicated_inputs,
      "ValueRange":$packed_inputs, "TypeRange":$replica_output_types)>,
-    OpBuilderDAG<(ins "int":$n, "llvm::Optional<DictionaryAttr>":$devices,
+    OpBuilder<(ins "int":$n, "llvm::Optional<DictionaryAttr>":$devices,
      "llvm::ArrayRef<std::pair<ValueRange, Type>>":$replicated_inputs,
      "ValueRange":$packed_inputs, "TypeRange":$replica_output_types)>,
   ];
@@ -330,7 +330,7 @@ used to form the cluster.
   let regions = (region SizedRegion<1>:$body);

   let builders = [
-    OpBuilderDAG<(ins "ArrayRef<Type>":$resultTypes),
+    OpBuilder<(ins "TypeRange":$resultTypes),
     [{
       build($_builder, $_state, resultTypes, mlir::StringAttr {});
     }]>
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_executor_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_executor_ops.td
index a6d89f06473..fe79b76c8b1 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_executor_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_executor_ops.td
@@ -144,7 +144,7 @@ def TfExecutor_FetchOp : TfExecutor_Op<"fetch",
   );

   let builders = [
-    OpBuilderDAG<(ins),
+    OpBuilder<(ins),
     [{
       build($_builder, $_state, {});
     }]>
@@ -229,7 +229,7 @@ def TfExecutor_YieldOp : TfExecutor_Op<"yield",
   );

   let builders = [
-    OpBuilderDAG<(ins),
+    OpBuilder<(ins),
     [{
       build($_builder, $_state, {});
     }]>
@@ -460,7 +460,7 @@ def TfExecutor_NextIterationSourceOp : TfExecutor_Op<"NextIteration.Source",
   );

   let builders = [
-    OpBuilderDAG<(ins "Type":$result_type,
+    OpBuilder<(ins "Type":$result_type,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
     [{
       Type token_type = TokenType::get($_builder.getContext());
@@ -530,7 +530,7 @@ def TfExecutor_NextIterationSinkOp : TfExecutor_Op<"NextIteration.Sink",
   );

   let builders = [
-    OpBuilderDAG<(ins "Value":$token, "ArrayRef<Value>":$operands,
+    OpBuilder<(ins "Value":$token, "ArrayRef<Value>":$operands,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
     [{
       assert(operands.size() >= 1 && "tf_executor.NextIteration.Sink builder "
@@ -618,7 +618,7 @@ def TfExecutor_ControlTriggerOp : TfExecutor_Op<"ControlTrigger",
   let hasCanonicalizer = 1;

   let builders = [
-    OpBuilderDAG<(ins "ArrayRef<Value>":$operands,
+    OpBuilder<(ins "ArrayRef<Value>":$operands,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
     [{
       assert(operands.size() >= 1 && "tf_executor.ControlTrigger builder "
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
index a7b28a83a86..5e73c6471df 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
@@ -2227,7 +2227,7 @@ of `data_format`, see below for details.}]>:$input,
     StringRef GetOptimalLayout(const RuntimeDevices& devices);
     LogicalResult UpdateDataFormat(StringRef data_format);
     // InferTypeOpInterface:
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
       return ArraysAreCastCompatible(l, r);
     }
   }];
@@ -2348,7 +2348,7 @@ out_channels]`. `in_channels` must match between `input` and `filter`.}]>:$filte
   let extraClassDeclaration = [{
     // InferTypeOpInterface:
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
       return ArraysAreCastCompatible(l, r);
     }
   }];
@@ -3923,7 +3923,7 @@ tf.math.equal(x, y) ==> array([True, True])
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$x, "Value":$y,
+    OpBuilder<(ins "Value":$x, "Value":$y,
       "BoolAttr":$incompatible_shape_error)>
   ];
@@ -4073,7 +4073,7 @@ dimension of size 1 added.}]>:$output
   TF_DerivedOperandTypeAttr Tdim = TF_DerivedOperandTypeAttr<1>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$condition, "Value":$dim)>
+    OpBuilder<(ins "Value":$condition, "Value":$dim)>
   ];
 }
@@ -4460,7 +4460,7 @@ Equivalent to np.full
   let hasFolder = 1;

   let builders = [
-    OpBuilderDAG<(ins "Value":$dims, "Value":$value)>
+    OpBuilder<(ins "Value":$dims, "Value":$value)>
   ];
 }
@@ -8042,7 +8042,7 @@ retained with length 1.
   TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$input, "Value":$reduction_indices,
+    OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
       "BoolAttr":$keep_dims)>
   ];
 }
@@ -9060,7 +9060,7 @@ def TF_NotEqualOp : TF_Op<"NotEqual", [Commutative, NoSideEffect]> {
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$x, "Value":$y,
+    OpBuilder<(ins "Value":$x, "Value":$y,
       "BoolAttr":$incompatible_shape_error)>
   ];
@@ -9179,7 +9179,7 @@ output =
   TF_DerivedOperandTypeAttr TI = TF_DerivedOperandTypeAttr<0>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$indices, "Value":$depth, "Value":$on_value,
+    OpBuilder<(ins "Value":$indices, "Value":$depth, "Value":$on_value,
       "Value":$off_value, "IntegerAttr":$axis)>
   ];
@@ -10394,7 +10394,7 @@ tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
   TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<0>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$start, "Value":$limit, "Value":$delta)>
+    OpBuilder<(ins "Value":$start, "Value":$limit, "Value":$delta)>
   ];
 }
@@ -10447,7 +10447,7 @@ of the tensor. Rank is also known as "order", "degree", or "ndims."
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$input)>
+    OpBuilder<(ins "Value":$input)>
   ];

   let hasFolder = 1;
@@ -10796,7 +10796,7 @@ reshape(t, []) ==> 7
   TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<1>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$tensor, "Value":$shape)>
+    OpBuilder<(ins "Value":$tensor, "Value":$shape)>
   ];

   let verifier = [{
@@ -13298,7 +13298,7 @@ def TF_SelectV2Op : TF_Op<"SelectV2", [NoSideEffect, ResultsBroadcastableShape]>
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$condition, "Value":$e, "Value":$t)>
+    OpBuilder<(ins "Value":$condition, "Value":$e, "Value":$t)>
   ];
 }
@@ -13461,7 +13461,7 @@ shape(t) ==> [2, 2, 3]
   }];

   let builders = [
-    OpBuilderDAG<(ins "Value":$input, "BoolAttr":$use32Bit)>
+    OpBuilder<(ins "Value":$input, "BoolAttr":$use32Bit)>
   ];

   let hasFolder = 1;
@@ -14137,7 +14137,7 @@ regular convolution.}]>:$paddings
   let verifier = [{ return Verify(*this); }];

   let extraClassDeclaration = [{
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
       return ArraysAreCastCompatible(l, r);
     }
   }];
@@ -15608,7 +15608,7 @@ retained with length 1.
   TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$input, "Value":$reduction_indices,
+    OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
       "BoolAttr":$keep_dims)>
   ];
@@ -16801,7 +16801,7 @@ to the indices.}]>:$output
   let verifier = [{ return Verify(*this); }];

   let builders = [
-    OpBuilderDAG<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
+    OpBuilder<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
     [{build($_builder, $_state, tensor.getType(), tensor, indices, updates);}]>
   ];
 }
@@ -16999,7 +16999,7 @@ The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
   TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$x, "Value":$perm)>
+    OpBuilder<(ins "Value":$x, "Value":$perm)>
   ];

   let verifier = [{
@@ -17670,7 +17670,7 @@ for binary operators.
   let extraClassDeclaration = [{
     // InferTypeOpInterface:
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
       return ArraysAreCastCompatible(l, r);
     }
   }];
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
index 9fae3ceb710..96a108b5689 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
@@ -579,8 +579,8 @@ def TF_IntTypeAttr : TypeAttrBase<"IntegerType", "integer type"> {
 // Mixin class defining a builder for binary ops supporting broadcast
 // behavior. The result type has the same element type as both operands.
 class WithBroadcastableBinOpBuilder {
-  list<OpBuilderDAG> builders = [
-    OpBuilderDAG<(ins "Value":$x, "Value":$y),
+  list<OpBuilder> builders = [
+    OpBuilder<(ins "Value":$x, "Value":$y),
     [{
       auto resultType =
         OpTrait::util::getBroadcastedType(x.getType(), y.getType());
@@ -593,8 +593,8 @@ class WithBroadcastableBinOpBuilder {
 // Mixin class defining a builder for comparison ops supporting broadcast
 // behavior. The result type has bool element type.
 class WithBroadcastableCmpOpBuilder {
-  list<OpBuilderDAG> builders = [
-    OpBuilderDAG<(ins "Value":$x, "Value":$y),
+  list<OpBuilder> builders = [
+    OpBuilder<(ins "Value":$x, "Value":$y),
     [{
       Type resultType;
       if (x.getType().isa() ||
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
index 4d48d61503a..ef96e43c61f 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
@@ -203,14 +203,14 @@ def TF_ConstOp : TF_Op<"Const", [ConstantLike, NoSideEffect,
   TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;

   let builders = [
-    OpBuilderDAG<(ins "Attribute":$value)>,
-    OpBuilderDAG<(ins "Type":$type, "Attribute":$value)>,
+    OpBuilder<(ins "Attribute":$value)>,
+    OpBuilder<(ins "Type":$type, "Attribute":$value)>,
   ];

   let hasFolder = 1;

   let extraClassDeclaration = [{
-    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
+    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
       return BroadcastCompatible(l, r);
     }
   }];
@@ -410,7 +410,7 @@ else_branch: A region that computes the outputs of the op if cond = false.
   }];

   let builders = [
-    OpBuilderDAG<(ins "TypeRange":$resultTypes, "ValueRange":$operands,
+    OpBuilder<(ins "TypeRange":$resultTypes, "ValueRange":$operands,
       "llvm::ArrayRef<::mlir::NamedAttribute>":$attributes, "unsigned":$numRegions),
     [{
@@ -1213,7 +1213,7 @@ as true/false for a branch condition.
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

   let builders = [
-    OpBuilderDAG<(ins "Value":$value),
+    OpBuilder<(ins "Value":$value),
     [{
       build($_builder, $_state, RankedTensorType::get({}, $_builder.getI1Type()),
             value);
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_types.cc b/tensorflow/compiler/mlir/tensorflow/ir/tf_types.cc
index 0b21b86029f..cc7ce6b39dc 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_types.cc
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_types.cc
@@ -198,7 +198,7 @@ ArrayRef TensorFlowTypeWithSubtype::GetSubtypes() {

 // TODO(jpienaar): BroadcastCompatible and HasCompatibleElementTypes have
 // similar structure that could be extracted into helper method.
-bool BroadcastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
+bool BroadcastCompatible(TypeRange lhs, TypeRange rhs) {
   if (lhs.size() != rhs.size()) return false;
   for (auto types : llvm::zip(lhs, rhs)) {
     // Drop ref types because they don't affect broadcast compatibility. E.g.,
@@ -349,7 +349,7 @@ bool HasCompatibleElementTypes(Type lhs, Type rhs,
   return GetCastCompatibleType(lhs, rhs, may_ignore_ref_type_lhs) != nullptr;
 }

-bool AreCastCompatible(ArrayRef<Type> types) {
+bool AreCastCompatible(TypeRange types) {
   Type common = types.front();
   for (auto type : types.drop_front()) {
     Type refined_type =
@@ -360,7 +360,7 @@ bool AreCastCompatible(ArrayRef<Type> types) {
   return true;
 }

-bool ArraysAreCastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
+bool ArraysAreCastCompatible(TypeRange lhs, TypeRange rhs) {
   if (lhs.size() != rhs.size()) return false;
   for (auto pair : llvm::zip(lhs, rhs)) {
     auto lhs_i = std::get<0>(pair);
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_types.h b/tensorflow/compiler/mlir/tensorflow/ir/tf_types.h
index a9d58c7270a..57ff9dce272 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_types.h
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_types.h
@@ -287,7 +287,7 @@ mlir::Type GetCastCompatibleType(mlir::Type a, mlir::Type b,
                                  bool may_ignore_ref_type_a);

 // Returns whether two arrays of Type are broadcast compatible.
-bool BroadcastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs);
+bool BroadcastCompatible(TypeRange lhs, TypeRange rhs);

 // Returns whether the two elemental types are compatible. Shapes are compatible
 // if:
@@ -305,11 +305,11 @@ bool HasCompatibleElementTypes(Type lhs, Type rhs,
 // another. In other words, a single run-time value is legal for both the types.
 // For example, tensor<*xf32>, tensor<f32> and tensor<3xf32> are cast
 // compatible.
-bool AreCastCompatible(ArrayRef<Type> types);
+bool AreCastCompatible(TypeRange types);

 // Returns true if corresponding elements of lhs and rhs AreCastCompatible and
 // lhs and rhs are the same length.
-bool ArraysAreCastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs);
+bool ArraysAreCastCompatible(TypeRange lhs, TypeRange rhs);

 // If `ty` is a tensor type and its element type has subtypes, then returns a
 // new type of same shape but dropped subtypes for the element type.
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc b/tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc
index 3d694b49757..845a3c156af 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc
@@ -80,7 +80,7 @@ LogicalResult ConstantFoldFallbackHook(
   // If any of the result types are variants, don't try to constant fold them.
   // This creates opaque variant constants which lose information and would
   // require "raising" later.
-  for (auto& type : inst->getResultTypes()) {
+  for (auto type : inst->getResultTypes()) {
     if (auto tensor_type = type.dyn_cast<TensorType>()) {
       if (tensor_type.getElementType().isa<VariantType>()) {
         return failure();
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc b/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
index fc53d17a93a..601f7f74fe1 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
@@ -890,7 +890,7 @@ bool ShapeInference::RefineTypeForPassThroughOperands(Operation* op,
                                                       OperandRange operands,
                                                       ResultRange results) {
   bool changed = false;
-  for (auto entry : zip(operands, results)) {
+  for (auto entry : llvm::zip(operands, results)) {
     Type operand_type = std::get<0>(entry).getType();
     Value result = std::get<1>(entry);
     TensorType result_type = result.getType().cast<TensorType>();
@@ -925,7 +925,7 @@ bool ShapeInference::RefineShapeForPassThroughOps(Operation* op) {
   };

   bool changed = false;
-  for (auto entry : zip(op->getOperands(), op->getResults())) {
+  for (auto entry : llvm::zip(op->getOperands(), op->getResults())) {
     TensorType operand_type = std::get<0>(entry).getType().cast<TensorType>();
     Value result = std::get<1>(entry);
     TensorType result_type = result.getType().cast<TensorType>();
diff --git a/tensorflow/compiler/mlir/tfr/ir/tfr_ops.td b/tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
index 6971edc298f..3bd9efac2e2 100644
--- a/tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
+++ b/tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
@@ -267,7 +267,7 @@ def TFR_ConstOp : TFR_Op<"constant", [ConstantLike, NoSideEffect]> {
   let hasFolder = 1;

   let builders = [
-    OpBuilderDAG<(ins "Attribute":$value),
+    OpBuilder<(ins "Attribute":$value),
     [{
       auto* ctx = value.getContext();
       $_state.addAttribute("value", value);
@@ -425,7 +425,7 @@ def TFR_TFRFuncOp : TFR_Op<"func", [HasParent<"ModuleOp">,
   let skipDefaultBuilders = 1;

   let builders = [
-    OpBuilderDAG<(ins "StringRef":$name, "FunctionType":$type,
+    OpBuilder<(ins "StringRef":$name, "FunctionType":$type,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
   ];
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/BUILD b/tensorflow/compiler/mlir/tools/kernel_gen/BUILD
index bd5ca749d5d..b7cefd456fd 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/BUILD
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/BUILD
@@ -68,6 +68,7 @@ cc_library(
         "@llvm-project//mlir:GPUTransforms",
         "@llvm-project//mlir:IR",
         "@llvm-project//mlir:LLVMDialect",
+        "@llvm-project//mlir:LLVMToLLVMIRTranslation",
         "@llvm-project//mlir:LinalgOps",
         "@llvm-project//mlir:LinalgTransforms",
         "@llvm-project//mlir:NVVMDialect",
@@ -86,7 +87,7 @@ cc_library(
         "@llvm-project//mlir:StandardOps",
         "@llvm-project//mlir:StandardOpsTransforms",
         "@llvm-project//mlir:Support",
-        "@llvm-project//mlir:TargetLLVMIR",
+        "@llvm-project//mlir:ToLLVMIRTranslation",
         "@llvm-project//mlir:Transforms",
     ],
 )
@@ -117,8 +118,8 @@ tf_cc_binary(
         "@llvm-project//llvm:X86Disassembler",  # fixdeps: keep
         "@llvm-project//mlir:ExecutionEngineUtils",
         "@llvm-project//mlir:Pass",
-        "@llvm-project//mlir:TargetLLVMIR",
-        "@llvm-project//mlir:TargetLLVMIRModuleTranslation",
+        "@llvm-project//mlir:LLVMToLLVMIRTranslation",
+        "@llvm-project//mlir:ToLLVMIRTranslation",
     ] + if_llvm_system_z_available([
         "@llvm-project//llvm:SystemZCodeGen",  # fixdeps: keep
     ]) + if_llvm_aarch64_available([
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.td b/tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.td
index 67a4c753329..bef37bd65a9 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.td
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.td
@@ -80,12 +80,12 @@ def TFFramework_TFAllocOp : TFFramework_Op<"alloc",
   let results = (outs Res]>:$result);

   let builders = [
-    OpBuilderDAG<(ins "MemRefType":$memref_type, "Value":$ctx),
+    OpBuilder<(ins "MemRefType":$memref_type, "Value":$ctx),
     [{
       $_state.addOperands(ctx);
       $_state.types.push_back(memref_type);
     }]>,
-    OpBuilderDAG<(ins "MemRefType":$memref_type, "Value":$ctx,
+    OpBuilder<(ins "MemRefType":$memref_type, "Value":$ctx,
       "ValueRange":$dyn_sizes),
     [{
       build($_builder, $_state, memref_type, ctx);
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/kernel_creator.cc b/tensorflow/compiler/mlir/tools/kernel_gen/kernel_creator.cc
index aa9a036f1e7..1900685b33c 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/kernel_creator.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/kernel_creator.cc
@@ -31,8 +31,6 @@ limitations under the License.
 #include "mlir/Dialect/GPU/ParallelLoopMapper.h"  // from @llvm-project
 #include "mlir/Dialect/GPU/Passes.h"  // from @llvm-project
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"  // from @llvm-project
-#include "mlir/Dialect/LLVMIR/NVVMDialect.h"  // from @llvm-project
-#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"  // from @llvm-project
 #include "mlir/Dialect/Linalg/Passes.h"  // from @llvm-project
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"  // from @llvm-project
 #include "mlir/Dialect/SCF/Passes.h"  // from @llvm-project
@@ -46,7 +44,7 @@ limitations under the License.
#include "mlir/Parser.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project #include "mlir/Pass/PassManager.h" // from @llvm-project -#include "mlir/Target/LLVMIR.h" // from @llvm-project +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" // from @llvm-project #include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h" // from @llvm-project #include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h" // from @llvm-project #include "mlir/Transforms/Bufferize.h" // from @llvm-project @@ -435,12 +433,9 @@ StatusOr GenerateKernelForTfCode( mlir::DialectRegistry registry; mlir::RegisterAllTensorFlowDialects(registry); registry.insert(); - registry.insert(); - registry.addDialectInterface(); - registry.addDialectInterface(); mlir::registerLLVMDialectTranslation(registry); + mlir::registerNVVMDialectTranslation(registry); + mlir::registerROCDLDialectTranslation(registry); context.appendDialectRegistry(registry); mlir::OwningModuleRef module = mlir::parseSourceString(tf_code, &context); diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/tf_to_kernel.cc b/tensorflow/compiler/mlir/tools/kernel_gen/tf_to_kernel.cc index 10751f5421b..1a7f2bbf728 100644 --- a/tensorflow/compiler/mlir/tools/kernel_gen/tf_to_kernel.cc +++ b/tensorflow/compiler/mlir/tools/kernel_gen/tf_to_kernel.cc @@ -34,7 +34,7 @@ #include "llvm/Target/TargetMachine.h" #include "mlir/ExecutionEngine/OptUtils.h" // from @llvm-project #include "mlir/Pass/PassManager.h" // from @llvm-project -#include "mlir/Target/LLVMIR.h" // from @llvm-project +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" // from @llvm-project #include "mlir/Target/LLVMIR/Export.h" // from @llvm-project #include "tensorflow/compiler/mlir/init_mlir.h" #include "tensorflow/compiler/mlir/tools/kernel_gen/kernel_creator.h" diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/BUILD b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/BUILD index a6341f00736..c6089af2de8 100644 --- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/BUILD +++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/BUILD @@ -126,8 +126,10 @@ cc_library( "@llvm-project//mlir:StandardOps", "@llvm-project//mlir:StandardOpsTransforms", "@llvm-project//mlir:Support", - "@llvm-project//mlir:TargetLLVMIR", - "@llvm-project//mlir:TargetLLVMIRModuleTranslation", + "@llvm-project//mlir:LLVMToLLVMIRTranslation", + "@llvm-project//mlir:NVVMToLLVMIRTranslation", + "@llvm-project//mlir:ROCDLToLLVMIRTranslation", + "@llvm-project//mlir:ToLLVMIRTranslation", "@llvm-project//mlir:TensorDialect", "@llvm-project//mlir:TensorTransforms", "@llvm-project//mlir:Transforms", diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc index 184f34f53c3..7e0ea0955dd 100644 --- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc +++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc @@ -15,7 +15,6 @@ limitations under the License. 
#include "llvm/Transforms/Utils/Cloning.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" // from @llvm-project -#include "mlir/Target/LLVMIR.h" // from @llvm-project #include "mlir/Target/LLVMIR/Export.h" // from @llvm-project #include "mlir/Transforms/DialectConversion.h" // from @llvm-project #include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.h" diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD index 60b7a66bf19..d1e2acb64db 100644 --- a/tensorflow/compiler/xla/service/cpu/BUILD +++ b/tensorflow/compiler/xla/service/cpu/BUILD @@ -1162,10 +1162,10 @@ cc_library( "@llvm-project//llvm:Linker", "@llvm-project//mlir:CFGTransforms", "@llvm-project//mlir:IR", + "@llvm-project//mlir:LLVMToLLVMIRTranslation", "@llvm-project//mlir:LinalgTransforms", "@llvm-project//mlir:Pass", - "@llvm-project//mlir:TargetLLVMIR", - "@llvm-project//mlir:TargetLLVMIRModuleTranslation", + "@llvm-project//mlir:ToLLVMIRTranslation", "@llvm-project//mlir:Transforms", "@llvm-project//mlir:VectorToLLVM", ], diff --git a/tensorflow/compiler/xla/service/cpu/mlir_emitter.cc b/tensorflow/compiler/xla/service/cpu/mlir_emitter.cc index 43a48a180ad..ddb5e27a8a9 100644 --- a/tensorflow/compiler/xla/service/cpu/mlir_emitter.cc +++ b/tensorflow/compiler/xla/service/cpu/mlir_emitter.cc @@ -23,7 +23,6 @@ limitations under the License. #include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project #include "mlir/Pass/PassManager.h" // from @llvm-project -#include "mlir/Target/LLVMIR.h" // from @llvm-project #include "mlir/Target/LLVMIR/Export.h" // from @llvm-project #include "mlir/Transforms/Passes.h" // from @llvm-project #include "tensorflow/compiler/mlir/xla/hlo_utils.h" diff --git a/third_party/llvm/workspace.bzl b/third_party/llvm/workspace.bzl index 4b58aebaabe..8dd833c5d0d 100644 --- a/third_party/llvm/workspace.bzl +++ b/third_party/llvm/workspace.bzl @@ -4,8 +4,8 @@ load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" - LLVM_COMMIT = "5d7e0a23c6f2051d4caf8f8c8821790c40b584be" - LLVM_SHA256 = "dc100f5eddb35b694454ecc8cbf654b238f2e2684784d33d40536500297e9212" + LLVM_COMMIT = "c907681b077c6acf95c3ddca6611639c29559e40" + LLVM_SHA256 = "2929f1730e9ba4fea489400d81e5da21a4e79c94fea9dd3fd12e042350ff19ea" tf_http_archive( name = name, diff --git a/third_party/mlir/BUILD b/third_party/mlir/BUILD index 529c81187c1..a096baac162 100644 --- a/third_party/mlir/BUILD +++ b/third_party/mlir/BUILD @@ -201,6 +201,7 @@ cc_library( "include/mlir-c/BuiltinTypes.h", "include/mlir-c/Diagnostics.h", "include/mlir-c/Dialect/Standard.h", + "include/mlir-c/ExecutionEngine.h", "include/mlir-c/IR.h", "include/mlir-c/IntegerSet.h", "include/mlir-c/Pass.h", @@ -219,6 +220,7 @@ cc_library( ], includes = ["include"], deps = [ + ":ConversionPassIncGen", ":IR", ":InferTypeOpInterface", ":Parser", @@ -229,6 +231,35 @@ cc_library( ], ) +cc_library( + name = "CAPIConversion", + srcs = ["lib/CAPI/Conversion/Passes.cpp"], + hdrs = ["include/mlir-c/Conversion.h"], + includes = ["include"], + deps = [ + ":CAPIIR", + ":ConversionPassIncGen", + ":ConversionPasses", + ":Pass", + ], +) + +cc_library( + name = "CAPIExecutionEngine", + srcs = ["lib/CAPI/ExecutionEngine/ExecutionEngine.cpp"], + hdrs = [ + "include/mlir-c/ExecutionEngine.h", + "include/mlir/CAPI/ExecutionEngine.h", + ], + includes = ["include"], + deps = [ + ":CAPIIR", + ":ExecutionEngine", + ":LLVMToLLVMIRTranslation", + "@llvm-project//llvm:Support", + ], 
+)
+
 cc_library(
     name = "CAPITransforms",
     srcs = ["lib/CAPI/Transforms/Passes.cpp"],
@@ -259,6 +290,7 @@ cc_library(
     deps = [
         ":AllPassesAndDialectsNoRegistration",
         ":CAPIIR",
+        ":LLVMToLLVMIRTranslation",
     ],
 )
@@ -1130,6 +1162,14 @@ gentbl(
         (
             "-gen-pass-decls -name Conversion",
             "include/mlir/Conversion/Passes.h.inc",
         ),
+        (
+            "-gen-pass-capi-header --prefix Conversion",
+            "include/mlir/Conversion/Passes.capi.h.inc",
+        ),
+        (
+            "-gen-pass-capi-impl --prefix Conversion",
+            "include/mlir/Conversion/Passes.capi.cpp.inc",
+        ),
     ],
     tblgen = ":mlir-tblgen",
     td_file = "include/mlir/Conversion/Passes.td",
@@ -3535,7 +3575,7 @@ cc_library(
 )

 cc_library(
-    name = "TargetLLVMIRModuleTranslation",
+    name = "ToLLVMIRTranslation",
     srcs = [
        "lib/Target/LLVMIR/DebugTranslation.cpp",
         "lib/Target/LLVMIR/DebugTranslation.h",
@@ -3573,7 +3613,7 @@ cc_library(
         ":LLVMAVX512",
         ":LLVMAVX512ConversionIncGen",
         ":Support",
-        ":TargetLLVMIRModuleTranslation",
+        ":ToLLVMIRTranslation",
         "@llvm-project//llvm:Core",
         "@llvm-project//llvm:Support",
     ],
@@ -3590,7 +3630,7 @@ cc_library(
         ":LLVMArmNeonConversionIncGen",
         ":LLVMArmNeonIncGen",
         ":Support",
-        ":TargetLLVMIRModuleTranslation",
+        ":ToLLVMIRTranslation",
         "@llvm-project//llvm:Core",
         "@llvm-project//llvm:Support",
     ],
@@ -3606,7 +3646,7 @@ cc_library(
         ":LLVMArmSVE",
         ":LLVMArmSVEConversionIncGen",
         ":Support",
-        ":TargetLLVMIRModuleTranslation",
+        ":ToLLVMIRTranslation",
         "@llvm-project//llvm:Core",
         "@llvm-project//llvm:Support",
     ],
@@ -3622,7 +3662,7 @@ cc_library(
         ":NVVMConversionIncGen",
         ":NVVMDialect",
         ":Support",
-        ":TargetLLVMIRModuleTranslation",
+        ":ToLLVMIRTranslation",
         "@llvm-project//llvm:Core",
         "@llvm-project//llvm:Support",
     ],
@@ -3638,7 +3678,7 @@ cc_library(
         ":ROCDLConversionIncGen",
         ":ROCDLDialect",
         ":Support",
-        ":TargetLLVMIRModuleTranslation",
+        ":ToLLVMIRTranslation",
         "@llvm-project//llvm:Core",
         "@llvm-project//llvm:Support",
     ],
@@ -3654,7 +3694,7 @@ cc_library(
         ":LLVMConversionIncGen",
         ":LLVMDialect",
         ":Support",
-        ":TargetLLVMIRModuleTranslation",
+        ":ToLLVMIRTranslation",
         "@llvm-project//llvm:Core",
         "@llvm-project//llvm:Support",
     ],
@@ -3669,7 +3709,7 @@ cc_library(
         ":IR",
         ":OpenMPDialect",
         ":Support",
-        ":TargetLLVMIRModuleTranslation",
+        ":ToLLVMIRTranslation",
         "@llvm-project//llvm:Core",
         "@llvm-project//llvm:FrontendOpenMP",
         "@llvm-project//llvm:Support",
     ],
 )

 cc_library(
-    name = "TargetLLVMIR",
+    name = "AllToLLVMIRTranslations",
+    hdrs = ["include/mlir/Target/LLVMIR/Dialect/All.h"],
+    includes = ["include"],
+    deps = [
+        ":LLVMAVX512ToLLVMIRTranslation",
+        ":LLVMArmNeonToLLVMIRTranslation",
+        ":LLVMArmSVEToLLVMIRTranslation",
+        ":LLVMToLLVMIRTranslation",
+        ":NVVMToLLVMIRTranslation",
+        ":OpenMPToLLVMIRTranslation",
+        ":ROCDLToLLVMIRTranslation",
+    ],
+)
+
+cc_library(
+    name = "ToLLVMIRTranslationRegistration",
+    srcs = ["lib/Target/LLVMIR/ConvertToLLVMIR.cpp"],
+    includes = ["include"],
+    deps = [
+        ":AllToLLVMIRTranslations",
+        ":IR",
+        ":ToLLVMIRTranslation",
+        ":Translation",
+        "@llvm-project//llvm:Core",
+        "@llvm-project//llvm:Support",
+    ],
+)
+
+cc_library(
+    name = "FromLLVMIRTranslation",
     srcs = [
         "lib/Target/LLVMIR/ConvertFromLLVMIR.cpp",
-        "lib/Target/LLVMIR/ConvertToLLVMIR.cpp",
     ],
     hdrs = ["include/mlir/Target/LLVMIR.h"],
     includes = ["include"],
     deps = [
         ":IR",
-        ":LLVMAVX512",
-        ":LLVMAVX512ToLLVMIRTranslation",
-        ":LLVMArmNeon",
-        ":LLVMArmNeonToLLVMIRTranslation",
-        ":LLVMArmSVE",
-        ":LLVMArmSVEToLLVMIRTranslation",
         ":LLVMConversionIncGen",
         ":LLVMDialect",
-        ":LLVMToLLVMIRTranslation",
":NVVMDialect", - ":NVVMToLLVMIRTranslation", - ":OpenMPDialect", - ":OpenMPToLLVMIRTranslation", - ":ROCDLDialect", - ":ROCDLToLLVMIRTranslation", ":Support", - ":TargetLLVMIRModuleTranslation", ":Translation", "@llvm-project//llvm:Core", "@llvm-project//llvm:IRReader", @@ -3722,11 +3776,11 @@ cc_library( ], includes = ["include"], deps = [ + ":AllToLLVMIRTranslations", ":IR", ":LLVMDialect", ":Support", - ":TargetLLVMIR", - ":TargetLLVMIRModuleTranslation", + ":ToLLVMIRTranslation", ":Translation", "@llvm-project//llvm:BitReader", "@llvm-project//llvm:BitWriter", @@ -3789,14 +3843,9 @@ cc_library( name = "AllTranslations", hdrs = ["include/mlir/InitAllTranslations.h"], deps = [ - ":LLVMAVX512ToLLVMIRTranslation", - ":LLVMArmNeonToLLVMIRTranslation", - ":LLVMArmSVEToLLVMIRTranslation", - ":LLVMToLLVMIRTranslation", - ":NVVMToLLVMIRTranslation", - ":ROCDLToLLVMIRTranslation", + ":FromLLVMIRTranslation", ":SPIRVTranslateRegistration", - ":TargetLLVMIR", + ":ToLLVMIRTranslationRegistration", ], ) @@ -4011,13 +4060,14 @@ cc_binary( srcs = ["tools/mlir-cpu-runner/mlir-cpu-runner.cpp"], linkopts = ["-ldl"], deps = [ + ":AllToLLVMIRTranslations", ":ExecutionEngineUtils", ":IR", ":LLVMDialect", + ":LLVMToLLVMIRTranslation", ":MlirJitRunner", - ":OpenMPDialect", ":OpenMPToLLVMIRTranslation", - ":TargetLLVMIR", + ":ToLLVMIRTranslation", "@llvm-project//llvm:AsmParser", "@llvm-project//llvm:Support", "@llvm-project//llvm:X86AsmParser", @@ -4094,13 +4144,14 @@ cc_binary( ":GPUTransforms", ":IR", ":LLVMDialect", + ":LLVMToLLVMIRTranslation", ":MlirJitRunner", ":NVVMDialect", ":NVVMToLLVMIRTranslation", ":Pass", ":StandardOps", - ":TargetLLVMIR", - ":TargetLLVMIRModuleTranslation", + ":StandardToLLVM", + ":ToLLVMIRTranslation", ":Transforms", "//devtools/build/runtime:get_runfiles_dir", "//third_party/gpus/cuda:cuda_headers", @@ -4120,6 +4171,7 @@ cc_binary( ":GPUToVulkanTransforms", ":GPUTransforms", ":LLVMDialect", + ":LLVMToLLVMIRTranslation", ":MlirJitRunner", ":Pass", ":SPIRVDialect", @@ -4127,8 +4179,7 @@ cc_binary( ":StandardOps", ":StandardToLLVM", ":StandardToSPIRV", - ":TargetLLVMIR", - ":TargetLLVMIRModuleTranslation", + ":ToLLVMIRTranslation", "@llvm-project//llvm:Support", ], ) @@ -4143,6 +4194,7 @@ cc_binary( ":GPUTransforms", ":IR", ":LLVMDialect", + ":LLVMToLLVMIRTranslation", ":MlirJitRunner", ":Pass", ":SPIRVConversion", @@ -4151,8 +4203,7 @@ cc_binary( ":SPIRVTransforms", ":StandardOps", ":StandardToLLVM", - ":TargetLLVMIR", - ":TargetLLVMIRModuleTranslation", + ":ToLLVMIRTranslation", "@llvm-project//llvm:Core", "@llvm-project//llvm:Linker", "@llvm-project//llvm:Support", @@ -4940,7 +4991,7 @@ cc_library( ":StandardOps", ":StandardToLLVM", ":Support", - ":TargetLLVMIRModuleTranslation", + ":ToLLVMIRTranslation", ":Transforms", ":VectorOps", "@llvm-project//llvm:Core", diff --git a/third_party/mlir/test.BUILD b/third_party/mlir/test.BUILD index ffc5aad0c7b..51803de6973 100644 --- a/third_party/mlir/test.BUILD +++ b/third_party/mlir/test.BUILD @@ -124,6 +124,27 @@ gentbl( test = True, ) +gentbl( + name = "TestAttrDefsIncGen", + strip_include_prefix = "lib/Dialect/Test", + tbl_outs = [ + ( + "-gen-attrdef-decls", + "lib/Dialect/Test/TestAttrDefs.h.inc", + ), + ( + "-gen-attrdef-defs", + "lib/Dialect/Test/TestAttrDefs.cpp.inc", + ), + ], + tblgen = "@llvm-project//mlir:mlir-tblgen", + td_file = "lib/Dialect/Test/TestAttrDefs.td", + td_srcs = [ + ":TestOpTdFiles", + ], + test = True, +) + gentbl( name = "TestTypeDefsIncGen", strip_include_prefix = "lib/Dialect/Test", @@ -148,6 +169,7 @@ 
@@ -148,6 +169,7 @@ gentbl(
 cc_library(
     name = "TestDialect",
     srcs = [
+        "lib/Dialect/Test/TestAttributes.cpp",
         "lib/Dialect/Test/TestDialect.cpp",
         "lib/Dialect/Test/TestInterfaces.cpp",
         "lib/Dialect/Test/TestPatterns.cpp",
@@ -155,6 +177,7 @@ cc_library(
         "lib/Dialect/Test/TestTypes.cpp",
     ],
     hdrs = [
+        "lib/Dialect/Test/TestAttributes.h",
         "lib/Dialect/Test/TestDialect.h",
         "lib/Dialect/Test/TestInterfaces.h",
         "lib/Dialect/Test/TestTypes.h",
@@ -163,6 +186,7 @@ cc_library(
         "lib/Dialect/Test",
     ],
     deps = [
+        ":TestAttrDefsIncGen",
         ":TestInterfacesIncGen",
         ":TestOpsIncGen",
         ":TestTypeDefsIncGen",
@@ -262,6 +286,7 @@ cc_library(
         "@llvm-project//mlir:GPUTransforms",
         "@llvm-project//mlir:IR",
         "@llvm-project//mlir:LLVMDialect",
+        "@llvm-project//mlir:LLVMToLLVMIRTranslation",
         "@llvm-project//mlir:LLVMTransforms",
         "@llvm-project//mlir:LinalgOps",
         "@llvm-project//mlir:LinalgTransforms",
@@ -277,8 +302,7 @@ cc_library(
         "@llvm-project//mlir:StandardOps",
         "@llvm-project//mlir:StandardOpsTransforms",
         "@llvm-project//mlir:Support",
-        "@llvm-project//mlir:TargetLLVMIR",
-        "@llvm-project//mlir:TargetLLVMIRModuleTranslation",
+        "@llvm-project//mlir:ToLLVMIRTranslation",
         "@llvm-project//mlir:TransformUtils",
         "@llvm-project//mlir:Transforms",
         "@llvm-project//mlir:VectorOps",