Updates LLVM usage to match
[c907681b077c](https://github.com/llvm/llvm-project/commit/c907681b077c)

PiperOrigin-RevId: 360891677
Change-Id: Iaaf934b70f5b064ad3796d08abd887c53e3b1d41
Benjamin Kramer, 2021-03-04 05:21:32 -08:00 (committed by TensorFlower Gardener)
parent a5629c8368
commit 503babe90b
27 changed files with 224 additions and 157 deletions
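The bulk of this change is two mechanical migrations to match upstream MLIR: the ODS builder class `OpBuilderDAG` is renamed back to `OpBuilder`, and type-compatibility hooks such as `isCompatibleReturnTypes` move from `ArrayRef<Type>` to `TypeRange`. A minimal sketch of why the `TypeRange` switch is close to source-compatible for callers (the function names are illustrative and the headers assume MLIR of roughly this vintage): `TypeRange` is a cheap non-owning view over a sequence of `mlir::Type`, implicitly constructible both from `ArrayRef<Type>` and from the types of a `ValueRange`.

```cpp
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/TypeRange.h"

// Illustrative helper in the style of the isCompatibleReturnTypes hooks
// changed below: it accepts any range of types without copying them.
static bool allSameType(mlir::TypeRange types) {
  return types.empty() ||
         llvm::all_of(types, [&](mlir::Type t) { return t == types.front(); });
}

static void caller(mlir::ValueRange values, llvm::ArrayRef<mlir::Type> types) {
  (void)allSameType(types);              // ArrayRef<Type> converts implicitly
  (void)allSameType(values.getTypes());  // so does ValueRange::getTypes()
}
```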


@ -90,7 +90,7 @@ class HLOClient_BroadcastBinaryElementwiseOp<
);
let builders = [
OpBuilderDAG<(ins "Value":$left, "Value":$right,
OpBuilder<(ins "Value":$left, "Value":$right,
"DenseIntElementsAttr":$broadcast_dimensions)>];
let results = (outs HLO_Tensor);
@ -673,7 +673,7 @@ def HLOClient_BroadcastCompareOp : HLOClient_BroadcastBinaryElementwiseOp<
let results = (outs HLO_PredTensor);
let builders = [
OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs,
OpBuilder<(ins "Value":$lhs, "Value":$rhs,
"DenseIntElementsAttr":$broadcast_dimensions,
"StringAttr":$comparison_direction, CArg<"StringAttr", "{}">:$compare_type)>];
}
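For context on the rename above: each entry in an ODS `builders` list declares an extra static `build` overload on the generated C++ op class, which `builder.create<OpTy>(loc, ...)` forwards to; only the TableGen class name changes from `OpBuilderDAG` to `OpBuilder`, not the generated code. A hand-written sketch of what the broadcast builder entry above roughly corresponds to (the op class is hypothetical and the body is simplified relative to what mlir-tblgen emits):

```cpp
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/OperationSupport.h"

// Hypothetical stand-in for a tblgen-generated op class. The ODS entry
//   OpBuilder<(ins "Value":$left, "Value":$right,
//             "DenseIntElementsAttr":$broadcast_dimensions)>
// declares (roughly) this static build overload on it.
struct BroadcastBinaryOpSketch {
  static void build(mlir::OpBuilder &builder, mlir::OperationState &state,
                    mlir::Value left, mlir::Value right,
                    mlir::DenseIntElementsAttr broadcast_dimensions) {
    state.addOperands({left, right});
    state.addAttribute("broadcast_dimensions", broadcast_dimensions);
    state.addTypes(left.getType());  // simplified result type for the sketch
  }
};
```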


@ -61,7 +61,7 @@ def HLO_ConstOp : HLO_Op<"constant",
);
let builders = [
OpBuilderDAG<(ins "Attribute":$value)>];
OpBuilder<(ins "Attribute":$value)>];
let assemblyFormat = "attr-dict $value";
@ -163,7 +163,7 @@ def HLO_ConvertOp : HLO_UnaryElementwiseOp<"convert",
[NoSideEffect, SameOperandsAndResultShape], HLO_Tensor>,
BASE_HLO_ConvertOp {
let builders = [
OpBuilderDAG<(ins "Value":$operand, "Type":$result_element_ty)>];
OpBuilder<(ins "Value":$operand, "Type":$result_element_ty)>];
let hasFolder = 1;
@ -624,7 +624,7 @@ def HLO_ReduceOp: HLO_Op<"reduce", [
let results = (outs Variadic<HLO_TensorOrTuple>);
let builders = [
OpBuilderDAG<(ins "ValueRange":$operands, "ValueRange":$init_values,
OpBuilder<(ins "ValueRange":$operands, "ValueRange":$init_values,
"DenseIntElementsAttr":$dimensions)>];
let extraClassDeclaration = [{
@ -662,7 +662,7 @@ def HLO_GetTupleElementOp: HLO_Op<"get_tuple_element", [NoSideEffect]>, BASE_HLO
let hasFolder = 1;
let builders = [
OpBuilderDAG<(ins "Value":$value, "int32_t":$index)>];
OpBuilder<(ins "Value":$value, "int32_t":$index)>];
}
def HLO_TupleOp : HLO_Op<"tuple", [NoSideEffect]>, BASE_HLO_TupleOp {
@ -670,7 +670,7 @@ def HLO_TupleOp : HLO_Op<"tuple", [NoSideEffect]>, BASE_HLO_TupleOp {
let results = (outs HLO_Tuple);
let builders = [
OpBuilderDAG<(ins "ValueRange":$values)>];
OpBuilder<(ins "ValueRange":$values)>];
let hasCanonicalizer = 1;
}
@ -690,7 +690,7 @@ def HLO_CompareOp: HLO_Op<"compare", [NoSideEffect, SameTypeOperands,
let hasFolder = 1;
let builders = [
OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs,
OpBuilder<(ins "Value":$lhs, "Value":$rhs,
"StringAttr":$comparison_direction, CArg<"StringAttr", "{}">:$compare_type)>,
];
@ -889,7 +889,7 @@ def HLO_ConcatenateOp : HLO_Op<"concatenate",
let hasFolder = 1;
let extraClassDeclaration = [{
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
return succeeded(mlir::verifyCompatibleShapes(l, r));
}
}];
@ -1170,7 +1170,7 @@ def HLO_SortOp : HLO_Op<"sort", [RecursiveSideEffects,
let regions = (region SizedRegion<1>:$comparator);
let builders = [
OpBuilderDAG<(ins "ValueRange":$operands, CArg<"int64_t", "-1">:$dimension,
OpBuilder<(ins "ValueRange":$operands, CArg<"int64_t", "-1">:$dimension,
CArg<"bool", "false">:$is_stable)>];
// TODO(b/129422361): SortOp has special conversion logic to HLO.


@ -691,7 +691,7 @@ def FusionOp : LHLO_Op<"fusion", [SingleBlockImplicitTerminator<"TerminatorOp">]
let skipDefaultBuilders = 1;
let builders = [
OpBuilderDAG<(ins CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes)>
OpBuilder<(ins CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes)>
];
let extraClassDeclaration = [{
@ -740,7 +740,7 @@ def TerminatorOp :
Terminator operation for the LHLO dialect.
}];
let builders = [
OpBuilderDAG<(ins "ValueRange":$operands),
OpBuilder<(ins "ValueRange":$operands),
[{ build($_builder, $_state, llvm::None, operands, llvm::None); }]>];
}


@ -1045,7 +1045,7 @@ LogicalResult Conv2DOp::inferReturnTypes(
return success();
}
bool Conv2DOp::isCompatibleReturnTypes(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
bool Conv2DOp::isCompatibleReturnTypes(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size() || lhs.size() != 1) return false;
if (failed(mlir::verifyCompatibleShape(lhs[0], rhs[0]))) return false;
return true;
@ -1917,7 +1917,7 @@ LogicalResult UnpackOp::inferReturnTypes(
return success();
}
bool UnpackOp::isCompatibleReturnTypes(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
bool UnpackOp::isCompatibleReturnTypes(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size()) return false;
for (auto pair : llvm::zip(lhs, rhs)) {
if (failed(
@ -2267,8 +2267,8 @@ LogicalResult UnidirectionalSequenceLSTMOp::inferReturnTypes(
return success();
}
bool UnidirectionalSequenceLSTMOp::isCompatibleReturnTypes(ArrayRef<Type> lhs,
ArrayRef<Type> rhs) {
bool UnidirectionalSequenceLSTMOp::isCompatibleReturnTypes(TypeRange lhs,
TypeRange rhs) {
if (lhs.size() != rhs.size() || lhs.size() != 1) return false;
if (failed(mlir::verifyCompatibleShape(lhs[0], rhs[0]))) return false;
return true;


@ -428,7 +428,7 @@ def ComparisonOpSameElementTypeConstraint :
//===----------------------------------------------------------------------===//
def TFL_BroadcastableBinaryBuilder :
OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
OpBuilder<(ins "Value":$lhs, "Value":$rhs),
[{
auto resultType =
OpTrait::util::getBroadcastedType(lhs.getType(), rhs.getType());
@ -439,7 +439,7 @@ def TFL_BroadcastableBinaryBuilder :
}]>;
def TFL_FusedBroadcastableBinaryBuilder :
OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs,
OpBuilder<(ins "Value":$lhs, "Value":$rhs,
"StringAttr":$fusedActivationFunction),
[{
buildFusedBroadcastableBinOp(
@ -447,7 +447,7 @@ def TFL_FusedBroadcastableBinaryBuilder :
}]>;
def TFL_ComparisonBinaryBuilder :
OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
OpBuilder<(ins "Value":$lhs, "Value":$rhs),
[{
buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
}]>;
@ -818,7 +818,7 @@ def TFL_ConstOp : Op<TFL_Dialect, "pseudo_const", [ConstantLike, NoSideEffect,
let hasFolder = 1;
let builders = [
OpBuilderDAG<(ins "Attribute":$value),
OpBuilder<(ins "Attribute":$value),
[{
$_state.addAttribute("value", value);
$_state.addTypes(value.getType());
@ -843,7 +843,7 @@ def TFL_SparseConstOp : Op<TFL_Dialect, "pseudo_sparse_const", [
let results = (outs AnyTensor:$output);
let builders = [
OpBuilderDAG<(ins "Attribute":$value, "SparsityParameterAttr":$s_param,
OpBuilder<(ins "Attribute":$value, "SparsityParameterAttr":$s_param,
"Attribute":$compressed_data),
[{
$_state.addTypes(value.getType());
@ -881,7 +881,7 @@ def TFL_Conv2DOp : TFL_ConvOp<"conv_2d", "Convolution", 0,
std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }
// Returns whether the return types are compatible.
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
}];
}
@ -1062,7 +1062,7 @@ def TFL_GatherOp : TFL_Op<"gather", [
let builders =
[
OpBuilderDAG<(ins "Value":$params, "Value":$indices, "IntegerAttr":$axis),
OpBuilder<(ins "Value":$params, "Value":$indices, "IntegerAttr":$axis),
[{ BuildGatherOp(&$_builder, $_state, params, indices, axis); }]>
];
@ -1399,7 +1399,7 @@ def TFL_NotEqualOp : TFL_Op<"not_equal", [
let builders =
[
OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
OpBuilder<(ins "Value":$lhs, "Value":$rhs),
[{
buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
}]>
@ -1901,7 +1901,7 @@ def TFL_LogisticOp: TFL_Op<"logistic", [
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in the optimize_patterns.td
let builders = [
OpBuilderDAG<(ins "Value":$input),
OpBuilder<(ins "Value":$input),
[{
$_state.addOperands({input});
$_state.addTypes(input.getType());
@ -2554,7 +2554,7 @@ def TFL_ReluOp: TFL_Op<"relu", [
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in the optimize_patterns.td
let builders = [
OpBuilderDAG<(ins "Value":$input),
OpBuilder<(ins "Value":$input),
[{
$_state.addOperands({input});
$_state.addTypes(input.getType());
@ -2582,7 +2582,7 @@ def TFL_Relu6Op: TFL_Op<"relu6", [
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in the optimize_patterns.td
let builders = [
OpBuilderDAG<(ins "Value":$input),
OpBuilder<(ins "Value":$input),
[{
$_state.addOperands({input});
$_state.addTypes(input.getType());
@ -2610,7 +2610,7 @@ def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in the optimize_patterns.td
let builders = [
OpBuilderDAG<(ins "Value":$input),
OpBuilder<(ins "Value":$input),
[{
$_state.addOperands({input});
$_state.addTypes(input.getType());
@ -2795,7 +2795,7 @@ def TFL_SelectOp : TFL_Op<"select", [
// TODO(jpienaar): autogenerate this.
let builders = [
OpBuilderDAG<(ins "Value":$condition, "Value":$x, "Value":$y),
OpBuilder<(ins "Value":$condition, "Value":$x, "Value":$y),
[{
auto resultType = x.getType();
$_state.addOperands({condition, x, y});
@ -2832,7 +2832,7 @@ def TFL_SelectV2Op : TFL_Op<"select_v2", [
TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);
let builders = [
OpBuilderDAG<(ins "Value":$cond, "Value":$x, "Value":$y),
OpBuilder<(ins "Value":$cond, "Value":$x, "Value":$y),
[{
BuildSelectV2Op(&$_builder, $_state, cond, x, y);
}]>];
@ -3009,7 +3009,7 @@ def TFL_TanhOp: TFL_Op<"tanh", [
// non-quantization tablegen patterns. Currently, it is used by the
// elementwise-move reordering pattern in the optimize_patterns.td
let builders = [
OpBuilderDAG<(ins "Value":$input),
OpBuilder<(ins "Value":$input),
[{
$_state.addOperands({input});
$_state.addTypes(input.getType());
@ -3083,7 +3083,7 @@ def TFL_TopKV2Op: TFL_Op<"topk_v2", [
TFL_I32Tensor:$indices);
let builders = [
OpBuilderDAG<(ins "Value":$input, "Value":$k),
OpBuilder<(ins "Value":$input, "Value":$k),
[{ BuildTopKOp(&$_builder, $_state, input, k); }]>];
let hasOptions = 1;
@ -3152,7 +3152,7 @@ def TFL_UnpackOp : TFL_Op<"unpack", [
);
let extraClassDeclaration = [{
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
}];
let hasOptions = 1;
@ -3628,7 +3628,7 @@ def TFL_QConstOp : Op<TFL_Dialect, "pseudo_qconst", [
let results = (outs TFL_TensorOf<[QUI8, QI8, QI16, QUI16, TFL_Quint8]>:$output);
let builders = [
OpBuilderDAG<(ins "TypeAttr":$qtype, "Attribute":$value),
OpBuilder<(ins "TypeAttr":$qtype, "Attribute":$value),
[{
$_state.addAttribute("qtype", qtype);
$_state.addAttribute("value", value);
@ -3657,7 +3657,7 @@ def TFL_SparseQConstOp : Op<TFL_Dialect, "pseudo_sparse_qconst", [
let results = (outs TFL_TensorOf<[QUI8, QI8, QI16, QUI16, TFL_Quint8]>:$output);
let builders = [
OpBuilderDAG<(ins "TypeAttr":$qtype, "Attribute":$value,
OpBuilder<(ins "TypeAttr":$qtype, "Attribute":$value,
"SparsityParameterAttr":$s_param, "Attribute":$compressed_data),
[{
$_state.addTypes(qtype.getValue());
@ -4046,7 +4046,7 @@ def TFL_UnidirectionalSequenceLSTMOp :
std::vector<int> GetStatefulOperands() { return {18, 19}; }
// Compatiable return types check
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
}];
}


@ -179,8 +179,7 @@ LogicalResult Verify(ParallelExecuteOp op) {
// static
void ParallelExecuteOp::build(OpBuilder& builder, OperationState& state,
int num_regions,
llvm::ArrayRef<Type> output_types) {
int num_regions, TypeRange output_types) {
DCHECK_GE(num_regions, 2);
for (int i = 0; i < num_regions; ++i) {
Region* region = state.addRegion();
@ -203,10 +202,7 @@ Operation::result_range ParallelExecuteOp::GetRegionOutputs(
return_value_offset +=
GetRegionBlockWithIndex(region_id).getTerminator()->getNumOperands();
Operation::result_range region_results(getOperation(),
/*startIndex=*/return_value_offset,
/*count=*/num_region_results);
return region_results;
return getResults().slice(return_value_offset, num_region_results);
}
bool ParallelExecuteOp::RegionWrapsSingleOp(unsigned index) {


@ -76,7 +76,7 @@ This op captures all needed live-in values.
}];
let builders = [
OpBuilderDAG<(ins "StringAttr":$device, "ArrayRef<Type>":$result_types),
OpBuilder<(ins "StringAttr":$device, "TypeRange":$result_types),
[{
$_state.addAttribute("device", device);
$_state.addTypes(result_types);
@ -98,7 +98,7 @@ The `tf_device.return` operation terminates and returns values from a
);
let builders = [
OpBuilderDAG<(ins),
OpBuilder<(ins),
[{
build($_builder, $_state, {});
}]>
@ -169,7 +169,7 @@ def TfDevice_ParallelExecuteOp : TfDevice_Op<"parallel_execute",
}];
let builders = [
OpBuilderDAG<(ins "int":$num_regions, "llvm::ArrayRef<Type>":$output_types)>,
OpBuilder<(ins "int":$num_regions, "TypeRange":$output_types)>,
];
let verifier = [{ return Verify(*this); }];
@ -294,11 +294,11 @@ For example:
}];
let builders = [
OpBuilderDAG<(ins "int":$n,
OpBuilder<(ins "int":$n,
"const llvm::SmallDenseMap<StringRef, llvm::SmallVector<StringRef, 4>>&":$devices,
"llvm::ArrayRef<std::pair<ValueRange, Type>>":$replicated_inputs,
"ValueRange":$packed_inputs, "TypeRange":$replica_output_types)>,
OpBuilderDAG<(ins "int":$n, "llvm::Optional<DictionaryAttr>":$devices,
OpBuilder<(ins "int":$n, "llvm::Optional<DictionaryAttr>":$devices,
"llvm::ArrayRef<std::pair<ValueRange, Type>>":$replicated_inputs,
"ValueRange":$packed_inputs, "TypeRange":$replica_output_types)>,
];
@ -330,7 +330,7 @@ used to form the cluster.
let regions = (region SizedRegion<1>:$body);
let builders = [
OpBuilderDAG<(ins "ArrayRef<Type>":$resultTypes),
OpBuilder<(ins "TypeRange":$resultTypes),
[{
build($_builder, $_state, resultTypes, mlir::StringAttr {});
}]>


@ -144,7 +144,7 @@ def TfExecutor_FetchOp : TfExecutor_Op<"fetch",
);
let builders = [
OpBuilderDAG<(ins),
OpBuilder<(ins),
[{
build($_builder, $_state, {});
}]>
@ -229,7 +229,7 @@ def TfExecutor_YieldOp : TfExecutor_Op<"yield",
);
let builders = [
OpBuilderDAG<(ins),
OpBuilder<(ins),
[{
build($_builder, $_state, {});
}]>
@ -460,7 +460,7 @@ def TfExecutor_NextIterationSourceOp : TfExecutor_Op<"NextIteration.Source",
);
let builders = [
OpBuilderDAG<(ins "Type":$result_type,
OpBuilder<(ins "Type":$result_type,
CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
[{
Type token_type = TokenType::get($_builder.getContext());
@ -530,7 +530,7 @@ def TfExecutor_NextIterationSinkOp : TfExecutor_Op<"NextIteration.Sink",
);
let builders = [
OpBuilderDAG<(ins "Value":$token, "ArrayRef<Value>":$operands,
OpBuilder<(ins "Value":$token, "ArrayRef<Value>":$operands,
CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
[{
assert(operands.size() >= 1 && "tf_executor.NextIteration.Sink builder "
@ -618,7 +618,7 @@ def TfExecutor_ControlTriggerOp : TfExecutor_Op<"ControlTrigger",
let hasCanonicalizer = 1;
let builders = [
OpBuilderDAG<(ins "ArrayRef<Value>":$operands,
OpBuilder<(ins "ArrayRef<Value>":$operands,
CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
[{
assert(operands.size() >= 1 && "tf_executor.ControlTrigger builder "


@ -2227,7 +2227,7 @@ of `data_format`, see below for details.}]>:$input,
StringRef GetOptimalLayout(const RuntimeDevices& devices);
LogicalResult UpdateDataFormat(StringRef data_format);
// InferTypeOpInterface:
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
return ArraysAreCastCompatible(l, r);
}
}];
@ -2348,7 +2348,7 @@ out_channels]`. `in_channels` must match between `input` and `filter`.}]>:$filte
let extraClassDeclaration = [{
// InferTypeOpInterface:
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
return ArraysAreCastCompatible(l, r);
}
}];
@ -3923,7 +3923,7 @@ tf.math.equal(x, y) ==> array([True, True])
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
let builders = [
OpBuilderDAG<(ins "Value":$x, "Value":$y,
OpBuilder<(ins "Value":$x, "Value":$y,
"BoolAttr":$incompatible_shape_error)>
];
@ -4073,7 +4073,7 @@ dimension of size 1 added.}]>:$output
TF_DerivedOperandTypeAttr Tdim = TF_DerivedOperandTypeAttr<1>;
let builders = [
OpBuilderDAG<(ins "Value":$condition, "Value":$dim)>
OpBuilder<(ins "Value":$condition, "Value":$dim)>
];
}
@ -4460,7 +4460,7 @@ Equivalent to np.full
let hasFolder = 1;
let builders = [
OpBuilderDAG<(ins "Value":$dims, "Value":$value)>
OpBuilder<(ins "Value":$dims, "Value":$value)>
];
}
@ -8042,7 +8042,7 @@ retained with length 1.
TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
let builders = [
OpBuilderDAG<(ins "Value":$input, "Value":$reduction_indices,
OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
"BoolAttr":$keep_dims)>
];
}
@ -9060,7 +9060,7 @@ def TF_NotEqualOp : TF_Op<"NotEqual", [Commutative, NoSideEffect]> {
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
let builders = [
OpBuilderDAG<(ins "Value":$x, "Value":$y,
OpBuilder<(ins "Value":$x, "Value":$y,
"BoolAttr":$incompatible_shape_error)>
];
@ -9179,7 +9179,7 @@ output =
TF_DerivedOperandTypeAttr TI = TF_DerivedOperandTypeAttr<0>;
let builders = [
OpBuilderDAG<(ins "Value":$indices, "Value":$depth, "Value":$on_value,
OpBuilder<(ins "Value":$indices, "Value":$depth, "Value":$on_value,
"Value":$off_value, "IntegerAttr":$axis)>
];
@ -10394,7 +10394,7 @@ tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<0>;
let builders = [
OpBuilderDAG<(ins "Value":$start, "Value":$limit, "Value":$delta)>
OpBuilder<(ins "Value":$start, "Value":$limit, "Value":$delta)>
];
}
@ -10447,7 +10447,7 @@ of the tensor. Rank is also known as "order", "degree", or "ndims."
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
let builders = [
OpBuilderDAG<(ins "Value":$input)>
OpBuilder<(ins "Value":$input)>
];
let hasFolder = 1;
@ -10796,7 +10796,7 @@ reshape(t, []) ==> 7
TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<1>;
let builders = [
OpBuilderDAG<(ins "Value":$tensor, "Value":$shape)>
OpBuilder<(ins "Value":$tensor, "Value":$shape)>
];
let verifier = [{
@ -13298,7 +13298,7 @@ def TF_SelectV2Op : TF_Op<"SelectV2", [NoSideEffect, ResultsBroadcastableShape]>
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
let builders = [
OpBuilderDAG<(ins "Value":$condition, "Value":$e, "Value":$t)>
OpBuilder<(ins "Value":$condition, "Value":$e, "Value":$t)>
];
}
@ -13461,7 +13461,7 @@ shape(t) ==> [2, 2, 3]
}];
let builders = [
OpBuilderDAG<(ins "Value":$input, "BoolAttr":$use32Bit)>
OpBuilder<(ins "Value":$input, "BoolAttr":$use32Bit)>
];
let hasFolder = 1;
@ -14137,7 +14137,7 @@ regular convolution.}]>:$paddings
let verifier = [{ return Verify(*this); }];
let extraClassDeclaration = [{
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
return ArraysAreCastCompatible(l, r);
}
}];
@ -15608,7 +15608,7 @@ retained with length 1.
TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
let builders = [
OpBuilderDAG<(ins "Value":$input, "Value":$reduction_indices,
OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
"BoolAttr":$keep_dims)>
];
@ -16801,7 +16801,7 @@ to the indices.}]>:$output
let verifier = [{ return Verify(*this); }];
let builders = [
OpBuilderDAG<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
OpBuilder<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
[{build($_builder, $_state, tensor.getType(), tensor, indices, updates);}]>
];
}
@ -16999,7 +16999,7 @@ The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;
let builders = [
OpBuilderDAG<(ins "Value":$x, "Value":$perm)>
OpBuilder<(ins "Value":$x, "Value":$perm)>
];
let verifier = [{
@ -17670,7 +17670,7 @@ for binary operators.
let extraClassDeclaration = [{
// InferTypeOpInterface:
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
return ArraysAreCastCompatible(l, r);
}
}];


@ -579,8 +579,8 @@ def TF_IntTypeAttr : TypeAttrBase<"IntegerType", "integer type"> {
// Mixin class defining a builder for binary ops supporting broadcast
// behavior. The result type has the same element type as both operands.
class WithBroadcastableBinOpBuilder {
list<OpBuilderDAG> builders = [
OpBuilderDAG<(ins "Value":$x, "Value":$y),
list<OpBuilder> builders = [
OpBuilder<(ins "Value":$x, "Value":$y),
[{
auto resultType =
OpTrait::util::getBroadcastedType(x.getType(), y.getType());
@ -593,8 +593,8 @@ class WithBroadcastableBinOpBuilder {
// Mixin class defining a builder for comparison ops supporting broadcast
// behavior. The result type has bool element type.
class WithBroadcastableCmpOpBuilder {
list<OpBuilderDAG> builders = [
OpBuilderDAG<(ins "Value":$x, "Value":$y),
list<OpBuilder> builders = [
OpBuilder<(ins "Value":$x, "Value":$y),
[{
Type resultType;
if (x.getType().isa<UnrankedTensorType>() ||


@ -203,14 +203,14 @@ def TF_ConstOp : TF_Op<"Const", [ConstantLike, NoSideEffect,
TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
let builders = [
OpBuilderDAG<(ins "Attribute":$value)>,
OpBuilderDAG<(ins "Type":$type, "Attribute":$value)>,
OpBuilder<(ins "Attribute":$value)>,
OpBuilder<(ins "Type":$type, "Attribute":$value)>,
];
let hasFolder = 1;
let extraClassDeclaration = [{
static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
return BroadcastCompatible(l, r);
}
}];
@ -410,7 +410,7 @@ else_branch: A region that computes the outputs of the op if cond = false.
}];
let builders = [
OpBuilderDAG<(ins "TypeRange":$resultTypes, "ValueRange":$operands,
OpBuilder<(ins "TypeRange":$resultTypes, "ValueRange":$operands,
"llvm::ArrayRef<::mlir::NamedAttribute>":$attributes,
"unsigned":$numRegions),
[{
@ -1213,7 +1213,7 @@ as true/false for a branch condition.
TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
let builders = [
OpBuilderDAG<(ins "Value":$value),
OpBuilder<(ins "Value":$value),
[{
build($_builder, $_state, RankedTensorType::get({}, $_builder.getI1Type()),
value);


@ -198,7 +198,7 @@ ArrayRef<TensorType> TensorFlowTypeWithSubtype::GetSubtypes() {
// TODO(jpienaar): BroadcastCompatible and HasCompatibleElementTypes have
// similar structure that could be extracted into helper method.
bool BroadcastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
bool BroadcastCompatible(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size()) return false;
for (auto types : llvm::zip(lhs, rhs)) {
// Drop ref types because they don't affect broadcast compatibility. E.g.,
@ -349,7 +349,7 @@ bool HasCompatibleElementTypes(Type lhs, Type rhs,
return GetCastCompatibleType(lhs, rhs, may_ignore_ref_type_lhs) != nullptr;
}
bool AreCastCompatible(ArrayRef<Type> types) {
bool AreCastCompatible(TypeRange types) {
Type common = types.front();
for (auto type : types.drop_front()) {
Type refined_type =
@ -360,7 +360,7 @@ bool AreCastCompatible(ArrayRef<Type> types) {
return true;
}
bool ArraysAreCastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs) {
bool ArraysAreCastCompatible(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size()) return false;
for (auto pair : llvm::zip(lhs, rhs)) {
auto lhs_i = std::get<0>(pair);


@ -287,7 +287,7 @@ mlir::Type GetCastCompatibleType(mlir::Type a, mlir::Type b,
bool may_ignore_ref_type_a);
// Returns whether two arrays of Type are broadcast compatible.
bool BroadcastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs);
bool BroadcastCompatible(TypeRange lhs, TypeRange rhs);
// Returns whether the two elemental types are compatible. Shapes are compatible
// if:
@ -305,11 +305,11 @@ bool HasCompatibleElementTypes(Type lhs, Type rhs,
// another. In other words, a single run-time value is legal for both the types.
// For example, tensor<*xf32>, tensor<?xf32> and tensor<3xf32> are cast
// compatible.
bool AreCastCompatible(ArrayRef<Type> types);
bool AreCastCompatible(TypeRange types);
// Returns true if corresponding elements of lhs and rhs AreCastCompatible and
// lhs and rhs are the same length.
bool ArraysAreCastCompatible(ArrayRef<Type> lhs, ArrayRef<Type> rhs);
bool ArraysAreCastCompatible(TypeRange lhs, TypeRange rhs);
// If `ty` is a tensor type and its element type has subtypes, then returns a
// new type of same shape but dropped subtypes for the element type.


@ -80,7 +80,7 @@ LogicalResult ConstantFoldFallbackHook(
// If any of the result types are variants, don't try to constant fold them.
// This creates opaque variant constants which lose information and would
// require "raising" later.
for (auto& type : inst->getResultTypes()) {
for (auto type : inst->getResultTypes()) {
if (auto tensor_type = type.dyn_cast<TensorType>()) {
if (tensor_type.getElementType().isa<VariantType>()) {
return failure();


@ -890,7 +890,7 @@ bool ShapeInference::RefineTypeForPassThroughOperands(Operation* op,
OperandRange operands,
ResultRange results) {
bool changed = false;
for (auto entry : zip(operands, results)) {
for (auto entry : llvm::zip(operands, results)) {
Type operand_type = std::get<0>(entry).getType();
Value result = std::get<1>(entry);
TensorType result_type = result.getType().cast<TensorType>();
@ -925,7 +925,7 @@ bool ShapeInference::RefineShapeForPassThroughOps(Operation* op) {
};
bool changed = false;
for (auto entry : zip(op->getOperands(), op->getResults())) {
for (auto entry : llvm::zip(op->getOperands(), op->getResults())) {
TensorType operand_type = std::get<0>(entry).getType().cast<TensorType>();
Value result = std::get<1>(entry);
TensorType result_type = result.getType().cast<TensorType>();


@ -267,7 +267,7 @@ def TFR_ConstOp : TFR_Op<"constant", [ConstantLike, NoSideEffect]> {
let hasFolder = 1;
let builders = [
OpBuilderDAG<(ins "Attribute":$value),
OpBuilder<(ins "Attribute":$value),
[{
auto* ctx = value.getContext();
$_state.addAttribute("value", value);
@ -425,7 +425,7 @@ def TFR_TFRFuncOp : TFR_Op<"func", [HasParent<"ModuleOp">,
let skipDefaultBuilders = 1;
let builders = [
OpBuilderDAG<(ins "StringRef":$name, "FunctionType":$type,
OpBuilder<(ins "StringRef":$name, "FunctionType":$type,
CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
];


@ -68,6 +68,7 @@ cc_library(
"@llvm-project//mlir:GPUTransforms",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:LLVMDialect",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:LinalgOps",
"@llvm-project//mlir:LinalgTransforms",
"@llvm-project//mlir:NVVMDialect",
@ -86,7 +87,7 @@ cc_library(
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:StandardOpsTransforms",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TargetLLVMIR",
"@llvm-project//mlir:ToLLVMIRTranslation",
"@llvm-project//mlir:Transforms",
],
)
@ -117,8 +118,8 @@ tf_cc_binary(
"@llvm-project//llvm:X86Disassembler", # fixdeps: keep
"@llvm-project//mlir:ExecutionEngineUtils",
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:TargetLLVMIR",
"@llvm-project//mlir:TargetLLVMIRModuleTranslation",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:ToLLVMIRTranslation",
] + if_llvm_system_z_available([
"@llvm-project//llvm:SystemZCodeGen", # fixdeps: keep
]) + if_llvm_aarch64_available([


@ -80,12 +80,12 @@ def TFFramework_TFAllocOp : TFFramework_Op<"alloc",
let results = (outs Res<AnyMemRef, "", [MemAlloc<DefaultResource>]>:$result);
let builders = [
OpBuilderDAG<(ins "MemRefType":$memref_type, "Value":$ctx),
OpBuilder<(ins "MemRefType":$memref_type, "Value":$ctx),
[{
$_state.addOperands(ctx);
$_state.types.push_back(memref_type);
}]>,
OpBuilderDAG<(ins "MemRefType":$memref_type, "Value":$ctx,
OpBuilder<(ins "MemRefType":$memref_type, "Value":$ctx,
"ValueRange":$dyn_sizes),
[{
build($_builder, $_state, memref_type, ctx);


@ -31,8 +31,6 @@ limitations under the License.
#include "mlir/Dialect/GPU/ParallelLoopMapper.h" // from @llvm-project
#include "mlir/Dialect/GPU/Passes.h" // from @llvm-project
#include "mlir/Dialect/LLVMIR/LLVMDialect.h" // from @llvm-project
#include "mlir/Dialect/LLVMIR/NVVMDialect.h" // from @llvm-project
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h" // from @llvm-project
#include "mlir/Dialect/Linalg/Passes.h" // from @llvm-project
#include "mlir/Dialect/Linalg/Transforms/Transforms.h" // from @llvm-project
#include "mlir/Dialect/SCF/Passes.h" // from @llvm-project
@ -46,7 +44,7 @@ limitations under the License.
#include "mlir/Parser.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Target/LLVMIR.h" // from @llvm-project
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" // from @llvm-project
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h" // from @llvm-project
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h" // from @llvm-project
#include "mlir/Transforms/Bufferize.h" // from @llvm-project
@ -435,12 +433,9 @@ StatusOr<mlir::OwningModuleRef> GenerateKernelForTfCode(
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
registry.insert<mlir::chlo::HloClientDialect, mlir::mhlo::MhloDialect>();
registry.insert<mlir::NVVM::NVVMDialect, mlir::ROCDL::ROCDLDialect>();
registry.addDialectInterface<mlir::NVVM::NVVMDialect,
mlir::NVVMDialectLLVMIRTranslationInterface>();
registry.addDialectInterface<mlir::ROCDL::ROCDLDialect,
mlir::ROCDLDialectLLVMIRTranslationInterface>();
mlir::registerLLVMDialectTranslation(registry);
mlir::registerNVVMDialectTranslation(registry);
mlir::registerROCDLDialectTranslation(registry);
context.appendDialectRegistry(registry);
mlir::OwningModuleRef module = mlir::parseSourceString(tf_code, &context);
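The hunk above shows the replacement for the removed `mlir/Target/LLVMIR.h`: each dialect that lowers to LLVM IR now ships a `register<Name>DialectTranslation(DialectRegistry &)` helper in its own header. A minimal consolidated sketch of that pattern (the wrapper function name is illustrative):

```cpp
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"

// Attach the LLVM, NVVM, and ROCDL translation interfaces to a context,
// replacing the old manual addDialectInterface<...> wiring.
static void registerGpuBackendTranslations(mlir::MLIRContext &context) {
  mlir::DialectRegistry registry;
  mlir::registerLLVMDialectTranslation(registry);
  mlir::registerNVVMDialectTranslation(registry);
  mlir::registerROCDLDialectTranslation(registry);
  context.appendDialectRegistry(registry);
}
```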


@ -34,7 +34,7 @@
#include "llvm/Target/TargetMachine.h"
#include "mlir/ExecutionEngine/OptUtils.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Target/LLVMIR.h" // from @llvm-project
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" // from @llvm-project
#include "mlir/Target/LLVMIR/Export.h" // from @llvm-project
#include "tensorflow/compiler/mlir/init_mlir.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/kernel_creator.h"


@ -126,8 +126,10 @@ cc_library(
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:StandardOpsTransforms",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TargetLLVMIR",
"@llvm-project//mlir:TargetLLVMIRModuleTranslation",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:NVVMToLLVMIRTranslation",
"@llvm-project//mlir:ROCDLToLLVMIRTranslation",
"@llvm-project//mlir:ToLLVMIRTranslation",
"@llvm-project//mlir:TensorDialect",
"@llvm-project//mlir:TensorTransforms",
"@llvm-project//mlir:Transforms",


@ -15,7 +15,6 @@ limitations under the License.
#include "llvm/Transforms/Utils/Cloning.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h" // from @llvm-project
#include "mlir/Target/LLVMIR.h" // from @llvm-project
#include "mlir/Target/LLVMIR/Export.h" // from @llvm-project
#include "mlir/Transforms/DialectConversion.h" // from @llvm-project
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"


@ -1162,10 +1162,10 @@ cc_library(
"@llvm-project//llvm:Linker",
"@llvm-project//mlir:CFGTransforms",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:LinalgTransforms",
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:TargetLLVMIR",
"@llvm-project//mlir:TargetLLVMIRModuleTranslation",
"@llvm-project//mlir:ToLLVMIRTranslation",
"@llvm-project//mlir:Transforms",
"@llvm-project//mlir:VectorToLLVM",
],


@ -23,7 +23,6 @@ limitations under the License.
#include "mlir/IR/BuiltinOps.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Target/LLVMIR.h" // from @llvm-project
#include "mlir/Target/LLVMIR/Export.h" // from @llvm-project
#include "mlir/Transforms/Passes.h" // from @llvm-project
#include "tensorflow/compiler/mlir/xla/hlo_utils.h"


@ -4,8 +4,8 @@ load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "5d7e0a23c6f2051d4caf8f8c8821790c40b584be"
LLVM_SHA256 = "dc100f5eddb35b694454ecc8cbf654b238f2e2684784d33d40536500297e9212"
LLVM_COMMIT = "c907681b077c6acf95c3ddca6611639c29559e40"
LLVM_SHA256 = "2929f1730e9ba4fea489400d81e5da21a4e79c94fea9dd3fd12e042350ff19ea"
tf_http_archive(
name = name,

third_party/mlir/BUILD (vendored): 135 changes

@ -201,6 +201,7 @@ cc_library(
"include/mlir-c/BuiltinTypes.h",
"include/mlir-c/Diagnostics.h",
"include/mlir-c/Dialect/Standard.h",
"include/mlir-c/ExecutionEngine.h",
"include/mlir-c/IR.h",
"include/mlir-c/IntegerSet.h",
"include/mlir-c/Pass.h",
@ -219,6 +220,7 @@ cc_library(
],
includes = ["include"],
deps = [
":ConversionPassIncGen",
":IR",
":InferTypeOpInterface",
":Parser",
@ -229,6 +231,35 @@ cc_library(
],
)
cc_library(
name = "CAPIConversion",
srcs = ["lib/CAPI/Conversion/Passes.cpp"],
hdrs = ["include/mlir-c/Conversion.h"],
includes = ["include"],
deps = [
":CAPIIR",
":ConversionPassIncGen",
":ConversionPasses",
":Pass",
],
)
cc_library(
name = "CAPIExecutionEngine",
srcs = ["lib/CAPI/ExecutionEngine/ExecutionEngine.cpp"],
hdrs = [
"include/mlir-c/ExecutionEngine.h",
"include/mlir/CAPI/ExecutionEngine.h",
],
includes = ["include"],
deps = [
":CAPIIR",
":ExecutionEngine",
":LLVMToLLVMIRTranslation",
"@llvm-project//llvm:Support",
],
)
cc_library(
name = "CAPITransforms",
srcs = ["lib/CAPI/Transforms/Passes.cpp"],
@ -259,6 +290,7 @@ cc_library(
deps = [
":AllPassesAndDialectsNoRegistration",
":CAPIIR",
":LLVMToLLVMIRTranslation",
],
)
@ -1130,6 +1162,14 @@ gentbl(
"-gen-pass-decls -name Conversion",
"include/mlir/Conversion/Passes.h.inc",
),
(
"-gen-pass-capi-header --prefix Conversion",
"include/mlir/Conversion/Passes.capi.h.inc",
),
(
"-gen-pass-capi-impl --prefix Conversion",
"include/mlir/Conversion/Passes.capi.cpp.inc",
),
],
tblgen = ":mlir-tblgen",
td_file = "include/mlir/Conversion/Passes.td",
@ -3535,7 +3575,7 @@ cc_library(
)
cc_library(
name = "TargetLLVMIRModuleTranslation",
name = "ToLLVMIRTranslation",
srcs = [
"lib/Target/LLVMIR/DebugTranslation.cpp",
"lib/Target/LLVMIR/DebugTranslation.h",
@ -3573,7 +3613,7 @@ cc_library(
":LLVMAVX512",
":LLVMAVX512ConversionIncGen",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Support",
],
@ -3590,7 +3630,7 @@ cc_library(
":LLVMArmNeonConversionIncGen",
":LLVMArmNeonIncGen",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Support",
],
@ -3606,7 +3646,7 @@ cc_library(
":LLVMArmSVE",
":LLVMArmSVEConversionIncGen",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Support",
],
@ -3622,7 +3662,7 @@ cc_library(
":NVVMConversionIncGen",
":NVVMDialect",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Support",
],
@ -3638,7 +3678,7 @@ cc_library(
":ROCDLConversionIncGen",
":ROCDLDialect",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Support",
],
@ -3654,7 +3694,7 @@ cc_library(
":LLVMConversionIncGen",
":LLVMDialect",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Support",
],
@ -3669,7 +3709,7 @@ cc_library(
":IR",
":OpenMPDialect",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:FrontendOpenMP",
"@llvm-project//llvm:Support",
@ -3677,32 +3717,46 @@ cc_library(
)
cc_library(
name = "TargetLLVMIR",
name = "AllToLLVMIRTranslations",
hdrs = ["include/mlir/Target/LLVMIR/Dialect/All.h"],
includes = ["include"],
deps = [
":LLVMAVX512ToLLVMIRTranslation",
":LLVMArmNeonToLLVMIRTranslation",
":LLVMArmSVEToLLVMIRTranslation",
":LLVMToLLVMIRTranslation",
":NVVMToLLVMIRTranslation",
":OpenMPToLLVMIRTranslation",
":ROCDLToLLVMIRTranslation",
],
)
cc_library(
name = "ToLLVMIRTranslationRegistration",
srcs = ["lib/Target/LLVMIR/ConvertToLLVMIR.cpp"],
includes = ["include"],
deps = [
":AllToLLVMIRTranslations",
":IR",
":ToLLVMIRTranslation",
":Translation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Support",
],
)
cc_library(
name = "FromLLVMIRTranslation",
srcs = [
"lib/Target/LLVMIR/ConvertFromLLVMIR.cpp",
"lib/Target/LLVMIR/ConvertToLLVMIR.cpp",
],
hdrs = ["include/mlir/Target/LLVMIR.h"],
includes = ["include"],
deps = [
":IR",
":LLVMAVX512",
":LLVMAVX512ToLLVMIRTranslation",
":LLVMArmNeon",
":LLVMArmNeonToLLVMIRTranslation",
":LLVMArmSVE",
":LLVMArmSVEToLLVMIRTranslation",
":LLVMConversionIncGen",
":LLVMDialect",
":LLVMToLLVMIRTranslation",
":NVVMDialect",
":NVVMToLLVMIRTranslation",
":OpenMPDialect",
":OpenMPToLLVMIRTranslation",
":ROCDLDialect",
":ROCDLToLLVMIRTranslation",
":Support",
":TargetLLVMIRModuleTranslation",
":Translation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:IRReader",
@ -3722,11 +3776,11 @@ cc_library(
],
includes = ["include"],
deps = [
":AllToLLVMIRTranslations",
":IR",
":LLVMDialect",
":Support",
":TargetLLVMIR",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
":Translation",
"@llvm-project//llvm:BitReader",
"@llvm-project//llvm:BitWriter",
@ -3789,14 +3843,9 @@ cc_library(
name = "AllTranslations",
hdrs = ["include/mlir/InitAllTranslations.h"],
deps = [
":LLVMAVX512ToLLVMIRTranslation",
":LLVMArmNeonToLLVMIRTranslation",
":LLVMArmSVEToLLVMIRTranslation",
":LLVMToLLVMIRTranslation",
":NVVMToLLVMIRTranslation",
":ROCDLToLLVMIRTranslation",
":FromLLVMIRTranslation",
":SPIRVTranslateRegistration",
":TargetLLVMIR",
":ToLLVMIRTranslationRegistration",
],
)
@ -4011,13 +4060,14 @@ cc_binary(
srcs = ["tools/mlir-cpu-runner/mlir-cpu-runner.cpp"],
linkopts = ["-ldl"],
deps = [
":AllToLLVMIRTranslations",
":ExecutionEngineUtils",
":IR",
":LLVMDialect",
":LLVMToLLVMIRTranslation",
":MlirJitRunner",
":OpenMPDialect",
":OpenMPToLLVMIRTranslation",
":TargetLLVMIR",
":ToLLVMIRTranslation",
"@llvm-project//llvm:AsmParser",
"@llvm-project//llvm:Support",
"@llvm-project//llvm:X86AsmParser",
@ -4094,13 +4144,14 @@ cc_binary(
":GPUTransforms",
":IR",
":LLVMDialect",
":LLVMToLLVMIRTranslation",
":MlirJitRunner",
":NVVMDialect",
":NVVMToLLVMIRTranslation",
":Pass",
":StandardOps",
":TargetLLVMIR",
":TargetLLVMIRModuleTranslation",
":StandardToLLVM",
":ToLLVMIRTranslation",
":Transforms",
"//devtools/build/runtime:get_runfiles_dir",
"//third_party/gpus/cuda:cuda_headers",
@ -4120,6 +4171,7 @@ cc_binary(
":GPUToVulkanTransforms",
":GPUTransforms",
":LLVMDialect",
":LLVMToLLVMIRTranslation",
":MlirJitRunner",
":Pass",
":SPIRVDialect",
@ -4127,8 +4179,7 @@ cc_binary(
":StandardOps",
":StandardToLLVM",
":StandardToSPIRV",
":TargetLLVMIR",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Support",
],
)
@ -4143,6 +4194,7 @@ cc_binary(
":GPUTransforms",
":IR",
":LLVMDialect",
":LLVMToLLVMIRTranslation",
":MlirJitRunner",
":Pass",
":SPIRVConversion",
@ -4151,8 +4203,7 @@ cc_binary(
":SPIRVTransforms",
":StandardOps",
":StandardToLLVM",
":TargetLLVMIR",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Linker",
"@llvm-project//llvm:Support",
@ -4940,7 +4991,7 @@ cc_library(
":StandardOps",
":StandardToLLVM",
":Support",
":TargetLLVMIRModuleTranslation",
":ToLLVMIRTranslation",
":Transforms",
":VectorOps",
"@llvm-project//llvm:Core",


@ -124,6 +124,27 @@ gentbl(
test = True,
)
gentbl(
name = "TestAttrDefsIncGen",
strip_include_prefix = "lib/Dialect/Test",
tbl_outs = [
(
"-gen-attrdef-decls",
"lib/Dialect/Test/TestAttrDefs.h.inc",
),
(
"-gen-attrdef-defs",
"lib/Dialect/Test/TestAttrDefs.cpp.inc",
),
],
tblgen = "@llvm-project//mlir:mlir-tblgen",
td_file = "lib/Dialect/Test/TestAttrDefs.td",
td_srcs = [
":TestOpTdFiles",
],
test = True,
)
gentbl(
name = "TestTypeDefsIncGen",
strip_include_prefix = "lib/Dialect/Test",
@ -148,6 +169,7 @@ gentbl(
cc_library(
name = "TestDialect",
srcs = [
"lib/Dialect/Test/TestAttributes.cpp",
"lib/Dialect/Test/TestDialect.cpp",
"lib/Dialect/Test/TestInterfaces.cpp",
"lib/Dialect/Test/TestPatterns.cpp",
@ -155,6 +177,7 @@ cc_library(
"lib/Dialect/Test/TestTypes.cpp",
],
hdrs = [
"lib/Dialect/Test/TestAttributes.h",
"lib/Dialect/Test/TestDialect.h",
"lib/Dialect/Test/TestInterfaces.h",
"lib/Dialect/Test/TestTypes.h",
@ -163,6 +186,7 @@ cc_library(
"lib/Dialect/Test",
],
deps = [
":TestAttrDefsIncGen",
":TestInterfacesIncGen",
":TestOpsIncGen",
":TestTypeDefsIncGen",
@ -262,6 +286,7 @@ cc_library(
"@llvm-project//mlir:GPUTransforms",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:LLVMDialect",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:LLVMTransforms",
"@llvm-project//mlir:LinalgOps",
"@llvm-project//mlir:LinalgTransforms",
@ -277,8 +302,7 @@ cc_library(
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:StandardOpsTransforms",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TargetLLVMIR",
"@llvm-project//mlir:TargetLLVMIRModuleTranslation",
"@llvm-project//mlir:ToLLVMIRTranslation",
"@llvm-project//mlir:TransformUtils",
"@llvm-project//mlir:Transforms",
"@llvm-project//mlir:VectorOps",