Integrate LLVM at llvm/llvm-project@52f0837778
Updates LLVM usage to match [52f0837778b6](https://github.com/llvm/llvm-project/commit/52f0837778b6)

PiperOrigin-RevId: 330939173
Change-Id: I1ca97ad6189a8626b53db0c5a468f7ed43af904d
parent 7021444afe
commit c7be4e0be2
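
Most of the test-file churn below comes from an upstream MLIR assembly-format change that ships with this LLVM version: `tensor_from_elements` no longer wraps its operands in parentheses. A minimal before/after sketch (the value names are illustrative, not taken from any file in this commit):

  // Old assembly format, before this integrate:
  %shape = tensor_from_elements(%a, %b) : tensor<2xindex>
  // New assembly format, after this integrate:
  %shape = tensor_from_elements %a, %b : tensor<2xindex>
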
@@ -45,7 +45,7 @@ struct StaticMemRefCastOpConverter
       return failure();
     // Create descriptor.
     auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
-    Type llvmTargetElementTy = desc.getElementType();
+    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
     Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
     allocated =
@@ -96,7 +96,7 @@ struct DynamicMemRefCastOpConverter
       return failure();
     // Create descriptor.
     auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
-    Type llvmTargetElementTy = desc.getElementType();
+    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
     Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
     allocated =
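
Both memref-cast converters above pick up the same upstream rename: `MemRefDescriptor::getElementType()` is now `getElementPtrType()`. A minimal sketch of the updated descriptor setup, reusing the `rewriter`, `loc`, `sourceMemRef`, and `llvmTargetDescriptorTy` values from the hunks above; the bitcast and `setAllocatedPtr` lines are an assumed continuation of the truncated `allocated =` statement, not text from this commit:

  // Build an undef target descriptor and query its element pointer type.
  auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
  Type llvmTargetElementTy = desc.getElementPtrType();  // formerly getElementType()
  // Carry the allocated pointer over from the source descriptor (assumed continuation).
  Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
  allocated =
      rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
  desc.setAllocatedPtr(rewriter, loc, allocated);
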
@@ -253,7 +253,7 @@ func @addScalarUnranked(%arg0: tensor<f32>, %arg1: tensor<*xf32>) -> tensor<*xf32>
 // to a 1D tensor.
 // CHECK: %[[SHAPE_1:.*]] = shape.shape_of %[[ARG_1]] : tensor<*xf32>
 // CHECK: %[[NUM_ELEMENTS:.*]] = shape.num_elements %[[SHAPE_1]] : tensor<?xindex> -> index
-// CHECK: %[[SIZE_TENSOR:.*]] = tensor_from_elements(%[[NUM_ELEMENTS]]) : tensor<1xindex>
+// CHECK: %[[SIZE_TENSOR:.*]] = tensor_from_elements %[[NUM_ELEMENTS]] : tensor<1xindex>
 // CHECK: %[[RESHAPED:.*]] = "mhlo.dynamic_reshape"(%[[ARG_1]], %[[SIZE_TENSOR]]) : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
 // The assuming region is part of the second stage of lowering
 // with ranked broadcasting logic.
@@ -288,7 +288,7 @@ func @addUnrankedScalar(%arg0: tensor<*xf32>, %arg1: tensor<f32>) -> tensor<*xf32>
 // to a 1D tensor.
 // CHECK: %[[SHAPE_0:.*]] = shape.shape_of %[[ARG_0]] : tensor<*xf32>
 // CHECK: %[[NUM_ELEMENTS:.*]] = shape.num_elements %[[SHAPE_0]] : tensor<?xindex> -> index
-// CHECK: %[[SIZE_TENSOR:.*]] = tensor_from_elements(%[[NUM_ELEMENTS]]) : tensor<1xindex>
+// CHECK: %[[SIZE_TENSOR:.*]] = tensor_from_elements %[[NUM_ELEMENTS]] : tensor<1xindex>
 // CHECK: %[[RESHAPED:.*]] = "mhlo.dynamic_reshape"(%[[ARG_0]], %[[SIZE_TENSOR]]) : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
 // The assuming region is part of the second stage of lowering
 // with ranked broadcasting logic.
@@ -170,7 +170,7 @@ func @dyn_broadcast(%operand: memref<?x?xf32>) {
 // BOTH-SAME: (%[[OPERAND:.*]]: memref<?x?xf32>)
   %tensor_operand = tensor_load %operand : memref<?x?xf32>
   %c1 = constant 1 : i64
-  %shape = tensor_from_elements(%c1, %c1, %c1) : tensor<3xi64>
+  %shape = tensor_from_elements %c1, %c1, %c1 : tensor<3xi64>
   %tensor_result = "mhlo.dynamic_broadcast_in_dim"(%tensor_operand, %shape) {
     broadcast_dimensions = dense<[1, 2]> : tensor<2xi64>
   } : (tensor<?x?xf32>, tensor<3xi64>) -> tensor<?x?x?xf32>
@@ -416,7 +416,7 @@ func @add_dyn(%lhs: tensor<?x?xf32>, %rhs: tensor<?x?xf32>) {
 // BOTH: %[[C1:.*]] = constant 1 : index
 // BOTH: %[[DIM1:.*]] = dim %arg0, %[[C1]] : memref<?x?xf32>
 // BOTH: %[[IC1:.*]] = index_cast %[[DIM1]] : index to i64
-// BOTH: %[[SHAPE:.*]] = tensor_from_elements(%[[IC0]], %[[IC1]]) : tensor<2xi64>
+// BOTH: %[[SHAPE:.*]] = tensor_from_elements %[[IC0]], %[[IC1]] : tensor<2xi64>
 // BOTH: %[[C0_:.*]] = constant 0 : index
 // BOTH: %[[EE0:.*]] = extract_element %[[SHAPE]][%[[C0_]]] : tensor<2xi64>
 // BOTH: %[[ICS0:.*]] = index_cast %[[EE0]] : i64 to index
@@ -441,7 +441,7 @@ func @tanh_dyn(%arg0: tensor<?x?xf32>) {
 // BOTH: %[[C1:.*]] = constant 1 : index
 // BOTH: %[[DIM1:.*]] = dim %arg0, %[[C1]] : memref<?x?xf32>
 // BOTH: %[[IC1:.*]] = index_cast %[[DIM1]] : index to i64
-// BOTH: %[[SHAPE:.*]] = tensor_from_elements(%[[IC0]], %[[IC1]]) : tensor<2xi64>
+// BOTH: %[[SHAPE:.*]] = tensor_from_elements %[[IC0]], %[[IC1]] : tensor<2xi64>
 // BOTH: %[[C0_:.*]] = constant 0 : index
 // BOTH: %[[EE0:.*]] = extract_element %[[SHAPE]][%[[C0_]]] : tensor<2xi64>
 // BOTH: %[[ICS0:.*]] = index_cast %[[EE0]] : i64 to index
@@ -7,7 +7,7 @@ func @sqr_transform_result(%a: tensor<*xf32>) -> tensor<*xf32> {
   // Flatten operand shape.
   %shape = shape.shape_of %a : tensor<*xf32> -> tensor<?xindex>
   %num_elements = shape.num_elements %shape : tensor<?xindex> -> index
-  %flat_shape = tensor_from_elements(%num_elements) : tensor<1xindex>
+  %flat_shape = tensor_from_elements %num_elements : tensor<1xindex>
   %flat_a = "mhlo.dynamic_reshape"(%a, %flat_shape)
       : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
@@ -29,7 +29,7 @@ func @sqr_transform_result(%a: tensor<*xf32>) -> tensor<*xf32> {
 func @sqrt(%a: tensor<*xf32>) -> tensor<*xf32> {
 // CHECK-NEXT: %[[SHAPE:.*]] = shape.shape_of %[[A]] : tensor<*xf32> -> tensor<?xindex>
 // CHECK-NEXT: %[[NUM_ELEMENTS:.*]] = shape.num_elements %[[SHAPE]]
-// CHECK-NEXT: %[[FLAT_SHAPE:.*]] = tensor_from_elements(%[[NUM_ELEMENTS]]) : tensor<1xindex>
+// CHECK-NEXT: %[[FLAT_SHAPE:.*]] = tensor_from_elements %[[NUM_ELEMENTS]] : tensor<1xindex>
 // CHECK-NEXT: %[[FLAT_A:.*]] = "mhlo.dynamic_reshape"(%[[A]], %[[FLAT_SHAPE]]) : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
 // CHECK-NEXT: %[[FLAT_B:.*]] = "mhlo.sqrt"(%[[FLAT_A]]) : (tensor<?xf32>) -> tensor<?xf32>
 // CHECK-NEXT: %[[B:.*]] = "mhlo.dynamic_reshape"(%[[FLAT_B]], %[[SHAPE]]) : (tensor<?xf32>, tensor<?xindex>) -> tensor<*xf32>
@@ -71,7 +71,7 @@ func @add_unranked(%a : tensor<*xf32>, %b : tensor<*xf32>) -> tensor<*xf32> {
 // CHECK: %[[SHAPE_B:.*]] = shape.shape_of %[[B]]
 // CHECK: %[[SHAPE:.*]] = shape.any %[[SHAPE_A]], %[[SHAPE_B]]
 // CHECK: %[[NUM_ELEMENTS:.*]] = shape.num_elements %[[SHAPE]]
-// CHECK: %[[FLAT_SHAPE:.*]] = tensor_from_elements(%[[NUM_ELEMENTS]]) : tensor<1xindex>
+// CHECK: %[[FLAT_SHAPE:.*]] = tensor_from_elements %[[NUM_ELEMENTS]] : tensor<1xindex>
 // CHECK: %[[FLAT_A:.*]] = "mhlo.dynamic_reshape"(%[[A]], %[[FLAT_SHAPE]]) : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
 // CHECK: %[[FLAT_B:.*]] = "mhlo.dynamic_reshape"(%[[B]], %[[FLAT_SHAPE]]) : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
 // CHECK: %[[FLAT_RESULT:.*]] = mhlo.add %[[FLAT_A]], %[[FLAT_B]] : tensor<?xf32>
@@ -109,7 +109,7 @@ func @batchNormInference_dynamic_shape(
 // CHECK-DAG: %[[C3:.*]] = constant 3 : index
 // CHECK-DAG: %[[EPS:.+]] = mhlo.constant dense<1.000000e-03> : tensor<f32>
 // CHECK-DAG: %[[DIM:.+]] = dim %[[VARIANCE]], %[[C0]] : tensor<?xf32>
-// CHECK-DAG: %[[TO_DIM_TENSOR:.+]] = tensor_from_elements(%[[DIM]]) : tensor<1xindex>
+// CHECK-DAG: %[[TO_DIM_TENSOR:.+]] = tensor_from_elements %[[DIM]] : tensor<1xindex>
 // CHECK-DAG: %[[EPS_BCAST:.+]] = "mhlo.dynamic_broadcast_in_dim"(%[[EPS]], %[[TO_DIM_TENSOR]]) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>, tensor<1xindex>) -> tensor<?xf32>
 // CHECK-DAG: %[[VARIANCE_EPS:.+]] = mhlo.add %[[VARIANCE]], %[[EPS_BCAST]] : tensor<?xf32>
 // CHECK-DAG: %[[STDDEV:.+]] = "mhlo.sqrt"(%[[VARIANCE_EPS]]) : (tensor<?xf32>) -> tensor<?xf32>
@@ -117,7 +117,7 @@ func @batchNormInference_dynamic_shape(
 // CHECK-DAG: %[[INPUT_DIM_1:.+]] = dim %[[X]], %[[C1]] : tensor<?x?x?x?xf32>
 // CHECK-DAG: %[[INPUT_DIM_2:.+]] = dim %[[X]], %[[C2]] : tensor<?x?x?x?xf32>
 // CHECK-DAG: %[[INPUT_DIM_3:.+]] = dim %[[X]], %[[C3]] : tensor<?x?x?x?xf32>
-// CHECK-DAG: %[[TO_INPUT_DIM_TENSOR:.+]] = tensor_from_elements(%[[INPUT_DIM_0]], %[[INPUT_DIM_1]], %[[INPUT_DIM_2]], %[[INPUT_DIM_3]]) : tensor<4xindex>
+// CHECK-DAG: %[[TO_INPUT_DIM_TENSOR:.+]] = tensor_from_elements %[[INPUT_DIM_0]], %[[INPUT_DIM_1]], %[[INPUT_DIM_2]], %[[INPUT_DIM_3]] : tensor<4xindex>
 // CHECK-DAG: %[[STDDEV_BCAST:.+]] = "mhlo.dynamic_broadcast_in_dim"(%[[STDDEV]], %[[TO_INPUT_DIM_TENSOR]]) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<4xindex>) -> tensor<?x?x?x?xf32>
 // CHECK-DAG: %[[SCALE_BCAST:.+]] = "mhlo.dynamic_broadcast_in_dim"(%[[SCALE]], %[[TO_INPUT_DIM_TENSOR]]) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<4xindex>) -> tensor<?x?x?x?xf32>
 // CHECK-DAG: %[[OFFSET_BCAST:.+]] = "mhlo.dynamic_broadcast_in_dim"(%[[OFFSET]], %[[TO_INPUT_DIM_TENSOR]]) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<4xindex>) -> tensor<?x?x?x?xf32>
@@ -28,6 +28,7 @@ cc_library(
         "@llvm-project//llvm:Support",
         "@llvm-project//mlir:IR",
         "@llvm-project//mlir:Pass",
+        "@llvm-project//mlir:SCFDialect",
         "@llvm-project//mlir:StandardOps",
         "@llvm-project//mlir:Support",
         "@llvm-project//mlir:Transforms",
@@ -19,10 +19,14 @@ limitations under the License.
 #include <memory>
 
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "mlir/Dialect/SCF/SCF.h" // from @llvm-project
 #include "mlir/Dialect/StandardOps/IR/Ops.h" // from @llvm-project
+#include "mlir/IR/BlockAndValueMapping.h" // from @llvm-project
 #include "mlir/IR/Function.h" // from @llvm-project
 #include "mlir/IR/MLIRContext.h" // from @llvm-project
 #include "mlir/IR/Operation.h" // from @llvm-project
 #include "mlir/IR/OperationSupport.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
 #include "mlir/Transforms/BufferPlacement.h" // from @llvm-project
@@ -58,6 +62,58 @@ class TensorFromElementsOpConverter
   }
 };
 
+class DynamicTensorFromElementsOpConverter
+    : public BufferAssignmentOpConversionPattern<DynamicTensorFromElementsOp> {
+ public:
+  using BufferAssignmentOpConversionPattern<
+      DynamicTensorFromElementsOp>::BufferAssignmentOpConversionPattern;
+
+  LogicalResult matchAndRewrite(
+      DynamicTensorFromElementsOp op, ArrayRef<Value> operands,
+      ConversionPatternRewriter &rewriter) const final {
+    // Allocate memory on stack.
+    Location loc = op.getLoc();
+    DynamicTensorFromElementsOp::Adaptor transformed(operands);
+    RankedTensorType tensor_ty = op.getType().cast<RankedTensorType>();
+    MemRefType memref_type =
+        MemRefType::get(tensor_ty.getShape(), tensor_ty.getElementType());
+    Value result = rewriter.create<AllocaOp>(loc, memref_type,
+                                             transformed.dynamicExtents());
+
+    // Collect loop bounds.
+    int64_t rank = tensor_ty.getRank();
+    Value zero = rewriter.create<ConstantIndexOp>(loc, 0);
+    Value one = rewriter.create<ConstantIndexOp>(loc, 1);
+    SmallVector<Value, 4> lower_bounds(rank, zero);
+    SmallVector<Value, 4> steps(rank, one);
+    SmallVector<Value, 4> upper_bounds;
+    int next_dynamic_index = 0;
+    for (int i = 0; i < rank; i++) {
+      Value ub = tensor_ty.isDynamicDim(i)
+                     ? transformed.dynamicExtents()[next_dynamic_index++]
+                     : rewriter.create<ConstantIndexOp>(
+                           loc, memref_type.getDimSize(i));
+      upper_bounds.push_back(ub);
+    }
+
+    // Generate tensor elements.
+    rewriter.create<scf::ParallelOp>(
+        loc, lower_bounds, upper_bounds, steps,
+        [&](OpBuilder &b, Location loc, ValueRange ivs) {
+          BlockAndValueMapping mapping;
+          mapping.map(op.body().getArguments(), ivs);
+          for (auto &nested_op : op.getBody()->without_terminator())
+            b.clone(nested_op, mapping);
+          auto yield_op = llvm::cast<YieldOp>(op.getBody()->getTerminator());
+          b.create<StoreOp>(loc, mapping.lookup(yield_op.value()), result, ivs);
+          b.create<scf::YieldOp>(loc);
+        });
+
+    rewriter.replaceOp(op, {result});
+    return success();
+  }
+};
+
 class TensorLoadOpConversion
     : public BufferAssignmentOpConversionPattern<TensorLoadOp> {
  public:
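
The new `DynamicTensorFromElementsOpConverter` above bufferizes `dynamic_tensor_from_elements` by allocating a stack buffer for the result and materializing every element inside an `scf.parallel` nest: the op's body region is cloned per iteration and its `yield` becomes a `store`. Roughly, as a hand-written sketch (value names and the surrounding IR are illustrative, not taken from this commit's tests):

  // Before bufferization: one dynamic extent, the body region yields each element.
  %size = constant 8 : index
  %t = dynamic_tensor_from_elements %size {
  ^bb0(%i : index):
    %v = index_cast %i : index to i64
    yield %v : i64
  } : tensor<?xi64>

  // After bufferization (sketch): a stack allocation plus a parallel store loop.
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %buf = alloca(%size) : memref<?xi64>
  scf.parallel (%i) = (%c0) to (%size) step (%c1) {
    %v = index_cast %i : index to i64
    store %v, %buf[%i] : memref<?xi64>
  }
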
@@ -99,8 +155,10 @@ class ExtractElementOpConversion
 void populateStandardBufferizePattern(MLIRContext *context,
                                       BufferAssignmentTypeConverter *converter,
                                       OwningRewritePatternList *patterns) {
-  patterns->insert<ExtractElementOpConversion, TensorFromElementsOpConverter,
-                   TensorLoadOpConversion>(context, converter);
+  patterns
+      ->insert<ExtractElementOpConversion, TensorFromElementsOpConverter,
+               DynamicTensorFromElementsOpConverter, TensorLoadOpConversion>(
+          context, converter);
 }
 
 }  // namespace transforms
@@ -75,15 +75,12 @@ struct BufferizePass : public BufferizePassBase<BufferizePass> {
   void runOnOperation() override {
     auto& context = getContext();
     ConversionTarget target(context);
-    target.addLegalDialect<lmhlo::LmhloDialect>();
-    target.addLegalDialect<StandardOpsDialect>();
-    target.addLegalDialect<scf::SCFDialect>();
-    target.addLegalOp<ModuleOp>();
-    target.addLegalOp<ModuleTerminatorOp>();
+    target.addLegalDialect<lmhlo::LmhloDialect, scf::SCFDialect,
+                           StandardOpsDialect>();
+    target.addLegalOp<ModuleOp, ModuleTerminatorOp>();
     target.addIllegalDialect<mhlo::MhloDialect>();
-    target.addIllegalOp<TensorFromElementsOp>();
-    target.addIllegalOp<ExtractElementOp>();
-    target.addIllegalOp<TensorLoadOp>();
+    target.addIllegalOp<DynamicTensorFromElementsOp, ExtractElementOp,
+                        TensorFromElementsOp, TensorLoadOp, YieldOp>();
     target.addDynamicallyLegalOp<TensorStoreOp>([&](TensorStoreOp op) {
       return !op.tensor().getType().isa<UnrankedTensorType>();
     });
@@ -722,8 +722,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
     )
 
     # Check out LLVM and MLIR from llvm-project.
-    LLVM_COMMIT = "4964d75d7078b932ac6b17c1990adaa6eada75c1"
-    LLVM_SHA256 = "39b959e3395f2669024d2331d742ec1fd90cf1db8e9b428e750cefd571d77677"
+    LLVM_COMMIT = "52f0837778b6f3b742b36c22b7c608535a52097b"
+    LLVM_SHA256 = "75dbed152662185c937f540339dd4ecb3ffa39160bac364b3aa516168c48b657"
     LLVM_URLS = [
         "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
         "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),