Updates LLVM usage to match
[7f6f9f4cf966](https://github.com/llvm/llvm-project/commit/7f6f9f4cf966)

PiperOrigin-RevId: 345745888
Change-Id: Id11822b32c3605a5d86c28aeacfec2f2c03da164
This commit is contained in:
A. Unique TensorFlower 2020-12-04 13:32:48 -08:00 committed by TensorFlower Gardener
parent 80aa374b54
commit 1dfcf5cdd7
3 changed files with 15 additions and 15 deletions

View File

@@ -151,7 +151,7 @@ class TFAllocOpConverter : public ConvertToLLVMCallOpPattern<TFAllocOp> {
Value allocated_byte_ptr,
ArrayRef<Value> sizes) const {
auto memref_desc = MemRefDescriptor::undef(
-rewriter, loc, typeConverter.convertType(memref_type));
+rewriter, loc, typeConverter->convertType(memref_type));
// TF AllocateRaw returns aligned pointer => AllocatedPtr == AlignedPtr.
Value allocated_type_ptr = rewriter.create<LLVM::BitcastOp>(
@@ -267,7 +267,7 @@ class ReportErrorOpConverter
// Insert function call.
FlatSymbolRefAttr tf_func_ref = getOrInsertTFFunction(rewriter, op);
Value error_code = rewriter.create<LLVM::ConstantOp>(
-loc, typeConverter.convertType(rewriter.getI32Type()),
+loc, typeConverter->convertType(rewriter.getI32Type()),
transformed.error_code());
rewriter.replaceOpWithNewOp<LLVM::CallOp>(
@@ -279,7 +279,7 @@ class ReportErrorOpConverter
protected:
StringRef GetFuncName() const override { return kCInterfaceReportError; }
LLVMType GetFuncType() const override {
-MLIRContext *ctx = &this->typeConverter.getContext();
+MLIRContext *ctx = &getTypeConverter()->getContext();
auto i8_ptr_type = LLVM::LLVMType::getInt8Ty(ctx).getPointerTo();
auto i32_type = LLVM::LLVMType::getInt32Ty(ctx);
return LLVM::LLVMType::getFunctionTy(
@@ -310,7 +310,7 @@ class ReportErrorOpConverter
Value globalPtr = builder.create<LLVM::AddressOfOp>(
loc, cast<LLVM::GlobalOp>(global_constant));
-MLIRContext *ctx = &this->typeConverter.getContext();
+MLIRContext *ctx = &getTypeConverter()->getContext();
Value c0 = builder.create<LLVM::ConstantOp>(
loc, LLVM::LLVMType::getInt64Ty(ctx),
builder.getIntegerAttr(builder.getIndexType(), 0));
@@ -346,7 +346,7 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern<NullMemRefOp> {
Location loc = op->getLoc();
auto result_type = null_memref_op.getType().cast<UnrankedMemRefType>();
LLVMType llvm_result_type =
-typeConverter.convertType(result_type).cast<LLVMType>();
+typeConverter->convertType(result_type).cast<LLVMType>();
auto desc =
UnrankedMemRefDescriptor::undef(rewriter, loc, llvm_result_type);
@@ -357,8 +357,8 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern<NullMemRefOp> {
// have to actually construct a ranked underlying descriptor instead of just
// setting its pointer to NULL.
SmallVector<Value, 4> sizes;
-UnrankedMemRefDescriptor::computeSizes(rewriter, loc, typeConverter, desc,
-sizes);
+UnrankedMemRefDescriptor::computeSizes(rewriter, loc, *getTypeConverter(),
+desc, sizes);
Value underlying_desc_ptr = rewriter.create<LLVM::AllocaOp>(
loc, getVoidPtrType(), sizes.front(), llvm::None);
@@ -366,7 +366,7 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern<NullMemRefOp> {
unsigned address_space = result_type.getMemorySpace();
Type elem_type = result_type.getElementType();
LLVM::LLVMType llvm_elem_type =
-typeConverter.convertType(elem_type).cast<LLVMType>();
+typeConverter->convertType(elem_type).cast<LLVMType>();
LLVM::LLVMType elem_ptr_ptr_type =
llvm_elem_type.getPointerTo(address_space).getPointerTo();
@@ -374,10 +374,10 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern<NullMemRefOp> {
loc, llvm_elem_type.getPointerTo(address_space));
UnrankedMemRefDescriptor::setAllocatedPtr(
rewriter, loc, underlying_desc_ptr, elem_ptr_ptr_type, nullPtr);
-UnrankedMemRefDescriptor::setAlignedPtr(rewriter, loc, typeConverter,
+UnrankedMemRefDescriptor::setAlignedPtr(rewriter, loc, *getTypeConverter(),
underlying_desc_ptr,
elem_ptr_ptr_type, nullPtr);
-UnrankedMemRefDescriptor::setOffset(rewriter, loc, typeConverter,
+UnrankedMemRefDescriptor::setOffset(rewriter, loc, *getTypeConverter(),
underlying_desc_ptr, elem_ptr_ptr_type,
zero);

View File

@@ -59,7 +59,7 @@ class ConvertLaunchFuncOpToTfRuntimeCallPattern
gpu::LaunchFuncOp launch_op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override;
-MLIRContext *context_ = &this->typeConverter.getContext();
+MLIRContext *context_ = &this->getTypeConverter()->getContext();
LLVM::LLVMType llvm_void_type_ = LLVM::LLVMType::getVoidTy(context_);
LLVM::LLVMType llvm_pointer_type_ = LLVM::LLVMType::getInt8PtrTy(context_);
@@ -68,7 +68,7 @@ class ConvertLaunchFuncOpToTfRuntimeCallPattern
LLVM::LLVMType llvm_int32_type_ = LLVM::LLVMType::getInt32Ty(context_);
LLVM::LLVMType llvm_int64_type_ = LLVM::LLVMType::getInt64Ty(context_);
LLVM::LLVMType llvm_intptr_type_ = LLVM::LLVMType::getIntNTy(
-context_, this->typeConverter.getPointerBitwidth(0));
+context_, this->getTypeConverter()->getPointerBitwidth(0));
llvm::SmallString<32> gpu_binary_annotation_;
};
@@ -91,7 +91,7 @@ Value ConvertLaunchFuncOpToTfRuntimeCallPattern::generateParamsArray(
OpBuilder &builder) const {
auto loc = launch_op.getLoc();
auto num_kernel_operands = launch_op.getNumKernelOperands();
-auto arguments = typeConverter.promoteOperands(
+auto arguments = getTypeConverter()->promoteOperands(
loc, launch_op.getOperands().take_back(num_kernel_operands),
operands.take_back(num_kernel_operands), builder);
auto num_arguments = arguments.size();

View File

@@ -685,8 +685,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
)
# Check out LLVM and MLIR from llvm-project.
-LLVM_COMMIT = "f5d52916ce34f68a2fb4de69844f1b51b6bd0a13"
-LLVM_SHA256 = "3e28017a6f81c457180774d522ecec65c3a9ec2150b6195d109ee3c5c810f4da"
+LLVM_COMMIT = "7f6f9f4cf966c78a315d15d6e913c43cfa45c47c"
+LLVM_SHA256 = "f82cbab6921cd62a444eedff4305320d955a409a2181fd6d5a6ee0054a758c39"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),