diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_framework_legalize_to_llvm.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_framework_legalize_to_llvm.cc
index 2ad9f6c9455..03b6636b2a5 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_framework_legalize_to_llvm.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_framework_legalize_to_llvm.cc
@@ -151,7 +151,7 @@ class TFAllocOpConverter : public ConvertToLLVMCallOpPattern {
                                           Value allocated_byte_ptr,
                                           ArrayRef sizes) const {
     auto memref_desc = MemRefDescriptor::undef(
-        rewriter, loc, typeConverter.convertType(memref_type));
+        rewriter, loc, typeConverter->convertType(memref_type));
 
     // TF AllocateRaw returns aligned pointer => AllocatedPtr == AlignedPtr.
     Value allocated_type_ptr = rewriter.create(
@@ -267,7 +267,7 @@ class ReportErrorOpConverter
     // Insert function call.
     FlatSymbolRefAttr tf_func_ref = getOrInsertTFFunction(rewriter, op);
     Value error_code = rewriter.create(
-        loc, typeConverter.convertType(rewriter.getI32Type()),
+        loc, typeConverter->convertType(rewriter.getI32Type()),
         transformed.error_code());
 
     rewriter.replaceOpWithNewOp(
@@ -279,7 +279,7 @@ class ReportErrorOpConverter
  protected:
   StringRef GetFuncName() const override { return kCInterfaceReportError; }
   LLVMType GetFuncType() const override {
-    MLIRContext *ctx = &this->typeConverter.getContext();
+    MLIRContext *ctx = &getTypeConverter()->getContext();
     auto i8_ptr_type = LLVM::LLVMType::getInt8Ty(ctx).getPointerTo();
     auto i32_type = LLVM::LLVMType::getInt32Ty(ctx);
     return LLVM::LLVMType::getFunctionTy(
@@ -310,7 +310,7 @@ class ReportErrorOpConverter
 
     Value globalPtr = builder.create(
        loc, cast(global_constant));
-    MLIRContext *ctx = &this->typeConverter.getContext();
+    MLIRContext *ctx = &getTypeConverter()->getContext();
     Value c0 = builder.create(
         loc, LLVM::LLVMType::getInt64Ty(ctx),
         builder.getIntegerAttr(builder.getIndexType(), 0));
@@ -346,7 +346,7 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern {
     Location loc = op->getLoc();
     auto result_type = null_memref_op.getType().cast();
     LLVMType llvm_result_type =
-        typeConverter.convertType(result_type).cast();
+        typeConverter->convertType(result_type).cast();
 
     auto desc =
         UnrankedMemRefDescriptor::undef(rewriter, loc, llvm_result_type);
@@ -357,8 +357,8 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern {
     // have to actually construct a ranked underlying descriptor instead of just
     // setting its pointer to NULL.
     SmallVector sizes;
-    UnrankedMemRefDescriptor::computeSizes(rewriter, loc, typeConverter, desc,
-                                           sizes);
+    UnrankedMemRefDescriptor::computeSizes(rewriter, loc, *getTypeConverter(),
+                                           desc, sizes);
     Value underlying_desc_ptr = rewriter.create(
         loc, getVoidPtrType(), sizes.front(), llvm::None);
 
@@ -366,7 +366,7 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern {
     unsigned address_space = result_type.getMemorySpace();
     Type elem_type = result_type.getElementType();
     LLVM::LLVMType llvm_elem_type =
-        typeConverter.convertType(elem_type).cast();
+        typeConverter->convertType(elem_type).cast();
     LLVM::LLVMType elem_ptr_ptr_type =
         llvm_elem_type.getPointerTo(address_space).getPointerTo();
 
@@ -374,10 +374,10 @@ class NullMemRefOpConverter : public ConvertOpToLLVMPattern {
         loc, llvm_elem_type.getPointerTo(address_space));
     UnrankedMemRefDescriptor::setAllocatedPtr(
         rewriter, loc, underlying_desc_ptr, elem_ptr_ptr_type, nullPtr);
-    UnrankedMemRefDescriptor::setAlignedPtr(rewriter, loc, typeConverter,
+    UnrankedMemRefDescriptor::setAlignedPtr(rewriter, loc, *getTypeConverter(),
                                             underlying_desc_ptr,
                                             elem_ptr_ptr_type, nullPtr);
-    UnrankedMemRefDescriptor::setOffset(rewriter, loc, typeConverter,
+    UnrankedMemRefDescriptor::setOffset(rewriter, loc, *getTypeConverter(),
                                         underlying_desc_ptr, elem_ptr_ptr_type,
                                         zero);
 
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_kernel_to_llvm_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_kernel_to_llvm_pass.cc
index 60f2d7c9ffe..80fee1f3f13 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_kernel_to_llvm_pass.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_kernel_to_llvm_pass.cc
@@ -59,7 +59,7 @@ class ConvertLaunchFuncOpToTfRuntimeCallPattern
       gpu::LaunchFuncOp launch_op, ArrayRef operands,
       ConversionPatternRewriter &rewriter) const override;
 
-  MLIRContext *context_ = &this->typeConverter.getContext();
+  MLIRContext *context_ = &this->getTypeConverter()->getContext();
 
   LLVM::LLVMType llvm_void_type_ = LLVM::LLVMType::getVoidTy(context_);
   LLVM::LLVMType llvm_pointer_type_ = LLVM::LLVMType::getInt8PtrTy(context_);
@@ -68,7 +68,7 @@ class ConvertLaunchFuncOpToTfRuntimeCallPattern
   LLVM::LLVMType llvm_int32_type_ = LLVM::LLVMType::getInt32Ty(context_);
   LLVM::LLVMType llvm_int64_type_ = LLVM::LLVMType::getInt64Ty(context_);
   LLVM::LLVMType llvm_intptr_type_ = LLVM::LLVMType::getIntNTy(
-      context_, this->typeConverter.getPointerBitwidth(0));
+      context_, this->getTypeConverter()->getPointerBitwidth(0));
 
   llvm::SmallString<32> gpu_binary_annotation_;
 };
@@ -91,7 +91,7 @@ Value ConvertLaunchFuncOpToTfRuntimeCallPattern::generateParamsArray(
     OpBuilder &builder) const {
   auto loc = launch_op.getLoc();
   auto num_kernel_operands = launch_op.getNumKernelOperands();
-  auto arguments = typeConverter.promoteOperands(
+  auto arguments = getTypeConverter()->promoteOperands(
       loc, launch_op.getOperands().take_back(num_kernel_operands),
       operands.take_back(num_kernel_operands), builder);
   auto num_arguments = arguments.size();
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index d5bc33c27e1..33745ee8c41 100755
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -685,8 +685,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
     )
 
     # Check out LLVM and MLIR from llvm-project.
-    LLVM_COMMIT = "f5d52916ce34f68a2fb4de69844f1b51b6bd0a13"
-    LLVM_SHA256 = "3e28017a6f81c457180774d522ecec65c3a9ec2150b6195d109ee3c5c810f4da"
+    LLVM_COMMIT = "7f6f9f4cf966c78a315d15d6e913c43cfa45c47c"
+    LLVM_SHA256 = "f82cbab6921cd62a444eedff4305320d955a409a2181fd6d5a6ee0054a758c39"
     LLVM_URLS = [
         "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
         "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
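
Note on the C++ changes above: they are mechanical fallout of the upstream MLIR conversion-pattern API moving from a protected `typeConverter` reference member to a `getTypeConverter()` accessor that returns a pointer, so call sites switch from dot to arrow syntax (or dereference the pointer where a reference is still expected, as in `*getTypeConverter()`). The self-contained sketch below only mimics the shape of that change with invented mock classes (`TypeConverterMock`, `OldStylePattern`, `NewStylePattern`); it is not the real MLIR API and is not part of the patch.

// Illustrative sketch only; all names here are hypothetical stand-ins.
#include <iostream>
#include <string>

// Hypothetical stand-in for an LLVM type converter.
struct TypeConverterMock {
  std::string convertType(const std::string &type) const {
    return "!llvm." + type;  // pretend type conversion
  }
};

// Old call-site shape: dot syntax on a reference member.
struct OldStylePattern {
  explicit OldStylePattern(TypeConverterMock &tc) : typeConverter(tc) {}
  std::string lower(const std::string &type) const {
    return typeConverter.convertType(type);
  }
  TypeConverterMock &typeConverter;
};

// New call-site shape: arrow syntax through a pointer-returning accessor.
struct NewStylePattern {
  explicit NewStylePattern(TypeConverterMock &tc) : type_converter_(&tc) {}
  const TypeConverterMock *getTypeConverter() const { return type_converter_; }
  std::string lower(const std::string &type) const {
    return getTypeConverter()->convertType(type);
  }
  const TypeConverterMock *type_converter_;
};

int main() {
  TypeConverterMock converter;
  // Both styles produce the same result; only the access pattern differs.
  std::cout << OldStylePattern(converter).lower("i32") << "\n"
            << NewStylePattern(converter).lower("i32") << "\n";
  return 0;
}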