Updates LLVM usage to match
[6713332fddb7](https://github.com/llvm/llvm-project/commit/6713332fddb7)

PiperOrigin-RevId: 336988895
Change-Id: Iaaa161ebb58e2ad765dfb2741353441f15a9875a
Author:    A. Unique TensorFlower
Committed: TensorFlower Gardener
Date:      2020-10-13 17:20:27 -07:00
Commit:    25d6743828
Parent:    56d8f02a2d
20 changed files with 28 additions and 28 deletions
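
Every hunk below is the same mechanical migration: the upstream MLIR change referenced above (6713332fddb7) switched populateWithGenerated (and the related populate helpers) from taking the pattern list by pointer to taking it by reference, and likewise now passes the TypeConverter by reference where applicable. A minimal, self-contained sketch of the call-site change, using toy stand-in types rather than the real MLIR ones:

    #include <vector>

    // Toy stand-ins for MLIRContext / OwningRewritePatternList so this sketch
    // compiles without MLIR; only the parameter-passing convention matters.
    struct ToyContext {};
    using ToyPatternList = std::vector<int>;

    // Before this LLVM bump (sketch): the populate helper took a pointer.
    void populateOld(ToyContext *ctx, ToyPatternList *patterns) {
      patterns->push_back(0);
    }

    // After (sketch): it takes a reference, so callers drop the '&' on local
    // objects and add a '*' when forwarding an incoming pointer parameter.
    void populateNew(ToyContext *ctx, ToyPatternList &patterns) {
      patterns.push_back(0);
    }

    void CallSites(ToyContext *ctx, ToyPatternList *patterns) {
      ToyPatternList local;
      populateOld(ctx, &local);     // old style: address-of local object
      populateNew(ctx, local);      // new style: pass the object directly
      populateOld(ctx, patterns);   // old style: forward the pointer
      populateNew(ctx, *patterns);  // new style: dereference the pointer
    }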

@@ -516,7 +516,7 @@ struct HloCompareAdaptor {
 void PopulateLegalizeChloToHloPatterns(MLIRContext *context,
                                        OwningRewritePatternList *patterns) {
-  populateWithGenerated(context, patterns);
+  populateWithGenerated(context, *patterns);
   // Instantiate conversion templates for conforming binary elementwise ops
   // that do not have different dtypes between operands and results and do

@@ -464,9 +464,9 @@ struct HloLegalizeToLhlo
     populateHLOToLHLOConversionPattern(&context, &converter, &patterns);
     populateWithBufferAssignmentOpConversionPatterns<
-        mlir::ReturnOp, mlir::ReturnOp, lmhlo::CopyOp>(&context, &converter,
-                                                       &patterns);
-    populateShapeTypeConversionPatterns(&context, &converter, &patterns);
+        mlir::ReturnOp, mlir::ReturnOp, lmhlo::CopyOp>(&context, converter,
+                                                       patterns);
+    populateShapeTypeConversionPatterns(&context, converter, patterns);
     if (failed(applyPartialConversion(getOperation(), target, patterns)))
       signalPassFailure();
   }
@@ -531,7 +531,7 @@ void populateHLOToLHLOConversionPattern(
       HloToLhloReturnOpConverter,
       HloToLhloTensorLoadOpConverter,
       HloToLhloTensorStoreOpConverter
-  >(context, converter);
+  >(context, *converter);
   // clang-format on
 }

@@ -193,7 +193,7 @@ std::unique_ptr<mlir::OperationPass<mlir::FuncOp>> createLegalizeToStdPass() {
 void PopulateMhloToStdPatterns(OwningRewritePatternList *patterns,
                                mlir::MLIRContext *ctx) {
-  mlir::populateWithGenerated(ctx, patterns);
+  mlir::populateWithGenerated(ctx, *patterns);
   patterns->insert<CompareFConvert, CompareIConvert, ConvertIotaOp>(ctx);
 }

@@ -59,7 +59,7 @@ namespace {
 void PopulateComplexLoweringPatterns(MLIRContext* context,
                                      OwningRewritePatternList* patterns) {
-  populateWithGenerated(context, patterns);
+  populateWithGenerated(context, *patterns);
 }
 }  // end namespace mhlo
 }  // end namespace mlir

@@ -665,7 +665,7 @@ void LegalizeTF::runOnFunction() {
   auto func = getFunction();
   // Add the generated patterns to the list.
-  populateWithGenerated(context, &patterns);
+  populateWithGenerated(context, patterns);
   patterns
       .insert<ConvertTFConcatV2Op, ConvertTFMatMulOp, ConvertTFMatrixDiagV2Op,
               ConvertTFMatrixDiagV3Op, ConvertTFPackOp, ConvertTFReshapeOp,

@@ -892,7 +892,7 @@ LogicalResult LowerStaticTensorListPass::RewriteFunction(
   target.addLegalOp<TFL::BidirectionalSequenceLSTMOp>();
   OwningRewritePatternList patterns;
-  populateWithGenerated(context, &patterns);
+  populateWithGenerated(context, patterns);
   patterns.insert<ConvertConst, ConvertEmptyTensorList, ConvertIdentity,
                   ConvertTensorListGetItem, ConvertTensorListLength,
                   ConvertTensorListPushBack, ConvertTensorListReserve,

@@ -798,7 +798,7 @@ void Optimize::runOnFunction() {
   // Potentially the binary ops might be fused together, like hard_swish, thus
   // we explore these potentially first and then fuse the binary ops with the
   // following ops in a second pattern match.
-  TFL::populateWithGenerated(ctx, &patterns);
+  TFL::populateWithGenerated(ctx, patterns);
   patterns.insert<FuseFullyConnectedAndAdd,
                   FuseFullyConnectedAndReluX<TFL::ReluOp, kRelu>,
                   FuseFullyConnectedAndReluX<TFL::Relu6Op, kRelu6>,

@@ -144,7 +144,7 @@ void PostQuantizePass::runOnFunction() {
   OwningRewritePatternList patterns;
   auto func = getFunction();
   auto* ctx = func.getContext();
-  TFL::populateWithGenerated(ctx, &patterns);
+  TFL::populateWithGenerated(ctx, patterns);
   patterns.insert<quant::FoldTrivalRequantizeOp<QuantizeOp>>(ctx);
   applyPatternsAndFoldGreedily(func, patterns);

@@ -1203,7 +1203,7 @@ void PrepareTFPass::runOnFunction() {
                   ConvertTFDilatedConvOp<TF::DepthwiseConv2dNativeOp>>(ctx);
   patterns.insert<ConvertFusedBatchNorm>(ctx);
-  TFL::populateWithGenerated(ctx, &patterns);
+  TFL::populateWithGenerated(ctx, patterns);
   // TODO(karimnosseir): Split to separate pass probably after
   // deciding on long term plan for this optimization.
   // This will allow optimizing any TF_Mul->TF_Conv in the graph
@@ -1214,7 +1214,7 @@ void PrepareTFPass::runOnFunction() {
   // Load the generated pattern again, so new quantization pass-through
   // will be applied.
   patterns.clear();
-  TFL::populateWithGenerated(ctx, &patterns);
+  TFL::populateWithGenerated(ctx, patterns);
   if (unfold_batch_matmul_) {
     patterns.insert<TF::ConvertTFBatchMatMulOp<TF::BatchMatMulOp>,
                     TF::ConvertTFBatchMatMulOp<TF::BatchMatMulV2Op>>(ctx);

@@ -84,7 +84,7 @@ void QuantizePass::runOnFunction() {
   OwningRewritePatternList patterns;
   auto func = getFunction();
   auto* ctx = func.getContext();
-  TFL::populateWithGenerated(ctx, &patterns);
+  TFL::populateWithGenerated(ctx, patterns);
   patterns.insert<TFLFullQuantization>(
       ctx, enable_numeric_verify, error_tolerance, enable_single_layer_verify);
   applyPatternsAndFoldGreedily(func, patterns);

@@ -73,7 +73,7 @@ static Type GetResourceSubtype(Value resource) {
 void PopulateDecomposeResourceOpsPatterns(MLIRContext *context,
                                           OwningRewritePatternList *patterns) {
-  populateWithGenerated(context, patterns);
+  populateWithGenerated(context, *patterns);
 }
 }  // namespace TF

@@ -821,7 +821,7 @@ static PassRegistration<LegalizeHloToTf> pass(
 void PopulateLegalizeHloToTfPatterns(OwningRewritePatternList *patterns,
                                      MLIRContext *context) {
-  populateWithGenerated(context, patterns);
+  populateWithGenerated(context, *patterns);
   patterns->insert<ConvertConvOp, ConvertSliceOp, ConvertReduceOpToTfMax,
                    ConvertReduceOpToTfMin, ConvertReduceOpToTfSum,
                    ConvertIotaOpToTfRange>(context);

@@ -780,7 +780,7 @@ void PopulateLoweringTFPatterns(MLIRContext *context,
           LowerDynamicStitchOp, LowerInvertPermutationOp, LowerPackOp,
           LowerSpaceToBatchNDOp, LowerSparseMatMulOp,
           Lower_UnaryOpsComposition>(context);
-  populateWithGenerated(context, patterns);
+  populateWithGenerated(context, *patterns);
 }
 }  // namespace TF

@@ -37,7 +37,7 @@ struct TFOptimizePass : public PassWrapper<TFOptimizePass, FunctionPass> {
   void runOnFunction() override {
     OwningRewritePatternList patterns;
     auto func = getFunction();
-    populateWithGenerated(&getContext(), &patterns);
+    populateWithGenerated(&getContext(), patterns);
     applyPatternsAndFoldGreedily(func, patterns);
   }
 };

@@ -58,7 +58,7 @@ struct FuseParallelMapAndBatch : public OpRewritePattern<BatchDatasetV2Op> {
 void PopulateTFDataOptimizationPatterns(MLIRContext *context,
                                         OwningRewritePatternList *patterns) {
   patterns->insert<FuseParallelMapAndBatch>(context);
-  populateWithGenerated(context, patterns);
+  populateWithGenerated(context, *patterns);
 }
 }  // namespace TF

@@ -50,7 +50,7 @@ void Optimize::runOnFunction() {
   auto *ctx = &getContext();
   auto func = getFunction();
-  populateWithGenerated(ctx, &patterns);
+  populateWithGenerated(ctx, patterns);
   applyPatternsAndFoldGreedily(func, patterns);
 }
 }  // namespace

@@ -162,7 +162,7 @@ class SimpleOpResultConversion
   LogicalResult matchAndRewrite(
       OpTy op, ArrayRef<Value> operands,
       ConversionPatternRewriter &rewriter) const final {
-    rewriter.replaceOpWithNewOp<OpTy>(op, converter->convertType(op.getType()),
+    rewriter.replaceOpWithNewOp<OpTy>(op, converter.convertType(op.getType()),
                                       operands);
     return success();
   }
@@ -180,7 +180,7 @@ class TensorCastOpConverter
     Value arg = operands.front();
     if (!arg.getType().isa<BaseMemRefType>()) return failure();
-    auto result_ty = converter->convertType(op.getType());
+    auto result_ty = converter.convertType(op.getType());
     rewriter.replaceOpWithNewOp<MemRefCastOp>(op, arg, result_ty);
     return success();
@@ -195,7 +195,7 @@ void populateStandardBufferizePattern(MLIRContext *context,
   patterns->insert<ExtractElementOpConversion, TensorFromElementsOpConverter,
                    DynamicTensorFromElementsOpConverter,
                    SimpleOpResultConversion<SelectOp>, TensorLoadOpConversion,
-                   TensorCastOpConverter>(context, converter);
+                   TensorCastOpConverter>(context, *converter);
 }
 }  // namespace transforms
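
The bufferize hunks above also show the TypeConverter moving from a stored pointer to a stored reference, which is why converter->convertType(...) becomes converter.convertType(...). A sketch of the implied pattern shape, assuming the MLIR dialect-conversion header of that era; the class name is illustrative, not from the diff:

    #include "mlir/Transforms/DialectConversion.h"

    namespace sketch {
    using namespace mlir;

    // Illustrative pattern holding the TypeConverter by reference (previously
    // a pointer), mirroring SimpleOpResultConversion in the hunk above.
    template <typename OpTy>
    class ResultConversionSketch : public OpConversionPattern<OpTy> {
     public:
      ResultConversionSketch(MLIRContext *context, TypeConverter &converter)
          : OpConversionPattern<OpTy>(context), converter(converter) {}

      LogicalResult matchAndRewrite(
          OpTy op, ArrayRef<Value> operands,
          ConversionPatternRewriter &rewriter) const final {
        // Member access is now '.' rather than '->'.
        rewriter.replaceOpWithNewOp<OpTy>(
            op, converter.convertType(op.getType()), operands);
        return success();
      }

     private:
      TypeConverter &converter;
    };
    }  // namespace sketch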

@@ -104,9 +104,9 @@ struct BufferizePass : public BufferizePassBase<BufferizePass> {
     mhlo::populateHLOToLHLOConversionPattern(&context, &converter, &patterns);
     populateWithBufferAssignmentOpConversionPatterns<ReturnOp, ReturnOp,
                                                      lmhlo::CopyOp>(
-        &context, &converter, &patterns);
+        &context, converter, patterns);
     populateStandardBufferizePattern(&context, &converter, &patterns);
-    populateShapeTypeConversionPatterns(&context, &converter, &patterns);
+    populateShapeTypeConversionPatterns(&context, converter, patterns);
     patterns.insert<UnrankedTensorStoreTestOnlyPattern>(&context);
     auto module = getOperation();

@@ -6104,7 +6104,7 @@ LogicalResult legalizeTF(
 void PopulateLegalizeTfPatterns(MLIRContext *context,
                                OwningRewritePatternList *patterns) {
-  populateWithGenerated(context, patterns);
+  populateWithGenerated(context, *patterns);
   patterns->insert<
       ConvertAllOp, ConvertAnyOp, ConvertArgMaxOp, ConvertBatchMatMulV2Op,
       ConvertBiasAddOp, ConvertBroadcastToOp, ConvertBF16FloorDivOp,

@@ -712,8 +712,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
     )
     # Check out LLVM and MLIR from llvm-project.
-    LLVM_COMMIT = "93377888ae89560ba6d3976e2762d3d4724c4dfd"
-    LLVM_SHA256 = "606e772f64024cf6c56b31cd1f5fa700c789a22181c73365498973ac73892f91"
+    LLVM_COMMIT = "6713332fddb796f5b14fcb6a7e5d36979676e4ab"
+    LLVM_SHA256 = "d83051da693c4165a8bd9a2703c806820d5146188b1036fd94300fafa09aae50"
     LLVM_URLS = [
         "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
         "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),