Updates LLVM usage to match
[0fc1aa22ee6a](https://github.com/llvm/llvm-project/commit/0fc1aa22ee6a)

PiperOrigin-RevId: 339239851
Change-Id: I78072452756354de403b60c19f2441b250a63f53
Authored by Thomas Joerg on 2020-10-27 06:55:28 -07:00, committed by TensorFlower Gardener
parent ed41e4431a
commit 30994be76c
64 changed files with 192 additions and 105 deletions
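
Most of the churn in this change is mechanical and follows from an MLIR API change in the integrated LLVM revision: the pattern drivers (applyPatternsAndFoldGreedily, applyPartialConversion, applyFullConversion) now take the OwningRewritePatternList by value, so each call site moves its pattern list into the driver, picks up mlir/Transforms/GreedyPatternRewriteDriver.h where needed, and adds the corresponding @llvm-project//mlir:TransformUtils build dependency. A minimal sketch of the call-site migration, using a hypothetical pass and populate function rather than any specific file below:

```cpp
#include <utility>

#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

namespace {
// Hypothetical pass, shown only to illustrate the new call-site shape.
struct ExamplePass
    : public mlir::PassWrapper<ExamplePass, mlir::FunctionPass> {
  void runOnFunction() override {
    mlir::OwningRewritePatternList patterns;
    // populateExamplePatterns(&getContext(), &patterns);  // hypothetical

    // Before this LLVM bump the driver borrowed the list:
    //   applyPatternsAndFoldGreedily(getFunction(), patterns);
    // Now it takes the list by value, so the call site moves it in and
    // the moved-from list is not used again.
    mlir::applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
  }
};
}  // namespace
```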


@ -538,6 +538,7 @@ cc_library(
"@llvm-project//mlir:IR",
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)
@ -764,6 +765,7 @@ cc_library(
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)


@ -49,7 +49,7 @@ struct ChloLegalizeToHloPass
chlo::PopulateLegalizeChloToHloPatterns(&getContext(), &conversionPatterns);
if (failed(applyPartialConversion(getFunction(), conversionTarget,
conversionPatterns))) {
std::move(conversionPatterns)))) {
return signalPassFailure();
}
}


@ -555,7 +555,8 @@ struct HloLegalizeToLhlo
&context, converter, patterns);
populateShapeStructuralTypeConversionsAndLegality(&context, converter,
patterns, target);
if (failed(applyPartialConversion(getOperation(), target, patterns)))
if (failed(applyPartialConversion(getOperation(), target,
std::move(patterns))))
signalPassFailure();
}


@ -17,8 +17,8 @@ limitations under the License.
#include "mlir-hlo/Dialect/mhlo/transforms/passes.h"
#include "mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
@ -133,7 +133,7 @@ struct LegalizeGatherToTorchIndexSelectPass
void runOnFunction() override {
OwningRewritePatternList patterns;
PopulateGatherToTorchIndexSelectPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};
} // namespace


@ -893,7 +893,7 @@ struct LhloLegalizeToLinalgPass
auto func = getFunction();
populateLHLOToLinalgConversionPattern(func.getContext(), &patterns);
if (failed(applyPartialConversion(func, target, patterns, nullptr))) {
if (failed(applyPartialConversion(func, target, std::move(patterns)))) {
signalPassFailure();
}
}
@ -912,7 +912,7 @@ struct HloLegalizeToLinalgPass
auto func = getFunction();
mhlo::populateHLOToLinalgConversionPattern(func.getContext(), &patterns);
if (failed(applyPartialConversion(func, target, patterns, nullptr))) {
if (failed(applyPartialConversion(func, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -21,8 +21,8 @@ limitations under the License.
#include "mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace {
@ -201,7 +201,7 @@ void PopulateMhloToStdPatterns(OwningRewritePatternList *patterns,
void LegalizeToStandardPass::runOnFunction() {
OwningRewritePatternList patterns;
mlir::mhlo::PopulateMhloToStdPatterns(&patterns, &getContext());
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
} // end namespace mhlo


@ -20,8 +20,8 @@ limitations under the License.
#include "mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace mhlo {
@ -259,7 +259,7 @@ struct LegalizeTrigonometricToApproximationPass
void runOnFunction() override {
OwningRewritePatternList patterns;
PopulateTrigonometricToApproximationPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};


@ -27,6 +27,7 @@ limitations under the License.
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace lmhlo {
@ -111,7 +112,7 @@ class LhloFuseLinalgPass
}
});
auto patterns = linalg::getLinalgTilingCanonicalizationPatterns(ctx);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
// Fuse producers of tiled linalg ops.
llvm::SmallDenseSet<Operation*> erase_set;
@ -132,7 +133,7 @@ class LhloFuseLinalgPass
}
auto patterns = linalg::getLinalgTilingCanonicalizationPatterns(ctx);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
for (auto* e : erase_set) e->erase();
}


@ -19,12 +19,10 @@ limitations under the License.
#include "mlir-hlo/Dialect/mhlo/transforms/map_lmhlo_to_scalar_op.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace lmhlo {
@ -160,7 +158,7 @@ struct LhloLegalizeToAffinePass
OwningRewritePatternList patterns;
auto func = getFunction();
populateLHLOToAffineConversionPattern(func.getContext(), &patterns);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
};


@ -184,7 +184,7 @@ struct LhloLegalizeToGpuPass
target.addIllegalOp<ReduceOp>();
auto func = getFunction();
patterns.insert<LhloReduceToGPULaunchConverter>(func.getContext());
if (failed(applyPartialConversion(func, target, patterns))) {
if (failed(applyPartialConversion(func, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -47,7 +47,7 @@ class TestLhloToLLVMPass
target.addLegalOp<ModuleOp, ModuleTerminatorOp>();
target.addIllegalDialect<LmhloDialect>();
if (failed(applyFullConversion(m, target, patterns))) {
if (failed(applyFullConversion(m, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -713,7 +713,7 @@ struct LhloLegalizeToParallelLoopsPass
target.addIllegalOp<lmhlo::ReduceOp, lmhlo::ReduceWindowOp,
lmhlo::SelectAndScatterOp>();
if (failed(applyPartialConversion(func, target, patterns))) {
if (failed(applyPartialConversion(func, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -29,11 +29,11 @@ limitations under the License.
#include "mlir/IR/Attributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
using mlir::FunctionPass;
using mlir::OwningRewritePatternList;
@ -70,7 +70,7 @@ void LowerComplexPass::runOnFunction() {
OwningRewritePatternList patterns;
mlir::mhlo::PopulateComplexLoweringPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
std::unique_ptr<FunctionPass> mlir::mhlo::createLowerComplexPass() {


@ -25,10 +25,10 @@ limitations under the License.
#include "mlir/IR/Function.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
using mlir::DenseIntElementsAttr;
using mlir::ElementsAttr;
@ -182,7 +182,7 @@ struct LegalizeGeneralDotPass
void runOnFunction() override {
OwningRewritePatternList patterns;
mlir::mhlo::PopulateGeneralDotOpLoweringPatterns(&patterns, &getContext());
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};


@ -42,7 +42,7 @@ struct TestMaterializeBroadcastsPass
PopulateMaterializeBroadcastsPatterns(&getContext(), &conversionPatterns);
if (failed(applyPartialConversion(getFunction(), conversionTarget,
conversionPatterns))) {
std::move(conversionPatterns)))) {
return signalPassFailure();
}
}


@ -19,9 +19,9 @@ limitations under the License.
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
using mlir::FunctionPass;
using mlir::PassWrapper;
@ -42,7 +42,7 @@ void OptimizeMhloPass::runOnFunction() {
mlir::OwningRewritePatternList patterns;
mlir::mhlo::PopulateOptimizeMHLOPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
std::unique_ptr<mlir::FunctionPass> mlir::mhlo::createOptimizeMhloPass() {


@ -17,9 +17,9 @@ limitations under the License.
#include "mlir/IR/Identifier.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace mhlo {
@ -87,7 +87,7 @@ struct TestInferShapedTypeMethodsPass
OwningRewritePatternList patterns;
patterns.insert<ReifyReturnTypeShapesPattern>(&getContext());
patterns.insert<InferReturnTypeComponentsPattern>(&getContext());
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};


@ -154,7 +154,8 @@ struct TransformUnrankedHloPass
PopulateTransformUnrankedHloPatterns(&ctx, &patterns);
// Apply transformation.
if (failed(applyPartialConversion(getFunction(), target, patterns)))
if (failed(
applyPartialConversion(getFunction(), target, std::move(patterns))))
return signalPassFailure();
}
};


@ -18,9 +18,9 @@ limitations under the License.
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
namespace mlir {
namespace mhlo {
@ -32,7 +32,7 @@ struct TestUnfuseBatchNormPass
void runOnOperation() override {
OwningRewritePatternList patterns;
PopulateUnfuseBatchNormPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getOperation(), patterns);
applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};


@ -468,6 +468,7 @@ cc_library(
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)
@ -501,6 +502,7 @@ cc_library(
"@llvm-project//mlir:QuantOps",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)


@ -25,14 +25,12 @@ cc_library(
"passes.h",
],
deps = [
"//tensorflow/compiler/mlir/lite/quantization:quantization_config",
"//tensorflow/compiler/mlir/lite/quantization:quantization_lib",
"//tensorflow/compiler/mlir/tensorflow",
"@com_google_absl//absl/strings",
"@llvm-project//llvm:Support",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:QuantOps",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)


@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "mlir/Dialect/Quant/QuantOps.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
@ -145,7 +145,7 @@ void LegalizeTFToQuant::runOnFunction() {
auto func = getFunction();
auto *ctx = func.getContext();
patterns.insert<PreparePerTensorFakeQuant, PreparePerChannelFakeQuant>(ctx);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -14,6 +14,8 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/mlir/lite/transforms/dilated_conv.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
namespace mlir {
namespace TFL {
namespace {
@ -30,7 +32,7 @@ void IdentifyDilatedConvPass::runOnFunction() {
patterns.insert<ConvertTFDilatedConvOp<TF::Conv2DOp>,
ConvertTFDilatedConvOp<TF::DepthwiseConv2dNativeOp>>(
&getContext());
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -677,6 +677,7 @@ void LegalizeTF::runOnFunction() {
// Ophint python converter converted tf node pattern.
patterns.insert<LegalizeUnidirectionalSequenceLstm,
LegalizeUnidirectionalSequenceRnn>(context);
FrozenRewritePatternList frozenPatterns(std::move(patterns));
ConversionTarget target(*context);
// It is legal to have TF ops in the graph still which can be
@ -716,7 +717,7 @@ void LegalizeTF::runOnFunction() {
// Currently unit-test doesn't do multiple tries, so we need this.
const int max_iterations = 15;
for (int i = 0; i < max_iterations; ++i) {
if (failed(applyPartialConversion(func, target, patterns))) {
if (failed(applyPartialConversion(func, target, frozenPatterns))) {
return;
}
}
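
Because the conversion driver now consumes the pattern list, call sites that apply the same patterns more than once, like the retry loop above, first freeze the list into a FrozenRewritePatternList, which can then be passed by const reference any number of times. A minimal sketch of that shape; ConvertWithRetries is a hypothetical helper, and the caller is assumed to have configured the conversion target and populated the list:

```cpp
#include <utility>

#include "mlir/IR/Function.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Rewrite/FrozenRewritePatternList.h"
#include "mlir/Transforms/DialectConversion.h"

void ConvertWithRetries(mlir::FuncOp func, mlir::ConversionTarget &target,
                        mlir::OwningRewritePatternList patterns) {
  // Freeze once; the frozen list can be handed to applyPartialConversion
  // repeatedly, whereas moving the owning list in would consume it on the
  // first call.
  mlir::FrozenRewritePatternList frozen_patterns(std::move(patterns));
  constexpr int kMaxIterations = 15;  // mirrors the loop in the hunk above
  for (int i = 0; i < kMaxIterations; ++i) {
    if (mlir::failed(
            mlir::applyPartialConversion(func, target, frozen_patterns))) {
      return;
    }
  }
}
```

This lines up with the MLIR build changes near the end of the diff, where a new Rewrite library is introduced and TransformUtils starts depending on it.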


@ -899,7 +899,7 @@ LogicalResult LowerStaticTensorListPass::RewriteFunction(
ConvertTensorListSetItem, ConvertTensorListStack,
ConvertTensorListResize, ConvertWhile, ConvertWhileRegion>(
context);
return applyPartialConversion(func, target, patterns);
return applyPartialConversion(func, target, std::move(patterns));
}
void LowerStaticTensorListPass::runOnOperation() {


@ -35,13 +35,13 @@ limitations under the License.
#include "mlir/Dialect/StandardOps/IR/Ops.h" // from @llvm-project
#include "mlir/IR/Attributes.h" // from @llvm-project
#include "mlir/IR/Matchers.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/IR/TypeUtilities.h" // from @llvm-project
#include "mlir/IR/Value.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
@ -804,15 +804,21 @@ void Optimize::runOnFunction() {
FuseFullyConnectedAndReluX<TFL::Relu6Op, kRelu6>,
FuseFullyConnectedAndReluX<TFL::Relu1Op, kRelu1>,
FuseFullyConnectedAndMul>(ctx);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
// Fuse the binary ops with the following ops.
patterns.insert<
FuseBinaryOpToFollowingConv2D, FuseBinaryOpToFollowingDepthwiseConv2D,
OwningRewritePatternList phase_2_patterns;
TFL::populateWithGenerated(ctx, phase_2_patterns);
phase_2_patterns.insert<
FuseFullyConnectedAndAdd, FuseFullyConnectedAndReluX<TFL::ReluOp, kRelu>,
FuseFullyConnectedAndReluX<TFL::Relu6Op, kRelu6>,
FuseFullyConnectedAndReluX<TFL::Relu1Op, kRelu1>,
FuseFullyConnectedAndMul, FuseBinaryOpToFollowingConv2D,
FuseBinaryOpToFollowingDepthwiseConv2D,
FuseBinaryOpToFollowingFullyConnected, FuseConv2DAndMulWithQDQs,
FuseDepthwiseConv2DAndMulWithQDQs, ConvertTrivialTransposeOpToReshapeOp>(
ctx);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(phase_2_patterns));
}
} // namespace
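
A related consequence shows up in passes like the one above that used to fill one OwningRewritePatternList, run the greedy driver, then clear and refill it: once the list has been moved into the driver it is left in a moved-from state, so these passes now build a separate second list (here phase_2_patterns) for the follow-up rewrite phase. A compact sketch of that two-phase shape; RunTwoPhases and the populate helpers are hypothetical stand-ins:

```cpp
#include <utility>

#include "mlir/IR/Function.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

void RunTwoPhases(mlir::FuncOp func, mlir::MLIRContext *ctx) {
  mlir::OwningRewritePatternList phase_1_patterns;
  // populatePhaseOnePatterns(ctx, &phase_1_patterns);  // hypothetical
  mlir::applyPatternsAndFoldGreedily(func, std::move(phase_1_patterns));

  // Build a fresh list for the second phase instead of clearing and
  // refilling the moved-from one.
  mlir::OwningRewritePatternList phase_2_patterns;
  // populatePhaseTwoPatterns(ctx, &phase_2_patterns);  // hypothetical
  mlir::applyPatternsAndFoldGreedily(func, std::move(phase_2_patterns));
}
```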


@ -21,11 +21,11 @@ limitations under the License.
#include "mlir/IR/BlockAndValueMapping.h" // from @llvm-project
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/IR/Module.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/IR/TypeUtilities.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
@ -150,7 +150,7 @@ void OptimizeFunctionalOpsPass::runOnOperation() {
patterns.insert<FoldIfOp>(&getContext());
ModuleOp module = getOperation();
applyPatternsAndFoldGreedily(module, patterns);
applyPatternsAndFoldGreedily(module, std::move(patterns));
}
PassRegistration<OptimizeFunctionalOpsPass> pass(


@ -19,6 +19,7 @@ limitations under the License.
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
@ -146,14 +147,18 @@ void PostQuantizePass::runOnFunction() {
auto* ctx = func.getContext();
TFL::populateWithGenerated(ctx, patterns);
patterns.insert<quant::FoldTrivalRequantizeOp<QuantizeOp>>(ctx);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
if (!emit_quant_adaptor_ops_) {
RemoveQuantizationAdaptorOps(getFunction());
}
patterns.insert<RemoveVolatileOps>(ctx);
applyPatternsAndFoldGreedily(func, patterns);
OwningRewritePatternList phase_2_patterns;
TFL::populateWithGenerated(ctx, phase_2_patterns);
phase_2_patterns
.insert<quant::FoldTrivalRequantizeOp<QuantizeOp>, RemoveVolatileOps>(
ctx);
applyPatternsAndFoldGreedily(func, std::move(phase_2_patterns));
}
} // namespace


@ -25,9 +25,9 @@ limitations under the License.
#include "mlir/Dialect/Quant/QuantOps.h" // from @llvm-project
#include "mlir/IR/Function.h" // from @llvm-project
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/Value.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
@ -337,7 +337,7 @@ void PrepareQuantizePass::runOnFunction() {
// Currently, only activation stats are imported, so narrow_range = false.
patterns.insert<PrepareQuantStats>(bit_width, false, false, ctx);
}
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
SanityCheckAndAdjustment(func);


@ -46,12 +46,12 @@ limitations under the License.
#include "mlir/IR/Attributes.h" // from @llvm-project
#include "mlir/IR/Function.h" // from @llvm-project
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/DialectConversion.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
@ -1045,7 +1045,7 @@ LogicalResult ConvertTf2XlaOps(FuncOp func, MLIRContext *context) {
TF::PopulateLegalizeHloToTfPatterns(&patterns, context);
mhlo::GatherOp::getCanonicalizationPatterns(patterns, context);
return applyPartialConversion(func, target, patterns);
return applyPartialConversion(func, target, std::move(patterns));
}
// Convert rfft to rfft2d.
@ -1145,7 +1145,7 @@ struct ConvertRfftToRfft2d : public RewritePattern {
};
void PrepareTFPass::runOnFunction() {
OwningRewritePatternList patterns;
OwningRewritePatternList patterns, phase_2_patterns;
auto func = getFunction();
MLIRContext *ctx = &getContext();
@ -1183,20 +1183,20 @@ void PrepareTFPass::runOnFunction() {
// This will allow optimizing any TF_Mul->TF_Conv in the graph
// and any expanded from FusedBatchNorm. We need to do this
// before converting TF_Conv to TFL_Conv
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
// Load the generated pattern again, so new quantization pass-through
// will be applied.
patterns.clear();
TFL::populateWithGenerated(ctx, patterns);
TFL::populateWithGenerated(ctx, phase_2_patterns);
if (unfold_batch_matmul_) {
patterns.insert<TF::ConvertTFBatchMatMulOp<TF::BatchMatMulOp>,
TF::ConvertTFBatchMatMulOp<TF::BatchMatMulV2Op>>(ctx);
phase_2_patterns.insert<TF::ConvertTFBatchMatMulOp<TF::BatchMatMulOp>,
TF::ConvertTFBatchMatMulOp<TF::BatchMatMulV2Op>>(
ctx);
}
patterns.insert<TF::ConvertTFEinsumOp, ConvertTFBroadcastTo, ConvertTFConv2D,
ConvertTFDepthwiseConv2dNative, ConvertTFStridedSlice,
ConvertRfftToRfft2d>(ctx);
applyPatternsAndFoldGreedily(func, patterns);
phase_2_patterns.insert<TF::ConvertTFEinsumOp, ConvertTFBroadcastTo,
ConvertTFConv2D, ConvertTFDepthwiseConv2dNative,
ConvertTFStridedSlice, ConvertRfftToRfft2d>(ctx);
applyPatternsAndFoldGreedily(func, std::move(phase_2_patterns));
}
} // namespace


@ -29,6 +29,7 @@ limitations under the License.
#include "mlir/IR/OperationSupport.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
@ -87,7 +88,7 @@ void QuantizePass::runOnFunction() {
TFL::populateWithGenerated(ctx, patterns);
patterns.insert<TFLFullQuantization>(
ctx, enable_numeric_verify, error_tolerance, enable_single_layer_verify);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -740,6 +740,7 @@ cc_library(
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TransformUtils",
],
)
@ -998,6 +999,7 @@ cc_library(
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)


@ -32,6 +32,7 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/util/matmul_bcast.h"
@ -93,7 +94,7 @@ void BatchMatMulToEinsumPass::runOnFunction() {
patterns.insert<ConvertTFBatchMatMulToEinsumOp<TF::BatchMatMulOp>,
ConvertTFBatchMatMulToEinsumOp<TF::BatchMatMulV2Op>>(
&getContext());
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
PassRegistration<BatchMatMulToEinsumPass> pass(


@ -17,10 +17,10 @@ limitations under the License.
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Attributes.h" // from @llvm-project
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/UseDefLists.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
@ -144,7 +144,7 @@ void ContractionFusionPass::runOnFunction() {
OwningRewritePatternList patterns;
patterns.insert<FuseIntoMatMulOp>();
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -15,6 +15,7 @@ limitations under the License.
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
@ -45,7 +46,7 @@ struct DecomposeResourceOps
OwningRewritePatternList patterns;
mlir::TF::PopulateDecomposeResourceOpsPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};


@ -39,6 +39,7 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/util/matmul_bcast.h"
@ -364,7 +365,7 @@ void TransformEinsumPass::runOnFunction() {
auto func = getFunction();
patterns.insert<ConvertTFEinsumOp>(&getContext());
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
static PassRegistration<TransformEinsumPass> pass(


@ -22,10 +22,10 @@ limitations under the License.
#include "mlir/Dialect/Traits.h" // from @llvm-project
#include "mlir/IR/Function.h" // from @llvm-project
#include "mlir/IR/Operation.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
@ -100,7 +100,7 @@ void BroadcastFoldPass::runOnFunction() {
auto func = getFunction();
patterns.insert<ConvertResultsBroadcastableShapeOp>();
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -24,6 +24,7 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
@ -206,7 +207,7 @@ void FusedKernelMatcherPass::runOnFunction() {
auto func = getFunction();
patterns.insert<FuseConv2DBiasAdd, FuseMatMulBiasAdd>(&getContext());
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -21,6 +21,7 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Pass/PassRegistry.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "mlir/Transforms/Passes.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
@ -118,7 +119,7 @@ void GpuOpFusionPass::runOnFunction() {
FuncOp func = getFunction();
OwningRewritePatternList patterns;
patterns.insert<ReluToFusedBatchNorm>(&getContext());
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -22,6 +22,7 @@ limitations under the License.
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/FileUtilities.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
@ -115,7 +116,7 @@ void InitTextFileToImportPass::runOnFunction() {
FuncOp func = getFunction();
patterns.insert<ConvertInitializeTableFromTextFileV2>(context);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -808,7 +808,8 @@ void LegalizeHloToTf::runOnFunction() {
ConversionTarget target(context);
target.addLegalDialect<TensorFlowDialect>();
target.addLegalOp<CallOp, ConstantOp>();
if (failed(applyPartialConversion(getFunction(), target, patterns))) {
if (failed(
applyPartialConversion(getFunction(), target, std::move(patterns)))) {
getFunction().emitError("mhlo to TF legalization failed.");
signalPassFailure();
}


@ -15,6 +15,7 @@ limitations under the License.
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.h"
namespace mlir {
@ -29,7 +30,7 @@ struct LowerTF : public PassWrapper<LowerTF, FunctionPass> {
OwningRewritePatternList patterns;
mlir::TF::PopulateLoweringTFPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};


@ -21,6 +21,7 @@ limitations under the License.
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "mlir/Transforms/Passes.h" // from @llvm-project
#include "tensorflow/compiler/mlir/lite/utils/validators.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
@ -129,7 +130,7 @@ struct TFOptimizePass : public PassWrapper<TFOptimizePass, FunctionPass> {
auto func = getFunction();
populateWithGenerated(&getContext(), patterns);
patterns.insert<SimplifyBroadcastReshape>(&getContext());
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
};


@ -59,7 +59,8 @@ class TensorDeviceCopyConversionPass
patterns.insert<PassThroughConversion<TF::IdentityOp>,
PassThroughConversion<TF::IdentityNOp>>(&getContext());
if (failed(applyPartialConversion(getFunction(), target, patterns))) {
if (failed(applyPartialConversion(getFunction(), target,
std::move(patterns)))) {
signalPassFailure();
}
}


@ -15,6 +15,7 @@ limitations under the License.
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization.h"
namespace mlir {
@ -28,7 +29,7 @@ struct TFDataOptimization
OwningRewritePatternList patterns;
mlir::TF::PopulateTFDataOptimizationPatterns(&getContext(), &patterns);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
};


@ -33,6 +33,7 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/util/matmul_bcast.h"
@ -54,7 +55,7 @@ void UnrollBatchMatMulPass::runOnFunction() {
patterns.insert<ConvertTFBatchMatMulOp<TF::BatchMatMulOp>,
ConvertTFBatchMatMulOp<TF::BatchMatMulV2Op>>(&getContext());
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -111,6 +111,7 @@ cc_library(
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)


@ -25,6 +25,7 @@ limitations under the License.
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tfjs/ir/tfjs_ops.h"
@ -51,7 +52,7 @@ void Optimize::runOnFunction() {
auto func = getFunction();
populateWithGenerated(ctx, patterns);
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -36,7 +36,6 @@ limitations under the License.
#include "mlir/IR/Builders.h" // from @llvm-project
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/IR/Module.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/IR/SymbolTable.h" // from @llvm-project
#include "mlir/IR/Value.h" // from @llvm-project
@ -45,6 +44,7 @@ limitations under the License.
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/DialectConversion.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "mlir/Transforms/InliningUtils.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.h"
@ -107,7 +107,7 @@ void DecomposeTFOpsPass::ApplyCanonicalization() {
}
populateSCFOpsCanonicalizationPatterns(patterns, context);
applyPatternsAndFoldGreedily(getFunction(), patterns);
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
}
LogicalResult DecomposeTFOpsPass::RewriteUnregisteredTFOps() {


@ -40,7 +40,6 @@ limitations under the License.
#include "mlir/IR/Matchers.h" // from @llvm-project
#include "mlir/IR/Module.h" // from @llvm-project
#include "mlir/IR/OperationSupport.h" // from @llvm-project
#include "mlir/IR/PatternMatch.h" // from @llvm-project
#include "mlir/IR/StandardTypes.h" // from @llvm-project
#include "mlir/IR/SymbolTable.h" // from @llvm-project
#include "mlir/IR/Types.h" // from @llvm-project
@ -50,6 +49,7 @@ limitations under the License.
#include "mlir/Support/LLVM.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/DialectConversion.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "mlir/Transforms/InliningUtils.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.h"
@ -455,7 +455,7 @@ void RaiseToTFOpsPass::runOnFunction() {
op->getCanonicalizationPatterns(patterns, ctx);
}
applyPatternsAndFoldGreedily(func, patterns);
applyPatternsAndFoldGreedily(func, std::move(patterns));
}
} // namespace


@ -44,6 +44,7 @@ limitations under the License.
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Transforms/Bufferize.h" // from @llvm-project
#include "mlir/Transforms/DialectConversion.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "mlir/Transforms/Passes.h" // from @llvm-project
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"


@ -110,7 +110,7 @@ struct BufferizePass : public BufferizePassBase<BufferizePass> {
patterns.insert<UnrankedTensorStoreTestOnlyPattern>(&context);
auto module = getOperation();
if (failed(applyPartialConversion(module, target, patterns))) {
if (failed(applyPartialConversion(module, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -64,7 +64,7 @@ class EmbedTFFrameworkPass
return !op->getParentOfType<FuncOp>().getAttrOfType<UnitAttr>(kTFEntry);
});
if (failed(applyPartialConversion(m, target, patterns))) {
if (failed(applyPartialConversion(m, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -44,7 +44,7 @@ struct MaterializeBroadcastsPass
&conversionPatterns);
if (failed(applyPartialConversion(getFunction(), conversionTarget,
conversionPatterns))) {
std::move(conversionPatterns)))) {
return signalPassFailure();
}
}


@ -36,7 +36,8 @@ struct ParallelLoopsToSequentialPass
target.addIllegalOp<mlir::scf::ParallelOp>();
target.addLegalOp<mlir::scf::ForOp, mlir::scf::IfOp>();
target.markUnknownOpDynamicallyLegal([](mlir::Operation*) { return true; });
if (failed(applyPartialConversion(getOperation(), target, patterns)))
if (failed(applyPartialConversion(getOperation(), target,
std::move(patterns))))
signalPassFailure();
}
};


@ -61,7 +61,7 @@ struct ShapeToDescriptorsPass
// Apply conversion.
auto module = getOperation();
if (failed(applyPartialConversion(module, target, patterns)))
if (failed(applyPartialConversion(module, target, std::move(patterns))))
signalPassFailure();
}
};


@ -62,7 +62,7 @@ class TFKernelToLLVMPass : public TFKernelToLLVMPassBase<TFKernelToLLVMPass> {
.addIllegalDialect<gpu::GPUDialect, tf_framework::TFFrameworkDialect>();
target.addIllegalOp<LLVM::DialectCastOp>();
if (failed(applyPartialConversion(m, target, patterns))) {
if (failed(applyPartialConversion(m, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/transforms/passes.h"
@ -29,7 +30,7 @@ struct UnfuseBatchNormPass
void runOnFunction() override {
mlir::OwningRewritePatternList patterns;
mlir::mhlo::PopulateUnfuseBatchNormPatterns(&getContext(), &patterns);
mlir::applyPatternsAndFoldGreedily(getOperation(), patterns);
mlir::applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
}
};


@ -124,6 +124,7 @@ cc_library(
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:TransformUtils",
],
alwayslink = 1,
)


@ -6125,8 +6125,8 @@ LogicalResult legalizeTF(
// Fully qualify ReturnOp here as mhlo dialect also defines a ReturnOp.
target.addLegalOp<ModuleOp, FuncOp, ModuleTerminatorOp, ::mlir::ReturnOp>();
DenseSet<Operation *> nonlegalized_ops;
LogicalResult result =
applyPartialConversion(op, target, patterns, &nonlegalized_ops);
LogicalResult result = applyPartialConversion(
op, target, std::move(patterns), &nonlegalized_ops);
// In order to enforce that the conversion result is fully converted,
// fail if there are any nonlegalized ops in the set.
if (failed(result) || !nonlegalized_ops.empty()) {
@ -6136,7 +6136,7 @@ LogicalResult legalizeTF(
return result;
}
return applyPartialConversion(op, target, patterns);
return applyPartialConversion(op, target, std::move(patterns));
}
void PopulateLegalizeTfPatterns(MLIRContext *context,


@ -38,6 +38,7 @@ limitations under the License.
#include "mlir/IR/Value.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Support/LogicalResult.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.h"
@ -573,7 +574,8 @@ class LegalizeTF : public PassWrapper<LegalizeTF, FunctionPass> {
void runOnFunction() override {
OwningRewritePatternList patterns;
patterns.insert<Tf2XlaRewritePattern>(device_type_);
if (failed(applyPatternsAndFoldGreedily(getFunction(), patterns)))
if (failed(
applyPatternsAndFoldGreedily(getFunction(), std::move(patterns))))
signalPassFailure();
}


@ -31,11 +31,11 @@ limitations under the License.
#include "mlir/Dialect/Linalg/Passes.h" // from @llvm-project
#include "mlir/Dialect/SCF/Passes.h" // from @llvm-project
#include "mlir/Dialect/SCF/Transforms.h" // from @llvm-project
#include "mlir/IR/Builders.h" // from @llvm-project
#include "mlir/IR/Dialect.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Transforms/Bufferize.h" // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h" // from @llvm-project
#include "mlir/Transforms/Passes.h" // from @llvm-project
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/IR/lhlo_ops.h"
#include "tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/passes.h"
@ -170,7 +170,7 @@ class LowerToNVVMPass
// TODO(csigg): Remove once we support replacing non-root ops.
target.addLegalOp<::mlir::gpu::GPUModuleOp, ::mlir::gpu::ModuleEndOp,
::mlir::gpu::YieldOp>();
if (failed(mlir::applyFullConversion(m, target, patterns))) {
if (failed(mlir::applyFullConversion(m, target, std::move(patterns)))) {
signalPassFailure();
}
}
@ -214,11 +214,13 @@ class LowerToROCDLPass
void runOnOperation() override {
::mlir::gpu::GPUModuleOp m = getOperation();
::mlir::OwningRewritePatternList patterns;
::mlir::populateGpuRewritePatterns(m.getContext(), patterns);
::mlir::applyPatternsAndFoldGreedily(m, patterns);
patterns.clear();
{
::mlir::OwningRewritePatternList patterns;
::mlir::populateGpuRewritePatterns(m.getContext(), patterns);
::mlir::applyPatternsAndFoldGreedily(m, std::move(patterns));
}
::mlir::OwningRewritePatternList patterns;
::mlir::LLVMTypeConverter converter(m.getContext());
::mlir::populateStdToLLVMConversionPatterns(converter, patterns);
// TODO(b/145824979) Remove linalg once sliceop is in std.
@ -239,7 +241,7 @@ class LowerToROCDLPass
// TODO(csigg): Remove once we support replacing non-root ops.
target.addLegalOp<::mlir::gpu::GPUModuleOp, ::mlir::gpu::ModuleEndOp,
::mlir::gpu::YieldOp>();
if (failed(mlir::applyFullConversion(m, target, patterns))) {
if (failed(mlir::applyFullConversion(m, target, std::move(patterns)))) {
signalPassFailure();
}
}


@ -680,8 +680,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
)
# Check out LLVM and MLIR from llvm-project.
LLVM_COMMIT = "26750a1264b3df114a1efae7cde6f0784206b2ce"
LLVM_SHA256 = "eb360bbcd3e4b505689a21756c89fdf087064882899bab2f98258f8cf0546218"
LLVM_COMMIT = "0fc1aa22ee6ac337a5d51fa5666c9cd61da61b07"
LLVM_SHA256 = "e0682d8c3c29b99db0037913bae706bdfc6e2439d75132087d628e54d894f1e1"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),


@ -621,6 +621,7 @@ cc_library(
":Affine",
":IR",
":Support",
":TransformUtils",
"@llvm-project//llvm:Support",
],
)
@ -701,6 +702,7 @@ cc_library(
":LinalgToSPIRV",
":LinalgToStandard",
":OpenMPToLLVM",
":PDLToPDLInterp",
":SCFToGPUPass",
":SCFToStandard",
":SPIRVToLLVM",
@ -1694,6 +1696,30 @@ cc_library(
],
)
cc_library(
name = "PDLToPDLInterp",
srcs = glob([
"lib/Conversion/PDLToPDLInterp/*.cpp",
"lib/Conversion/PDLToPDLInterp/*.h",
]) + [
"lib/Conversion/PassDetail.h",
],
hdrs = [
"include/mlir/Conversion/PDLToPDLInterp/PDLToPDLInterp.h",
],
includes = ["include"],
deps = [
":ConversionPassIncGen",
":IR",
":InferTypeOpInterface",
":PDLDialect",
":PDLInterpDialect",
":Pass",
":Support",
"@llvm-project//llvm:Support",
],
)
cc_library(
name = "SPIRVToLLVM",
srcs = glob([
@ -2348,6 +2374,22 @@ cc_library(
],
)
cc_library(
name = "Rewrite",
srcs = glob([
"lib/Rewrite/*.cpp",
"lib/Rewrite/*.h",
]),
hdrs = glob(["include/mlir/Rewrite/*.h"]),
includes = ["include"],
deps = [
":IR",
":Pass",
":Support",
"@llvm-project//llvm:Support",
],
)
cc_library(
name = "TransformUtils",
srcs = glob([
@ -2364,6 +2406,7 @@ cc_library(
":ControlFlowInterfaces",
":IR",
":Pass",
":Rewrite",
":SCFDialect",
":SideEffectInterfaces",
":StandardOps",
@ -2523,6 +2566,7 @@ cc_library(
":LinalgOps",
":LoopLikeInterface",
":Pass",
":Rewrite",
":SCFDialect",
":SideEffectInterfaces",
":StandardOps",
@ -3153,6 +3197,7 @@ cc_library(
":OpenMPToLLVM",
":PDLDialect",
":PDLInterpDialect",
":PDLToPDLInterp",
":QuantOps",
":QuantPassIncGen",
":ROCDLDialect",