Updates LLVM usage to match
[33945cdd62c4](https://github.com/llvm/llvm-project/commit/33945cdd62c4)

PiperOrigin-RevId: 340556042
Change-Id: Id99605800c4c095597b63b822e307edf3da3aa5b
Author: A. Unique TensorFlower
Date: 2020-11-03 17:25:17 -08:00
Committed by: TensorFlower Gardener
Parent: 1e228f57d2
Commit: b37cacd6ea
18 changed files with 130 additions and 73 deletions
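All of the C++ changes below apply a single migration: with this LLVM revision, a top-level mlir::PassManager no longer implicitly nests op-specific passes, so call sites either construct the pass manager with mlir::OpPassManager::Nesting::Implicit, switch from addPass to addNestedPass<mlir::FuncOp>, or hand pm.nest<mlir::FuncOp>() to nested pipeline builders; the removed verifyPasses constructor argument is replaced by enableVerifier(false). A minimal sketch of the pattern, assuming the MLIR headers and the generic canonicalizer/CSE passes available at this revision (the pipeline itself is illustrative, not code from this commit):

#include "mlir/IR/Function.h"        // mlir::FuncOp (header location at this revision)
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"  // createCanonicalizerPass, createCSEPass

void BuildExamplePipelines(mlir::MLIRContext* context) {
  // Option 1: construct the pass manager with implicit nesting, restoring the
  // pre-33945cdd62c4 behaviour for every subsequent addPass call.
  mlir::PassManager implicit_pm(context,
                                mlir::OpPassManager::Nesting::Implicit);
  implicit_pm.addPass(mlir::createCanonicalizerPass());

  // Option 2: spell the nesting out, as most pipelines in this commit now do.
  mlir::PassManager explicit_pm(context);
  explicit_pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
  explicit_pm.addNestedPass<mlir::FuncOp>(mlir::createCSEPass());

  // The verifyPasses constructor argument is gone; use the setter instead.
  explicit_pm.enableVerifier(false);
}

Most of the converter entry points below (flatbuffer export, quantize, sparsify, tf_tfl_translate) take the first route, while the pipeline builders take the second.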

View File

@@ -289,7 +289,8 @@ Status ConvertMLIRToTFLiteFlatBuffer(
absl::StrCat(toco_flags.dump_graphviz_dir(), "/toco_AT_IMPORT.dot")));
}
mlir::PassManager pm(module->getContext());
mlir::PassManager pm(module->getContext(),
mlir::OpPassManager::Nesting::Implicit);
tensorflow::AddTFToTFLConversionPasses(pass_config, &pm, session);
// Convert back to outlined while format for export back to flatbuffer.

View File

@@ -75,7 +75,7 @@ TfLiteStatus QuantizeModel(
}
// Apply quantization passes
PassManager pm(module->getContext());
PassManager pm(module->getContext(), OpPassManager::Nesting::Implicit);
TFL::QuantizationSpecs quant_specs;
quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
quant_specs.post_training_quantization = true;

View File

@@ -57,7 +57,7 @@ TfLiteStatus SparsifyModel(const tflite::ModelT& input_model,
return kTfLiteError;
}
PassManager pm(module->getContext());
PassManager pm(module->getContext(), OpPassManager::Nesting::Implicit);
pm.addPass(TFL::CreateDenseToSparsePass());
if (failed(pm.run(module.get()))) {

View File

@@ -45,18 +45,20 @@ const char kTFLiteDataLayout[] = "NHWC";
void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,
mlir::OpPassManager* pass_manager) {
pass_manager->addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TFL::CreatePrepareQuantizePass(quant_specs));
if (quant_specs.default_ranges.first.hasValue() ||
quant_specs.default_ranges.second.hasValue()) {
pass_manager->addPass(mlir::TFL::CreateDefaultQuantParamsPass(
quant_specs.default_ranges.first.getValueOr(0.0),
quant_specs.default_ranges.second.getValueOr(0.0),
quant_specs.IsSignedInferenceType()));
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TFL::CreateDefaultQuantParamsPass(
quant_specs.default_ranges.first.getValueOr(0.0),
quant_specs.default_ranges.second.getValueOr(0.0),
quant_specs.IsSignedInferenceType()));
}
pass_manager->addPass(mlir::TFL::CreateQuantizePass());
pass_manager->addNestedPass<mlir::FuncOp>(mlir::TFL::CreateQuantizePass());
bool emit_quant_adaptor_ops =
quant_specs.inference_type != quant_specs.inference_input_type;
pass_manager->addPass(
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));
}
@@ -67,7 +69,8 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
standard_pipeline_options.enable_inliner = false;
standard_pipeline_options.form_clusters = pass_config.form_clusters;
mlir::TF::CreateTFStandardPipeline(*pass_manager, standard_pipeline_options);
pass_manager->addPass(mlir::TF::CreateDeviceIndexSelectorPass());
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TF::CreateDeviceIndexSelectorPass());
// Add canonicalize pass to remove no-op session initializer pass.
pass_manager->addPass(mlir::createCanonicalizerPass());
@@ -155,7 +158,8 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
pass_manager->addPass(mlir::createInlinerPass());
// TODO(jpienaar): Revise post dialect constants.
pass_manager->addPass(mlir::TF::CreateDecodeConstantPass());
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TF::CreateDecodeConstantPass());
// Canonicalization includes const folding, which is utilized here to optimize
// away ops that can't get constant folded after PrepareTF pass. For example,
// tf.Conv2D is split into tf.Transpose and tfl.Conv2D.
@@ -178,11 +182,11 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
// to match 'kTFLiteDataLayout'
mlir::TF::LayoutOptimizationPipelineOptions layout_optimization_options;
layout_optimization_options.force_data_format = kTFLiteDataLayout;
mlir::TF::CreateLayoutOptimizationPipeline(*pass_manager,
layout_optimization_options);
mlir::TF::CreateLayoutOptimizationPipeline(
pass_manager->nest<mlir::FuncOp>(), layout_optimization_options);
// Prepare for TFLite dialect, rerun canonicalization, and then legalize to
// the TFLite dialect.
pass_manager->addPass(
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TFL::CreatePrepareTFPass(pass_config.unfold_batch_matmul));
pass_manager->addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
if (pass_config.shape_inference) {
@@ -198,16 +202,18 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
pass_manager->addPass(mlir::createInlinerPass());
// This pass removes the asset file dependencies in hash table use cases.
pass_manager->addPass(mlir::TF::CreateInitTextFileToImportPass());
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TF::CreateInitTextFileToImportPass());
pass_manager->addPass(
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TFL::CreateLegalizeTFPass(pass_config.runtime_verification));
pass_manager->addPass(mlir::TFL::CreateOptimizePass());
pass_manager->addNestedPass<mlir::FuncOp>(mlir::TFL::CreateOptimizePass());
// This pass operates on TensorFlow ops but is triggered after legalization
// so that it can target constants introduced once TensorFlow Identity ops
// are removed during legalization.
pass_manager->addPass(mlir::TFL::CreateOptimizeFunctionalOpsPass());
pass_manager->addPass(mlir::TFL::CreateRaiseCustomOpsPass());
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TFL::CreateRaiseCustomOpsPass());
pass_manager->addPass(mlir::createSymbolDCEPass());
pass_manager->addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
pass_manager->addNestedPass<mlir::FuncOp>(mlir::createCSEPass());
@@ -225,7 +231,8 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
// it's not desired by TFL. This pass serves as a "fix" pass to split the
// merged inputs until we have 1st class variable support or reuse
// tf.variable to model this.
pass_manager->addPass(mlir::TFL::CreateSplitMergedOperandsPass());
pass_manager->addNestedPass<mlir::FuncOp>(
mlir::TFL::CreateSplitMergedOperandsPass());
}
}
@@ -295,7 +302,7 @@ void CreateTFLStandardPipeline(OpPassManager& pm,
pm.addPass(mlir::TFL::CreateWhileOutlinePass());
pm.addPass(mlir::TFL::CreateRuntimeVerifyPass());
pm.addNestedPass<mlir::FuncOp>(mlir::TFL::CreateRuntimeVerifyPass());
}
// Registers a pass pipeline for the standard TFL passes.

View File

@@ -29,6 +29,7 @@ limitations under the License.
#include "mlir/IR/MLIRContext.h" // from @llvm-project
#include "mlir/IR/Module.h" // from @llvm-project
#include "mlir/Pass/Pass.h" // from @llvm-project
#include "mlir/Pass/PassManager.h" // from @llvm-project
#include "mlir/Support/FileUtilities.h" // from @llvm-project
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/init_mlir.h"
@@ -187,7 +188,7 @@ int main(int argc, char **argv) {
// message. So we can just return here.
if (!module.ok()) return kTrFailure;
mlir::PassManager pm(&context);
mlir::PassManager pm(&context, mlir::OpPassManager::Nesting::Implicit);
mlir::applyPassManagerCLOptions(pm);
// Set the quantization specifications from the command line flags.

View File

@@ -121,7 +121,7 @@ void CreateTPUBridgePipeline(OpPassManager &pm) {
pm.addPass(CreateTPURewritePass());
pm.addPass(createSymbolDCEPass());
pm.addNestedPass<FuncOp>(TFDevice::CreateReplicateInvariantOpHoistingPass());
pm.addNestedPass<FuncOp>(CreateTPUDynamicLayoutPass());
pm.addPass(CreateTPUDynamicLayoutPass());
pm.addNestedPass<FuncOp>(CreateTPUMergeVariablesWithExecutePass());
pm.addNestedPass<FuncOp>(CreateTPUColocateCompositeResourceOps());
pm.addPass(CreateTPUVariableReformattingPass());

View File

@@ -50,7 +50,8 @@ Status MlirGraphOptimizationPass::Run(const ConfigProto& config_proto,
// Assign optimal data layout to layout sensitive operations and delete
// redundant transposes from the IR.
LayoutOptimizationPipelineOptions layout_optimization_options;
CreateLayoutOptimizationPipeline(pm, layout_optimization_options);
CreateLayoutOptimizationPipeline(pm.nest<FuncOp>(),
layout_optimization_options);
// Prepare IR for exporting.
pm.addPass(CreateBreakUpIslandsPass());

View File

@@ -85,7 +85,7 @@ void InitTextFileToImportTestPass::runOnOperation() {
// Run the lowering pass.
PassManager pm(context);
pm.addPass(CreateInitTextFileToImportPass());
pm.addNestedPass<FuncOp>(CreateInitTextFileToImportPass());
if (failed(pm.run(module))) return signalPassFailure();
}

View File

@@ -3569,17 +3569,20 @@ Status SavedModelSignatureDefImporter::LiftVariables() {
mlir::PassManager pm(module_->getContext());
SetCrashReproducer(pm);
pm.addPass(mlir::tf_executor::CreateTFExecutorGraphPruningPass());
pm.addPass(mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addNestedPass<mlir::FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass());
pm.addNestedPass<mlir::FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addPass(
mlir::tf_saved_model::CreateRemoveVariablesInSessionInitializerPass());
pm.addPass(
pm.addNestedPass<mlir::FuncOp>(
mlir::TF::
CreateConvertReadonlyReferenceVariablesToResourceVariablesPass());
pm.addPass(mlir::TF::CreatePromoteVarHandlesToArgsPass());
pm.addPass(
mlir::tf_saved_model::CreateLiftVariablesPass(bundle_.GetSession()));
pm.addPass(mlir::tf_saved_model::CreateDedupBoundInputBindingPass());
pm.addNestedPass<mlir::FuncOp>(
mlir::tf_saved_model::CreateDedupBoundInputBindingPass());
if (mlir::failed(pm.run(*module_)))
return diag_handler.Combine(errors::Internal("Failed to lift variables."));

View File

@@ -279,7 +279,8 @@ void CreateConvertMlirToXlaHloPipeline(
pm.addPass(mlir::TF::CreateTensorListOpsDecompositionPass());
pm.addPass(mlir::TF::CreateStackOpsDecompositionPass());
pm.addPass(mlir::TF::CreateTensorArrayOpsDecompositionPass());
pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsPass());
pm.addNestedPass<mlir::FuncOp>(
mlir::TFDevice::CreateDecomposeResourceOpsPass());
pm.addPass(mlir::TF::CreatePromoteResourcesToArgsPass());
pm.addPass(mlir::createSymbolDCEPass());
// Guarantee all functions have one use, which enables shape inference.

View File

@@ -36,7 +36,7 @@ void AddTFToTFJSConversionPasses(mlir::OpPassManager* pm) {
pm->addPass(mlir::tf_saved_model::CreateFreezeGlobalTensorsPass());
// TFJS dialect passes.
pm->addPass(mlir::tfjs::CreateOptimizePass());
pm->addNestedPass<mlir::FuncOp>(mlir::tfjs::CreateOptimizePass());
// Canonicalize, CSE etc.
pm->addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());

View File

@@ -75,7 +75,7 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
applyTensorflowAndCLOptions(pm);
if (gpu_binary_only) {
pm.addPass(mlir::mhlo::createLegalizeTFPass(
pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createLegalizeTFPass(
/*allow_partial_conversion=*/false, /*legalize_chlo=*/true));
pm.addNestedPass<mlir::FuncOp>(
mlir::kernel_gen::transforms::CreateMaterializeBroadcastsPass());
@@ -83,23 +83,24 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
mlir::kernel_gen::transforms::CreateUnfuseBatchNormPass());
pm.addPass(mlir::mhlo::createLegalizeToLhloPass());
// Moving `AllocOp`s and inserting missing `DeallocOp`s
pm.addPass(::mlir::createBufferHoistingPass());
pm.addPass(::mlir::createBufferDeallocationPass());
pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferHoistingPass());
pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferDeallocationPass());
pm.addNestedPass<mlir::FuncOp>(mlir::createCopyRemovalPass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::kernel_gen::transforms::CreateShapeToDescriptorsPass());
} else {
pm.addPass(mlir::mhlo::createLegalizeTFPass(
pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createLegalizeTFPass(
/*allow_partial_conversion=*/false, /*legalize_chlo=*/false));
pm.addPass(mlir::createTransformUnrankedHloPass());
pm.addPass(mlir::mhlo::createChloLegalizeToHloPass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addNestedPass<mlir::FuncOp>(mlir::createTransformUnrankedHloPass());
pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createChloLegalizeToHloPass());
pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::kernel_gen::transforms::CreateShapeToDescriptorsPass());
// Clean up the IR created above. In particular, operations on descriptors
// are simplified here.
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::kernel_gen::transforms::CreateBufferizePass());
pm.addPass(mlir::kernel_gen::transforms::CreateParallelLoopsToSequential());
pm.addNestedPass<mlir::FuncOp>(
mlir::kernel_gen::transforms::CreateParallelLoopsToSequential());
}
// Clean up the IR for further processing.
@@ -119,36 +120,41 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
tiling_for_unrolling.append(tile_sizes.begin(), tile_sizes.end());
}
// Transform LHLO operations to LinAlg.
pm.addPass(::mlir::lmhlo::createLegalizeLhloToLinalgPass());
pm.addNestedPass<mlir::FuncOp>(
::mlir::lmhlo::createLegalizeLhloToLinalgPass());
// Fuse linalg operations.
pm.addPass(::mlir::lmhlo::createLhloFuseLinalgPass(
pm.addNestedPass<mlir::FuncOp>(::mlir::lmhlo::createLhloFuseLinalgPass(
/*use_parallel_loops=*/true, tiling_for_unrolling));
// Transform the Linalg operations inside of the loop nest into parallel
// loops.
pm.addPass(::mlir::createConvertLinalgToParallelLoopsPass());
pm.addNestedPass<mlir::FuncOp>(
::mlir::createConvertLinalgToParallelLoopsPass());
// Canonicalize the code to simplify index computations. This is needed so
// that loop bounds have the same value.
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
// Fuse the inner-most loops.
pm.addPass(xla::mlir_gpu::createFuseInnerParallelLoopsPass());
pm.addNestedPass<mlir::FuncOp>(
xla::mlir_gpu::createFuseInnerParallelLoopsPass());
// Run CSE to ensure that loads and stores to the same subview get
// recognized as such.
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
// Forward stores to buffers to loads.
pm.addPass(xla::mlir_gpu::createStoreForwardingPass());
pm.addNestedPass<mlir::FuncOp>(xla::mlir_gpu::createStoreForwardingPass());
// Remove now unused temporary buffers.
pm.addPass(xla::mlir_gpu::createDeadTempBufferRemovalPass());
pm.addNestedPass<mlir::FuncOp>(
xla::mlir_gpu::createDeadTempBufferRemovalPass());
if (!unroll_factors.empty()) {
pm.addPass(::mlir::createParallelLoopTilingPass(as_int64));
pm.addNestedPass<mlir::FuncOp>(
::mlir::createParallelLoopTilingPass(as_int64));
}
// Some basic cleanup.
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
// Greedily map the remaining loop to GPU hardware dimensions.
pm.addPass(xla::mlir_gpu::createMapParallelLoopsPass());
pm.addNestedPass<::mlir::FuncOp>(xla::mlir_gpu::createMapParallelLoopsPass());
// Apply the mapping.
pm.addPass(mlir::createParallelLoopToGpuPass());
pm.addNestedPass<::mlir::FuncOp>(mlir::createParallelLoopToGpuPass());
// Embed TF Framework ops.
if (!gpu_binary_only) {
@@ -171,12 +177,13 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
if (gpu_binary_only) {
// Make kernel signature deterministic so that we can call it externally.
pm.addPass(xla::mlir_gpu::createRewriteKernelSignaturePass());
pm.addNestedPass<::mlir::FuncOp>(
xla::mlir_gpu::createRewriteKernelSignaturePass());
}
pm.addPass(::mlir::createLowerAffinePass());
// Constraints are removed as late as possible and before lowering to CFG.
pm.addPass(::mlir::createConvertShapeConstraintsPass());
pm.addNestedPass<::mlir::FuncOp>(::mlir::createConvertShapeConstraintsPass());
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
pm.addPass(::mlir::createLowerToCFGPass());

View File

@@ -40,7 +40,8 @@ std::unique_ptr<llvm::Module> MakeLLVMModule(mlir::OwningModuleRef module,
// TODO(kramerb): link this to the right option, command line flag, etc.
constexpr bool kReassociateFPReductions = true;
mlir::PassManager manager(module->getContext());
mlir::PassManager manager(module->getContext(),
mlir::OpPassManager::Nesting::Implicit);
manager.addPass(mlir::createConvertLinalgToLoopsPass());
manager.addPass(mlir::createLowerAffinePass());
manager.addPass(mlir::createLowerToCFGPass());

View File

@@ -70,49 +70,53 @@ Status LowerLHLOToGPU(mlir::ModuleOp module, LowerLHLOToGPUOptions options) {
// Legalize from HLO to LHLO.
pm.addPass(::mlir::mhlo::createLegalizeToLhloPass());
// Moving `AllocOp`s and inserting missing `DeallocOp`s
pm.addPass(::mlir::createBufferHoistingPass());
pm.addPass(::mlir::createBufferDeallocationPass());
pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferHoistingPass());
pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferDeallocationPass());
// Next, we can strip the outer fusion operation.
pm.addPass(createFusionOpRemoverPass());
pm.addNestedPass<mlir::FuncOp>(createFusionOpRemoverPass());
// Remove unnecessary LHLO copies.
pm.addPass(::mlir::createCopyRemovalPass());
pm.addNestedPass<mlir::FuncOp>(::mlir::createCopyRemovalPass());
// Legalize reduce operations directly to GPU dialect.
pm.addPass(::mlir::lmhlo::createLegalizeToGpuPass());
pm.addNestedPass<mlir::FuncOp>(::mlir::lmhlo::createLegalizeToGpuPass());
// Transform LHLO operations to LinAlg.
pm.addPass(::mlir::lmhlo::createLegalizeLhloToLinalgPass());
pm.addNestedPass<mlir::FuncOp>(
::mlir::lmhlo::createLegalizeLhloToLinalgPass());
// Fuse linalg operations.
pm.addPass(::mlir::lmhlo::createLhloFuseLinalgPass(
pm.addNestedPass<mlir::FuncOp>(::mlir::lmhlo::createLhloFuseLinalgPass(
/*use_parallel_loops=*/true, tiling_for_unrolling));
// Transform the Linalg operations inside of the loop nest into parallel
// loops.
pm.addPass(::mlir::createConvertLinalgToParallelLoopsPass());
pm.addNestedPass<mlir::FuncOp>(
::mlir::createConvertLinalgToParallelLoopsPass());
// Canonicalize the code to simplify index computations. This is needed so
// that loop bounds have the same value.
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
// Fuse the inner-most loops.
pm.addPass(createFuseInnerParallelLoopsPass());
pm.addNestedPass<mlir::FuncOp>(createFuseInnerParallelLoopsPass());
// Run CSE to ensure that loads and stores to the same subview get
// recognized as such.
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
// Forward stores to buffers to loads.
pm.addPass(createStoreForwardingPass());
pm.addNestedPass<mlir::FuncOp>(createStoreForwardingPass());
// Remove now unused temporary buffers.
pm.addPass(createDeadTempBufferRemovalPass());
pm.addNestedPass<mlir::FuncOp>(createDeadTempBufferRemovalPass());
if (!options.unroll_factors.empty()) {
pm.addPass(::mlir::createParallelLoopTilingPass(as_int64));
pm.addNestedPass<mlir::FuncOp>(
::mlir::createParallelLoopTilingPass(as_int64));
}
// Project all loop dimensions to X if necessary.
if (options.collapse_parallel_loops) {
pm.addPass(createParallelLoopCollapsingToFirstDimPass());
pm.addNestedPass<mlir::FuncOp>(
createParallelLoopCollapsingToFirstDimPass());
}
// Some basic cleanup.
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
// Greedily map the remaining loop to GPU hardware dimensions.
pm.addPass(createMapParallelLoopsPass());
pm.addNestedPass<::mlir::FuncOp>(createMapParallelLoopsPass());
// Apply the mapping.
pm.addPass(mlir::createParallelLoopToGpuPass());
pm.addNestedPass<::mlir::FuncOp>(mlir::createParallelLoopToGpuPass());
// Some basic cleanup.
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
@@ -131,7 +135,7 @@ Status LowerLHLOToGPU(mlir::ModuleOp module, LowerLHLOToGPUOptions options) {
// Make sure the kernel signature resembled the original function's
// signature
if (options.rewrite_signature) {
pm.addPass(createRewriteKernelSignaturePass());
pm.addNestedPass<::mlir::FuncOp>(createRewriteKernelSignaturePass());
}
if (failed(pm.run(module))) {
return InternalError("Lowering to GPU kernels failed.");
@@ -179,8 +183,9 @@ class LowerToNVVMPass
} // namespace
Status LowerKernelBodiesToNVVM(mlir::ModuleOp module) {
::mlir::PassManager pm(module.getContext());
// We cannot verify as the signature of the kernel is rewritten.
::mlir::PassManager pm(module.getContext(), /*verifyPasses=*/false);
pm.enableVerifier(false);
tensorflow::applyTensorflowAndCLOptions(pm);
// Rewrite kernel functions to LLVM IR.
@@ -250,8 +255,9 @@ class LowerToROCDLPass
} // namespace
Status LowerKernelBodiesToROCDL(mlir::ModuleOp module) {
::mlir::PassManager pm(module.getContext());
// We cannot verify as the signature of the kernel is rewritten.
::mlir::PassManager pm(module.getContext(), /*verifyPasses=*/false);
pm.enableVerifier(false);
tensorflow::applyTensorflowAndCLOptions(pm);
auto enable_if_vlog_is_on = [](mlir::Pass*, mlir::Operation*) {

View File

@@ -90,7 +90,8 @@ MlirCompiler::IRHook XlaGpuOpt::GetIRHookBreakingLoweringStage(
LoweringStage breaking_stage) {
return {[](mlir::ModuleOp module) -> Status {
mlir::PassManager pm(module.getContext());
pm.addPass(::mlir::createInjectErrorsForTestingPass());
pm.addNestedPass<::mlir::FuncOp>(
::mlir::createInjectErrorsForTestingPass());
if (failed(pm.run(module))) {
return InternalError("InjectErrorsForTestingPass failed.");
}

View File

@@ -686,8 +686,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
)
# Check out LLVM and MLIR from llvm-project.
LLVM_COMMIT = "701456b52355c25089e1b536805164570f5def6f"
LLVM_SHA256 = "de280defac064df7243ef4db30be85fbcff595669c6ac99033ed674215a630f3"
LLVM_COMMIT = "33945cdd62c40ea4ce381d4c3d49b22f8a2cc015"
LLVM_SHA256 = "37870dd2459898ab5f555676480b5c56b3279e323b0e9e1783c5641b88849d84"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),

View File

@@ -106,7 +106,6 @@ cc_library(
"lib/EDSC/Builders.cpp",
],
hdrs = [
"include/mlir-c/Core.h",
"include/mlir/EDSC/Builders.h",
],
includes = ["include"],
@@ -856,6 +855,16 @@ cc_library(
],
)
filegroup(
name = "ShapeOpsTdFiles",
srcs = [
"include/mlir/Dialect/Shape/IR/ShapeBase.td",
"include/mlir/Dialect/Shape/IR/ShapeOps.td",
"include/mlir/Interfaces/InferTypeOpInterface.td",
":StdOpsTdFiles",
],
)
gentbl(
name = "ShapeOpsIncGen",
strip_include_prefix = "include",
@@ -922,6 +931,22 @@ cc_library(
],
)
gentbl(
name = "ShapeToStandardGen",
strip_include_prefix = "lib/Conversion/ShapeToStandard",
tbl_outs = [
(
"-gen-rewriters",
"lib/Conversion/ShapeToStandard/ShapeToStandard.cpp.inc",
),
],
tblgen = ":mlir-tblgen",
td_file = "lib/Conversion/ShapeToStandard/ShapeToStandard.td",
td_srcs = [
":ShapeOpsTdFiles",
],
)
cc_library(
name = "ShapeToStandard",
srcs = glob([
@@ -936,6 +961,7 @@ cc_library(
":Pass",
":SCFDialect",
":Shape",
":ShapeToStandardGen",
":StandardOps",
":Support",
":Transforms",

View File

@@ -27,6 +27,7 @@ filegroup(
"@llvm-project//mlir:include/mlir/IR/SymbolInterfaces.td",
"@llvm-project//mlir:include/mlir/Interfaces/CallInterfaces.td",
"@llvm-project//mlir:include/mlir/Interfaces/ControlFlowInterfaces.td",
"@llvm-project//mlir:include/mlir/Interfaces/CopyOpInterface.td",
"@llvm-project//mlir:include/mlir/Interfaces/InferTypeOpInterface.td",
"@llvm-project//mlir:include/mlir/Interfaces/SideEffectInterfaces.td",
],
@@ -140,6 +141,7 @@ cc_library(
":TestTypeDefsIncGen",
"@llvm-project//llvm:Support",
"@llvm-project//mlir:ControlFlowInterfaces",
"@llvm-project//mlir:CopyOpInterface",
"@llvm-project//mlir:DerivedAttributeOpInterface",
"@llvm-project//mlir:Dialect",
"@llvm-project//mlir:IR",