Integrate LLVM at llvm/llvm-project@33945cdd62
Updates LLVM usage to match [33945cdd62c4](https://github.com/llvm/llvm-project/commit/33945cdd62c4).

PiperOrigin-RevId: 340556042
Change-Id: Id99605800c4c095597b63b822e307edf3da3aa5b
Parent: 1e228f57d2
Commit: b37cacd6ea
Changed paths (file tree):
  tensorflow/compiler/mlir/lite
  tensorflow/compiler/mlir/tensorflow/transforms
  tensorflow/compiler/mlir/tensorflow/translate
  tensorflow/compiler/mlir/tensorflow/utils
  tensorflow/compiler/mlir/tfjs
  tensorflow/compiler/mlir/tools/kernel_gen
  tensorflow/compiler/xla/service
  third_party/mlir
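Most of the diff below adapts TensorFlow's MLIR pipelines to the pass-manager API at this LLVM revision: pass managers that want the old implicit-nesting behaviour are now constructed with mlir::OpPassManager::Nesting::Implicit, function-scoped passes are registered with addNestedPass<mlir::FuncOp>(...) instead of addPass(...), and pass verification is toggled with enableVerifier(false) rather than a constructor flag. The sketch below is illustrative only and is not code from this commit; the function name, the chosen passes, and the include paths (which vary across MLIR versions) are assumptions.

    #include "mlir/IR/Function.h"        // mlir::FuncOp (header location varies by MLIR version)
    #include "mlir/IR/MLIRContext.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Transforms/Passes.h"  // createCanonicalizerPass, createSymbolDCEPass

    // Illustrative sketch of the two registration styles used throughout this commit.
    void BuildExamplePipelines(mlir::MLIRContext* context) {
      // Option 1: opt back into implicit nesting. Op-specific passes added with
      // addPass() are then nested under the right operation automatically,
      // which is the behaviour these pipelines previously relied on.
      mlir::PassManager implicit_pm(context,
                                    mlir::OpPassManager::Nesting::Implicit);
      implicit_pm.addPass(mlir::createCanonicalizerPass());  // op-agnostic, top level

      // Option 2: keep the explicit-nesting default and spell out where each
      // pass runs, as most pipelines in this diff now do.
      mlir::PassManager explicit_pm(context);
      explicit_pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
      explicit_pm.addPass(mlir::createSymbolDCEPass());  // module-level pass
    }

The implicit form recreates the old behaviour, while the explicit form makes the anchoring operation visible at the call site.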
@@ -289,7 +289,8 @@ Status ConvertMLIRToTFLiteFlatBuffer(
         absl::StrCat(toco_flags.dump_graphviz_dir(), "/toco_AT_IMPORT.dot")));
   }
 
-  mlir::PassManager pm(module->getContext());
+  mlir::PassManager pm(module->getContext(),
+                       mlir::OpPassManager::Nesting::Implicit);
 
   tensorflow::AddTFToTFLConversionPasses(pass_config, &pm, session);
   // Convert back to outlined while format for export back to flatbuffer.
@@ -75,7 +75,7 @@ TfLiteStatus QuantizeModel(
   }
 
   // Apply quantization passes
-  PassManager pm(module->getContext());
+  PassManager pm(module->getContext(), OpPassManager::Nesting::Implicit);
   TFL::QuantizationSpecs quant_specs;
   quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
   quant_specs.post_training_quantization = true;
@@ -57,7 +57,7 @@ TfLiteStatus SparsifyModel(const tflite::ModelT& input_model,
     return kTfLiteError;
   }
 
-  PassManager pm(module->getContext());
+  PassManager pm(module->getContext(), OpPassManager::Nesting::Implicit);
   pm.addPass(TFL::CreateDenseToSparsePass());
 
   if (failed(pm.run(module.get()))) {
@@ -45,18 +45,20 @@ const char kTFLiteDataLayout[] = "NHWC";
 
 void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,
                            mlir::OpPassManager* pass_manager) {
-  pass_manager->addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));
+  pass_manager->addNestedPass<mlir::FuncOp>(
+      mlir::TFL::CreatePrepareQuantizePass(quant_specs));
   if (quant_specs.default_ranges.first.hasValue() ||
       quant_specs.default_ranges.second.hasValue()) {
-    pass_manager->addPass(mlir::TFL::CreateDefaultQuantParamsPass(
-        quant_specs.default_ranges.first.getValueOr(0.0),
-        quant_specs.default_ranges.second.getValueOr(0.0),
-        quant_specs.IsSignedInferenceType()));
+    pass_manager->addNestedPass<mlir::FuncOp>(
+        mlir::TFL::CreateDefaultQuantParamsPass(
+            quant_specs.default_ranges.first.getValueOr(0.0),
+            quant_specs.default_ranges.second.getValueOr(0.0),
+            quant_specs.IsSignedInferenceType()));
   }
-  pass_manager->addPass(mlir::TFL::CreateQuantizePass());
+  pass_manager->addNestedPass<mlir::FuncOp>(mlir::TFL::CreateQuantizePass());
   bool emit_quant_adaptor_ops =
       quant_specs.inference_type != quant_specs.inference_input_type;
-  pass_manager->addPass(
+  pass_manager->addNestedPass<mlir::FuncOp>(
       mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));
 }
 
@@ -67,7 +69,8 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
   standard_pipeline_options.enable_inliner = false;
   standard_pipeline_options.form_clusters = pass_config.form_clusters;
   mlir::TF::CreateTFStandardPipeline(*pass_manager, standard_pipeline_options);
-  pass_manager->addPass(mlir::TF::CreateDeviceIndexSelectorPass());
+  pass_manager->addNestedPass<mlir::FuncOp>(
+      mlir::TF::CreateDeviceIndexSelectorPass());
 
   // Add canonicalize pass to remove no-op session initializer pass.
   pass_manager->addPass(mlir::createCanonicalizerPass());
@@ -155,7 +158,8 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
   pass_manager->addPass(mlir::createInlinerPass());
 
   // TODO(jpienaar): Revise post dialect constants.
-  pass_manager->addPass(mlir::TF::CreateDecodeConstantPass());
+  pass_manager->addNestedPass<mlir::FuncOp>(
+      mlir::TF::CreateDecodeConstantPass());
   // Canonicalization includes const folding, which is utilized here to optimize
   // away ops that can't get constant folded after PrepareTF pass. For example,
   // tf.Conv2D is split into tf.Transpose and tfl.Conv2D.
@@ -178,11 +182,11 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
   // to match 'kTFLiteDataLayout'
   mlir::TF::LayoutOptimizationPipelineOptions layout_optimization_options;
   layout_optimization_options.force_data_format = kTFLiteDataLayout;
-  mlir::TF::CreateLayoutOptimizationPipeline(*pass_manager,
-                                             layout_optimization_options);
+  mlir::TF::CreateLayoutOptimizationPipeline(
+      pass_manager->nest<mlir::FuncOp>(), layout_optimization_options);
   // Prepare for TFLite dialect, rerun canonicalization, and then legalize to
   // the TFLite dialect.
-  pass_manager->addPass(
+  pass_manager->addNestedPass<mlir::FuncOp>(
       mlir::TFL::CreatePrepareTFPass(pass_config.unfold_batch_matmul));
   pass_manager->addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
   if (pass_config.shape_inference) {
@@ -198,16 +202,18 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
     pass_manager->addPass(mlir::createInlinerPass());
 
     // This pass removes the asset file dependencies in hash table use cases.
-    pass_manager->addPass(mlir::TF::CreateInitTextFileToImportPass());
+    pass_manager->addNestedPass<mlir::FuncOp>(
+        mlir::TF::CreateInitTextFileToImportPass());
 
-    pass_manager->addPass(
+    pass_manager->addNestedPass<mlir::FuncOp>(
        mlir::TFL::CreateLegalizeTFPass(pass_config.runtime_verification));
-    pass_manager->addPass(mlir::TFL::CreateOptimizePass());
+    pass_manager->addNestedPass<mlir::FuncOp>(mlir::TFL::CreateOptimizePass());
    // This pass operates on TensorFlow ops but is triggered after legalization
    // so that it can target constants introduced once TensorFlow Identity ops
    // are removed during legalization.
    pass_manager->addPass(mlir::TFL::CreateOptimizeFunctionalOpsPass());
-    pass_manager->addPass(mlir::TFL::CreateRaiseCustomOpsPass());
+    pass_manager->addNestedPass<mlir::FuncOp>(
+        mlir::TFL::CreateRaiseCustomOpsPass());
    pass_manager->addPass(mlir::createSymbolDCEPass());
    pass_manager->addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
    pass_manager->addNestedPass<mlir::FuncOp>(mlir::createCSEPass());
@@ -225,7 +231,8 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
    // it's not desired by TFL. This pass serves as a "fix" pass to split the
    // merged inputs until we have 1st class variable support or reuse
    // tf.variable to model this.
-    pass_manager->addPass(mlir::TFL::CreateSplitMergedOperandsPass());
+    pass_manager->addNestedPass<mlir::FuncOp>(
+        mlir::TFL::CreateSplitMergedOperandsPass());
   }
 }
 
@@ -295,7 +302,7 @@ void CreateTFLStandardPipeline(OpPassManager& pm,
 
   pm.addPass(mlir::TFL::CreateWhileOutlinePass());
 
-  pm.addPass(mlir::TFL::CreateRuntimeVerifyPass());
+  pm.addNestedPass<mlir::FuncOp>(mlir::TFL::CreateRuntimeVerifyPass());
 }
 
 // Registers a pass pipeline for the standard TFL passes.
@@ -29,6 +29,7 @@ limitations under the License.
 #include "mlir/IR/MLIRContext.h"  // from @llvm-project
 #include "mlir/IR/Module.h"  // from @llvm-project
 #include "mlir/Pass/Pass.h"  // from @llvm-project
+#include "mlir/Pass/PassManager.h"  // from @llvm-project
 #include "mlir/Support/FileUtilities.h"  // from @llvm-project
 #include "tensorflow/cc/saved_model/loader.h"
 #include "tensorflow/compiler/mlir/init_mlir.h"
@@ -187,7 +188,7 @@ int main(int argc, char **argv) {
   // message. So we can just return here.
   if (!module.ok()) return kTrFailure;
 
-  mlir::PassManager pm(&context);
+  mlir::PassManager pm(&context, mlir::OpPassManager::Nesting::Implicit);
   mlir::applyPassManagerCLOptions(pm);
 
   // Set the quantization specifications from the command line flags.
@@ -121,7 +121,7 @@ void CreateTPUBridgePipeline(OpPassManager &pm) {
   pm.addPass(CreateTPURewritePass());
   pm.addPass(createSymbolDCEPass());
   pm.addNestedPass<FuncOp>(TFDevice::CreateReplicateInvariantOpHoistingPass());
-  pm.addNestedPass<FuncOp>(CreateTPUDynamicLayoutPass());
+  pm.addPass(CreateTPUDynamicLayoutPass());
   pm.addNestedPass<FuncOp>(CreateTPUMergeVariablesWithExecutePass());
   pm.addNestedPass<FuncOp>(CreateTPUColocateCompositeResourceOps());
   pm.addPass(CreateTPUVariableReformattingPass());
@@ -50,7 +50,8 @@ Status MlirGraphOptimizationPass::Run(const ConfigProto& config_proto,
   // Assign optimal data layout to layout sensitive operations and delete
   // redundant transposes from the IR.
   LayoutOptimizationPipelineOptions layout_optimization_options;
-  CreateLayoutOptimizationPipeline(pm, layout_optimization_options);
+  CreateLayoutOptimizationPipeline(pm.nest<FuncOp>(),
+                                   layout_optimization_options);
 
   // Prepare IR for exporting.
   pm.addPass(CreateBreakUpIslandsPass());
@@ -85,7 +85,7 @@ void InitTextFileToImportTestPass::runOnOperation() {
 
   // Run the lowering pass.
   PassManager pm(context);
-  pm.addPass(CreateInitTextFileToImportPass());
+  pm.addNestedPass<FuncOp>(CreateInitTextFileToImportPass());
   if (failed(pm.run(module))) return signalPassFailure();
 }
 
@@ -3569,17 +3569,20 @@ Status SavedModelSignatureDefImporter::LiftVariables() {
 
   mlir::PassManager pm(module_->getContext());
   SetCrashReproducer(pm);
-  pm.addPass(mlir::tf_executor::CreateTFExecutorGraphPruningPass());
-  pm.addPass(mlir::CreateExecutorDialectToFunctionalConversionPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      mlir::tf_executor::CreateTFExecutorGraphPruningPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      mlir::CreateExecutorDialectToFunctionalConversionPass());
   pm.addPass(
       mlir::tf_saved_model::CreateRemoveVariablesInSessionInitializerPass());
-  pm.addPass(
+  pm.addNestedPass<mlir::FuncOp>(
       mlir::TF::
           CreateConvertReadonlyReferenceVariablesToResourceVariablesPass());
   pm.addPass(mlir::TF::CreatePromoteVarHandlesToArgsPass());
   pm.addPass(
       mlir::tf_saved_model::CreateLiftVariablesPass(bundle_.GetSession()));
-  pm.addPass(mlir::tf_saved_model::CreateDedupBoundInputBindingPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      mlir::tf_saved_model::CreateDedupBoundInputBindingPass());
   if (mlir::failed(pm.run(*module_)))
     return diag_handler.Combine(errors::Internal("Failed to lift variables."));
 
@@ -279,7 +279,8 @@ void CreateConvertMlirToXlaHloPipeline(
   pm.addPass(mlir::TF::CreateTensorListOpsDecompositionPass());
   pm.addPass(mlir::TF::CreateStackOpsDecompositionPass());
   pm.addPass(mlir::TF::CreateTensorArrayOpsDecompositionPass());
-  pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      mlir::TFDevice::CreateDecomposeResourceOpsPass());
   pm.addPass(mlir::TF::CreatePromoteResourcesToArgsPass());
   pm.addPass(mlir::createSymbolDCEPass());
   // Guarantee all functions have one use, which enables shape inference.
@@ -36,7 +36,7 @@ void AddTFToTFJSConversionPasses(mlir::OpPassManager* pm) {
   pm->addPass(mlir::tf_saved_model::CreateFreezeGlobalTensorsPass());
 
   // TFJS dialect passes.
-  pm->addPass(mlir::tfjs::CreateOptimizePass());
+  pm->addNestedPass<mlir::FuncOp>(mlir::tfjs::CreateOptimizePass());
 
   // Canonicalize, CSE etc.
   pm->addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
@@ -75,7 +75,7 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
   applyTensorflowAndCLOptions(pm);
 
   if (gpu_binary_only) {
-    pm.addPass(mlir::mhlo::createLegalizeTFPass(
+    pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createLegalizeTFPass(
         /*allow_partial_conversion=*/false, /*legalize_chlo=*/true));
     pm.addNestedPass<mlir::FuncOp>(
         mlir::kernel_gen::transforms::CreateMaterializeBroadcastsPass());
@@ -83,23 +83,24 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
         mlir::kernel_gen::transforms::CreateUnfuseBatchNormPass());
     pm.addPass(mlir::mhlo::createLegalizeToLhloPass());
     // Moving `AllocOp`s and inserting missing `DeallocOp`s
-    pm.addPass(::mlir::createBufferHoistingPass());
-    pm.addPass(::mlir::createBufferDeallocationPass());
+    pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferHoistingPass());
+    pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferDeallocationPass());
     pm.addNestedPass<mlir::FuncOp>(mlir::createCopyRemovalPass());
     pm.addPass(mlir::createCanonicalizerPass());
     pm.addPass(mlir::kernel_gen::transforms::CreateShapeToDescriptorsPass());
   } else {
-    pm.addPass(mlir::mhlo::createLegalizeTFPass(
+    pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createLegalizeTFPass(
         /*allow_partial_conversion=*/false, /*legalize_chlo=*/false));
-    pm.addPass(mlir::createTransformUnrankedHloPass());
-    pm.addPass(mlir::mhlo::createChloLegalizeToHloPass());
-    pm.addPass(mlir::createCanonicalizerPass());
+    pm.addNestedPass<mlir::FuncOp>(mlir::createTransformUnrankedHloPass());
+    pm.addNestedPass<mlir::FuncOp>(mlir::mhlo::createChloLegalizeToHloPass());
+    pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
     pm.addPass(mlir::kernel_gen::transforms::CreateShapeToDescriptorsPass());
     // Clean up the IR created above. In particular, operations on descriptors
     // are simplified here.
     pm.addPass(mlir::createCanonicalizerPass());
     pm.addPass(mlir::kernel_gen::transforms::CreateBufferizePass());
-    pm.addPass(mlir::kernel_gen::transforms::CreateParallelLoopsToSequential());
+    pm.addNestedPass<mlir::FuncOp>(
+        mlir::kernel_gen::transforms::CreateParallelLoopsToSequential());
   }
 
   // Clean up the IR for further processing.
@@ -119,36 +120,41 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
     tiling_for_unrolling.append(tile_sizes.begin(), tile_sizes.end());
   }
   // Transform LHLO operations to LinAlg.
-  pm.addPass(::mlir::lmhlo::createLegalizeLhloToLinalgPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      ::mlir::lmhlo::createLegalizeLhloToLinalgPass());
   // Fuse linalg operations.
-  pm.addPass(::mlir::lmhlo::createLhloFuseLinalgPass(
+  pm.addNestedPass<mlir::FuncOp>(::mlir::lmhlo::createLhloFuseLinalgPass(
       /*use_parallel_loops=*/true, tiling_for_unrolling));
   // Transform the Linalg operations inside of the loop nest into parallel
   // loops.
-  pm.addPass(::mlir::createConvertLinalgToParallelLoopsPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      ::mlir::createConvertLinalgToParallelLoopsPass());
   // Canonicalize the code to simplify index computations. This is needed so
   // that loop bounds have the same value.
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
   // Fuse the inner-most loops.
-  pm.addPass(xla::mlir_gpu::createFuseInnerParallelLoopsPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      xla::mlir_gpu::createFuseInnerParallelLoopsPass());
   // Run CSE to ensure that loads and stores to the same subview get
   // recognized as such.
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
   // Forward stores to buffers to loads.
-  pm.addPass(xla::mlir_gpu::createStoreForwardingPass());
+  pm.addNestedPass<mlir::FuncOp>(xla::mlir_gpu::createStoreForwardingPass());
   // Remove now unused temporary buffers.
-  pm.addPass(xla::mlir_gpu::createDeadTempBufferRemovalPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      xla::mlir_gpu::createDeadTempBufferRemovalPass());
   if (!unroll_factors.empty()) {
-    pm.addPass(::mlir::createParallelLoopTilingPass(as_int64));
+    pm.addNestedPass<mlir::FuncOp>(
+        ::mlir::createParallelLoopTilingPass(as_int64));
   }
   // Some basic cleanup.
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
   // Greedily map the remaining loop to GPU hardware dimensions.
-  pm.addPass(xla::mlir_gpu::createMapParallelLoopsPass());
+  pm.addNestedPass<::mlir::FuncOp>(xla::mlir_gpu::createMapParallelLoopsPass());
   // Apply the mapping.
-  pm.addPass(mlir::createParallelLoopToGpuPass());
+  pm.addNestedPass<::mlir::FuncOp>(mlir::createParallelLoopToGpuPass());
 
   // Embed TF Framework ops.
   if (!gpu_binary_only) {
@@ -171,12 +177,13 @@ Status LowerTFtoGPU(mlir::ModuleOp module, bool gpu_binary_only,
 
   if (gpu_binary_only) {
     // Make kernel signature deterministic so that we can call it externally.
-    pm.addPass(xla::mlir_gpu::createRewriteKernelSignaturePass());
+    pm.addNestedPass<::mlir::FuncOp>(
+        xla::mlir_gpu::createRewriteKernelSignaturePass());
   }
   pm.addPass(::mlir::createLowerAffinePass());
 
   // Constraints are removed as late as possible and before lowering to CFG.
-  pm.addPass(::mlir::createConvertShapeConstraintsPass());
+  pm.addNestedPass<::mlir::FuncOp>(::mlir::createConvertShapeConstraintsPass());
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
 
   pm.addPass(::mlir::createLowerToCFGPass());
@@ -40,7 +40,8 @@ std::unique_ptr<llvm::Module> MakeLLVMModule(mlir::OwningModuleRef module,
   // TODO(kramerb): link this to the right option, command line flag, etc.
   constexpr bool kReassociateFPReductions = true;
 
-  mlir::PassManager manager(module->getContext());
+  mlir::PassManager manager(module->getContext(),
+                            mlir::OpPassManager::Nesting::Implicit);
   manager.addPass(mlir::createConvertLinalgToLoopsPass());
   manager.addPass(mlir::createLowerAffinePass());
   manager.addPass(mlir::createLowerToCFGPass());
@@ -70,49 +70,53 @@ Status LowerLHLOToGPU(mlir::ModuleOp module, LowerLHLOToGPUOptions options) {
   // Legalize from HLO to LHLO.
   pm.addPass(::mlir::mhlo::createLegalizeToLhloPass());
   // Moving `AllocOp`s and inserting missing `DeallocOp`s
-  pm.addPass(::mlir::createBufferHoistingPass());
-  pm.addPass(::mlir::createBufferDeallocationPass());
+  pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferHoistingPass());
+  pm.addNestedPass<mlir::FuncOp>(::mlir::createBufferDeallocationPass());
   // Next, we can strip the outer fusion operation.
-  pm.addPass(createFusionOpRemoverPass());
+  pm.addNestedPass<mlir::FuncOp>(createFusionOpRemoverPass());
   // Remove unnecessary LHLO copies.
-  pm.addPass(::mlir::createCopyRemovalPass());
+  pm.addNestedPass<mlir::FuncOp>(::mlir::createCopyRemovalPass());
   // Legalize reduce operations directly to GPU dialect.
-  pm.addPass(::mlir::lmhlo::createLegalizeToGpuPass());
+  pm.addNestedPass<mlir::FuncOp>(::mlir::lmhlo::createLegalizeToGpuPass());
   // Transform LHLO operations to LinAlg.
-  pm.addPass(::mlir::lmhlo::createLegalizeLhloToLinalgPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      ::mlir::lmhlo::createLegalizeLhloToLinalgPass());
   // Fuse linalg operations.
-  pm.addPass(::mlir::lmhlo::createLhloFuseLinalgPass(
+  pm.addNestedPass<mlir::FuncOp>(::mlir::lmhlo::createLhloFuseLinalgPass(
       /*use_parallel_loops=*/true, tiling_for_unrolling));
   // Transform the Linalg operations inside of the loop nest into parallel
   // loops.
-  pm.addPass(::mlir::createConvertLinalgToParallelLoopsPass());
+  pm.addNestedPass<mlir::FuncOp>(
+      ::mlir::createConvertLinalgToParallelLoopsPass());
   // Canonicalize the code to simplify index computations. This is needed so
   // that loop bounds have the same value.
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
   // Fuse the inner-most loops.
-  pm.addPass(createFuseInnerParallelLoopsPass());
+  pm.addNestedPass<mlir::FuncOp>(createFuseInnerParallelLoopsPass());
   // Run CSE to ensure that loads and stores to the same subview get
   // recognized as such.
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
   // Forward stores to buffers to loads.
-  pm.addPass(createStoreForwardingPass());
+  pm.addNestedPass<mlir::FuncOp>(createStoreForwardingPass());
   // Remove now unused temporary buffers.
-  pm.addPass(createDeadTempBufferRemovalPass());
+  pm.addNestedPass<mlir::FuncOp>(createDeadTempBufferRemovalPass());
   if (!options.unroll_factors.empty()) {
-    pm.addPass(::mlir::createParallelLoopTilingPass(as_int64));
+    pm.addNestedPass<mlir::FuncOp>(
+        ::mlir::createParallelLoopTilingPass(as_int64));
   }
   // Project all loop dimensions to X if necessary.
   if (options.collapse_parallel_loops) {
-    pm.addPass(createParallelLoopCollapsingToFirstDimPass());
+    pm.addNestedPass<mlir::FuncOp>(
+        createParallelLoopCollapsingToFirstDimPass());
   }
   // Some basic cleanup.
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
   // Greedily map the remaining loop to GPU hardware dimensions.
-  pm.addPass(createMapParallelLoopsPass());
+  pm.addNestedPass<::mlir::FuncOp>(createMapParallelLoopsPass());
   // Apply the mapping.
-  pm.addPass(mlir::createParallelLoopToGpuPass());
+  pm.addNestedPass<::mlir::FuncOp>(mlir::createParallelLoopToGpuPass());
   // Some basic cleanup.
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCanonicalizerPass());
   pm.addNestedPass<::mlir::FuncOp>(::mlir::createCSEPass());
@@ -131,7 +135,7 @@ Status LowerLHLOToGPU(mlir::ModuleOp module, LowerLHLOToGPUOptions options) {
   // Make sure the kernel signature resembled the original function's
   // signature
   if (options.rewrite_signature) {
-    pm.addPass(createRewriteKernelSignaturePass());
+    pm.addNestedPass<::mlir::FuncOp>(createRewriteKernelSignaturePass());
   }
   if (failed(pm.run(module))) {
     return InternalError("Lowering to GPU kernels failed.");
@@ -179,8 +183,9 @@ class LowerToNVVMPass
 }  // namespace
 
 Status LowerKernelBodiesToNVVM(mlir::ModuleOp module) {
+  ::mlir::PassManager pm(module.getContext());
   // We cannot verify as the signature of the kernel is rewritten.
-  ::mlir::PassManager pm(module.getContext(), /*verifyPasses=*/false);
+  pm.enableVerifier(false);
   tensorflow::applyTensorflowAndCLOptions(pm);
 
   // Rewrite kernel functions to LLVM IR.
@@ -250,8 +255,9 @@ class LowerToROCDLPass
 }  // namespace
 
 Status LowerKernelBodiesToROCDL(mlir::ModuleOp module) {
+  ::mlir::PassManager pm(module.getContext());
   // We cannot verify as the signature of the kernel is rewritten.
-  ::mlir::PassManager pm(module.getContext(), /*verifyPasses=*/false);
+  pm.enableVerifier(false);
   tensorflow::applyTensorflowAndCLOptions(pm);
 
   auto enable_if_vlog_is_on = [](mlir::Pass*, mlir::Operation*) {
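The two kernel-lowering hunks above also capture the verifier side of the new PassManager API: the constructor's second argument is now the nesting mode rather than a /*verifyPasses=*/ flag, so verification is switched off on the constructed pass manager instead. A minimal sketch of the new pattern, not code from this commit (pm and module are placeholders):

    ::mlir::PassManager pm(module.getContext());
    // The kernel signature is rewritten later in this pipeline, so the
    // intermediate IR would not verify; turn the verifier off explicitly.
    pm.enableVerifier(false);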
@@ -90,7 +90,8 @@ MlirCompiler::IRHook XlaGpuOpt::GetIRHookBreakingLoweringStage(
     LoweringStage breaking_stage) {
   return {[](mlir::ModuleOp module) -> Status {
             mlir::PassManager pm(module.getContext());
-            pm.addPass(::mlir::createInjectErrorsForTestingPass());
+            pm.addNestedPass<::mlir::FuncOp>(
+                ::mlir::createInjectErrorsForTestingPass());
             if (failed(pm.run(module))) {
               return InternalError("InjectErrorsForTestingPass failed.");
             }
@@ -686,8 +686,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
     )
 
     # Check out LLVM and MLIR from llvm-project.
-    LLVM_COMMIT = "701456b52355c25089e1b536805164570f5def6f"
-    LLVM_SHA256 = "de280defac064df7243ef4db30be85fbcff595669c6ac99033ed674215a630f3"
+    LLVM_COMMIT = "33945cdd62c40ea4ce381d4c3d49b22f8a2cc015"
+    LLVM_SHA256 = "37870dd2459898ab5f555676480b5c56b3279e323b0e9e1783c5641b88849d84"
     LLVM_URLS = [
         "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
         "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
third_party/mlir/BUILD (vendored, 28 changed lines):
@@ -106,7 +106,6 @@ cc_library(
         "lib/EDSC/Builders.cpp",
     ],
     hdrs = [
-        "include/mlir-c/Core.h",
         "include/mlir/EDSC/Builders.h",
     ],
     includes = ["include"],
@@ -856,6 +855,16 @@ cc_library(
     ],
 )
 
+filegroup(
+    name = "ShapeOpsTdFiles",
+    srcs = [
+        "include/mlir/Dialect/Shape/IR/ShapeBase.td",
+        "include/mlir/Dialect/Shape/IR/ShapeOps.td",
+        "include/mlir/Interfaces/InferTypeOpInterface.td",
+        ":StdOpsTdFiles",
+    ],
+)
+
 gentbl(
     name = "ShapeOpsIncGen",
     strip_include_prefix = "include",
@@ -922,6 +931,22 @@ cc_library(
     ],
 )
 
+gentbl(
+    name = "ShapeToStandardGen",
+    strip_include_prefix = "lib/Conversion/ShapeToStandard",
+    tbl_outs = [
+        (
+            "-gen-rewriters",
+            "lib/Conversion/ShapeToStandard/ShapeToStandard.cpp.inc",
+        ),
+    ],
+    tblgen = ":mlir-tblgen",
+    td_file = "lib/Conversion/ShapeToStandard/ShapeToStandard.td",
+    td_srcs = [
+        ":ShapeOpsTdFiles",
+    ],
+)
+
 cc_library(
     name = "ShapeToStandard",
     srcs = glob([
@@ -936,6 +961,7 @@ cc_library(
         ":Pass",
         ":SCFDialect",
         ":Shape",
+        ":ShapeToStandardGen",
        ":StandardOps",
        ":Support",
        ":Transforms",
third_party/mlir/test.BUILD (vendored, 2 changed lines):
@@ -27,6 +27,7 @@ filegroup(
         "@llvm-project//mlir:include/mlir/IR/SymbolInterfaces.td",
         "@llvm-project//mlir:include/mlir/Interfaces/CallInterfaces.td",
         "@llvm-project//mlir:include/mlir/Interfaces/ControlFlowInterfaces.td",
+        "@llvm-project//mlir:include/mlir/Interfaces/CopyOpInterface.td",
         "@llvm-project//mlir:include/mlir/Interfaces/InferTypeOpInterface.td",
         "@llvm-project//mlir:include/mlir/Interfaces/SideEffectInterfaces.td",
     ],
@@ -140,6 +141,7 @@ cc_library(
         ":TestTypeDefsIncGen",
         "@llvm-project//llvm:Support",
         "@llvm-project//mlir:ControlFlowInterfaces",
+        "@llvm-project//mlir:CopyOpInterface",
        "@llvm-project//mlir:DerivedAttributeOpInterface",
        "@llvm-project//mlir:Dialect",
        "@llvm-project//mlir:IR",