Run shape inference before any new non-tf ops are introduced

PiperOrigin-RevId: 310263391
Change-Id: Iff15999065f5ffa1c331646ae53c2f093c05b994
This commit is contained in:
Feng Liu 2020-05-06 17:44:29 -07:00 committed by TensorFlower Gardener
parent 154044d0f2
commit fac30b7a87
1 changed file with 5 additions and 4 deletions

View File

@ -73,16 +73,17 @@ void AddTFToTFLConversionPasses(const mlir::TFL::PassConfig& pass_config,
pass_manager->addPass(mlir::TFControlFlow::CreateRaiseTFControlFlowPass());
}
if (pass_config.shape_inference) {
pass_manager->addPass(mlir::TF::CreateTFShapeInferencePass());
}
// Keep this pass after the shape inference pass, which cannot perform shape
// inference for non-tf ops.
if (!pass_config.quant_specs.serialized_quant_stats.empty()) {
pass_manager->addPass(
mlir::quant::CreateImportQuantStatsPassForTFControlDialect(
pass_config.quant_specs.serialized_quant_stats));
}
if (pass_config.shape_inference) {
pass_manager->addPass(mlir::TF::CreateTFShapeInferencePass());
}
// The conversion pipeline has to follow the following orders:
// 1) Saved model related optimization like decompose resource ops
// 2) Convert composite functions like lstm/rnns, along with proper function