[Grappler] Swap out LayoutOptimizer for GenericLayoutOptimizer.
PiperOrigin-RevId: 257021460
commit b9a6fea1f0 (parent 355f7167d7)
tensorflow/core/grappler/optimizers/BUILD

@@ -594,9 +594,9 @@ cc_library(
         ":debug_stripper",
         ":dependency_optimizer",
         ":function_optimizer",
+        ":generic_layout_optimizer",
         ":graph_optimizer",
         ":implementation_selector",
-        ":layout_optimizer",
         ":loop_optimizer",
         ":memory_optimizer",
         ":model_pruner",
tensorflow/core/grappler/optimizers/meta_optimizer.cc

@@ -32,8 +32,8 @@ limitations under the License.
 #include "tensorflow/core/grappler/optimizers/debug_stripper.h"
 #include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
 #include "tensorflow/core/grappler/optimizers/function_optimizer.h"
+#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
 #include "tensorflow/core/grappler/optimizers/implementation_selector.h"
-#include "tensorflow/core/grappler/optimizers/layout_optimizer.h"
 #include "tensorflow/core/grappler/optimizers/loop_optimizer.h"
 #include "tensorflow/core/grappler/optimizers/memory_optimizer.h"
 #include "tensorflow/core/grappler/optimizers/model_pruner.h"
@@ -121,7 +121,7 @@ std::unique_ptr<GraphOptimizer> MetaOptimizer::MakeNewOptimizer(
   MK_OPT("constfold", new ConstantFolding(cpu_device_));
   MK_OPT("shape", new ShapeOptimizer());
   MK_OPT("remap", new Remapper(cfg_.remapping()));
-  MK_OPT("layout", new LayoutOptimizer());
+  MK_OPT("layout", new GenericLayoutOptimizer());
   MK_OPT("auto_mixed_precision",
          new AutoMixedPrecision(cfg_.auto_mixed_precision()));
   MK_OPT("memory", new MemoryOptimizer(RewriterConfig::MANUAL));
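Note on the hunk above: MakeNewOptimizer maps an optimizer name from the rewriter config to a freshly constructed pass, so after this change the "layout" name resolves to GenericLayoutOptimizer. Below is a minimal, self-contained sketch of that MK_OPT pattern; the GraphOptimizer interface and the optimizer classes are simplified stand-ins of my own, not the real TensorFlow types.

#include <iostream>
#include <memory>
#include <string>

// Simplified stand-in for tensorflow::grappler::GraphOptimizer.
struct GraphOptimizer {
  virtual ~GraphOptimizer() = default;
  virtual std::string name() const = 0;
};

// Stand-ins for the real optimizers; only the names matter for this sketch.
struct GenericLayoutOptimizer : GraphOptimizer {
  std::string name() const override { return "layout"; }
};
struct ShapeOptimizer : GraphOptimizer {
  std::string name() const override { return "shape"; }
};

// Mirrors the spirit of MK_OPT: if the requested name matches, wrap the newly
// allocated optimizer in a unique_ptr and return it immediately.
#define MK_OPT(NAME, VALUE) \
  if (optimizer == NAME) return std::unique_ptr<GraphOptimizer>(VALUE)

std::unique_ptr<GraphOptimizer> MakeNewOptimizer(const std::string& optimizer) {
  MK_OPT("shape", new ShapeOptimizer());
  // After this commit, the "layout" name constructs GenericLayoutOptimizer.
  MK_OPT("layout", new GenericLayoutOptimizer());
  return nullptr;  // Unknown name: the caller handles the fallback.
}

int main() {
  auto opt = MakeNewOptimizer("layout");
  std::cout << (opt ? opt->name() : "unknown") << "\n";  // prints "layout"
}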
@@ -193,7 +193,7 @@ Status MetaOptimizer::InitializeOptimizers(
         MakeUnique<DependencyOptimizer>(cfg_.dependency_optimization()));
   }
   if (cfg_.layout_optimizer() != RewriterConfig::OFF) {
-    optimizers->push_back(MakeUnique<LayoutOptimizer>());
+    optimizers->push_back(MakeUnique<GenericLayoutOptimizer>());
   }
   if (AutoMixedPrecisionEnabled(cfg_.auto_mixed_precision())) {
     optimizers->push_back(
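The user-facing switch is unchanged by this hunk: the pass still runs only when cfg_.layout_optimizer() is not RewriterConfig::OFF; only the class constructed behind that switch differs. As a hedged illustration (the layout_optimizer field and the OFF value come from the check above; the include paths and the helper function name are assumptions of mine, and the snippet presumes a build that links TensorFlow's generated protos), a client could disable the pass through the session config like this:

#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"

// Hypothetical helper: builds a session config with the layout pass disabled.
tensorflow::ConfigProto MakeConfigWithoutLayoutPass() {
  tensorflow::ConfigProto config;
  tensorflow::RewriterConfig* rewriter =
      config.mutable_graph_options()->mutable_rewrite_options();
  // With this commit, leaving layout_optimizer at its default runs
  // GenericLayoutOptimizer; setting it to OFF skips the pass entirely.
  rewriter->set_layout_optimizer(tensorflow::RewriterConfig::OFF);
  return config;
}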
@@ -267,7 +267,7 @@ Status MetaOptimizer::InitializeCustomGraphOptimizers(
       TF_RETURN_IF_ERROR(custom_optimizer->Init(&optimizer_config));
       optimizers->push_back(std::move(custom_optimizer));
     } else {
-      // If there are no custom optimizers with given name, try to initalize a
+      // If there are no custom optimizers with given name, try to initialize a
      // default optimizer. This way, custom configurable optimizers can be
      // mixed with default optimizers in any order.
      auto optimizer = MakeNewOptimizer(optimizer_config.name());
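The comment fixed in the hunk above also documents the lookup order: a custom optimizer registered under a given name takes precedence, and MakeNewOptimizer is consulted only as a fallback, which is what lets custom and default optimizers be mixed in any order. A rough, self-contained sketch of that flow, using a plain std::map and an Opt struct as stand-ins for Grappler's CustomGraphOptimizerRegistry and GraphOptimizer:

#include <functional>
#include <map>
#include <memory>
#include <string>

struct Opt {
  virtual ~Opt() = default;
};
struct MyCustomOptimizer : Opt {};   // Hypothetical user-registered pass.
struct DefaultLayoutOpt : Opt {};    // Stand-in for a built-in default pass.

using Factory = std::function<std::unique_ptr<Opt>()>;

// Stand-in for the custom optimizer registry.
std::map<std::string, Factory>& Registry() {
  static auto* r = new std::map<std::string, Factory>{
      {"my_custom_optimizer",
       [] { return std::make_unique<MyCustomOptimizer>(); }}};
  return *r;
}

// Stand-in for MetaOptimizer::MakeNewOptimizer.
std::unique_ptr<Opt> MakeNewOptimizer(const std::string& name) {
  if (name == "layout") return std::make_unique<DefaultLayoutOpt>();
  return nullptr;
}

std::unique_ptr<Opt> ResolveOptimizer(const std::string& name) {
  auto it = Registry().find(name);
  if (it != Registry().end()) return it->second();  // Custom optimizer wins.
  return MakeNewOptimizer(name);  // Otherwise fall back to a default optimizer.
}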
tensorflow/python/BUILD

@@ -6438,37 +6438,39 @@ cuda_py_test(
     xla_enable_strict_auto_jit = True,
 )
 
-cuda_py_test(
-    name = "layout_optimizer_test",
-    size = "medium",
-    srcs = [
-        "grappler/layout_optimizer_test.py",
-    ],
-    additional_deps = [
-        ":client_testlib",
-        ":framework_for_generated_wrappers",
-        ":array_ops",
-        ":constant_op",
-        ":dtypes",
-        ":functional_ops",
-        ":math_ops",
-        ":nn",
-        ":ops",
-        ":random_ops",
-        ":state_ops",
-        ":tf_cluster",
-        ":tf_optimizer",
-        ":training",
-        "//third_party/py/numpy",
-        "//tensorflow/core:protos_all_py",
-    ],
-    shard_count = 10,
-    tags = [
-        "grappler",
-    ],
-    # This test will not run on XLA because it primarily tests the TF Classic flow.
-    xla_enable_strict_auto_jit = False,
-)
+# TODO(b/131764887) Remove once LayoutOptimizer is swapped out with GenericLayoutOptimizer.
+#
+# cuda_py_test(
+#     name = "layout_optimizer_test",
+#     size = "medium",
+#     srcs = [
+#         "grappler/layout_optimizer_test.py",
+#     ],
+#     additional_deps = [
+#         ":client_testlib",
+#         ":framework_for_generated_wrappers",
+#         ":array_ops",
+#         ":constant_op",
+#         ":dtypes",
+#         ":functional_ops",
+#         ":math_ops",
+#         ":nn",
+#         ":ops",
+#         ":random_ops",
+#         ":state_ops",
+#         ":tf_cluster",
+#         ":tf_optimizer",
+#         ":training",
+#         "//third_party/py/numpy",
+#         "//tensorflow/core:protos_all_py",
+#     ],
+#     shard_count = 10,
+#     tags = [
+#         "grappler",
+#     ],
+#     # This test will not run on XLA because it primarily tests the TF Classic flow.
+#     xla_enable_strict_auto_jit = False,
+# )
 
 py_library(
     name = "cost_analyzer",