Fix uses of private, mangled names for proto enumerators.

PiperOrigin-RevId: 256485441
This commit is contained in:
A. Unique TensorFlower 2019-07-03 21:02:10 -07:00 committed by TensorFlower Gardener
parent eece01c80f
commit 94d7e348d8
5 changed files with 7 additions and 11 deletions

View File

@@ -102,7 +102,7 @@ TEST(ImmutableConstantOpTest, Simple) {
session_options.env = Env::Default();
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
-      ->set_opt_level(OptimizerOptions_Level_L0);
+      ->set_opt_level(OptimizerOptions::L0);
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
@@ -174,7 +174,7 @@ TEST(ImmutableConstantOpTest, FromFile) {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
-      ->set_opt_level(OptimizerOptions_Level_L0);
+      ->set_opt_level(OptimizerOptions::L0);
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";

View File

@@ -1275,7 +1275,7 @@ static void BM_ImageNetSoftmaxFwd(int iters, int batch_size, int node_depth,
opts.config.set_use_per_session_threads(true);
opts.config.mutable_graph_options()
->mutable_optimizer_options()
-      ->set_opt_level(OptimizerOptions_Level_L0);
+      ->set_opt_level(OptimizerOptions::L0);
testing::UseRealTime();
test::Benchmark(device, g, &opts).Run(iters);
testing::ItemsProcessed(batch_size * node_depth * iters);
@@ -1323,7 +1323,7 @@ static void BM_TopK(int iters, int rows, int cols, int k, int num_threads,
opts.config.set_use_per_session_threads(true);
opts.config.mutable_graph_options()
->mutable_optimizer_options()
-      ->set_opt_level(OptimizerOptions_Level_L0);
+      ->set_opt_level(OptimizerOptions::L0);
testing::UseRealTime();
testing::StartTiming();
test::Benchmark(device, g, &opts).Run(iters);

View File

@@ -676,7 +676,7 @@ static void BM_LargeTensorWrite(int iters, int num_elements) {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
-      ->set_opt_level(tensorflow::OptimizerOptions_Level_L0);
+      ->set_opt_level(tensorflow::OptimizerOptions::L0);
TF_CHECK_OK(root.status());
Graph* g = new Graph(OpRegistry::Global());

View File

@@ -69,9 +69,7 @@ Status RealMain(int argc, char** argv) {
TF_RETURN_IF_ERROR(runner.AddGpus(8));
// This binary is used to test TF:XLA behavior, so turn on auto_jit.
-  TF_RETURN_IF_ERROR(
-      runner.SetJitLevel(tensorflow::OptimizerOptions::GlobalJitLevel::
-                             OptimizerOptions_GlobalJitLevel_ON_2));
+  TF_RETURN_IF_ERROR(runner.SetJitLevel(tensorflow::OptimizerOptions::ON_2));
GraphDef graphdef_output;
TF_RETURN_IF_ERROR(runner.Run(optimization_pass, std::move(graphdef_input),
&graphdef_output));

View File

@@ -30,9 +30,7 @@ namespace tensorflow {
// to test individual Tensorflow Optimization passes.
class OptimizationPassRunner {
public:
-  explicit OptimizationPassRunner()
-      : jit_level_(OptimizerOptions::GlobalJitLevel::
-                       OptimizerOptions_GlobalJitLevel_DEFAULT) {}
+  explicit OptimizationPassRunner() : jit_level_(OptimizerOptions::DEFAULT) {}
// Increasing the Jit level will cause XLA to compile parts of the tensorflow
// graph that it is able to.