From c2239c1a2a108c118355640cea6c80da93985e3c Mon Sep 17 00:00:00 2001
From: Jay Shi
Date: Wed, 21 Oct 2020 16:07:04 -0700
Subject: [PATCH] [tf.data] Add the optimization test when RAM budget is 0.

PiperOrigin-RevId: 338359725
Change-Id: I5c89aab1f65cf170ed4b64078834c883ad230c96
---
 tensorflow/core/framework/model.h       |  6 +++
 tensorflow/core/framework/model_test.cc | 59 ++++++++++++++++++++++++-
 2 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/tensorflow/core/framework/model.h b/tensorflow/core/framework/model.h
index 993dcddca50..a3cd0c06a48 100644
--- a/tensorflow/core/framework/model.h
+++ b/tensorflow/core/framework/model.h
@@ -251,6 +251,12 @@ class Node {
   // Returns the node output.
   Node* output() const { return output_; }
 
+  // Returns the parameter value.
+  double parameter_value(const string& name) const TF_LOCKS_EXCLUDED(mu_) {
+    tf_shared_lock l(mu_);
+    return parameters_.at(name)->state->value;
+  }
+
   // Returns the aggregate processing time.
   int64 processing_time() const TF_LOCKS_EXCLUDED(mu_) {
     return processing_time_;
diff --git a/tensorflow/core/framework/model_test.cc b/tensorflow/core/framework/model_test.cc
index 6efc8eaac4b..97eb720b058 100644
--- a/tensorflow/core/framework/model_test.cc
+++ b/tensorflow/core/framework/model_test.cc
@@ -387,7 +387,7 @@ TEST(UnknownTest, Model) {
   EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 100);
 }
 
-TEST(SetterGetterTest, Node) {
+TEST(BufferedBytesTest, Node) {
   std::shared_ptr<Node> node = model::MakeAsyncInterleaveManyNode(
       {-1, "TestNode", nullptr},
       {model::MakeParameter("parallelism",
@@ -892,6 +892,63 @@ TEST_P(SelfProcessingTimeTest, Model) {
 INSTANTIATE_TEST_SUITE_P(Test, SelfProcessingTimeTest,
                          ::testing::Values(0, 1, 2, 5, 10, 20, 40));
 
+class OptimizeZeroRamBudgetTest
+    : public ::testing::TestWithParam<model::AutotuneAlgorithm> {};
+
+TEST_P(OptimizeZeroRamBudgetTest, Model) {
+  const model::AutotuneAlgorithm algorithm = GetParam();
+
+  std::shared_ptr<mutex> mutex1 = std::make_shared<mutex>();
+  std::shared_ptr<condition_variable> cv1 =
+      std::make_shared<condition_variable>();
+  std::shared_ptr<Node> node1 = model::MakeAsyncKnownRatioNode(
+      {1, "1", nullptr}, 2,
+      {model::MakeParameter("parallelism",
+                            std::make_shared<SharedState>(-1, mutex1, cv1), 1,
+                            5)});
+  node1->record_buffer_event(1, 1);
+
+  std::shared_ptr<mutex> mutex2 = std::make_shared<mutex>();
+  std::shared_ptr<condition_variable> cv2 =
+      std::make_shared<condition_variable>();
+  std::shared_ptr<Node> node2 = model::MakeAsyncKnownRatioNode(
+      {2, "2", node1}, 5,
+      {model::MakeParameter("buffer_size",
+                            std::make_shared<SharedState>(-1, mutex2, cv2), 0,
+                            6)});
+  node2->record_buffer_event(1, 1);
+
+  std::shared_ptr<mutex> mutex3 = std::make_shared<mutex>();
+  std::shared_ptr<condition_variable> cv3 =
+      std::make_shared<condition_variable>();
+  std::shared_ptr<Node> node3 = model::MakeAsyncInterleaveManyNode(
+      {3, "3", node2},
+      {model::MakeParameter("parallelism",
+                            std::make_shared<SharedState>(-1, mutex3, cv3), 1,
+                            7)});
+  node3->record_buffer_event(1, 1);
+
+  EXPECT_EQ(node1->parameter_value("parallelism"), -1);
+  EXPECT_EQ(node2->parameter_value("buffer_size"), -1);
+  EXPECT_EQ(node3->parameter_value("parallelism"), -1);
+
+  model::Model model;
+  model.AddNode([&node1](model::Node::Args args) { return node1; }, "1",
+                nullptr, &node1);
+  model.AddNode([&node2](model::Node::Args args) { return node2; }, "2", node1,
+                &node2);
+  model.AddNode([&node3](model::Node::Args args) { return node3; }, "3", node2,
+                &node3);
+
+  model.Optimize(algorithm, 40, 0, 0);
+  EXPECT_EQ(node1->parameter_value("parallelism"), 1);
+  EXPECT_EQ(node2->parameter_value("buffer_size"), 0);
+  EXPECT_EQ(node3->parameter_value("parallelism"), 1);
+}
+
+INSTANTIATE_TEST_SUITE_P(Test, OptimizeZeroRamBudgetTest,
+                         ::testing::Values(0, 1));
+
 }  // namespace
 }  // namespace model
 }  // namespace data