diff --git a/tensorflow/cc/tutorials/example_trainer.cc b/tensorflow/cc/tutorials/example_trainer.cc index 27ff0914d5f..f2ecd2eddc2 100644 --- a/tensorflow/cc/tutorials/example_trainer.cc +++ b/tensorflow/cc/tutorials/example_trainer.cc @@ -37,7 +37,7 @@ namespace tensorflow { namespace example { struct Options { - int num_concurrent_sessions = 10; // The number of concurrent sessions + int num_concurrent_sessions = 1; // The number of concurrent sessions int num_concurrent_steps = 10; // The number of concurrent steps int num_iterations = 100; // Each step repeats this many times bool use_gpu = false; // Whether to use gpu in the training @@ -108,10 +108,11 @@ void ConcurrentSteps(const Options* opts, int session_index) { // Spawn M threads for M concurrent steps. const int M = opts->num_concurrent_steps; - thread::ThreadPool step_threads(Env::Default(), "trainer", M); + std::unique_ptr<thread::ThreadPool> step_threads( + new thread::ThreadPool(Env::Default(), "trainer", M)); for (int step = 0; step < M; ++step) { - step_threads.Schedule([&session, opts, session_index, step]() { + step_threads->Schedule([&session, opts, session_index, step]() { // Randomly initialize the input. Tensor x(DT_FLOAT, TensorShape({2, 1})); auto x_flat = x.flat<float>(); @@ -139,12 +140,19 @@ void ConcurrentSteps(const Options* opts, int session_index) { }); } + // Delete the threadpool, thus waiting for all threads to complete. + step_threads.reset(nullptr); TF_CHECK_OK(session->Close()); } void ConcurrentSessions(const Options& opts) { // Spawn N threads for N concurrent sessions. const int N = opts.num_concurrent_sessions; + + // At the moment our Session implementation only allows + // one concurrently computing Session on GPU.
+ CHECK_EQ(1, N) << "Currently can only have one concurrent session."; + thread::ThreadPool session_threads(Env::Default(), "trainer", N); for (int i = 0; i < N; ++i) { session_threads.Schedule(std::bind(&ConcurrentSteps, &opts, i)); diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md index fe8e0137676..6e20b3e11ca 100644 --- a/tensorflow/g3doc/get_started/os_setup.md +++ b/tensorflow/g3doc/get_started/os_setup.md @@ -734,22 +734,6 @@ Every time you change the Cuda library paths you need to run this step again before you invoke the bazel build command. For the cuDNN libraries, use '6.5' for R2, '7.0' for R3, and '4.0.4' for R4-RC. -#### Build your target with GPU support -From the root of your source tree, run: - -```bash -$ bazel build -c opt --config=cuda //tensorflow/cc:tutorials_example_trainer - -$ bazel-bin/tensorflow/cc/tutorials_example_trainer --use_gpu -# Lots of output. This tutorial iteratively calculates the major eigenvalue of -# a 2x2 matrix, on GPU. The last few lines look like this. -000009/000005 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427] -000006/000001 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427] -000009/000009 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427] -``` - -Note that "--config=cuda" is needed to enable the GPU support. - #### Known issues * Although it is possible to build both Cuda and non-Cuda configs under the same