diff --git a/eigen.BUILD b/eigen.BUILD
index 8a699f6aa84..8ce28ac0766 100644
--- a/eigen.BUILD
+++ b/eigen.BUILD
@@ -62,8 +62,6 @@ cc_library(
         # This define (mostly) guarantees we don't link any problematic
         # code. We use it, but we do not rely on it, as evidenced above.
         "EIGEN_MPL2_ONLY",
-        # TODO(jart): Use EIGEN_USE_NONBLOCKING_THREAD_POOL but first add an
-        #             eigen_initialize.cc file and alwayslink=1.
     ],
     includes = ["."],
     visibility = ["//visibility:public"],
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index 5b9517a0e55..7cee39046ef 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -105,6 +105,7 @@ filegroup(
         "//tensorflow/contrib/framework:all_files",
         "//tensorflow/contrib/graph_editor:all_files",
         "//tensorflow/contrib/grid_rnn:all_files",
+        "//tensorflow/contrib/integrate:all_files",
         "//tensorflow/contrib/layers:all_files",
         "//tensorflow/contrib/layers/kernels:all_files",
         "//tensorflow/contrib/learn:all_files",
@@ -148,7 +149,6 @@ filegroup(
         "//tensorflow/examples/image_retraining:all_files",
         "//tensorflow/examples/label_image:all_files",
         "//tensorflow/examples/learn:all_files",
-        "//tensorflow/examples/skflow:all_files",
         "//tensorflow/examples/tutorials/estimators:all_files",
         "//tensorflow/examples/tutorials/mnist:all_files",
         "//tensorflow/examples/tutorials/word2vec:all_files",
diff --git a/tensorflow/cc/BUILD b/tensorflow/cc/BUILD
index a804955a36e..39d519a707c 100644
--- a/tensorflow/cc/BUILD
+++ b/tensorflow/cc/BUILD
@@ -264,6 +264,36 @@ tf_cc_test(
     ],
 )
 
+cc_library(
+    name = "nn_grad",
+    srcs = ["gradients/nn_grad.cc"],
+    deps = [
+        ":cc_ops",
+        ":grad_op_registry",
+        ":ops",
+        ":scope",
+        "//tensorflow/core:core_cpu",
+        "//tensorflow/core:framework",
+    ],
+)
+
+tf_cc_test(
+    name = "gradients_nn_grad_test",
+    srcs = ["gradients/nn_grad_test.cc"],
+    deps = [
+        ":cc_ops",
+        ":grad_op_registry",
+        ":grad_testutil",
+        ":gradient_checker",
+        ":nn_grad",
+        ":testutil",
+        "//tensorflow/core:lib_internal",
+        "//tensorflow/core:test",
+        "//tensorflow/core:test_main",
+        "//tensorflow/core:testlib",
+    ],
+)
+
 tf_gen_op_wrappers_cc(
     name = "cc_ops",
     op_lib_names = [
diff --git a/tensorflow/cc/framework/gradient_checker.cc b/tensorflow/cc/framework/gradient_checker.cc
index a729bdd24d3..57b955454e8 100644
--- a/tensorflow/cc/framework/gradient_checker.cc
+++ b/tensorflow/cc/framework/gradient_checker.cc
@@ -110,20 +110,15 @@ Status ComputeNumericJacobianTranspose(const Scope& scope, const ops::Output& x,
   return Status::OK();
 }
 
-}  // namespace
-
 template <typename T>
-Status ComputeGradientError(const Scope& scope, const ops::Output& x,
-                            const TensorShape& x_shape, const ops::Output& y,
-                            const TensorShape& y_shape, T* max_error) {
+Status ComputeGradientErrorInternal(const Scope& scope, const ops::Output& x,
+                                    const TensorShape& x_shape,
+                                    const ops::Output& y,
+                                    const TensorShape& y_shape, Tensor* x_data,
+                                    T* max_error) {
   const int64 x_size = x_shape.num_elements();
   const int64 y_size = y_shape.num_elements();
 
-  // Initialize 'x_data' to random values.
-  Tensor x_data(x.type(), x_shape);
-  auto x_data_flat = x_data.flat<T>();
-  x_data_flat.setRandom();
-
   // Initialize theoretical Jacobian to zeros.
   Tensor jacobian_t(x.type(), {x_size, y_size});
   auto jacobian_t_flat = jacobian_t.flat<T>();
@@ -131,7 +126,7 @@ Status ComputeGradientError(const Scope& scope, const ops::Output& x,
 
   // Compute theoretical Jacobian.
   TF_RETURN_IF_ERROR(ComputeTheoreticalJacobianTranspose<T>(
-      scope, x, x_shape, x_data, y, y_shape, &jacobian_t));
+      scope, x, x_shape, *x_data, y, y_shape, &jacobian_t));
 
   // Initialize numeric Jacobian to zeros.
   Tensor jacobian_n(x.type(), {x_size, y_size});
@@ -140,7 +135,7 @@ Status ComputeGradientError(const Scope& scope, const ops::Output& x,
 
   // Compute numeric Jacobian.
   TF_RETURN_IF_ERROR(ComputeNumericJacobianTranspose<T>(
-      scope, x, x_shape, y, y_shape, 1e-3, &x_data, &jacobian_n));
+      scope, x, x_shape, y, y_shape, 1e-3, x_data, &jacobian_n));
 
   // Compute the maximum error between theoretical and numeric Jacobians.
   *max_error = 0.0;
@@ -154,10 +149,39 @@ Status ComputeGradientError(const Scope& scope, const ops::Output& x,
   return Status::OK();
 }
 
+}  // namespace
+
+template <typename T>
+Status ComputeGradientError(const Scope& scope, const ops::Output& x,
+                            const TensorShape& x_shape, const ops::Output& y,
+                            const TensorShape& y_shape, T* max_error) {
+  // Initialize 'x_data' to random values.
+  Tensor x_data(x.type(), x_shape);
+  auto x_data_flat = x_data.flat<T>();
+  x_data_flat.setRandom();
+  // Compute gradient error.
+  return ComputeGradientErrorInternal(scope, x, x_shape, y, y_shape, &x_data,
+                                      max_error);
+}
+
+template <typename T>
+Status ComputeGradientError(const Scope& scope, const ops::Output& x,
+                            const Tensor& x_init_value, const ops::Output& y,
+                            const TensorShape& y_shape, T* max_error) {
+  // Initialize 'x_data' from 'x_init_value'.
+  Tensor x_data(x_init_value);
+  // Compute gradient error.
+  return ComputeGradientErrorInternal(scope, x, x_data.shape(), y, y_shape,
+                                      &x_data, max_error);
+}
+
 #define INSTANTIATE_GRAD_ERR_TYPE(T)                                        \
   template Status ComputeGradientError<T>(                                  \
       const Scope& scope, const ops::Output& x, const TensorShape& x_shape, \
-      const ops::Output& y, const TensorShape& y_shape, T* max_error)
+      const ops::Output& y, const TensorShape& y_shape, T* max_error);      \
+  template Status ComputeGradientError<T>(                                  \
+      const Scope& scope, const ops::Output& x, const Tensor& x_init_value, \
+      const ops::Output& y, const TensorShape& y_shape, T* max_error);
 
 INSTANTIATE_GRAD_ERR_TYPE(float);
 INSTANTIATE_GRAD_ERR_TYPE(double);
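The checker builds a theoretical Jacobian from registered gradients and a central-difference numeric Jacobian (perturbation 1e-3 above), then reports the maximum absolute difference. A minimal NumPy sketch of that comparison, purely illustrative (the `numeric_jacobian` helper and the `tanh` target are not part of this change):

```python
import numpy as np

def numeric_jacobian(f, x, eps=1e-3):
    # Central differences, in the spirit of ComputeNumericJacobianTranspose:
    # row i holds df/dx_i, i.e. the transposed Jacobian layout.
    y = f(x)
    jac = np.zeros((x.size, y.size))
    for i in range(x.size):
        xp, xm = x.copy(), x.copy()
        xp.flat[i] += eps
        xm.flat[i] -= eps
        jac[i] = ((f(xp) - f(xm)) / (2 * eps)).ravel()
    return jac

x = np.random.randn(4)
theoretical = np.diag(1.0 - np.tanh(x) ** 2)  # analytic d(tanh)/dx
max_error = np.abs(numeric_jacobian(np.tanh, x) - theoretical).max()
print(max_error)  # ~1e-6 or smaller when the analytic gradient is right
```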
diff --git a/tensorflow/cc/framework/gradient_checker.h b/tensorflow/cc/framework/gradient_checker.h
index 57e2154b68a..80876afe5c7 100644
--- a/tensorflow/cc/framework/gradient_checker.h
+++ b/tensorflow/cc/framework/gradient_checker.h
@@ -30,6 +30,12 @@ Status ComputeGradientError(const Scope& scope, const ops::Output& x,
                             const TensorShape& x_shape, const ops::Output& y,
                             const TensorShape& y_shape, T* max_error);
 
+// Overload of ComputeGradientError which takes an initial value for 'x'.
+template <typename T>
+Status ComputeGradientError(const Scope& scope, const ops::Output& x,
+                            const Tensor& x_init_value, const ops::Output& y,
+                            const TensorShape& y_shape, T* max_error);
+
 }  // namespace tensorflow
 
 #endif  // THIRD_PARTY_TENSORFLOW_CC_FRAMEWORK_GRADIENT_CHECKER_H_
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
new file mode 100644
index 00000000000..657585e36fc
--- /dev/null
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -0,0 +1,77 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/ops/nn_ops.h"
+#include "tensorflow/cc/ops/standard_ops.h"
+
+#include "tensorflow/cc/framework/grad_op_registry.h"
+
+namespace tensorflow {
+namespace ops {
+namespace {
+
+Status SoftmaxGrad(const Scope& scope, const Operation& op,
+                   const std::vector<Output>& grad_inputs,
+                   std::vector<Output>* grad_outputs) {
+  // Softmax gradient function.
+  // p = softmax(x) maps from [batch, n] to [batch, n], so for each example
+  // the Jacobian dp/dx is an n x n matrix:
+  // dp/dx = [dp0/dx0   ... dp0/dxn-1  ]
+  //         [  ...           ...      ]
+  //         [dpn-1/dx0 ... dpn-1/dxn-1]
+  // dL/dx = dp/dx * dL/dy
+  //
+  // Using alternative formula:
+  // dL/dx = dL/dy * y - sum(dL/dy * y) * y
+  //    = (dL/dy - sum(dL/dy * y)) * y
+  auto y = op.output(0);
+  auto dyy = Mul(scope, grad_inputs[0], y);
+  auto sum = Reshape(scope, Sum(scope, dyy, {1}), {-1, 1});
+  auto sub = Sub(scope, grad_inputs[0], sum);
+  auto dx = Mul(scope, sub, y);
+  grad_outputs->push_back(dx);
+  return scope.status();
+}
+REGISTER_GRADIENT_OP("Softmax", SoftmaxGrad);
+
+Status ReluGradHelper(const Scope& scope, const Operation& op,
+                      const std::vector<Output>& grad_inputs,
+                      std::vector<Output>* grad_outputs) {
+  auto dx = ReluGrad(scope, grad_inputs[0], op.input(0));
+  grad_outputs->push_back(dx);
+  return scope.status();
+}
+REGISTER_GRADIENT_OP("Relu", ReluGradHelper);
+
+Status Relu6GradHelper(const Scope& scope, const Operation& op,
+                       const std::vector<Output>& grad_inputs,
+                       std::vector<Output>* grad_outputs) {
+  auto dx = Relu6Grad(scope, grad_inputs[0], op.input(0));
+  grad_outputs->push_back(dx);
+  return scope.status();
+}
+REGISTER_GRADIENT_OP("Relu6", Relu6GradHelper);
+
+Status EluGradHelper(const Scope& scope, const Operation& op,
+                     const std::vector<Output>& grad_inputs,
+                     std::vector<Output>* grad_outputs) {
+  auto dx = EluGrad(scope, grad_inputs[0], op.output(0));
+  grad_outputs->push_back(dx);
+  return scope.status();
+}
+REGISTER_GRADIENT_OP("Elu", EluGradHelper);
+
+}  // anonymous namespace
+}  // namespace ops
+}  // namespace tensorflow
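The closed form used in `SoftmaxGrad` can be checked against the full per-example Jacobian y_i * (delta_ij - y_j). A standalone NumPy sanity check (not part of the patch):

```python
import numpy as np

def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

x = np.random.randn(3, 5)
g = np.random.randn(3, 5)  # upstream gradient dL/dy
y = softmax(x)

# Closed form from SoftmaxGrad: (dL/dy - sum(dL/dy * y)) * y.
dx_closed = (g - (g * y).sum(axis=-1, keepdims=True)) * y

# Reference: multiply by the full Jacobian y_i * (delta_ij - y_j) per example.
dx_full = np.stack([(np.diag(y[b]) - np.outer(y[b], y[b])).dot(g[b])
                    for b in range(x.shape[0])])

assert np.allclose(dx_closed, dx_full)
```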
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
new file mode 100644
index 00000000000..ef0a2f9626b
--- /dev/null
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -0,0 +1,91 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/framework/grad_op_registry.h"
+#include "tensorflow/cc/framework/gradient_checker.h"
+#include "tensorflow/cc/framework/testutil.h"
+#include "tensorflow/cc/gradients/grad_testutil.h"
+#include "tensorflow/cc/ops/standard_ops.h"
+#include "tensorflow/core/framework/tensor_testutil.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/random/random.h"
+
+namespace tensorflow {
+using namespace ops;  // NOLINT(build/namespaces)
+
+namespace {
+
+class NNGradTest : public ::testing::Test {
+ protected:
+  NNGradTest() : scope_(Scope::NewRootScope()) {}
+
+  void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
+               const TensorShape& y_shape) {
+    float max_error;
+    TF_ASSERT_OK(
+        ComputeGradientError(scope_, x, x_shape, y, y_shape, &max_error));
+    EXPECT_LT(max_error, 1e-4);
+  }
+
+  void RunTest(const Output& x, const Tensor& x_init_value, const Output& y,
+               const TensorShape& y_shape) {
+    float max_error;
+    TF_ASSERT_OK(
+        ComputeGradientError(scope_, x, x_init_value, y, y_shape, &max_error));
+    EXPECT_LT(max_error, 1e-4);
+  }
+
+  Scope scope_;
+};
+
+TEST_F(NNGradTest, SoftmaxGrad) {
+  TensorShape shape({32, 10});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
+  auto y = Softmax(scope_, x);
+  RunTest(x, shape, y, shape);
+}
+
+TEST_F(NNGradTest, ReluGrad) {
+  TensorShape shape({5, 2});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
+  auto y = Relu(scope_, x);
+  // Avoid input values where ReLU gradient is not well defined (around zero).
+  Tensor x_init_value = test::AsTensor<float>(
+      {-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9}, {5, 2});
+  RunTest(x, x_init_value, y, shape);
+}
+
+TEST_F(NNGradTest, Relu6Grad) {
+  TensorShape shape({5, 2});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
+  auto y = Relu6(scope_, x);
+  // Avoid input values where the Relu6 gradient is not well defined (around
+  // zero and six).
+  Tensor x_init_value = test::AsTensor<float>(
+      {-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9}, {5, 2});
+  RunTest(x, x_init_value, y, shape);
+}
+
+TEST_F(NNGradTest, EluGrad) {
+  TensorShape shape({5, 2});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
+  auto y = Elu(scope_, x);
+  Tensor x_init_value = test::AsTensor<float>(
+      {-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9}, {5, 2});
+  RunTest(x, x_init_value, y, shape);
+}
+
+}  // namespace
+}  // namespace tensorflow
diff --git a/tensorflow/contrib/BUILD b/tensorflow/contrib/BUILD
index be325ba2f19..704de2605ec 100644
--- a/tensorflow/contrib/BUILD
+++ b/tensorflow/contrib/BUILD
@@ -23,6 +23,7 @@ py_library(
         "//tensorflow/contrib/framework:framework_py",
         "//tensorflow/contrib/graph_editor:graph_editor_py",
         "//tensorflow/contrib/grid_rnn:grid_rnn_py",
+        "//tensorflow/contrib/integrate:integrate_py",
         "//tensorflow/contrib/layers:layers_py",
         "//tensorflow/contrib/learn",
         "//tensorflow/contrib/linear_optimizer:sdca_ops_py",
diff --git a/tensorflow/contrib/__init__.py b/tensorflow/contrib/__init__.py
index dfeacba6d4d..0ded847cfaf 100644
--- a/tensorflow/contrib/__init__.py
+++ b/tensorflow/contrib/__init__.py
@@ -28,6 +28,7 @@ from tensorflow.contrib import factorization
 from tensorflow.contrib import framework
 from tensorflow.contrib import graph_editor
 from tensorflow.contrib import grid_rnn
+from tensorflow.contrib import integrate
 from tensorflow.contrib import layers
 from tensorflow.contrib import learn
 from tensorflow.contrib import linear_optimizer
diff --git a/tensorflow/contrib/bayesflow/examples/reinforce_simple/reinforce_simple_example.py b/tensorflow/contrib/bayesflow/examples/reinforce_simple/reinforce_simple_example.py
index d9ff84a466c..85f75f74033 100644
--- a/tensorflow/contrib/bayesflow/examples/reinforce_simple/reinforce_simple_example.py
+++ b/tensorflow/contrib/bayesflow/examples/reinforce_simple/reinforce_simple_example.py
@@ -76,7 +76,7 @@ def build_split_apply_merge_model():
 
   # REINFORCE forward step
   route_selection = st.StochasticTensor(
-      distributions.Categorical, logits=logits)
+      distributions.Categorical(logits=logits))
 
   # Accessing route_selection as a Tensor below forces a sample of
   # the Categorical distribution based on its logits.
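This is the new `StochasticTensor` calling convention introduced by this change (see `stochastic_tensor.py` below): a constructed `Distribution` instance replaces the old class-plus-kwargs form. Schematically:

```python
# Before: distribution class plus **dist_args.
#   st.StochasticTensor(distributions.Categorical, logits=logits)
# After: a constructed Distribution instance.
route_selection = st.StochasticTensor(distributions.Categorical(logits=logits))
```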
diff --git a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_gradient_estimators_test.py b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_gradient_estimators_test.py
index e1edbc908c5..2a2b4218303 100644
--- a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_gradient_estimators_test.py
+++ b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_gradient_estimators_test.py
@@ -22,6 +22,7 @@ import tensorflow as tf
 
 st = tf.contrib.bayesflow.stochastic_tensor
 sge = tf.contrib.bayesflow.stochastic_gradient_estimators
+dists = tf.contrib.distributions
 
 
 class StochasticGradientEstimatorsTest(tf.test.TestCase):
@@ -31,7 +32,7 @@ class StochasticGradientEstimatorsTest(tf.test.TestCase):
     self._final_loss = tf.constant(3.2)
 
   def _testScoreFunction(self, loss_fn, expected):
-    x = st.BernoulliTensor(p=self._p, loss_fn=loss_fn)
+    x = st.StochasticTensor(dists.Bernoulli(p=self._p), loss_fn=loss_fn)
     sf = x.loss(self._final_loss)
     with self.test_session() as sess:
       sess.run(tf.initialize_all_variables())
@@ -62,8 +63,8 @@ class StochasticGradientEstimatorsTest(tf.test.TestCase):
   def testScoreFunctionWithMeanBaseline(self):
     ema_decay = 0.8
     num_steps = 6
-    x = st.BernoulliTensor(
-        p=self._p,
+    x = st.StochasticTensor(
+        dists.Bernoulli(p=self._p),
         loss_fn=sge.get_score_function_with_baseline(
             sge.get_mean_baseline(ema_decay)))
     sf = x.loss(self._final_loss)
@@ -98,12 +99,12 @@ class StochasticGradientEstimatorsTest(tf.test.TestCase):
 
   def testScoreFunctionWithMeanBaselineHasUniqueVarScope(self):
     ema_decay = 0.8
-    x = st.BernoulliTensor(
-        p=self._p,
+    x = st.StochasticTensor(
+        dists.Bernoulli(p=self._p),
         loss_fn=sge.get_score_function_with_baseline(
             sge.get_mean_baseline(ema_decay)))
-    y = st.BernoulliTensor(
-        p=self._p,
+    y = st.StochasticTensor(
+        dists.Bernoulli(p=self._p),
         loss_fn=sge.get_score_function_with_baseline(
             sge.get_mean_baseline(ema_decay)))
     sf_x = x.loss(self._final_loss)
diff --git a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py
index eae678f365b..de5c5c82b82 100644
--- a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py
+++ b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py
@@ -39,9 +39,9 @@ class TestSurrogateLosses(tf.test.TestCase):
       mu = [0.0, 0.1, 0.2]
       sigma = tf.constant([1.1, 1.2, 1.3])
       with st.value_type(st.SampleAndReshapeValue()):
-        prior = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
+        prior = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))
         likelihood = st.StochasticTensor(
-            distributions.Normal, mu=prior, sigma=sigma)
+            distributions.Normal(mu=prior, sigma=sigma))
         self.assertTrue(prior.distribution.is_reparameterized)
         self.assertTrue(likelihood.distribution.is_reparameterized)
 
@@ -77,10 +77,9 @@ class TestSurrogateLosses(tf.test.TestCase):
       mu = tf.constant([0.0, 0.1, 0.2])
       sigma = tf.constant([1.1, 1.2, 1.3])
       with st.value_type(st.SampleAndReshapeValue()):
-        prior = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
-        likelihood = st.StochasticTensor(
-            NormalNotParam, mu=prior, sigma=sigma)
-        prior_2 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
+        prior = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma))
+        likelihood = st.StochasticTensor(NormalNotParam(mu=prior, sigma=sigma))
+        prior_2 = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma))
 
       loss = tf.square(tf.identity(likelihood) - mu)
       part_loss = tf.square(tf.identity(prior) - mu)
@@ -155,9 +154,7 @@ class TestSurrogateLosses(tf.test.TestCase):
       mu = tf.constant([0.0, 0.1, 0.2])
       sigma = tf.constant([1.1, 1.2, 1.3])
       with st.value_type(st.SampleAndReshapeValue()):
-        dt = st.StochasticTensor(NormalNotParam,
-                                 mu=mu,
-                                 sigma=sigma,
+        dt = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma),
                                  loss_fn=None)
         self.assertEqual(None, dt.loss(tf.constant([2.0])))
 
@@ -166,8 +163,8 @@ class TestSurrogateLosses(tf.test.TestCase):
       mu = tf.constant([0.0, 0.1, 0.2])
       sigma = tf.constant([1.1, 1.2, 1.3])
       with st.value_type(st.SampleAndReshapeValue()):
-        dt1 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
-        dt2 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
+        dt1 = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma))
+        dt2 = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma))
         loss = tf.square(tf.identity(dt1)) + 10. + dt2
 
         sl_all = sg.surrogate_loss([loss])
@@ -186,8 +183,8 @@ class TestSurrogateLosses(tf.test.TestCase):
 class StochasticDependenciesMapTest(tf.test.TestCase):
 
   def testBuildsMapOfUpstreamNodes(self):
-    dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
-    dt2 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
+    dt1 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.))
+    dt2 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.))
     out1 = dt1.value() + 1.
     out2 = dt2.value() + 2.
     x = out1 + out2
@@ -197,11 +194,11 @@ class StochasticDependenciesMapTest(tf.test.TestCase):
     self.assertEqual(dep_map[dt2], set([x, y]))
 
   def testHandlesStackedStochasticNodes(self):
-    dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
+    dt1 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.))
     out1 = dt1.value() + 1.
-    dt2 = st.StochasticTensor(distributions.Normal, mu=out1, sigma=1.)
+    dt2 = st.StochasticTensor(distributions.Normal(mu=out1, sigma=1.))
     x = dt2.value() + 2.
-    dt3 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
+    dt3 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.))
     y = dt3.value() * 3.
     dep_map = sg._stochastic_dependencies_map([x, y])
     self.assertEqual(dep_map[dt1], set([x]))
@@ -209,10 +206,10 @@ class StochasticDependenciesMapTest(tf.test.TestCase):
     self.assertEqual(dep_map[dt3], set([y]))
 
   def testTraversesControlInputs(self):
-    dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
+    dt1 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.))
     logits = dt1.value() * 3.
-    dt2 = st.StochasticTensor(distributions.Bernoulli, logits=logits)
-    dt3 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
+    dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits))
+    dt3 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.))
     x = dt3.value()
     y = tf.ones((2, 2)) * 4.
     z = tf.ones((2, 2)) * 3.
diff --git a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py
index 95c6d39a617..b7bd2adfe8a 100644
--- a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py
+++ b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py
@@ -35,19 +35,19 @@ class StochasticTensorTest(tf.test.TestCase):
       sigma2 = tf.constant([0.1, 0.2, 0.3])
 
       prior_default = st.StochasticTensor(
-          distributions.Normal, mu=mu, sigma=sigma)
+          distributions.Normal(mu=mu, sigma=sigma))
       self.assertTrue(
           isinstance(prior_default.value_type, st.SampleAndReshapeValue))
       prior_0 = st.StochasticTensor(
-          distributions.Normal, mu=mu, sigma=sigma,
+          distributions.Normal(mu=mu, sigma=sigma),
           dist_value_type=st.SampleAndReshapeValue())
       self.assertTrue(isinstance(prior_0.value_type, st.SampleAndReshapeValue))
 
       with st.value_type(st.SampleAndReshapeValue()):
-        prior = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
+        prior = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))
         self.assertTrue(isinstance(prior.value_type, st.SampleAndReshapeValue))
         likelihood = st.StochasticTensor(
-            distributions.Normal, mu=prior, sigma=sigma2)
+            distributions.Normal(mu=prior, sigma=sigma2))
         self.assertTrue(
             isinstance(likelihood.value_type, st.SampleAndReshapeValue))
 
@@ -77,7 +77,7 @@ class StochasticTensorTest(tf.test.TestCase):
       sigma = tf.constant([1.1, 1.2, 1.3])
 
       with st.value_type(st.MeanValue()):
-        prior = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
+        prior = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))
         self.assertTrue(isinstance(prior.value_type, st.MeanValue))
 
       prior_mean = prior.mean()
@@ -94,7 +94,8 @@ class StochasticTensorTest(tf.test.TestCase):
 
       with st.value_type(st.SampleAndReshapeValue()):
         prior_single = st.StochasticTensor(
-            distributions.Normal, mu=mu, sigma=sigma)
+            distributions.Normal(
+                mu=mu, sigma=sigma))
 
       prior_single_value = prior_single.value()
       self.assertEqual(prior_single_value.get_shape(), (2, 3))
@@ -104,7 +105,7 @@ class StochasticTensorTest(tf.test.TestCase):
 
       with st.value_type(st.SampleAndReshapeValue(n=2)):
         prior_double = st.StochasticTensor(
-            distributions.Normal, mu=mu, sigma=sigma)
+            distributions.Normal(mu=mu, sigma=sigma))
 
       prior_double_value = prior_double.value()
       self.assertEqual(prior_double_value.get_shape(), (4, 3))
@@ -119,7 +120,7 @@ class StochasticTensorTest(tf.test.TestCase):
 
       with st.value_type(st.SampleValue()):
         prior_single = st.StochasticTensor(
-            distributions.Normal, mu=mu, sigma=sigma)
+            distributions.Normal(mu=mu, sigma=sigma))
         self.assertTrue(isinstance(prior_single.value_type, st.SampleValue))
 
       prior_single_value = prior_single.value()
@@ -130,7 +131,7 @@ class StochasticTensorTest(tf.test.TestCase):
 
       with st.value_type(st.SampleValue(n=2)):
         prior_double = st.StochasticTensor(
-            distributions.Normal, mu=mu, sigma=sigma)
+            distributions.Normal(mu=mu, sigma=sigma))
 
       prior_double_value = prior_double.value()
       self.assertEqual(prior_double_value.get_shape(), (2, 2, 3))
@@ -143,9 +144,9 @@ class StochasticTensorTest(tf.test.TestCase):
       mu = [0.0, -1.0, 1.0]
       sigma = tf.constant([1.1, 1.2, 1.3])
       with st.value_type(st.MeanValue()):
-        prior = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
+        prior = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))
         entropy = prior.entropy()
-        deep_entropy = prior.entropy()
+        deep_entropy = prior.distribution.entropy()
         expected_deep_entropy = distributions.Normal(
             mu=mu, sigma=sigma).entropy()
         entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
@@ -159,17 +160,15 @@ class StochasticTensorTest(tf.test.TestCase):
 
       # With default
       with st.value_type(st.MeanValue(stop_gradient=True)):
-        dt = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
+        dt = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))
       loss = dt.loss([tf.constant(2.0)])
       self.assertTrue(loss is not None)
-      self.assertAllClose(dt.distribution.log_prob(mu).eval() * 2.0,
-                          loss.eval())
+      self.assertAllClose(
+          dt.distribution.log_prob(mu).eval() * 2.0, loss.eval())
 
       # With passed-in loss_fn.
       dt = st.StochasticTensor(
-          distributions.Normal,
-          mu=mu,
-          sigma=sigma,
+          distributions.Normal(mu=mu, sigma=sigma),
           dist_value_type=st.MeanValue(stop_gradient=True),
           loss_fn=sge.get_score_function_with_constant_baseline(
               baseline=tf.constant(8.0)))
@@ -204,7 +203,7 @@ class ObservedStochasticTensorTest(tf.test.TestCase):
       sigma = tf.constant([1.1, 1.2, 1.3])
       obs = tf.zeros((2, 3))
       z = st.ObservedStochasticTensor(
-          distributions.Normal, mu=mu, sigma=sigma, value=obs)
+          distributions.Normal(mu=mu, sigma=sigma), value=obs)
       [obs_val, z_val] = sess.run([obs, z.value()])
       self.assertAllEqual(obs_val, z_val)
 
@@ -216,13 +215,13 @@ class ObservedStochasticTensorTest(tf.test.TestCase):
     sigma = tf.placeholder(tf.float32)
     obs = tf.placeholder(tf.float32)
     z = st.ObservedStochasticTensor(
-        distributions.Normal, mu=mu, sigma=sigma, value=obs)
+        distributions.Normal(mu=mu, sigma=sigma), value=obs)
 
     mu2 = tf.placeholder(tf.float32, shape=[None])
     sigma2 = tf.placeholder(tf.float32, shape=[None])
     obs2 = tf.placeholder(tf.float32, shape=[None, None])
     z2 = st.ObservedStochasticTensor(
-        distributions.Normal, mu=mu2, sigma=sigma2, value=obs2)
+        distributions.Normal(mu=mu2, sigma=sigma2), value=obs2)
 
     coll = tf.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
     self.assertEqual(coll, [z, z2])
@@ -230,27 +229,19 @@ class ObservedStochasticTensorTest(tf.test.TestCase):
   def testConstructionErrors(self):
     mu = [0., 0.]
     sigma = [1., 1.]
-    self.assertRaises(ValueError, st.ObservedStochasticTensor,
-                      distributions.Normal, mu=mu, sigma=sigma,
-                      value=tf.zeros((3,)))
-    self.assertRaises(ValueError, st.ObservedStochasticTensor,
-                      distributions.Normal, mu=mu, sigma=sigma,
-                      value=tf.zeros((3, 1)))
-    self.assertRaises(ValueError, st.ObservedStochasticTensor,
-                      distributions.Normal, mu=mu, sigma=sigma,
-                      value=tf.zeros((1, 2), dtype=tf.int32))
-
-
-class AutomaticDistributionImportTest(tf.test.TestCase):
-
-  def testImportNormal(self):
-    self.assertTrue(hasattr(st, "NormalTensor"))
-    self.assertTrue(callable(st.NormalTensor))
-    norm = st.NormalTensor(mu=0.0, sigma=1.0)
-    self.assertEqual(type(norm).__name__, "NormalTensor")
-    self.assertTrue(isinstance(norm, st.NormalTensor))
-    self.assertTrue(isinstance(norm, st.StochasticTensor))
-
-
-if __name__ == "__main__":
-  tf.test.main()
+    self.assertRaises(
+        ValueError,
+        st.ObservedStochasticTensor,
+        distributions.Normal(mu=mu, sigma=sigma),
+        value=tf.zeros((3,)))
+    self.assertRaises(
+        ValueError,
+        st.ObservedStochasticTensor,
+        distributions.Normal(mu=mu, sigma=sigma),
+        value=tf.zeros((3, 1)))
+    self.assertRaises(
+        ValueError,
+        st.ObservedStochasticTensor,
+        distributions.Normal(mu=mu, sigma=sigma),
+        value=tf.zeros(
+            (1, 2), dtype=tf.int32))
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/tensorflow/contrib/bayesflow/python/kernel_tests/variational_inference_test.py b/tensorflow/contrib/bayesflow/python/kernel_tests/variational_inference_test.py
index 336bf981de7..cad42067217 100644
--- a/tensorflow/contrib/bayesflow/python/kernel_tests/variational_inference_test.py
+++ b/tensorflow/contrib/bayesflow/python/kernel_tests/variational_inference_test.py
@@ -44,7 +44,7 @@ def mini_vae():
   x = [[-6., 3., 6.], [-8., 4., 8.]]
   prior = distributions.Normal(mu=0., sigma=1.)
   variational = st.StochasticTensor(
-      distributions.Normal, mu=inference_net(x, 1), sigma=1.)
+      distributions.Normal(mu=inference_net(x, 1), sigma=1.))
   vi.register_prior(variational, prior)
   px = distributions.Normal(mu=generative_net(variational, 3), sigma=1.)
   log_likelihood = tf.reduce_sum(px.log_prob(x), 1)
@@ -101,7 +101,7 @@ class VariationalInferenceTest(tf.test.TestCase):
 
     prior = distributions.Bernoulli(0.5)
     variational = st.StochasticTensor(
-        NormalNoEntropy, mu=inference_net(x, 1), sigma=1.)
+        NormalNoEntropy(mu=inference_net(x, 1), sigma=1.))
     vi.register_prior(variational, prior)
     px = distributions.Normal(mu=generative_net(variational, 3), sigma=1.)
     log_likelihood = tf.reduce_sum(px.log_prob(x), 1)
diff --git a/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py b/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
index 06661059ffd..eaee3344e5d 100644
--- a/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
+++ b/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
@@ -44,7 +44,6 @@ from __future__ import print_function
 import abc
 import collections
 import contextlib
-import inspect
 import threading
 
 import six
@@ -79,10 +78,6 @@ class BaseStochasticTensor(object):
   def graph(self):
     pass
 
-  @abc.abstractproperty
-  def input_dict(self):
-    pass
-
   @abc.abstractmethod
   def value(self, name=None):
     pass
@@ -120,6 +115,7 @@ class BaseStochasticTensor(object):
 # pylint: disable=protected-access
 ops.register_tensor_conversion_function(
     BaseStochasticTensor, BaseStochasticTensor._tensor_conversion_function)
+
 # pylint: enable=protected-access
 
 
@@ -223,8 +219,8 @@ class SampleAndReshapeValue(_StochasticValueType):
   st_value = st.value()
   assertEqual(st_value.get_shape(), (4, 3))
 
-  dt_value_val = sess.run([st_value])[0]  # or e.g. run([tf.identity(st)])[0]
-  assertEqual(dt_value_val.shape, (4, 3))
+  st_value_val = sess.run([st_value])[0]  # or e.g. run([tf.identity(st)])[0]
+  assertEqual(st_value_val.shape, (4, 3))
   ```
   """
 
@@ -312,17 +308,16 @@ class StochasticTensor(BaseStochasticTensor):
   """StochasticTensor is a BaseStochasticTensor backed by a distribution."""
 
   def __init__(self,
-               dist_cls,
-               name=None,
+               dist,
+               name="StochasticTensor",
                dist_value_type=None,
-               loss_fn=sge.score_function,
-               **dist_args):
+               loss_fn=sge.score_function):
     """Construct a `StochasticTensor`.
 
-    `StochasticTensor` will instantiate a distribution from `dist_cls` and
-    `dist_args` and its `value` method will return the same value each time
-    it is called. What `value` is returned is controlled by the
-    `dist_value_type` (defaults to `SampleAndReshapeValue`).
+    `StochasticTensor` is backed by the `dist` distribution and its `value`
+    method will return the same value each time it is called. What `value` is
+    returned is controlled by the `dist_value_type` (defaults to
+    `SampleAndReshapeValue`).
 
     Some distributions' sample functions are not differentiable (e.g. a sample
     from a discrete distribution like a Bernoulli) and so to differentiate
@@ -338,28 +333,25 @@ class StochasticTensor(BaseStochasticTensor):
     `MeanValueType` or if `loss_fn=None`.
 
     Args:
-      dist_cls: a `Distribution` class.
+      dist: an instance of `Distribution`.
       name: a name for this `StochasticTensor` and its ops.
       dist_value_type: a `_StochasticValueType`, which will determine what the
           `value` of this `StochasticTensor` will be. If not provided, the
           value type set with the `value_type` context manager will be used.
-      loss_fn: callable that takes `(st, st.value(), influenced_loss)`, where
+      loss_fn: callable that takes
+          `(st, st.value(), influenced_loss)`, where
           `st` is this `StochasticTensor`, and returns a `Tensor` loss. By
           default, `loss_fn` is the `score_function`, or more precisely, the
           integral of the score function, such that when the gradient is taken,
           the score function results. See the `stochastic_gradient_estimators`
           module for additional loss functions and baselines.
-      **dist_args: keyword arguments to be passed through to `dist_cls` on
-          construction.
 
     Raises:
-      TypeError: if `dist_cls` is not a `Distribution`.
+      TypeError: if `dist` is not an instance of `Distribution`.
       TypeError: if `loss_fn` is not `callable`.
     """
-    if not issubclass(dist_cls, distributions.Distribution):
-      raise TypeError("dist_cls must be a subclass of Distribution")
-    self._dist_cls = dist_cls
-    self._dist_args = dist_args
+    if not isinstance(dist, distributions.Distribution):
+      raise TypeError("dist must be an instance of Distribution")
     if dist_value_type is None:
       try:
         self._value_type = get_current_value_type()
@@ -371,24 +363,17 @@ class StochasticTensor(BaseStochasticTensor):
       with value_type(dist_value_type):
         self._value_type = get_current_value_type()
 
-    self._value_type.declare_inputs(self, dist_args)
-
     if loss_fn is not None and not callable(loss_fn):
       raise TypeError("loss_fn must be callable")
     self._loss_fn = loss_fn
 
-    with ops.name_scope(name, "StochasticTensor",
-                        dist_args.values()) as scope:
+    with ops.name_scope(name) as scope:
       self._name = scope
-      self._dist = dist_cls(**dist_args)
+      self._dist = dist
       self._value = self._create_value()
 
     super(StochasticTensor, self).__init__()
 
-  @property
-  def input_dict(self):
-    return self._dist_args
-
   @property
   def value_type(self):
     return self._value_type
@@ -397,9 +382,6 @@ class StochasticTensor(BaseStochasticTensor):
   def distribution(self):
     return self._dist
 
-  def clone(self, name=None, **dist_args):
-    return StochasticTensor(self._dist_cls, name=name, **dist_args)
-
   def _create_value(self):
     """Create the value Tensor based on the value type, store as self._value."""
 
@@ -494,33 +476,28 @@ class ObservedStochasticTensor(StochasticTensor):
   """A StochasticTensor with an observed value."""
 
   # pylint: disable=super-init-not-called
-  def __init__(self, dist_cls, value, name=None, **dist_args):
+  def __init__(self, dist, value, name=None):
     """Construct an `ObservedStochasticTensor`.
 
-    `ObservedStochasticTensor` will instantiate a distribution from `dist_cls`
-    and `dist_args` but use the provided value instead of sampling from the
-    distribution. The provided value argument must be appropriately shaped
-    to have come from the constructed distribution.
+    `ObservedStochasticTensor` is backed by distribution `dist` and uses the
+    provided value instead of using the current value type to draw a value from
+    the distribution. The provided value argument must be appropriately shaped
+    to have come from the distribution.
 
     Args:
-      dist_cls: a `Distribution` class.
+      dist: an instance of `Distribution`.
       value: a Tensor containing the observed value
       name: a name for this `ObservedStochasticTensor` and its ops.
-      **dist_args: keyword arguments to be passed through to `dist_cls` on
-          construction.
 
     Raises:
-      TypeError: if `dist_cls` is not a `Distribution`.
+      TypeError: if `dist` is not an instance of `Distribution`.
       ValueError: if `value` is not compatible with the distribution.
     """
-    if not issubclass(dist_cls, distributions.Distribution):
-      raise TypeError("dist_cls must be a subclass of Distribution")
-    self._dist_cls = dist_cls
-    self._dist_args = dist_args
-    with ops.name_scope(name, "ObservedStochasticTensor",
-                        list(dist_args.values()) + [value]) as scope:
+    if not isinstance(dist, distributions.Distribution):
+      raise TypeError("dist must be an instance of Distribution")
+    with ops.name_scope(name, "ObservedStochasticTensor", [value]) as scope:
       self._name = scope
-      self._dist = dist_cls(**dist_args)
+      self._dist = dist
       dist_shape = self._dist.get_batch_shape().concatenate(
           self._dist.get_event_shape())
       value = ops.convert_to_tensor(value)
@@ -538,7 +515,7 @@ class ObservedStochasticTensor(StochasticTensor):
               "sample from the distribution %s." % (value_shape, dist_shape))
       if value.dtype != self._dist.dtype:
         raise ValueError("Type of observed value (%s) does not match type of "
-                         "distribuiton (%s)." % (value.dtype, self._dist.dtype))
+                         "distribution (%s)." % (value.dtype, self._dist.dtype))
       self._value = array_ops.identity(value)
     # pylint: disable=non-parent-init-called
     BaseStochasticTensor.__init__(self)
@@ -557,39 +534,3 @@ __all__ = [
     "value_type",
     "get_current_value_type",
 ]
-
-_globals = globals()
-# pylint: disable=redefined-builtin
-__doc__ += "\n\n## Automatically Generated StochasticTensors\n\n"
-# pylint: enable=redefined-builtin
-for _name in sorted(dir(distributions)):
-  _candidate = getattr(distributions, _name)
-  if (inspect.isclass(_candidate)
-      and _candidate != distributions.Distribution
-      and issubclass(_candidate, distributions.Distribution)):
-    _local_name = "%sTensor" % _name
-
-    class _WrapperTensor(StochasticTensor):
-      _my_candidate = _candidate
-
-      def __init__(self, name=None, dist_value_type=None,
-                   loss_fn=sge.score_function, **dist_args):
-        StochasticTensor.__init__(
-            self,
-            dist_cls=self._my_candidate,
-            name=name,
-            dist_value_type=dist_value_type,
-            loss_fn=loss_fn, **dist_args)
-
-    _WrapperTensor.__name__ = _local_name
-    _WrapperTensor.__doc__ = (
-        "`%s` is a `StochasticTensor` backed by the distribution `%s`."""
-        % (_local_name, _name))
-    _globals[_local_name] = _WrapperTensor
-    del _WrapperTensor
-    del _candidate
-
-    __all__.append(_local_name)
-    __doc__ += "@@%s\n" % _local_name
-
-    del _local_name
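With the auto-generated `*Tensor` wrappers removed, code such as `st.NormalTensor(...)` must also migrate to the instance form. A sketch of the equivalent call:

```python
dists = tf.contrib.distributions
st = tf.contrib.bayesflow.stochastic_tensor

# Previously auto-generated: x = st.NormalTensor(mu=0., sigma=1.)
x = st.StochasticTensor(dists.Normal(mu=0., sigma=1.))
```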
diff --git a/tensorflow/contrib/bayesflow/python/ops/stochastic_variables.py b/tensorflow/contrib/bayesflow/python/ops/stochastic_variables.py
index 72c2e0d8ec1..7baf1366bcd 100644
--- a/tensorflow/contrib/bayesflow/python/ops/stochastic_variables.py
+++ b/tensorflow/contrib/bayesflow/python/ops/stochastic_variables.py
@@ -126,7 +126,7 @@ def get_stochastic_variable(getter,
 
   dist_kwargs = dist_kwargs or {}
   dist_kwargs.update(params)
-  sample = st.StochasticTensor(dist_cls, **dist_kwargs)
+  sample = st.StochasticTensor(dist_cls(**dist_kwargs))
 
   if prior is not None:
     if callable(prior):
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py b/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py
index e82d604d58a..66b43662fd5 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py
@@ -325,7 +325,7 @@ class FillLowerTriangularTest(tf.test.TestCase):
 
   def testCorrectlyMakesNoBatchLowerTril(self):
     with self.test_session():
-      x = np.arange(9)
+      x = tf.convert_to_tensor(np.arange(9, dtype=np.float32))
       expected = np.array(
           [[0., 0., 0.],
            [1., 2., 0.],
@@ -333,6 +333,10 @@ class FillLowerTriangularTest(tf.test.TestCase):
       actual = distribution_util.fill_lower_triangular(x)
       self.assertAllEqual(expected.shape, actual.get_shape())
       self.assertAllEqual(expected, actual.eval())
+      self.assertAllEqual(
+          np.concatenate([np.ones(6, dtype=np.float32),
+                          np.zeros(3, dtype=np.float32)]),
+          tf.gradients(distribution_util.fill_lower_triangular(x), x)[0].eval())
 
   def testCorrectlyMakesBatchLowerTril(self):
     with self.test_session():
diff --git a/tensorflow/contrib/distributions/python/ops/distribution_util.py b/tensorflow/contrib/distributions/python/ops/distribution_util.py
index ad5fa5b5aee..e27dcfe9b3f 100644
--- a/tensorflow/contrib/distributions/python/ops/distribution_util.py
+++ b/tensorflow/contrib/distributions/python/ops/distribution_util.py
@@ -435,15 +435,14 @@ def fill_lower_triangular(x, name="fill_lower_triangular"):
   """
   with ops.name_scope(name, values=(x,)):
     x = ops.convert_to_tensor(x, name="x")
-    ndims = x.get_shape().ndims
-    if ndims is not None and x.get_shape()[-1].value is not None:
+    if (x.get_shape().ndims is not None and
+        x.get_shape()[-1].value is not None):
       d = x.get_shape()[-1].value
       # d = n^2/2 + n/2 implies n is:
       n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
       final_shape = x.get_shape()[:-1].concatenate(
           tensor_shape.TensorShape([n, n]))
     else:
-      ndims = array_ops.rank(x)
       d = math_ops.cast(array_ops.shape(x)[-1], dtype=dtypes.float32)
       # d = n^2/2 + n/2 implies n is:
       n = math_ops.cast(0.5 * (math_ops.sqrt(1. + 8. * d) - 1.),
@@ -494,7 +493,12 @@ def fill_lower_triangular(x, name="fill_lower_triangular"):
         array_ops.tile([tril_ids], [m, 1])])
     idx = array_ops.transpose(idx, [1, 2, 0])
 
-    y = array_ops.gather_nd(y, idx)
+    if x.get_shape().ndims == 1:
+      # Prefer using gather because it has a gradient.
+      # We wrap the result in a list so downstream logic "just works."
+      y = [array_ops.gather(y[0, :], tril_ids)]
+    else:
+      y = array_ops.gather_nd(y, idx)
     y = array_ops.reshape(y, array_ops.concat(0, [batch_shape, [n, n]]))
 
     y.set_shape(y.get_shape().merge_with(final_shape))
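The `n`-from-`d` arithmetic and the fill order can be traced with the test input above. A NumPy sketch, with `np.tril_indices` standing in for the op's `tril_ids` construction:

```python
import math
import numpy as np

x = np.arange(9, dtype=np.float32)
d = x.shape[-1]
n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))  # largest n with n(n+1)/2 <= d

tril = np.zeros((n, n), dtype=x.dtype)
tril[np.tril_indices(n)] = x[:n * (n + 1) // 2]  # only the first 6 of 9 values
print(tril)
# [[0. 0. 0.]
#  [1. 2. 0.]
#  [3. 4. 5.]]
```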
diff --git a/tensorflow/contrib/factorization/python/ops/factorization_ops.py b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
index 74fd40f2c91..34fa0129dd8 100644
--- a/tensorflow/contrib/factorization/python/ops/factorization_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
@@ -571,9 +571,8 @@ class WALSModel(object):
         extras = size % num_shards
         assignments = tf.maximum(ids // (ids_per_shard + 1),
                                  (ids - extras) // ids_per_shard)
-        new_ids = tf.select(assignments < extras,
-                            ids % (ids_per_shard + 1),
-                            (ids - extras) % ids_per_shard)
+        new_ids = tf.where(assignments < extras, ids % (ids_per_shard + 1),
+                           (ids - extras) % ids_per_shard)
         return assignments, new_ids
     return func
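The sharding arithmetic gives each of the first `extras` shards one extra id. A small NumPy trace of the same expressions (sizes chosen for illustration):

```python
import numpy as np

size, num_shards = 10, 3
ids = np.arange(size)
ids_per_shard = size // num_shards  # 3
extras = size % num_shards          # 1: shard 0 holds one extra id

assignments = np.maximum(ids // (ids_per_shard + 1),
                         (ids - extras) // ids_per_shard)
new_ids = np.where(assignments < extras,
                   ids % (ids_per_shard + 1),
                   (ids - extras) % ids_per_shard)
print(assignments)  # [0 0 0 0 1 1 1 2 2 2]
print(new_ids)      # [0 1 2 3 0 1 2 0 1 2]
```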
 
diff --git a/tensorflow/contrib/framework/python/ops/variables_test.py b/tensorflow/contrib/framework/python/ops/variables_test.py
index eb0a2c2d8eb..49683faf90f 100644
--- a/tensorflow/contrib/framework/python/ops/variables_test.py
+++ b/tensorflow/contrib/framework/python/ops/variables_test.py
@@ -36,7 +36,7 @@ class LocalVariableTest(tf.test.TestCase):
       variables = tf.local_variables()
       self.assertEquals(2, len(variables))
       self.assertRaises(tf.OpError, sess.run, variables)
-      tf.initialize_variables(variables).run()
+      tf.variables_initializer(variables).run()
       self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
 
   def testLocalVariableNameAndShape(self):
@@ -51,7 +51,7 @@ class LocalVariableTest(tf.test.TestCase):
     with self.test_session():
       with tf.variable_scope('A'):
         a = tf.contrib.framework.local_variable(0)
-        self.assertFalse(a in tf.all_variables())
+        self.assertFalse(a in tf.global_variables())
         self.assertTrue(a in tf.local_variables())
 
   def testLocalVariableNotInVariablesToRestore(self):
@@ -82,7 +82,7 @@ class LocalVariableTest(tf.test.TestCase):
   def testInitializedVariableValue(self):
     with self.test_session() as sess:
       a = tf.contrib.framework.local_variable([0, 0, 0, 0, 0], name='a')
-      sess.run(tf.initialize_local_variables())
+      sess.run(tf.local_variables_initializer())
       self.assertAllEqual(a.eval(), [0]*5)
 
 
@@ -439,7 +439,7 @@ class ModelVariablesTest(tf.test.TestCase):
     with self.test_session():
       with tf.variable_scope('A'):
         a = tf.contrib.framework.model_variable('a', [5])
-        self.assertTrue(a in tf.all_variables())
+        self.assertTrue(a in tf.global_variables())
         self.assertTrue(a in tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
         self.assertFalse(a in tf.local_variables())
 
@@ -474,7 +474,7 @@ class ModelVariablesTest(tf.test.TestCase):
     with self.test_session() as sess:
       a = tf.contrib.framework.model_variable(
           'a', [5], initializer=tf.ones_initializer)
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
       self.assertAllEqual(a.eval(), [1]*5)
 
   def testDeviceFn(self):
@@ -667,7 +667,7 @@ class AssignFromValuesTest(tf.test.TestCase):
           var_names_to_values)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       sess.run(assign_op, feed_dict)
@@ -697,7 +697,7 @@ class AssignFromValuesTest(tf.test.TestCase):
           var_names_to_values)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       sess.run(assign_op, feed_dict)
@@ -725,7 +725,7 @@ class AssignFromValuesFnTest(tf.test.TestCase):
       init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       init_fn(sess)
@@ -754,7 +754,7 @@ class AssignFromValuesFnTest(tf.test.TestCase):
       init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       init_fn(sess)
@@ -786,7 +786,7 @@ class AssignFromCheckpointTest(tf.test.TestCase):
         var_value = var_names_to_values[var_name]
         var_list.append(tf.Variable(var_value, name=var_name))
       saver = tf.train.Saver(var_list)
-      init_op = tf.initialize_variables(var_list)
+      init_op = tf.variables_initializer(var_list)
       sess.run(init_op)
       # Save the initialized values in the file at 'checkpoint_dir'
       return saver.save(sess, checkpoint_dir, global_step=global_step)
@@ -808,7 +808,7 @@ class AssignFromCheckpointTest(tf.test.TestCase):
           model_path, vars_to_restore)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       sess.run(op, feed_dict)
@@ -859,7 +859,7 @@ class AssignFromCheckpointTest(tf.test.TestCase):
           vars_to_restore)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       sess.run(op, feed_dict)
@@ -890,7 +890,7 @@ class AssignFromCheckpointFnTest(tf.test.TestCase):
         var_value = var_names_to_values[var_name]
         var_list.append(tf.Variable(var_value, name=var_name))
       saver = tf.train.Saver(var_list)
-      init_op = tf.initialize_variables(var_list)
+      init_op = tf.variables_initializer(var_list)
       sess.run(init_op)
       # Save the initialized values in the file at 'checkpoint_dir'
       return saver.save(sess, checkpoint_dir, global_step=global_step)
@@ -912,7 +912,7 @@ class AssignFromCheckpointFnTest(tf.test.TestCase):
           model_path, vars_to_restore)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       init_fn(sess)
@@ -938,7 +938,7 @@ class AssignFromCheckpointFnTest(tf.test.TestCase):
           model_path, vars_to_restore)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       with self.assertRaises(tf.errors.InvalidArgumentError):
@@ -961,7 +961,7 @@ class AssignFromCheckpointFnTest(tf.test.TestCase):
           model_path, vars_to_restore, reshape_variables=True)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       init_fn(sess)
@@ -989,7 +989,7 @@ class AssignFromCheckpointFnTest(tf.test.TestCase):
           vars_to_restore)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       with self.assertRaises(tf.errors.NotFoundError):
@@ -1015,7 +1015,7 @@ class AssignFromCheckpointFnTest(tf.test.TestCase):
           ignore_missing_vars=True)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       init_fn(sess)
@@ -1044,7 +1044,7 @@ class AssignFromCheckpointFnTest(tf.test.TestCase):
           ignore_missing_vars=True)
 
       # Initialize the variables.
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
 
       # Perform the assignment.
       init_fn(sess)
diff --git a/tensorflow/contrib/integrate/BUILD b/tensorflow/contrib/integrate/BUILD
new file mode 100644
index 00000000000..1e6db75d215
--- /dev/null
+++ b/tensorflow/contrib/integrate/BUILD
@@ -0,0 +1,38 @@
+# Description:
+#   Integration and ODE solvers for TensorFlow.
+
+licenses(["notice"])  # Apache 2.0
+
+exports_files(["LICENSE"])
+
+package(default_visibility = ["//tensorflow:__subpackages__"])
+
+py_library(
+    name = "integrate_py",
+    srcs = [
+        "__init__.py",
+        "python/ops/odes.py",
+    ],
+    srcs_version = "PY2AND3",
+)
+
+py_test(
+    name = "odes_test",
+    srcs = ["python/ops/odes_test.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":integrate_py",
+        "//tensorflow:tensorflow_py",
+    ],
+)
+
+filegroup(
+    name = "all_files",
+    srcs = glob(
+        ["**/*"],
+        exclude = [
+            "**/METADATA",
+            "**/OWNERS",
+        ],
+    ),
+)
diff --git a/tensorflow/contrib/integrate/README.md b/tensorflow/contrib/integrate/README.md
new file mode 100644
index 00000000000..beae6993b9d
--- /dev/null
+++ b/tensorflow/contrib/integrate/README.md
@@ -0,0 +1,9 @@
+# Integration and ODE solvers for TensorFlow
+
+TensorFlow equivalents to the routines provided by `scipy.integrate`. The
+module currently contains a single function, `odeint`, for integrating
+ordinary differential equations.
+
+Maintainers:
+- Stephan Hoyer (shoyer@google.com, github.com/shoyer)
+- Marc Coram (mcoram@google.com, github.com/mcoram)
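A minimal `odeint` call, following the signature used in the module docstring below (exponential decay; values are illustrative):

```python
import numpy as np
import tensorflow as tf

# dy/dt = -y with y(0) = 1; the exact solution is exp(-t).
y0 = tf.constant(1.0, dtype=tf.float64)
t = np.linspace(0., 5., num=11)
y = tf.contrib.integrate.odeint(lambda y, t: -y, y0, t)

with tf.Session() as sess:
  print(sess.run(y))  # close to np.exp(-t)
```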
diff --git a/tensorflow/contrib/integrate/__init__.py b/tensorflow/contrib/integrate/__init__.py
new file mode 100644
index 00000000000..e88d10c5823
--- /dev/null
+++ b/tensorflow/contrib/integrate/__init__.py
@@ -0,0 +1,64 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Integration and ODE solvers for TensorFlow.
+
+## Example: Lorenz attractor
+
+We can use `odeint` to solve the
+[Lorenz system](https://en.wikipedia.org/wiki/Lorenz_system) of ordinary
+differential equations, a prototypical example of chaotic dynamics:
+
+```python
+import matplotlib.pyplot as plt
+import numpy as np
+import tensorflow as tf
+
+rho = 28.0
+sigma = 10.0
+beta = 8.0/3.0
+
+def lorenz_equation(state, t):
+  x, y, z = tf.unpack(state)
+  dx = sigma * (y - x)
+  dy = x * (rho - z) - y
+  dz = x * y - beta * z
+  return tf.pack([dx, dy, dz])
+
+init_state = tf.constant([0, 2, 20], dtype=tf.float64)
+t = np.linspace(0, 50, num=5000)
+tensor_state, tensor_info = tf.contrib.integrate.odeint(
+    lorenz_equation, init_state, t, full_output=True)
+
+sess = tf.Session()
+state, info = sess.run([tensor_state, tensor_info])
+x, y, z = state.T
+plt.plot(x, z)
+```
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/lorenz_attractor.png" alt>
+</div>
+
+## Ops
+
+@@odeint
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+# pylint: disable=wildcard-import
+from tensorflow.contrib.integrate.python.ops.odes import *
+from tensorflow.python.util.all_util import make_all
+
+__all__ = make_all(__name__)
diff --git a/tensorflow/contrib/integrate/python/ops/odes.py b/tensorflow/contrib/integrate/python/ops/odes.py
new file mode 100644
index 00000000000..5747bdefee8
--- /dev/null
+++ b/tensorflow/contrib/integrate/python/ops/odes.py
@@ -0,0 +1,503 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""ODE solvers for TensorFlow."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import tensor_array_ops
+
+
+_ButcherTableau = collections.namedtuple(
+    '_ButcherTableau', 'alpha beta c_sol c_mid c_error')
+
+# Parameters from Shampine (1986), section 4.
+_DORMAND_PRINCE_TABLEAU = _ButcherTableau(
+    alpha=[1/5, 3/10, 4/5, 8/9, 1., 1.],
+    beta=[[1/5],
+          [3/40, 9/40],
+          [44/45, -56/15, 32/9],
+          [19372/6561, -25360/2187, 64448/6561, -212/729],
+          [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656],
+          [35/384, 0, 500/1113, 125/192, -2187/6784, 11/84]],
+    c_sol=[35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0],
+    c_mid=[6025192743/30085553152 / 2, 0, 51252292925/65400821598 / 2,
+           -2691868925/45128329728 / 2, 187940372067/1594534317056 / 2,
+           -1776094331/19743644256 / 2, 11237099/235043384 / 2],
+    c_error=[1951/21600 - 35/384,
+             0,
+             22642/50085 - 500/1113,
+             451/720 - 125/192,
+             -12231/42400 - -2187/6784,
+             649/6300 - 11/84,
+             1/60],
+)
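+# Note that `c_sol[:-1]` equals the last row of `beta` and `c_sol[-1] == 0`
+# (the "first same as last" property): the solution at the end of a step is
+# exactly the last stage evaluation, which can therefore be reused as `f0`
+# for the next step.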
+
+
+def _possibly_nonzero(x):
+  return isinstance(x, ops.Tensor) or x != 0
+
+
+def _scaled_dot_product(scale, xs, ys, name=None):
+  """Calculate a scaled, vector inner product between lists of Tensors."""
+  with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
+    # Some of the parameters in our Butcher tableau include zeros. Using
+    # _possibly_nonzero lets us avoid wasted computation.
+    return math_ops.add_n([(scale * x) * y for x, y in zip(xs, ys)
+                           if _possibly_nonzero(x) and _possibly_nonzero(y)],
+                          name=scope)
+
+
+def _dot_product(xs, ys, name=None):
+  """Calculate the vector inner product between two lists of Tensors."""
+  with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
+    return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
+
+
+def _runge_kutta_step(func, y0, f0, t0, dt, tableau=_DORMAND_PRINCE_TABLEAU,
+                      name=None):
+  """Take an arbitrary Runge-Kutta step and estimate error.
+
+  Args:
+    func: Function to evaluate like `func(y, t)` to compute the time derivative
+      of `y`.
+    y0: Tensor initial value for the state.
+    f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
+    t0: float64 scalar Tensor giving the initial time.
+    dt: float64 scalar Tensor giving the size of the desired time step.
+    tableau: optional _ButcherTableau describing how to take the Runge-Kutta
+      step.
+    name: optional name for the operation.
+
+  Returns:
+    Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
+    the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
+    estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
+    calculating these terms.
+  """
+  with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
+    y0 = ops.convert_to_tensor(y0, name='y0')
+    f0 = ops.convert_to_tensor(f0, name='f0')
+    t0 = ops.convert_to_tensor(t0, name='t0')
+    dt = ops.convert_to_tensor(dt, name='dt')
+    dt_cast = math_ops.cast(dt, y0.dtype)
+
+    k = [f0]
+    for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
+      ti = t0 + alpha_i * dt
+      yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
+      k.append(func(yi, ti))
+
+    if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
+      # This property (true for Dormand-Prince) means the final `yi` from the
+      # loop above already equals `y1`, letting us skip this recomputation.
+      yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
+
+    y1 = array_ops.identity(yi, name='%s/y1' % scope)
+    f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
+    y1_error = _scaled_dot_product(dt_cast, tableau.c_error, k,
+                                   name='%s/y1_error' % scope)
+    return (y1, f1, y1_error, k)
+
+
+def _interp_fit(y0, y1, y_mid, f0, f1, dt):
+  """Fit coefficients for 4th order polynomial interpolation.
+
+  Args:
+    y0: function value at the start of the interval.
+    y1: function value at the end of the interval.
+    y_mid: function value at the mid-point of the interval.
+    f0: derivative value at the start of the interval.
+    f1: derivative value at the end of the interval.
+    dt: width of the interval.
+
+  Returns:
+    List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
+    `p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
+    between 0 (start of interval) and 1 (end of interval).
+  """
+  # a, b, c, d, e = sympy.symbols('a b c d e')
+  # x, dt, y0, y1, y_mid, f0, f1 = sympy.symbols('x dt y0 y1 y_mid f0 f1')
+  # p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e
+  # sympy.solve([p.subs(x, 0) - y0,
+  #              p.subs(x, 1 / 2) - y_mid,
+  #              p.subs(x, 1) - y1,
+  #              (p.diff(x) / dt).subs(x, 0) - f0,
+  #              (p.diff(x) / dt).subs(x, 1) - f1],
+  #             [a, b, c, d, e])
+  # {a: -2.0*dt*f0 + 2.0*dt*f1 - 8.0*y0 - 8.0*y1 + 16.0*y_mid,
+  #  b: 5.0*dt*f0 - 3.0*dt*f1 + 18.0*y0 + 14.0*y1 - 32.0*y_mid,
+  #  c: -4.0*dt*f0 + dt*f1 - 11.0*y0 - 5.0*y1 + 16.0*y_mid,
+  #  d: dt*f0,
+  #  e: y0}
+  a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
+  b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
+  c = _dot_product([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
+  d = dt * f0
+  e = y0
+  return [a, b, c, d, e]
+
+
+def _interp_fit_rk(y0, y1, k, dt, tableau=_DORMAND_PRINCE_TABLEAU):
+  """Fit an interpolating polynomial to the results of a Runge-Kutta step."""
+  with ops.name_scope('interp_fit_rk'):
+    dt = math_ops.cast(dt, y0.dtype)
+    y_mid = y0 + _scaled_dot_product(dt, tableau.c_mid, k)
+    f0 = k[0]
+    f1 = k[-1]
+    return _interp_fit(y0, y1, y_mid, f0, f1, dt)
+
+
+def _interp_evaluate(coefficients, t0, t1, t):
+  """Evaluate polynomial interpolation at the given time point.
+
+  Args:
+    coefficients: list of Tensor coefficients as created by `interp_fit`.
+    t0: scalar float64 Tensor giving the start of the interval.
+    t1: scalar float64 Tensor giving the end of the interval.
+    t: scalar float64 Tensor giving the desired interpolation point.
+
+  Returns:
+    Polynomial interpolation of the coefficients at time `t`.
+  """
+  with ops.name_scope('interp_evaluate'):
+    t0 = ops.convert_to_tensor(t0)
+    t1 = ops.convert_to_tensor(t1)
+    t = ops.convert_to_tensor(t)
+
+    dtype = coefficients[0].dtype
+
+    assert_op = control_flow_ops.Assert(
+        (t0 <= t) & (t <= t1),
+        ['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
+    with ops.control_dependencies([assert_op]):
+      x = math_ops.cast((t - t0) / (t1 - t0), dtype)
+
+    xs = [constant_op.constant(1, dtype), x]
+    for _ in range(2, len(coefficients)):
+      xs.append(xs[-1] * x)
+
+    return _dot_product(coefficients, reversed(xs))
+
+
+def _optimal_step_size(last_step,
+                       error_ratio,
+                       safety=0.9,
+                       ifactor=10.0,
+                       dfactor=0.2,
+                       order=5,
+                       name=None):
+  """Calculate the optimal size for the next Runge-Kutta step."""
+  with ops.name_scope(
+      name, 'optimal_step_size', [last_step, error_ratio]) as scope:
+    error_ratio = math_ops.cast(error_ratio, last_step.dtype)
+    exponent = math_ops.cast(1 / order, last_step.dtype)
+    # this looks more complex than necessary, but importantly it keeps
+    # error_ratio in the numerator so we can't divide by zero:
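+    # Equivalently: new_step = last_step * min(
+    #     ifactor, max(dfactor, safety / error_ratio ** exponent))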
+    factor = math_ops.maximum(
+        1 / ifactor,
+        math_ops.minimum(error_ratio ** exponent / safety, 1 / dfactor))
+    return math_ops.div(last_step, factor, name=scope)
+
+
+def _abs_square(x):
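+  # For complex `x`, `math_ops.square(x)` would compute x ** 2 rather than
+  # |x| ** 2, so square the real and imaginary components explicitly.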
+  if x.dtype.is_complex:
+    return math_ops.square(math_ops.real(x)) + math_ops.square(math_ops.imag(x))
+  else:
+    return math_ops.square(x)
+
+
+def _ta_append(tensor_array, value):
+  """Append a value to the end of a tf.TensorArray."""
+  return tensor_array.write(tensor_array.size(), value)
+
+
+class _RungeKuttaState(collections.namedtuple(
+    '_RungeKuttaState', 'y1, f1, t0, t1, dt, interp_coeff')):
+  """Saved state of the Runge Kutta solver.
+
+  Attributes:
+    y1: Tensor giving the function value at the end of the last time step.
+    f1: Tensor giving derivative at the end of the last time step.
+    t0: scalar float64 Tensor giving start of the last time step.
+    t1: scalar float64 Tensor giving end of the last time step.
+    dt: scalar float64 Tensor giving the size for the next time step.
+    interp_coeff: list of Tensors giving coefficients for polynomial
+      interpolation between `t0` and `t1`.
+  """
+
+
+class _History(collections.namedtuple(
+    '_History', 'integrate_points, error_ratio')):
+  """Saved integration history for use in `info_dict`.
+
+  Attributes:
+    integrate_points: tf.TensorArray storing integrating time points.
+    error_ratio: tf.TensorArray storing computed error ratios at each
+      integration step.
+  """
+
+
+def _dopri5(func,
+            y0,
+            t,
+            rtol,
+            atol,
+            full_output=False,
+            first_step=None,
+            safety=0.9,
+            ifactor=10.0,
+            dfactor=0.2,
+            max_num_steps=1000,
+            name=None):
+  """Solve an ODE for `odeint` using method='dopri5'."""
+
+  if first_step is None:
+    # at some point, we might want to switch to picking the step size
+    # automatically
+    first_step = 1.0
+
+  with ops.name_scope(
+      name, 'dopri5',
+      [y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps]) as scope:
+
+    first_step = ops.convert_to_tensor(first_step, dtype=t.dtype,
+                                       name='first_step')
+    safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
+    ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
+    dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
+    max_num_steps = ops.convert_to_tensor(max_num_steps, dtype=dtypes.int32,
+                                          name='max_num_steps')
+
+    def adaptive_runge_kutta_step(rk_state, history, n_steps):
+      """Take an adaptive Runge-Kutta step to integrate the ODE."""
+      y0, f0, _, t0, dt, interp_coeff = rk_state
+      with ops.name_scope('assertions'):
+        check_underflow = control_flow_ops.Assert(
+            t0 + dt > t0, ['underflow in dt', dt])
+        check_max_num_steps = control_flow_ops.Assert(
+            n_steps < max_num_steps, ['max_num_steps exceeded'])
+        check_numerics = control_flow_ops.Assert(
+            math_ops.reduce_all(math_ops.is_finite(abs(y0))),
+            ['non-finite values in state `y`', y0])
+      with ops.control_dependencies(
+          [check_underflow, check_max_num_steps, check_numerics]):
+        y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)
+
+      with ops.name_scope('error_ratio'):
+        # We use the same approach as the dopri5 Fortran code.
+        error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
+        tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
+        # Could also use reduce_maximum here.
+        error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
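+        # A ratio <= 1 means the RMS error is within the mixed
+        # absolute/relative tolerance, so the step can be accepted.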
+        accept_step = error_ratio <= 1
+
+      with ops.name_scope('update/rk_state'):
+        # If we don't accept the step, the _RungeKuttaState will be useless
+        # (covering a time-interval of size 0), but that's OK, because in such
+        # cases we always immediately take another Runge-Kutta step.
+        y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
+        f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
+        t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
+        interp_coeff = control_flow_ops.cond(
+            accept_step,
+            lambda: _interp_fit_rk(y0, y1, k, dt),
+            lambda: interp_coeff)
+        dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
+        rk_state = _RungeKuttaState(
+            y_next, f_next, t0, t_next, dt_next, interp_coeff)
+
+      with ops.name_scope('update/history'):
+        history = _History(_ta_append(history.integrate_points, t0 + dt),
+                           _ta_append(history.error_ratio, error_ratio))
+      return rk_state, history, n_steps + 1
+
+    def interpolate(solution, history, rk_state, i):
+      """Interpolate through the next time point, integrating as necessary."""
+      with ops.name_scope('interpolate'):
+        rk_state, history, _ = control_flow_ops.while_loop(
+            lambda rk_state, *_: t[i] > rk_state.t1,
+            adaptive_runge_kutta_step,
+            (rk_state, history, 0),
+            name='integrate_loop')
+        y = _interp_evaluate(
+            rk_state.interp_coeff, rk_state.t0, rk_state.t1, t[i])
+        solution = solution.write(i, y)
+        return solution, history, rk_state, i + 1
+
+    assert_increasing = control_flow_ops.Assert(
+        math_ops.reduce_all(t[1:] > t[:-1]),
+        ['`t` must be monotonically increasing'])
+    with ops.control_dependencies([assert_increasing]):
+      num_times = array_ops.size(t)
+
+    solution = tensor_array_ops.TensorArray(
+        y0.dtype, size=num_times).write(0, y0)
+    history = _History(
+        integrate_points=tensor_array_ops.TensorArray(
+            t.dtype, size=0, dynamic_size=True),
+        error_ratio=tensor_array_ops.TensorArray(
+            rtol.dtype, size=0, dynamic_size=True))
+    rk_state = _RungeKuttaState(
+        y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)
+
+    solution, history, _, _ = control_flow_ops.while_loop(
+        lambda _, __, ___, i: i < num_times,
+        interpolate,
+        (solution, history, rk_state, 1),
+        name='interpolate_loop')
+
+    y = solution.pack(name=scope)
+    y.set_shape(t.get_shape().concatenate(y0.get_shape()))
+    if not full_output:
+      return y
+    else:
+      integrate_points = history.integrate_points.pack()
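+      # Each attempted Runge-Kutta step costs six new evaluations of `func`
+      # (the seventh stage is reused as the first stage of the next step),
+      # plus one evaluation of `f0` before the loop.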
+      info_dict = {'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
+                   'integrate_points': integrate_points,
+                   'error_ratio': history.error_ratio.pack()}
+      return (y, info_dict)
+
+
+def odeint(func,
+           y0,
+           t,
+           rtol=1e-6,
+           atol=1e-12,
+           method=None,
+           options=None,
+           full_output=False,
+           name=None):
+  """Integrate a system of ordinary differential equations.
+
+  Solves the initial value problem for a non-stiff system of first-order ODEs:
+
+    ```
+    dy/dt = func(y, t), y(t[0]) = y0
+    ```
+
+  where y is a Tensor of any shape.
+
+  For example:
+
+    ```
+    # solve `dy/dt = -y`, corresponding to exponential decay
+    tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
+    => [1, exp(-1), exp(-2)]
+    ```
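+
+    If `full_output=True`, a dict of diagnostics is returned alongside the
+    solution (a sketch using the keys documented under `Returns` below):
+
+    ```
+    y, info_dict = tf.contrib.integrate.odeint(
+        lambda y, _: -y, 1.0, [0, 1, 2], full_output=True)
+    # info_dict['num_func_evals'] gives the number of function evaluations.
+    ```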
+
+  Output dtypes and numerical precision are based on the dtypes of the inputs
+  `y0` and `t`.
+
+  This currently implements 5th order Runge-Kutta with adaptive step size
+  control and dense output, using the Dormand-Prince method. It is similar to
+  the 'dopri5' method of `scipy.integrate.ode` and MATLAB's `ode45`.
+
+  Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
+  Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
+  doi:10.2307/2008219
+
+  Args:
+    func: Function that maps a Tensor holding the state `y` and a scalar Tensor
+      `t` into a Tensor of state derivatives with respect to time.
+    y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
+      have any floating point or complex dtype.
+    t: 1-D Tensor holding a sequence of time points for which to solve for
+      `y`. The initial time point should be the first element of this sequence,
+      and each time must be larger than the previous time. May have any floating
+      point dtype. If not provided as a Tensor, converted to a Tensor with
+      float64 dtype.
+    rtol: optional float64 Tensor specifying an upper bound on relative error,
+      per element of `y`.
+    atol: optional float64 Tensor specifying an upper bound on absolute error,
+      per element of `y`.
+    method: optional string indicating the integration method to use. Currently,
+      the only valid option is `'dopri5'`.
+    options: optional dict of configuration options for the indicated integration
+      method. Can only be provided if a `method` is explicitly set. For
+      `'dopri5'`, valid options include:
+      * first_step: an initial guess for the size of the first integration step
+        (current default: 1.0, but may later be changed to use heuristics based
+        on the gradient).
+      * safety: safety factor for adaptive step control, generally a constant
+        in the range 0.8-1 (default: 0.9).
+      * ifactor: maximum factor by which the adaptive step may be increased
+        (default: 10.0).
+      * dfactor: maximum factor by which the adaptive step may be decreased
+        (default: 0.2).
+      * max_num_steps: integer maximum number of integration steps between time
+        points in `t` (default: 1000).
+    full_output: optional boolean. If True, `odeint` returns a tuple
+      `(y, info_dict)` describing the integration process.
+    name: Optional name for this operation.
+
+  Returns:
+    y: (N+1)-D tensor, where the first dimension corresponds to different
+      time points. Contains the solved value of y for each desired time point in
+      `t`, with the initial value `y0` being the first element along the first
+      dimension.
+    info_dict: only if `full_output == True`. A dict with the following values:
+      * num_func_evals: integer Tensor counting the number of function
+        evaluations.
+      * integrate_points: 1D float64 Tensor with the upper bound of each
+        integration time step.
+      * error_ratio: 1D float Tensor with the estimated ratio of the integration
+        error to the error tolerance at each integration step. A ratio greater
+        than 1 corresponds to rejected steps.
+
+  Raises:
+    ValueError: if an invalid `method` is provided, or if `options` is
+      supplied without `method`.
+    TypeError: if `t` or `y0` has an invalid dtype.
+  """
+  if method is not None and method != 'dopri5':
+    raise ValueError('invalid method: %r' % method)
+
+  if options is None:
+    options = {}
+  elif method is None:
+    raise ValueError('cannot supply `options` without specifying `method`')
+
+  with ops.name_scope(name, 'odeint', [y0, t, rtol, atol]) as scope:
+    # TODO(shoyer): use nest.flatten (like tf.while_loop) to allow `y0` to be an
+    # arbitrarily nested tuple. This will help performance and usability by
+    # avoiding the need to pack/unpack in user functions.
+    y0 = ops.convert_to_tensor(y0, name='y0')
+    if not (y0.dtype.is_floating or y0.dtype.is_complex):
+      raise TypeError('`y0` must have a floating point or complex floating '
+                      'point dtype')
+
+    t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
+    if not t.dtype.is_floating:
+      raise TypeError('`t` must have a floating point dtype')
+
+    error_dtype = abs(y0).dtype
+    rtol = ops.convert_to_tensor(rtol, dtype=error_dtype, name='rtol')
+    atol = ops.convert_to_tensor(atol, dtype=error_dtype, name='atol')
+
+    return _dopri5(func, y0, t,
+                   rtol=rtol,
+                   atol=atol,
+                   full_output=full_output,
+                   name=scope,
+                   **options)
diff --git a/tensorflow/contrib/integrate/python/ops/odes_test.py b/tensorflow/contrib/integrate/python/ops/odes_test.py
new file mode 100644
index 00000000000..cb036bf05ac
--- /dev/null
+++ b/tensorflow/contrib/integrate/python/ops/odes_test.py
@@ -0,0 +1,232 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Tests for ODE solvers."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.contrib.integrate.python.ops import odes
+
+
+class OdeIntTest(tf.test.TestCase):
+
+  def setUp(self):
+    super(OdeIntTest, self).setUp()
+    # simple defaults (the solution is a sine wave)
+    matrix = tf.constant([[0, 1], [-1, 0]], dtype=tf.float64)
+    self.func = lambda y, t: tf.matmul(matrix, y)
+    self.y0 = np.array([[1.0], [0.0]])
+
+  def test_odeint_exp(self):
+    # Test odeint by an exponential function:
+    #   dy / dt = y,  y(0) = 1.0.
+    # Its analytical solution is y = exp(t).
+    func = lambda y, t: y
+    y0 = tf.constant(1.0, dtype=tf.float64)
+    t = np.linspace(0.0, 1.0, 11)
+    y_solved = tf.contrib.integrate.odeint(func, y0, t)
+    self.assertIn('odeint', y_solved.name)
+    self.assertEqual(y_solved.get_shape(), tf.TensorShape([11]))
+    with self.test_session() as sess:
+      y_solved = sess.run(y_solved)
+    y_true = np.exp(t)
+    self.assertAllClose(y_true, y_solved)
+
+  def test_odeint_complex(self):
+    # Test a complex, linear ODE:
+    #   dy / dt = k * y,  y(0) = 1.0.
+    # Its analytical solution is y = exp(k * t).
+    k = 1j - 0.1
+    func = lambda y, t: k * y
+    t = np.linspace(0.0, 1.0, 11)
+    y_solved = tf.contrib.integrate.odeint(func, 1.0 + 0.0j, t)
+    with self.test_session() as sess:
+      y_solved = sess.run(y_solved)
+    y_true = np.exp(k * t)
+    self.assertAllClose(y_true, y_solved)
+
+  def test_odeint_riccati(self):
+    # The Riccati equation is:
+    #   dy / dt = (y - t) ** 2 + 1.0,  y(0) = 0.5.
+    # Its analytical solution is y = 1.0 / (2.0 - t) + t.
+    func = lambda y, t: (y - t)**2 + 1.0
+    t = np.linspace(0.0, 1.0, 11)
+    y_solved = tf.contrib.integrate.odeint(func, np.float64(0.5), t)
+    with self.test_session() as sess:
+      y_solved = sess.run(y_solved)
+    y_true = 1.0 / (2.0 - t) + t
+    self.assertAllClose(y_true, y_solved)
+
+  def test_odeint_2d_linear(self):
+    # Solve the 2D linear differential equation:
+    #   dy1 / dt = 3.0 * y1 + 4.0 * y2,
+    #   dy2 / dt = -4.0 * y1 + 3.0 * y2,
+    #   y1(0) = 0.0,
+    #   y2(0) = 1.0.
+    # Its analytical solution is
+    #   y1 = sin(4.0 * t) * exp(3.0 * t),
+    #   y2 = cos(4.0 * t) * exp(3.0 * t).
+    matrix = tf.constant([[3.0, 4.0], [-4.0, 3.0]], dtype=tf.float64)
+    func = lambda y, t: tf.matmul(matrix, y)
+
+    y0 = tf.constant([[0.0], [1.0]], dtype=tf.float64)
+    t = np.linspace(0.0, 1.0, 11)
+
+    y_solved = tf.contrib.integrate.odeint(func, y0, t)
+    with self.test_session() as sess:
+      y_solved = sess.run(y_solved)
+
+    y_true = np.zeros((len(t), 2, 1))
+    y_true[:, 0, 0] = np.sin(4.0 * t) * np.exp(3.0 * t)
+    y_true[:, 1, 0] = np.cos(4.0 * t) * np.exp(3.0 * t)
+    self.assertAllClose(y_true, y_solved, atol=1e-5)
+
+  def test_odeint_higher_rank(self):
+    func = lambda y, t: y
+    y0 = tf.constant(1.0, dtype=tf.float64)
+    t = np.linspace(0.0, 1.0, 11)
+    for shape in [(), (1,), (1, 1)]:
+      expected_shape = (len(t),) + shape
+      y_solved = tf.contrib.integrate.odeint(func, tf.reshape(y0, shape), t)
+      self.assertEqual(y_solved.get_shape(), tf.TensorShape(expected_shape))
+      with self.test_session() as sess:
+        y_solved = sess.run(y_solved)
+        self.assertEqual(y_solved.shape, expected_shape)
+
+  def test_odeint_all_dtypes(self):
+    func = lambda y, t: y
+    t = np.linspace(0.0, 1.0, 11)
+    for y0_dtype in [tf.float32, tf.float64, tf.complex64, tf.complex128]:
+      for t_dtype in [tf.float32, tf.float64]:
+        y0 = tf.cast(1.0, y0_dtype)
+        y_solved = tf.contrib.integrate.odeint(func, y0, tf.cast(t, t_dtype))
+        with self.test_session() as sess:
+          y_solved = sess.run(y_solved)
+        expected = np.asarray(np.exp(t))
+        self.assertAllClose(y_solved, expected, rtol=1e-5)
+        self.assertEqual(tf.as_dtype(y_solved.dtype), y0_dtype)
+
+  def test_odeint_required_dtypes(self):
+    with self.assertRaisesRegexp(TypeError, '`y0` must have a floating point'):
+      tf.contrib.integrate.odeint(self.func, tf.cast(self.y0, tf.int32), [0, 1])
+
+    with self.assertRaisesRegexp(TypeError, '`t` must have a floating point'):
+      tf.contrib.integrate.odeint(self.func, self.y0, tf.cast([0, 1], tf.int32))
+
+  def test_odeint_runtime_errors(self):
+    with self.assertRaisesRegexp(
+        ValueError, 'cannot supply `options` without'):
+      tf.contrib.integrate.odeint(self.func, self.y0, [0, 1],
+                                  options={'first_step': 1.0})
+
+    y = tf.contrib.integrate.odeint(self.func, self.y0, [0, 1], method='dopri5',
+                                    options={'max_num_steps': 0})
+    with self.test_session() as sess:
+      with self.assertRaisesRegexp(
+          tf.errors.InvalidArgumentError, 'max_num_steps'):
+        sess.run(y)
+
+    y = tf.contrib.integrate.odeint(self.func, self.y0, [1, 0])
+    with self.test_session() as sess:
+      with self.assertRaisesRegexp(
+          tf.errors.InvalidArgumentError, 'monotonically increasing'):
+        sess.run(y)
+
+  def test_odeint_different_times(self):
+    # integration steps should be independent of the interpolation times
+    times0 = np.linspace(0, 10, num=11, dtype=float)
+    times1 = np.linspace(0, 10, num=101, dtype=float)
+
+    with self.test_session() as sess:
+      y_solved_0, info_0 = sess.run(
+          tf.contrib.integrate.odeint(
+              self.func, self.y0, times0, full_output=True))
+      y_solved_1, info_1 = sess.run(
+          tf.contrib.integrate.odeint(
+              self.func, self.y0, times1, full_output=True))
+
+    self.assertAllClose(y_solved_0, y_solved_1[::10])
+    self.assertEqual(info_0['num_func_evals'], info_1['num_func_evals'])
+    self.assertAllEqual(info_0['integrate_points'], info_1['integrate_points'])
+    self.assertAllEqual(info_0['error_ratio'], info_1['error_ratio'])
+
+  def test_odeint_5th_order_accuracy(self):
+    t = [0, 20]
+    kwargs = dict(full_output=True,
+                  method='dopri5',
+                  options=dict(max_num_steps=2000))
+    with self.test_session() as sess:
+      _, info_0 = sess.run(tf.contrib.integrate.odeint(
+          self.func, self.y0, t, rtol=0, atol=1e-6, **kwargs))
+      _, info_1 = sess.run(tf.contrib.integrate.odeint(
+          self.func, self.y0, t, rtol=0, atol=1e-9, **kwargs))
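+    # For a 5th order method the number of steps scales like atol ** (-1 / 5),
+    # so tightening atol by a factor of 1000 should multiply the number of
+    # integration points by roughly 1000 ** 0.2.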
+    self.assertAllClose(info_0['integrate_points'].size * 1000 ** 0.2,
+                        float(info_1['integrate_points'].size),
+                        rtol=0.01)
+
+
+class StepSizeTest(tf.test.TestCase):
+
+  def test_error_ratio_one(self):
+    new_step = odes._optimal_step_size(last_step=tf.constant(1.0),
+                                       error_ratio=tf.constant(1.0))
+    with self.test_session() as sess:
+      new_step = sess.run(new_step)
+    self.assertAllClose(new_step, 0.9)
+
+  def test_ifactor(self):
+    new_step = odes._optimal_step_size(last_step=tf.constant(1.0),
+                                       error_ratio=tf.constant(0.0))
+    with self.test_session() as sess:
+      new_step = sess.run(new_step)
+    self.assertAllClose(new_step, 10.0)
+
+  def test_dfactor(self):
+    new_step = odes._optimal_step_size(last_step=tf.constant(1.0),
+                                       error_ratio=tf.constant(1e6))
+    with self.test_session() as sess:
+      new_step = sess.run(new_step)
+    self.assertAllClose(new_step, 0.2)
+
+
+class InterpolationTest(tf.test.TestCase):
+
+  def test_5th_order_polynomial(self):
+    # this should be an exact fit
+    f = lambda x: x ** 4 + x ** 3 - 2 * x ** 2 + 4 * x + 5
+    f_prime = lambda x: 4 * x ** 3 + 3 * x ** 2 - 4 * x + 4
+    coeffs = odes._interp_fit(
+        f(0.0), f(10.0), f(5.0), f_prime(0.0), f_prime(10.0), 10.0)
+    times = np.linspace(0, 10, dtype=np.float32)
+    y_fit = tf.pack([odes._interp_evaluate(coeffs, 0.0, 10.0, t)
+                     for t in times])
+    y_expected = f(times)
+    with self.test_session() as sess:
+      y_actual = sess.run(y_fit)
+      self.assertAllClose(y_expected, y_actual)
+
+    # attempt interpolation outside bounds
+    y_invalid = odes._interp_evaluate(coeffs, 0.0, 10.0, 100.0)
+    with self.test_session() as sess:
+      with self.assertRaises(tf.errors.InvalidArgumentError):
+        sess.run(y_invalid)
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/tensorflow/contrib/layers/python/layers/optimizers.py b/tensorflow/contrib/layers/python/layers/optimizers.py
index 8c06202f47b..6cb7e91b73f 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers.py
@@ -258,10 +258,11 @@ def optimize_loss(loss,
         grad_values = gradient
 
       if grad_values is not None:
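+        # Variable names include an output suffix (e.g. "weights:0"), and ":"
+        # is not a valid character in a summary tag, so replace it.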
+        var_name = variable.name.replace(":", "_")
         if "gradients" in summaries:
-          summary.histogram("gradients/" + variable.name, grad_values)
+          summary.histogram("gradients/%s" % var_name, grad_values)
         if "gradient_norm" in summaries:
-          summary.scalar("gradient_norm/" + variable.name,
+          summary.scalar("gradient_norm/%s" % var_name,
                          clip_ops.global_norm([grad_values]))
 
     if clip_gradients is not None and "gradient_norm" in summaries:
diff --git a/tensorflow/contrib/learn/BUILD b/tensorflow/contrib/learn/BUILD
index 62d7bb77c98..b93089c9cb7 100644
--- a/tensorflow/contrib/learn/BUILD
+++ b/tensorflow/contrib/learn/BUILD
@@ -291,7 +291,9 @@ py_test(
     deps = [
         ":learn",
         "//tensorflow:tensorflow_py",
+        "//tensorflow/python:extra_py_tests_deps",
         "//tensorflow/python:framework_test_lib",
+        "//tensorflow/python:test_ops",
     ],
 )
 
diff --git a/tensorflow/contrib/learn/python/learn/estimators/__init__.py b/tensorflow/contrib/learn/python/learn/estimators/__init__.py
index b5b1dbb6355..cfe2fb15985 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/__init__.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/__init__.py
@@ -29,11 +29,11 @@ from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
 from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
 from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
 from tensorflow.contrib.learn.python.learn.estimators.estimator import ModeKeys
-from tensorflow.contrib.learn.python.learn.estimators.head import MetricKey
-from tensorflow.contrib.learn.python.learn.estimators.head import PredictionKey
 from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
 from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
 from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
+from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
+from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
 from tensorflow.contrib.learn.python.learn.estimators.random_forest import TensorForestEstimator
 from tensorflow.contrib.learn.python.learn.estimators.random_forest import TensorForestLossHook
 from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
index 57a98e419c6..3146a1e7c81 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
@@ -35,6 +35,7 @@ from tensorflow.contrib.learn.python.learn import trainable
 from tensorflow.contrib.learn.python.learn.estimators import composable_model
 from tensorflow.contrib.learn.python.learn.estimators import estimator
 from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
+from tensorflow.contrib.learn.python.learn.estimators import prediction_key
 from tensorflow.contrib.learn.python.learn.utils import export
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import control_flow_ops
@@ -747,13 +748,16 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
       Numpy array of predicted classes (or an iterable of predicted classes if
       as_iterable is True).
     """
-    preds = self._estimator.predict(x=x, input_fn=input_fn,
-                                    batch_size=batch_size,
-                                    outputs=[head_lib.PredictionKey.CLASSES],
-                                    as_iterable=as_iterable)
+    key = prediction_key.PredictionKey.CLASSES
+    preds = self._estimator.predict(
+        x=x,
+        input_fn=input_fn,
+        batch_size=batch_size,
+        outputs=[key],
+        as_iterable=as_iterable)
     if as_iterable:
-      return _as_iterable(preds, output=head_lib.PredictionKey.CLASSES)
-    return preds[head_lib.PredictionKey.CLASSES].reshape(-1)
+      return _as_iterable(preds, output=key)
+    return preds[key].reshape(-1)
 
   @deprecated_arg_values(
       estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
@@ -775,20 +779,22 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
       Numpy array of predicted probabilities (or an iterable of predicted
       probabilities if as_iterable is True).
     """
+    key = prediction_key.PredictionKey.PROBABILITIES
     preds = self._estimator.predict(
-        x=x, input_fn=input_fn,
+        x=x,
+        input_fn=input_fn,
         batch_size=batch_size,
-        outputs=[head_lib.PredictionKey.PROBABILITIES],
+        outputs=[key],
         as_iterable=as_iterable)
     if as_iterable:
-      return _as_iterable(preds, output=head_lib.PredictionKey.PROBABILITIES)
-    return preds[head_lib.PredictionKey.PROBABILITIES]
+      return _as_iterable(preds, output=key)
+    return preds[key]
 
   def _get_predict_ops(self, features):
     """See `Estimator` class."""
     # pylint: disable=protected-access
     return self._estimator._get_predict_ops(features)[
-        head_lib.PredictionKey.PROBABILITIES]
+        prediction_key.PredictionKey.PROBABILITIES]
 
   def get_variable_names(self):
     """Returns list of all variable names in this model.
@@ -826,9 +832,9 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
         input_fn=input_fn or default_input_fn,
         input_feature_key=input_feature_key,
         use_deprecated_input_fn=use_deprecated_input_fn,
-        signature_fn=(
-            signature_fn or export.classification_signature_fn_with_prob),
-        prediction_key=head_lib.PredictionKey.PROBABILITIES,
+        signature_fn=(signature_fn or
+                      export.classification_signature_fn_with_prob),
+        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
         default_batch_size=default_batch_size,
         exports_to_keep=exports_to_keep)
 
@@ -1041,10 +1047,11 @@ class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
         head=head,
         config=config,
         feature_engineering_fn=feature_engineering_fn,
-        default_prediction_key=head_lib.PredictionKey.SCORES,
+        default_prediction_key=prediction_key.PredictionKey.SCORES,
         enable_centered_bias=enable_centered_bias)
 
   def _get_predict_ops(self, features):
     """See base class."""
-    return super(DNNLinearCombinedRegressor, self)._get_predict_ops(features)[
-        head_lib.PredictionKey.SCORES]
+    return super(
+        DNNLinearCombinedRegressor,
+        self)._get_predict_ops(features)[prediction_key.PredictionKey.SCORES]
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator.py b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
index 145bdcf2ee8..c9d1377ce73 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
@@ -45,6 +45,7 @@ from tensorflow.contrib.learn.python.learn import metric_spec
 from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
 from tensorflow.contrib.learn.python.learn import trainable
 from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
+from tensorflow.contrib.learn.python.learn.estimators import metric_key
 from tensorflow.contrib.learn.python.learn.estimators import run_config
 from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
 from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
@@ -1108,8 +1109,9 @@ class Estimator(BaseEstimator):
 
     result = _make_metrics_ops(all_metrics, features, labels,
                                model_fn_ops.predictions)
-    if 'loss' not in result:
-      result['loss'] = metrics_lib.streaming_mean(model_fn_ops.loss)
+    if metric_key.MetricKey.LOSS not in result:
+      result[metric_key.MetricKey.LOSS] = metrics_lib.streaming_mean(
+          model_fn_ops.loss)
     return result
 
   def _get_predict_ops(self, features):
diff --git a/tensorflow/contrib/learn/python/learn/estimators/head.py b/tensorflow/contrib/learn/python/learn/estimators/head.py
index f3a4f409032..77e3067bbde 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/head.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/head.py
@@ -24,6 +24,8 @@ from tensorflow.contrib import losses
 from tensorflow.contrib import metrics as metrics_lib
 from tensorflow.contrib.learn.python.learn import metric_spec
 from tensorflow.contrib.learn.python.learn.estimators import estimator
+from tensorflow.contrib.learn.python.learn.estimators import metric_key
+from tensorflow.contrib.learn.python.learn.estimators import prediction_key
 from tensorflow.contrib.session_bundle import exporter
 from tensorflow.python import summary
 from tensorflow.python.framework import ops
@@ -388,17 +390,17 @@ class _RegressionHead(_Head):
   def _logits_to_prediction(self, logits=None):
     predictions = {}
     if self.logits_dimension == 1:
-      predictions[PredictionKey.SCORES] = array_ops.squeeze(
+      predictions[prediction_key.PredictionKey.SCORES] = array_ops.squeeze(
           logits, squeeze_dims=[1])
     else:
-      predictions[PredictionKey.SCORES] = logits
+      predictions[prediction_key.PredictionKey.SCORES] = logits
     return predictions
 
   # pylint: disable=undefined-variable
   def _create_signature_fn(self):
     def _regression_signature_fn(examples, unused_features, predictions):
       if isinstance(predictions, dict):
-        score = predictions[PredictionKey.SCORES]
+        score = predictions[prediction_key.PredictionKey.SCORES]
       else:
         score = predictions
 
@@ -409,11 +411,12 @@ class _RegressionHead(_Head):
     return _regression_signature_fn
 
   def _default_metric(self):
-    return {_head_prefixed(self._head_name, MetricKey.LOSS):
-            _weighted_average_loss_metric_spec(self._eval_loss_fn,
-                                               PredictionKey.SCORES,
-                                               self._label_name,
-                                               self._weight_column_name)}
+    return {_head_prefixed(self._head_name, metric_key.MetricKey.LOSS):
+            _weighted_average_loss_metric_spec(
+                self._eval_loss_fn,
+                prediction_key.PredictionKey.SCORES,
+                self._label_name,
+                self._weight_column_name)}
 
 
 class _MultiClassHead(_Head):
@@ -530,12 +533,16 @@ class _MultiClassHead(_Head):
     return self._logits_to_prediction(logits)
 
   def _logits_to_prediction(self, logits=None):
-    predictions = {PredictionKey.LOGITS: logits}
+    # pylint: disable=missing-docstring
+    predictions = {prediction_key.PredictionKey.LOGITS: logits}
     if self.logits_dimension == 1:
-      predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
+      predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
+          logits)
       logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
-    predictions[PredictionKey.PROBABILITIES] = nn.softmax(logits)
-    predictions[PredictionKey.CLASSES] = math_ops.argmax(logits, 1)
+    predictions[prediction_key.PredictionKey.PROBABILITIES] = nn.softmax(
+        logits)
+    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
+        logits, 1)
 
     return predictions
 
@@ -546,8 +553,9 @@ class _MultiClassHead(_Head):
       if isinstance(predictions, dict):
         default_signature = exporter.classification_signature(
             input_tensor=examples,
-            classes_tensor=predictions[PredictionKey.CLASSES],
-            scores_tensor=predictions[PredictionKey.PROBABILITIES])
+            classes_tensor=predictions[prediction_key.PredictionKey.CLASSES],
+            scores_tensor=predictions[
+                prediction_key.PredictionKey.PROBABILITIES])
       else:
         default_signature = exporter.classification_signature(
             input_tensor=examples,
@@ -558,44 +566,49 @@ class _MultiClassHead(_Head):
     return _classification_signature_fn
 
   def _default_metric(self):
-    metrics = {_head_prefixed(self._head_name, MetricKey.LOSS):
-               _weighted_average_loss_metric_spec(self._eval_loss_fn,
-                                                  PredictionKey.LOGITS,
-                                                  self._label_name,
-                                                  self._weight_column_name)}
+    metrics = {_head_prefixed(self._head_name, metric_key.MetricKey.LOSS):
+               _weighted_average_loss_metric_spec(
+                   self._eval_loss_fn,
+                   prediction_key.PredictionKey.LOGITS,
+                   self._label_name,
+                   self._weight_column_name)}
 
     # TODO(b/29366811): This currently results in both an "accuracy" and an
     # "accuracy/threshold_0.500000_mean" metric for binary classification.
-    metrics[_head_prefixed(self._head_name, MetricKey.ACCURACY)] = (
+    metrics[_head_prefixed(self._head_name, metric_key.MetricKey.ACCURACY)] = (
         metric_spec.MetricSpec(metrics_lib.streaming_accuracy,
-                               PredictionKey.CLASSES, self._label_name,
+                               prediction_key.PredictionKey.CLASSES,
+                               self._label_name,
                                self._weight_column_name))
     if self.logits_dimension == 1:
-      def _add_binary_metric(metric_key, metric_fn):
-        metrics[_head_prefixed(self._head_name, metric_key)] = (
+      def _add_binary_metric(key, metric_fn):
+        metrics[_head_prefixed(self._head_name, key)] = (
             metric_spec.MetricSpec(metric_fn,
-                                   PredictionKey.LOGISTIC,
+                                   prediction_key.PredictionKey.LOGISTIC,
                                    self._label_name,
                                    self._weight_column_name))
-      _add_binary_metric(MetricKey.PREDICTION_MEAN, _predictions_streaming_mean)
-      _add_binary_metric(MetricKey.LABEL_MEAN, _labels_streaming_mean)
+      _add_binary_metric(
+          metric_key.MetricKey.PREDICTION_MEAN, _predictions_streaming_mean)
+      _add_binary_metric(
+          metric_key.MetricKey.LABEL_MEAN, _labels_streaming_mean)
 
       # Also include the streaming mean of the label as an accuracy baseline, as
       # a reminder to users.
-      _add_binary_metric(MetricKey.ACCURACY_BASELINE, _labels_streaming_mean)
+      _add_binary_metric(
+          metric_key.MetricKey.ACCURACY_BASELINE, _labels_streaming_mean)
 
-      _add_binary_metric(MetricKey.AUC, _streaming_auc)
+      _add_binary_metric(metric_key.MetricKey.AUC, _streaming_auc)
 
       for threshold in self._thresholds:
-        _add_binary_metric(MetricKey.ACCURACY_MEAN % threshold,
+        _add_binary_metric(metric_key.MetricKey.ACCURACY_MEAN % threshold,
                            _accuracy_at_threshold(threshold))
         # Precision for positive examples.
-        _add_binary_metric(MetricKey.PRECISION_MEAN % threshold,
+        _add_binary_metric(metric_key.MetricKey.PRECISION_MEAN % threshold,
                            _streaming_at_threshold(
                                metrics_lib.streaming_precision_at_thresholds,
                                threshold),)
         # Recall for positive examples.
-        _add_binary_metric(MetricKey.RECALL_MEAN % threshold,
+        _add_binary_metric(metric_key.MetricKey.RECALL_MEAN % threshold,
                            _streaming_at_threshold(
                                metrics_lib.streaming_recall_at_thresholds,
                                threshold))
@@ -635,21 +648,24 @@ class _BinarySvmHead(_MultiClassHead):
 
   def _logits_to_prediction(self, logits=None):
     predictions = {}
-    predictions[PredictionKey.LOGITS] = logits
+    predictions[prediction_key.PredictionKey.LOGITS] = logits
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
-    predictions[PredictionKey.CLASSES] = math_ops.argmax(logits, 1)
+    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
+        logits, 1)
 
     return predictions
 
   def _default_metric(self):
-    metrics = {_head_prefixed(self._head_name, MetricKey.LOSS):
-               _weighted_average_loss_metric_spec(self._eval_loss_fn,
-                                                  PredictionKey.LOGITS,
-                                                  self._label_name,
-                                                  self._weight_column_name)}
-    metrics[_head_prefixed(self._head_name, MetricKey.ACCURACY)] = (
+    metrics = {_head_prefixed(self._head_name, metric_key.MetricKey.LOSS):
+               _weighted_average_loss_metric_spec(
+                   self._eval_loss_fn,
+                   prediction_key.PredictionKey.LOGITS,
+                   self._label_name,
+                   self._weight_column_name)}
+    metrics[_head_prefixed(self._head_name, metric_key.MetricKey.ACCURACY)] = (
         metric_spec.MetricSpec(metrics_lib.streaming_accuracy,
-                               PredictionKey.CLASSES, self._label_name,
+                               prediction_key.PredictionKey.CLASSES,
+                               self._label_name,
                                self._weight_column_name))
     # TODO(sibyl-vie3Poto): add more metrics relevant for svms.
     return metrics
@@ -674,12 +690,14 @@ class _MultiLabelHead(_MultiClassHead):
         thresholds=thresholds)
 
   def _logits_to_prediction(self, logits=None):
-    predictions = {PredictionKey.LOGITS: logits}
+    predictions = {prediction_key.PredictionKey.LOGITS: logits}
     if self.logits_dimension == 1:
-      predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
+      predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
+          logits)
       logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
-    predictions[PredictionKey.PROBABILITIES] = math_ops.sigmoid(logits)
-    predictions[PredictionKey.CLASSES] = math_ops.to_int64(
+    predictions[prediction_key.PredictionKey.PROBABILITIES] = math_ops.sigmoid(
+        logits)
+    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.to_int64(
         math_ops.greater(logits, 0))
     return predictions
 
@@ -849,23 +867,3 @@ def _streaming_at_threshold(streaming_metrics_fn, threshold):
     return array_ops.squeeze(precision_tensor), update_op
 
   return _streaming_metrics
-
-
-class PredictionKey(object):
-  CLASSES = "classes"
-  PROBABILITIES = "probabilities"
-  LOGITS = "logits"
-  LOGISTIC = "logistic"
-  SCORES = "scores"
-
-
-class MetricKey(object):
-  LOSS = "loss"
-  AUC = "auc"
-  PREDICTION_MEAN = "labels/prediction_mean"
-  LABEL_MEAN = "labels/actual_label_mean"
-  ACCURACY = "accuracy"
-  ACCURACY_BASELINE = "accuracy/baseline_label_mean"
-  ACCURACY_MEAN = "accuracy/threshold_%f_mean"
-  PRECISION_MEAN = "precision/positive_threshold_%f_mean"
-  RECALL_MEAN = "recall/positive_threshold_%f_mean"
diff --git a/tensorflow/contrib/learn/python/learn/estimators/linear.py b/tensorflow/contrib/learn/python/learn/estimators/linear.py
index 4e25b12feb6..8d887f20c5e 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/linear.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/linear.py
@@ -32,6 +32,7 @@ from tensorflow.contrib.learn.python.learn import evaluable
 from tensorflow.contrib.learn.python.learn import trainable
 from tensorflow.contrib.learn.python.learn.estimators import estimator
 from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
+from tensorflow.contrib.learn.python.learn.estimators import prediction_key
 from tensorflow.contrib.learn.python.learn.utils import export
 from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
 from tensorflow.python.framework import dtypes
@@ -267,21 +268,18 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
   Example:
 
   ```python
-  education = sparse_column_with_hash_bucket(column_name="education",
-                                             hash_bucket_size=1000)
-  occupation = sparse_column_with_hash_bucket(column_name="occupation",
-                                              hash_bucket_size=1000)
+  sparse_column_a = sparse_column_with_hash_bucket(...)
+  sparse_column_b = sparse_column_with_hash_bucket(...)
 
-  education_x_occupation = crossed_column(columns=[education, occupation],
-                                          hash_bucket_size=10000)
+  sparse_feature_a_x_sparse_feature_b = crossed_column(...)
 
   # Estimator using the default optimizer.
   estimator = LinearClassifier(
-      feature_columns=[occupation, education_x_occupation])
+      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
 
   # Or estimator using the FTRL optimizer with regularization.
   estimator = LinearClassifier(
-      feature_columns=[occupation, education_x_occupation],
+      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
       optimizer=tf.train.FtrlOptimizer(
         learning_rate=0.1,
         l1_regularization_strength=0.001
@@ -289,7 +287,7 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
 
   # Or estimator using the SDCAOptimizer.
   estimator = LinearClassifier(
-     feature_columns=[occupation, education_x_occupation],
+     feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id',
        num_loss_partitions=...,
@@ -465,13 +463,16 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
       as_iterable=False)
   def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
     """Runs inference to determine the predicted class."""
-    preds = self._estimator.predict(x=x, input_fn=input_fn,
-                                    batch_size=batch_size,
-                                    outputs=[head_lib.PredictionKey.CLASSES],
-                                    as_iterable=as_iterable)
+    key = prediction_key.PredictionKey.CLASSES
+    preds = self._estimator.predict(
+        x=x,
+        input_fn=input_fn,
+        batch_size=batch_size,
+        outputs=[key],
+        as_iterable=as_iterable)
     if as_iterable:
-      return _as_iterable(preds, output=head_lib.PredictionKey.CLASSES)
-    return preds[head_lib.PredictionKey.CLASSES]
+      return _as_iterable(preds, output=key)
+    return preds[key]
 
   @deprecated_arg_values(
       estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
@@ -479,14 +480,16 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
   def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None,
                     as_iterable=True):
     """Runs inference to determine the class probability predictions."""
-    preds = self._estimator.predict(x=x, input_fn=input_fn,
-                                    batch_size=batch_size,
-                                    outputs=[
-                                        head_lib.PredictionKey.PROBABILITIES],
-                                    as_iterable=as_iterable)
+    key = prediction_key.PredictionKey.PROBABILITIES
+    preds = self._estimator.predict(
+        x=x,
+        input_fn=input_fn,
+        batch_size=batch_size,
+        outputs=[key],
+        as_iterable=as_iterable)
     if as_iterable:
-      return _as_iterable(preds, output=head_lib.PredictionKey.PROBABILITIES)
-    return preds[head_lib.PredictionKey.PROBABILITIES]
+      return _as_iterable(preds, output=key)
+    return preds[key]
 
   def get_variable_names(self):
     return self._estimator.get_variable_names()
@@ -512,9 +515,9 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
         input_fn=input_fn or default_input_fn,
         input_feature_key=input_feature_key,
         use_deprecated_input_fn=use_deprecated_input_fn,
-        signature_fn=(
-            signature_fn or export.classification_signature_fn_with_prob),
-        prediction_key=head_lib.PredictionKey.PROBABILITIES,
+        signature_fn=(signature_fn or
+                      export.classification_signature_fn_with_prob),
+        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
         default_batch_size=default_batch_size,
         exports_to_keep=exports_to_keep)
 
@@ -561,16 +564,13 @@ class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
   Example:
 
   ```python
-  education = sparse_column_with_hash_bucket(column_name="education",
-                                             hash_bucket_size=1000)
-  occupation = sparse_column_with_hash_bucket(column_name="occupation",
-                                              hash_bucket_size=1000)
+  sparse_column_a = sparse_column_with_hash_bucket(...)
+  sparse_column_b = sparse_column_with_hash_bucket(...)
 
-  education_x_occupation = crossed_column(columns=[education, occupation],
-                                          hash_bucket_size=10000)
+  sparse_feature_a_x_sparse_feature_b = crossed_column(...)
 
   estimator = LinearRegressor(
-      feature_columns=[occupation, education_x_occupation])
+      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
 
   # Input builders
   def input_fn_train(): # returns x, y
@@ -731,13 +731,16 @@ class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
       as_iterable=False)
   def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
     """Runs inference to determine the predicted class."""
-    preds = self._estimator.predict(x=x, input_fn=input_fn,
-                                    batch_size=batch_size,
-                                    outputs=[head_lib.PredictionKey.SCORES],
-                                    as_iterable=as_iterable)
+    key = prediction_key.PredictionKey.SCORES
+    preds = self._estimator.predict(
+        x=x,
+        input_fn=input_fn,
+        batch_size=batch_size,
+        outputs=[key],
+        as_iterable=as_iterable)
     if as_iterable:
-      return _as_iterable(preds, output=head_lib.PredictionKey.SCORES)
-    return preds[head_lib.PredictionKey.SCORES]
+      return _as_iterable(preds, output=key)
+    return preds[key]
 
   def get_variable_names(self):
     return self._estimator.get_variable_names()
@@ -764,7 +767,7 @@ class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
         input_feature_key=input_feature_key,
         use_deprecated_input_fn=use_deprecated_input_fn,
         signature_fn=(signature_fn or export.regression_signature_fn),
-        prediction_key=head_lib.PredictionKey.SCORES,
+        prediction_key=prediction_key.PredictionKey.SCORES,
         default_batch_size=default_batch_size,
         exports_to_keep=exports_to_keep)
 
diff --git a/tensorflow/contrib/learn/python/learn/estimators/metric_key.py b/tensorflow/contrib/learn/python/learn/estimators/metric_key.py
new file mode 100644
index 00000000000..8df08e507fe
--- /dev/null
+++ b/tensorflow/contrib/learn/python/learn/estimators/metric_key.py
@@ -0,0 +1,30 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Enum for metric keys."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+class MetricKey(object):
+  LOSS = "loss"
+  AUC = "auc"
+  PREDICTION_MEAN = "labels/prediction_mean"
+  LABEL_MEAN = "labels/actual_label_mean"
+  ACCURACY = "accuracy"
+  ACCURACY_BASELINE = "accuracy/baseline_label_mean"
+  ACCURACY_MEAN = "accuracy/threshold_%f_mean"
+  PRECISION_MEAN = "precision/positive_threshold_%f_mean"
+  RECALL_MEAN = "recall/positive_threshold_%f_mean"
diff --git a/tensorflow/contrib/learn/python/learn/estimators/prediction_key.py b/tensorflow/contrib/learn/python/learn/estimators/prediction_key.py
new file mode 100644
index 00000000000..a9c0c329584
--- /dev/null
+++ b/tensorflow/contrib/learn/python/learn/estimators/prediction_key.py
@@ -0,0 +1,26 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Enum for model prediction keys."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+class PredictionKey(object):
+  CLASSES = "classes"
+  PROBABILITIES = "probabilities"
+  LOGITS = "logits"
+  LOGISTIC = "logistic"
+  SCORES = "scores"
diff --git a/tensorflow/contrib/learn/python/learn/estimators/svm.py b/tensorflow/contrib/learn/python/learn/estimators/svm.py
index 6fd675e1b8b..0af33baeeb5 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/svm.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/svm.py
@@ -30,6 +30,7 @@ from tensorflow.contrib.learn.python.learn import trainable
 from tensorflow.contrib.learn.python.learn.estimators import estimator
 from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
 from tensorflow.contrib.learn.python.learn.estimators import linear
+from tensorflow.contrib.learn.python.learn.estimators import prediction_key
 from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
 
 
@@ -188,13 +189,16 @@ class SVM(trainable.Trainable, evaluable.Evaluable):
       as_iterable=False)
   def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
     """Runs inference to determine the predicted class."""
-    preds = self._estimator.predict(x=x, input_fn=input_fn,
-                                    batch_size=batch_size,
-                                    outputs=[head_lib.PredictionKey.CLASSES],
-                                    as_iterable=as_iterable)
+    key = prediction_key.PredictionKey.CLASSES
+    preds = self._estimator.predict(
+        x=x,
+        input_fn=input_fn,
+        batch_size=batch_size,
+        outputs=[key],
+        as_iterable=as_iterable)
     if as_iterable:
-      return _as_iterable(preds, output=head_lib.PredictionKey.CLASSES)
-    return preds[head_lib.PredictionKey.CLASSES]
+      return _as_iterable(preds, output=key)
+    return preds[key]
 
   @deprecated_arg_values(
       estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
@@ -202,14 +206,16 @@ class SVM(trainable.Trainable, evaluable.Evaluable):
   def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None,
                     as_iterable=True):
     """Runs inference to determine the class probability predictions."""
-    preds = self._estimator.predict(x=x, input_fn=input_fn,
-                                    batch_size=batch_size,
-                                    outputs=[
-                                        head_lib.PredictionKey.PROBABILITIES],
-                                    as_iterable=as_iterable)
+    key = prediction_key.PredictionKey.PROBABILITIES
+    preds = self._estimator.predict(
+        x=x,
+        input_fn=input_fn,
+        batch_size=batch_size,
+        outputs=[key],
+        as_iterable=as_iterable)
     if as_iterable:
-      return _as_iterable(preds, output=head_lib.PredictionKey.PROBABILITIES)
-    return preds[head_lib.PredictionKey.PROBABILITIES]
+      return _as_iterable(preds, output=key)
+    return preds[key]
   # pylint: enable=protected-access
 
   def get_variable_names(self):
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions.py b/tensorflow/contrib/learn/python/learn/graph_actions.py
index 0c5152b553f..baee707a5f6 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions.py
@@ -40,6 +40,7 @@ from tensorflow.python.framework import ops
 from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import data_flow_ops
 from tensorflow.python.ops import logging_ops
+from tensorflow.python.ops import resources
 from tensorflow.python.ops import variables
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.training import basic_session_run_hooks
@@ -77,7 +78,8 @@ def get_summary_writer(logdir):
 
 
 def _make_saver(graph, keep_checkpoint_max=5):
-  vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES)
+  vars_to_save = (graph.get_collection(ops.GraphKeys.VARIABLES) +
+                  graph.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
   if vars_to_save:
     return tf_saver.Saver(vars_to_save,
                           sharded=True,
@@ -846,9 +848,11 @@ def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
     raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
 
   graph = contrib_ops.get_graph_from_inputs(output_dict.values())
-
   with graph.as_default() as g:
     with tf_session.Session('') as session:
+      session.run(
+          resources.initialize_resources(resources.shared_resources() +
+                                         resources.local_resources()))
       if restore_checkpoint_path:
         _restore_from_checkpoint(session, g, restore_checkpoint_path)
       else:
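
Outside of `run_feeds_iter`, the same initialization pattern can be written directly; a minimal sketch, assuming the graph registers resources via `resources.register_resource` elsewhere:

```python
import tensorflow as tf
from tensorflow.python.ops import resources

with tf.Graph().as_default():
  # ... ops that register shared or local resources go here ...
  with tf.Session() as session:
    # Initialize registered resources up front, exactly as run_feeds_iter
    # now does before restoring or initializing variables.
    session.run(resources.initialize_resources(
        resources.shared_resources() + resources.local_resources()))
```
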
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions_test.py b/tensorflow/contrib/learn/python/learn/graph_actions_test.py
index 9a7306ad4ad..c8c73d5de52 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions_test.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions_test.py
@@ -28,6 +28,8 @@ from tensorflow.contrib.learn.python import learn
 from tensorflow.contrib.learn.python.learn.monitors import BaseMonitor
 from tensorflow.python.framework import meta_graph
 from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_ops
+from tensorflow.python.ops import resources
 from tensorflow.python.ops import variables
 
 
@@ -194,6 +196,19 @@ class GraphActionsTest(tf.test.TestCase):
         pass
       self.assertTrue(request_stop.called)
 
+  def test_run_feeds_iter_calls_resources_init(self):
+    with tf.Graph().as_default() as g:
+      in0, _, _ = self._build_inference_graph()
+      handle = test_ops.stub_resource_handle_op(container='a', shared_name='b')
+      resources.register_resource(
+          handle=handle,
+          create_op=test_ops.resource_create_op(handle),
+          is_initialized_op=test_ops.resource_initialized_op(handle))
+
+      for _ in learn.graph_actions.run_feeds_iter({'in0': in0},
+                                                  feed_dicts=[{}]):
+        self.assertTrue(test_ops.resource_initialized_op(handle).eval())
+
   def test_infer_different_default_graph(self):
     with self.test_session():
       self._assert_ckpt(self._output_dir, False)
diff --git a/tensorflow/contrib/makefile/README.md b/tensorflow/contrib/makefile/README.md
index 25c03d8bf74..03a745ad4c3 100644
--- a/tensorflow/contrib/makefile/README.md
+++ b/tensorflow/contrib/makefile/README.md
@@ -1,7 +1,8 @@
 ### TensorFlow Makefile
 
 The recommended way to build TensorFlow from source is using the Bazel
-open-source build system. Sometimes this isn't possible.
+open-source build system. Sometimes this isn't possible. For example,
+if you are building for iOS, you currently need to use the Makefile.
 
  - The build system may not have the RAM or processing power to support Bazel.
  - Bazel or its dependencies may not be available.
diff --git a/tensorflow/contrib/makefile/tf_op_files.txt b/tensorflow/contrib/makefile/tf_op_files.txt
index ed5d6539b3b..cbf73a7a16f 100644
--- a/tensorflow/contrib/makefile/tf_op_files.txt
+++ b/tensorflow/contrib/makefile/tf_op_files.txt
@@ -43,6 +43,13 @@ tensorflow/core/kernels/sequence_ops.cc
 tensorflow/core/kernels/sendrecv_ops.cc
 tensorflow/core/kernels/scatter_op.cc
 tensorflow/core/kernels/scatter_functor.cc
+tensorflow/core/kernels/scatter_nd_op_cpu_impl_0.cc
+tensorflow/core/kernels/scatter_nd_op_cpu_impl_1.cc
+tensorflow/core/kernels/scatter_nd_op_cpu_impl_2.cc
+tensorflow/core/kernels/scatter_nd_op_cpu_impl_3.cc
+tensorflow/core/kernels/scatter_nd_op_cpu_impl_4.cc
+tensorflow/core/kernels/scatter_nd_op_cpu_impl_5.cc
+tensorflow/core/kernels/scatter_nd_op.cc
 tensorflow/core/kernels/save_restore_tensor.cc
 tensorflow/core/kernels/save_restore_v2_ops.cc
 tensorflow/core/kernels/save_op.cc
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops.py b/tensorflow/contrib/metrics/python/ops/metric_ops.py
index 34017c22931..90b56b6a971 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops.py
@@ -763,7 +763,12 @@ def streaming_auc(predictions, labels, weights=None, num_thresholds=200,
   computes the area under a discretized curve of precision versus recall values
   (computed using the aforementioned variables). The `num_thresholds` variable
   controls the degree of discretization with larger numbers of thresholds more
-  closely approximating the true AUC.
+  closely approximating the true AUC. The quality of the approximation may vary
+  dramatically depending on `num_thresholds`.
+
+  For best results, `predictions` should be distributed approximately uniformly
+  in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
+  approximation may be poor if this is not the case.
 
   For estimation of the metric over a stream of data, the function creates an
   `update_op` operation that updates these variables and returns the `auc`.
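
As a usage illustration (a hedged sketch; `predictions` and `labels` are assumed to be float tensors of matching shape, with `predictions` in [0, 1]):

```python
import tensorflow as tf

# A finer discretization: more thresholds approximate the true AUC more
# closely, at the cost of additional accumulator state per threshold.
auc, update_op = tf.contrib.metrics.streaming_auc(
    predictions, labels, num_thresholds=1000)
```
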
diff --git a/tensorflow/contrib/training/BUILD b/tensorflow/contrib/training/BUILD
index ff462bdee3c..9c116266a2d 100644
--- a/tensorflow/contrib/training/BUILD
+++ b/tensorflow/contrib/training/BUILD
@@ -15,9 +15,16 @@ py_library(
         "python/training/resample.py",
         "python/training/sampling_ops.py",
         "python/training/sequence_queueing_state_saver.py",
+        "python/training/training.py",
     ],
     srcs_version = "PY2AND3",
     visibility = ["//visibility:public"],
+    deps = [
+        "//tensorflow/python:framework",
+        "//tensorflow/python:ops",
+        "//tensorflow/python:platform",
+        "//tensorflow/python:training",
+    ],
 )
 
 py_test(
@@ -98,6 +105,19 @@ py_test(
     ],
 )
 
+py_test(
+    name = "training_test",
+    size = "large",
+    srcs = ["python/training/training_test.py"],
+    shard_count = 3,
+    srcs_version = "PY2AND3",
+    deps = [
+        ":training_py",
+        "//tensorflow:tensorflow_py",
+        "//tensorflow/python:framework_test_lib",
+    ],
+)
+
 filegroup(
     name = "all_files",
     srcs = glob(
diff --git a/tensorflow/contrib/training/__init__.py b/tensorflow/contrib/training/__init__.py
index d2a6368d785..721f8cdf750 100644
--- a/tensorflow/contrib/training/__init__.py
+++ b/tensorflow/contrib/training/__init__.py
@@ -70,6 +70,11 @@ from tensorflow.contrib.training.python.training.bucket_ops import *
 from tensorflow.contrib.training.python.training.resample import *
 from tensorflow.contrib.training.python.training.sampling_ops import *
 from tensorflow.contrib.training.python.training.sequence_queueing_state_saver import *
+from tensorflow.contrib.training.python.training.training import add_gradients_summaries
+from tensorflow.contrib.training.python.training.training import clip_gradient_norms
+from tensorflow.contrib.training.python.training.training import create_train_op
+from tensorflow.contrib.training.python.training.training import multiply_gradients
+from tensorflow.contrib.training.python.training.training import train
 from tensorflow.python.util.all_util import make_all
 
 __all__ = make_all(__name__)
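
Together, the newly exported symbols compose as follows; a minimal sketch, assuming `total_loss` is a scalar loss tensor already built in the default graph:

```python
import tensorflow as tf

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)

# Run the loop until the hook requests a stop; returns the final loss.
loss = tf.contrib.training.train(
    train_op, logdir='/tmp/train_logs',
    hooks=[tf.train.StopAtStepHook(num_steps=100)])
```
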
diff --git a/tensorflow/contrib/training/python/training/training.py b/tensorflow/contrib/training/python/training/training.py
new file mode 100644
index 00000000000..e65ef6ba119
--- /dev/null
+++ b/tensorflow/contrib/training/python/training/training.py
@@ -0,0 +1,316 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Contains various routines and helper functions for training models.
+
+TODO(nsilberman): Port documentation.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.framework.python.ops import variables
+from tensorflow.python import summary
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import clip_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import variables as tf_variables
+from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training import basic_session_run_hooks
+from tensorflow.python.training import monitored_session
+from tensorflow.python.training import optimizer as tf_optimizer
+
+# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and
+# multiply_gradients into contrib/summaries and contrib/optimizers.py
+__all__ = [
+    'add_gradients_summaries',
+    'clip_gradient_norms',
+    'create_train_op',
+    'multiply_gradients',
+    'train',
+]
+
+
+def add_gradients_summaries(grads_and_vars):
+  """Add summaries to gradients.
+
+  Args:
+    grads_and_vars: A list of gradient to variable pairs (tuples).
+
+  Returns:
+    The list of created summaries.
+  """
+  summaries = []
+  for grad, var in grads_and_vars:
+    if grad is not None:
+      if isinstance(grad, ops.IndexedSlices):
+        grad_values = grad.values
+      else:
+        grad_values = grad
+      summaries.append(summary.histogram_summary(
+          var.op.name + ':gradient', grad_values))
+      summaries.append(summary.histogram_summary(
+          var.op.name + ':gradient_norm', clip_ops.global_norm([grad_values])))
+    else:
+      logging.info('Var %s has no gradient', var.op.name)
+
+  return summaries
+
+
+def clip_gradient_norms(gradients_to_variables, max_norm):
+  """Clips the gradients by the given value.
+
+  Args:
+    gradients_to_variables: A list of gradient to variable pairs (tuples).
+    max_norm: The maximum norm value.
+
+  Returns:
+    A list of clipped gradient to variable pairs.
+  """
+  clipped_grads_and_vars = []
+  for grad, var in gradients_to_variables:
+    if grad is not None:
+      if isinstance(grad, ops.IndexedSlices):
+        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
+        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
+      else:
+        grad = clip_ops.clip_by_norm(grad, max_norm)
+    clipped_grads_and_vars.append((grad, var))
+  return clipped_grads_and_vars
+
+
+def multiply_gradients(grads_and_vars, gradient_multipliers):
+  """Multiply specified gradients.
+
+  Args:
+    grads_and_vars: A list of gradient to variable pairs (tuples).
+    gradient_multipliers: A map from either `Variable` objects or `Variable`
+      op names to the coefficient by which the associated gradient should be
+      scaled.
+
+  Returns:
+    The updated list of gradient to variable pairs.
+
+  Raises:
+    ValueError: If `grads_and_vars` is not a list, or if `gradient_multipliers`
+      is empty, `None`, or not a dictionary.
+  """
+  if not isinstance(grads_and_vars, list):
+    raise ValueError('`grads_and_vars` must be a list.')
+  if not gradient_multipliers:
+    raise ValueError('`gradient_multipliers` is empty.')
+  if not isinstance(gradient_multipliers, dict):
+    raise ValueError('`gradient_multipliers` must be a dict.')
+
+  multiplied_grads_and_vars = []
+  for grad, var in grads_and_vars:
+    if var in gradient_multipliers or var.op.name in gradient_multipliers:
+      key = var if var in gradient_multipliers else var.op.name
+      if grad is None:
+        raise ValueError('Requested a multiplier for a `None` gradient.')
+
+      if isinstance(grad, ops.IndexedSlices):
+        tmp = grad.values * constant_op.constant(
+            gradient_multipliers[key], dtype=grad.dtype)
+        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
+      else:
+        grad *= constant_op.constant(
+            gradient_multipliers[key], dtype=grad.dtype)
+    multiplied_grads_and_vars.append((grad, var))
+  return multiplied_grads_and_vars
+
+
+def create_train_op(total_loss,
+                    optimizer,
+                    global_step=None,
+                    update_ops=None,
+                    variables_to_train=None,
+                    transform_grads_fn=None,
+                    summarize_gradients=False,
+                    gate_gradients=tf_optimizer.Optimizer.GATE_OP,
+                    aggregation_method=None,
+                    colocate_gradients_with_ops=False):
+  """Creates an `Operation` that evaluates the gradients and returns the loss.
+
+  Args:
+    total_loss: A `Tensor` representing the total loss.
+    optimizer: A tf.Optimizer to use for computing the gradients.
+    global_step: A `Tensor` representing the global step variable. If left as
+      `None`, then `tf.contrib.framework.get_or_create_global_step()` is used.
+    update_ops: An optional list of updates to execute. If `update_ops` is
+      `None`, then the update ops are set to the contents of the
+      `tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
+      it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
+      a warning will be displayed.
+    variables_to_train: An optional list of variables to train. If `None`, it
+      defaults to all variables in `tf.trainable_variables()`.
+    transform_grads_fn: A function which takes a single argument, a list of
+      gradient to variable pairs (tuples), performs any requested gradient
+      updates, such as gradient clipping or multipliers, and returns the updated
+      list.
+    summarize_gradients: Whether or not to add summaries for each gradient.
+    gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
+    aggregation_method: Specifies the method used to combine gradient terms.
+      Valid values are defined in the class `AggregationMethod`.
+    colocate_gradients_with_ops: Whether or not to try colocating the gradients
+      with the ops that generated them.
+
+  Returns:
+    A `Tensor` that, when evaluated, computes the gradients and returns the
+      total loss value.
+  """
+  if global_step is None:
+    global_step = variables.get_or_create_global_step()
+
+  # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
+  global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
+  if update_ops is None:
+    update_ops = global_update_ops
+  else:
+    update_ops = set(update_ops)
+  if not global_update_ops.issubset(update_ops):
+    logging.warning('update_ops in create_train_op does not contain all the '
+                    'update_ops in GraphKeys.UPDATE_OPS')
+
+  # Make sure update_ops are computed before total_loss.
+  if update_ops:
+    with ops.control_dependencies(update_ops):
+      barrier = control_flow_ops.no_op(name='update_barrier')
+    total_loss = control_flow_ops.with_dependencies([barrier], total_loss)
+
+  if variables_to_train is None:
+    # Default to tf.trainable_variables()
+    variables_to_train = tf_variables.trainable_variables()
+  else:
+    # Make sure that variables_to_train are in tf.trainable_variables()
+    for v in variables_to_train:
+      assert v in tf_variables.trainable_variables()
+
+  assert variables_to_train
+
+  # Create the gradients. Note that apply_gradients adds the gradient
+  # computation to the current graph.
+  grads = optimizer.compute_gradients(
+      total_loss,
+      variables_to_train,
+      gate_gradients=gate_gradients,
+      aggregation_method=aggregation_method,
+      colocate_gradients_with_ops=colocate_gradients_with_ops)
+
+  if transform_grads_fn:
+    grads = transform_grads_fn(grads)
+
+  # Summarize gradients.
+  if summarize_gradients:
+    with ops.name_scope('summarize_grads'):
+      add_gradients_summaries(grads)
+
+  # Create gradient updates.
+  grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
+
+  with ops.name_scope('train_op'):
+    # Make sure total_loss is valid.
+    total_loss = array_ops.check_numerics(total_loss,
+                                          'LossTensor is inf or nan')
+
+    # Ensure the train_tensor computes grad_updates.
+    return control_flow_ops.with_dependencies([grad_updates], total_loss)
+
+
+def train(
+    train_op,
+    logdir,
+    master='',
+    is_chief=True,
+    scaffold=None,
+    hooks=None,
+    chief_only_hooks=None,
+    save_checkpoint_secs=600,
+    save_summaries_steps=100,
+    config=None):
+  """Runs the training loop.
+
+  Args:
+    train_op: A `Tensor` that, when executed, will apply the gradients and
+      return the loss value.
+    logdir: The directory where the graph and checkpoints are saved.
+    master: The URL of the master.
+    is_chief: Specifies whether or not the training is being run by the primary
+      replica during replica training.
+    scaffold: A `tf.train.Scaffold` instance.
+    hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
+      training loop.
+    chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run
+      inside the training loop for the chief trainer only.
+    save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
+      using a default checkpoint saver. If `save_checkpoint_secs` is set to
+      `None`, then the default checkpoint saver isn't used.
+    save_summaries_steps: The frequency, in number of global steps, that the
+      summaries are written to disk using a default summary saver. If
+      `save_summaries_steps` is set to `None`, then the default summary saver
+      isn't used.
+    config: An instance of `tf.ConfigProto`.
+
+  Returns:
+    The value of the loss function after training.
+
+  Raises:
+    ValueError: If `logdir` is `None` while either `save_checkpoint_secs` or
+      `save_summaries_steps` is set.
+  """
+  # TODO(nsilberman): move this logic into monitored_session.py
+  scaffold = scaffold or monitored_session.Scaffold()
+
+  hooks = hooks or []
+
+  if is_chief:
+    session_creator = monitored_session.ChiefSessionCreator(
+        scaffold=scaffold,
+        checkpoint_dir=logdir,
+        master=master,
+        config=config)
+
+    if chief_only_hooks:
+      hooks.extend(chief_only_hooks)
+
+    hooks.append(basic_session_run_hooks.StepCounterHook(
+        output_dir=logdir))
+
+    if save_summaries_steps:
+      if logdir is None:
+        raise ValueError(
+            'logdir cannot be None when save_summaries_steps is set')
+      hooks.append(basic_session_run_hooks.SummarySaverHook(
+          scaffold=scaffold,
+          save_steps=save_summaries_steps,
+          output_dir=logdir))
+
+    if save_checkpoint_secs:
+      if logdir is None:
+        raise ValueError(
+            'logdir cannot be None when save_checkpoint_secs is set')
+      hooks.append(basic_session_run_hooks.CheckpointSaverHook(
+          logdir, save_secs=save_checkpoint_secs, scaffold=scaffold))
+  else:
+    session_creator = monitored_session.WorkerSessionCreator(
+        scaffold=scaffold, master=master, config=config)
+
+  with monitored_session.MonitoredSession(
+      session_creator=session_creator, hooks=hooks) as session:
+    loss = None
+    while not session.should_stop():
+      loss = session.run(train_op)
+  return loss
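
The gradient transforms above are designed to be chained through `transform_grads_fn`; a hedged sketch, assuming `total_loss` and `optimizer` exist and that a trainable variable has the op name 'weights':

```python
import tensorflow as tf

def transform_grads_fn(grads_and_vars):
  # First cap each gradient's norm, then scale the gradient of 'weights'.
  grads_and_vars = tf.contrib.training.clip_gradient_norms(
      grads_and_vars, max_norm=10.0)
  return tf.contrib.training.multiply_gradients(
      grads_and_vars, {'weights': 0.1})

train_op = tf.contrib.training.create_train_op(
    total_loss, optimizer, transform_grads_fn=transform_grads_fn)
```
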
diff --git a/tensorflow/contrib/training/python/training/training_test.py b/tensorflow/contrib/training/python/training/training_test.py
new file mode 100644
index 00000000000..81de828a803
--- /dev/null
+++ b/tensorflow/contrib/training/python/training/training_test.py
@@ -0,0 +1,514 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tf.contrib.training.training."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+import numpy as np
+import tensorflow as tf
+
+
+def logistic_classifier(inputs):
+  return tf.contrib.layers.fully_connected(
+      inputs, 1, activation_fn=tf.sigmoid)
+
+
+def batchnorm_classifier(inputs):
+  inputs = tf.contrib.layers.batch_norm(inputs, decay=0.1)
+  return tf.contrib.layers.fully_connected(inputs, 1, activation_fn=tf.sigmoid)
+
+
+class CreateTrainOpTest(tf.test.TestCase):
+
+  def setUp(self):
+    np.random.seed(0)
+
+    # Create an easy training set:
+    self._inputs = np.random.rand(16, 4).astype(np.float32)
+    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
+
+  def testUseUpdateOps(self):
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+      tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+      expected_mean = np.mean(self._inputs, axis=(0))
+      expected_var = np.var(self._inputs, axis=(0))
+
+      tf_predictions = batchnorm_classifier(tf_inputs)
+      tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+      total_loss = tf.contrib.losses.get_total_loss()
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+
+      train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
+
+      moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
+      moving_variance = tf.contrib.framework.get_variables_by_name(
+          'moving_variance')[0]
+
+      with tf.Session() as sess:
+        # Initialize all variables
+        sess.run(tf.initialize_all_variables())
+        mean, variance = sess.run([moving_mean, moving_variance])
+        # After initialization moving_mean == 0 and moving_variance == 1.
+        self.assertAllClose(mean, [0] * 4)
+        self.assertAllClose(variance, [1] * 4)
+
+        for _ in range(10):
+          sess.run([train_op])
+        mean = moving_mean.eval()
+        variance = moving_variance.eval()
+        # After 10 updates with decay 0.1 moving_mean == expected_mean and
+        # moving_variance == expected_var.
+        self.assertAllClose(mean, expected_mean)
+        self.assertAllClose(variance, expected_var)
+
+  def testEmptyUpdateOps(self):
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+      tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+      tf_predictions = batchnorm_classifier(tf_inputs)
+      tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+      total_loss = tf.contrib.losses.get_total_loss()
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+
+      train_op = tf.contrib.training.create_train_op(
+          total_loss, optimizer, update_ops=[])
+
+      moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
+      moving_variance = tf.contrib.framework.get_variables_by_name(
+          'moving_variance')[0]
+
+      with tf.Session() as sess:
+        # Initialize all variables
+        sess.run(tf.initialize_all_variables())
+        mean, variance = sess.run([moving_mean, moving_variance])
+        # After initialization moving_mean == 0 and moving_variance == 1.
+        self.assertAllClose(mean, [0] * 4)
+        self.assertAllClose(variance, [1] * 4)
+
+        for _ in range(10):
+          sess.run([train_op])
+        mean = moving_mean.eval()
+        variance = moving_variance.eval()
+
+        # Since we skip update_ops the moving_vars are not updated.
+        self.assertAllClose(mean, [0] * 4)
+        self.assertAllClose(variance, [1] * 4)
+
+
+class TrainBNClassifierTest(tf.test.TestCase):
+
+  def setUp(self):
+    # Create an easy training set:
+    np.random.seed(0)
+
+    self._inputs = np.zeros((16, 4))
+    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
+    self._logdir = os.path.join(self.get_temp_dir(), 'tmp_bnlogs/')
+
+    for i in range(16):
+      j = int(2 * self._labels[i] + np.random.randint(0, 2))
+      self._inputs[i, j] = 1
+
+  def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
+    g = tf.Graph()
+    with g.as_default():
+      tf.set_random_seed(0)
+      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+      tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+      tf_predictions = batchnorm_classifier(tf_inputs)
+      tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+      total_loss = tf.contrib.losses.get_total_loss()
+
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+
+      train_op = tf.contrib.training.create_train_op(
+          total_loss, optimizer)
+
+      loss = tf.contrib.training.train(
+          train_op, self._logdir, hooks=[
+              tf.train.StopAtStepHook(num_steps=300)
+          ])
+      self.assertLess(loss, .1)
+
+
+class TrainTest(tf.test.TestCase):
+
+  def setUp(self):
+    # Create an easy training set:
+    np.random.seed(0)
+
+    self._inputs = np.zeros((16, 4))
+    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
+
+    for i in range(16):
+      j = int(2 * self._labels[i] + np.random.randint(0, 2))
+      self._inputs[i, j] = 1
+
+  def testCanAchieveZeroLoss(self):
+    logdir = os.path.join(self.get_temp_dir(), 'can_achieve_zero_loss')
+
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+      tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+      tf_predictions = logistic_classifier(tf_inputs)
+      tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+      total_loss = tf.contrib.losses.get_total_loss()
+
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+
+      train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
+
+      loss = tf.contrib.training.train(
+          train_op, logdir, hooks=[
+              tf.train.StopAtStepHook(num_steps=300)
+          ])
+      self.assertIsNotNone(loss)
+      self.assertLess(loss, .015)
+
+  def testTrainWithLocalVariable(self):
+    logdir = os.path.join(self.get_temp_dir(), 'train_with_local_variable')
+
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+      tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+      local_multiplier = tf.contrib.framework.local_variable(1.0)
+
+      tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
+      tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+      total_loss = tf.contrib.losses.get_total_loss()
+
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+
+      train_op = tf.contrib.training.create_train_op(
+          total_loss, optimizer)
+
+      loss = tf.contrib.training.train(
+          train_op, logdir, hooks=[
+              tf.train.StopAtStepHook(num_steps=300)
+          ])
+      self.assertIsNotNone(loss)
+      self.assertLess(loss, .015)
+
+  def testResumeTrainAchievesRoughlyTheSameLoss(self):
+    number_of_steps = [300, 1, 5]
+    logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
+
+    for i in range(len(number_of_steps)):
+      with tf.Graph().as_default():
+        tf.set_random_seed(i)
+        tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+        tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+        tf_predictions = logistic_classifier(tf_inputs)
+        tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+        total_loss = tf.contrib.losses.get_total_loss()
+
+        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+
+        train_op = tf.contrib.training.create_train_op(
+            total_loss, optimizer)
+
+        saver = tf.train.Saver()
+
+        loss = tf.contrib.training.train(
+            train_op, logdir, hooks=[
+                tf.train.StopAtStepHook(num_steps=number_of_steps[i]),
+                tf.train.CheckpointSaverHook(
+                    logdir, save_steps=50, saver=saver),
+            ])
+        self.assertIsNotNone(loss)
+        self.assertLess(loss, .015)
+
+  def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
+    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+    tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+    tf_predictions = logistic_classifier(tf_inputs)
+    tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+    total_loss = tf.contrib.losses.get_total_loss()
+
+    optimizer = tf.train.GradientDescentOptimizer(
+        learning_rate=learning_rate)
+
+    def transform_grads_fn(grads):
+      if gradient_multiplier != 1.0:
+        variables = tf.trainable_variables()
+        gradient_multipliers = {var: gradient_multiplier for var in variables}
+
+        with tf.name_scope('multiply_grads'):
+          return tf.contrib.training.multiply_gradients(
+              grads, gradient_multipliers)
+      else:
+        return grads
+
+    return tf.contrib.training.create_train_op(
+        total_loss, optimizer, transform_grads_fn=transform_grads_fn)
+
+  def testTrainWithInitFromCheckpoint(self):
+    logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
+    logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
+
+    if tf.gfile.Exists(logdir1):  # For running on jenkins.
+      tf.gfile.DeleteRecursively(logdir1)
+    if tf.gfile.Exists(logdir2):  # For running on jenkins.
+      tf.gfile.DeleteRecursively(logdir2)
+
+    # First, train the model one step (make sure the error is high).
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      train_op = self.create_train_op()
+      saver = tf.train.Saver()
+      loss = tf.contrib.training.train(
+          train_op, logdir1, hooks=[
+              tf.train.CheckpointSaverHook(logdir1, save_steps=1, saver=saver),
+              tf.train.StopAtStepHook(num_steps=1),
+          ], save_checkpoint_secs=None)
+      self.assertGreater(loss, .5)
+
+    # Next, train the model to convergence.
+    with tf.Graph().as_default():
+      tf.set_random_seed(1)
+      train_op = self.create_train_op()
+      saver = tf.train.Saver()
+      loss = tf.contrib.training.train(
+          train_op, logdir1, hooks=[
+              tf.train.CheckpointSaverHook(logdir1, save_steps=1, saver=saver),
+              tf.train.StopAtStepHook(num_steps=300),
+          ], save_checkpoint_secs=None)
+      self.assertIsNotNone(loss)
+      self.assertLess(loss, .02)
+
+    # Finally, advance the model a single step and validate that the loss is
+    # still low.
+    with tf.Graph().as_default():
+      tf.set_random_seed(2)
+      train_op = self.create_train_op()
+
+      model_variables = tf.all_variables()
+      model_path = os.path.join(logdir1, 'model.ckpt-300')
+
+      assign_fn = tf.contrib.framework.assign_from_checkpoint_fn(
+          model_path, model_variables)
+      def init_fn(_, session):
+        assign_fn(session)
+
+      loss = tf.contrib.training.train(
+          train_op,
+          logdir2,
+          scaffold=tf.train.Scaffold(init_fn=init_fn),
+          hooks=[tf.train.StopAtStepHook(num_steps=1)])
+
+      self.assertIsNotNone(loss)
+      self.assertLess(loss, .02)
+
+  def ModelLoss(self):
+    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
+    tf_labels = tf.constant(self._labels, dtype=tf.float32)
+
+    tf_predictions = logistic_classifier(tf_inputs)
+    tf.contrib.losses.log_loss(tf_predictions, tf_labels)
+    return tf.contrib.losses.get_total_loss()
+
+  def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
+    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
+    if tf.gfile.Exists(logdir):  # For running on jenkins.
+      tf.gfile.DeleteRecursively(logdir)
+
+    # First, train only the weights of the model.
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      total_loss = self.ModelLoss()
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+      weights = tf.contrib.framework.get_variables_by_name('weights')
+
+      train_op = tf.contrib.training.create_train_op(
+          total_loss,
+          optimizer,
+          variables_to_train=weights)
+
+      saver = tf.train.Saver()
+      loss = tf.contrib.training.train(
+          train_op, logdir, hooks=[
+              tf.train.CheckpointSaverHook(logdir, save_steps=1, saver=saver),
+              tf.train.StopAtStepHook(num_steps=200),
+          ])
+      self.assertGreater(loss, .015)
+      self.assertLess(loss, .05)
+
+    # Next, train the biases of the model.
+    with tf.Graph().as_default():
+      tf.set_random_seed(1)
+      total_loss = self.ModelLoss()
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+      biases = tf.contrib.framework.get_variables_by_name('biases')
+
+      train_op = tf.contrib.training.create_train_op(
+          total_loss,
+          optimizer,
+          variables_to_train=biases)
+
+      saver = tf.train.Saver()
+      loss = tf.contrib.training.train(
+          train_op, logdir, hooks=[
+              tf.train.CheckpointSaverHook(logdir, save_steps=1, saver=saver),
+              tf.train.StopAtStepHook(num_steps=300),
+          ])
+      self.assertGreater(loss, .015)
+      self.assertLess(loss, .05)
+
+    # Finally, train both weights and bias to get lower loss.
+    with tf.Graph().as_default():
+      tf.set_random_seed(2)
+      total_loss = self.ModelLoss()
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+
+      train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
+      saver = tf.train.Saver()
+      loss = tf.contrib.training.train(
+          train_op, logdir, hooks=[
+              tf.train.CheckpointSaverHook(logdir, save_steps=1, saver=saver),
+              tf.train.StopAtStepHook(num_steps=400),
+          ])
+      self.assertIsNotNone(loss)
+      self.assertLess(loss, .015)
+
+  def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
+    # First, train only the weights of the model.
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      total_loss = self.ModelLoss()
+      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+      weights, biases = tf.contrib.framework.get_variables()
+
+      train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
+      train_weights = tf.contrib.training.create_train_op(
+          total_loss, optimizer, variables_to_train=[weights])
+      train_biases = tf.contrib.training.create_train_op(
+          total_loss, optimizer, variables_to_train=[biases])
+
+      with tf.Session() as sess:
+        # Initialize the variables.
+        sess.run(tf.initialize_all_variables())
+
+        # Get the initial weights and biases values.
+        weights_values, biases_values = sess.run([weights, biases])
+        self.assertGreater(np.linalg.norm(weights_values), 0)
+        self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
+
+        # Update weights and biases.
+        loss = sess.run(train_op)
+        self.assertGreater(loss, .5)
+        new_weights, new_biases = sess.run([weights, biases])
+
+        # Check that the weights and biases have been updated.
+        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
+        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
+
+        weights_values, biases_values = new_weights, new_biases
+
+        # Update only weights.
+        loss = sess.run(train_weights)
+        self.assertGreater(loss, .5)
+        new_weights, new_biases = sess.run([weights, biases])
+
+        # Check that the weights have been updated, but biases have not.
+        self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
+        self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
+        weights_values = new_weights
+
+        # Update only biases.
+        loss = sess.run(train_biases)
+        self.assertGreater(loss, .5)
+        new_weights, new_biases = sess.run([weights, biases])
+
+        # Check that the biases have been updated, but weights have not.
+        self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
+        self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
+
+  def testTrainWithAlteredGradients(self):
+    # Use the same learning rate but different gradient multipliers
+    # to train two models. Model with equivalently larger learning
+    # rate (i.e., learning_rate * gradient_multiplier) has smaller
+    # training loss.
+    logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs6/')
+    logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs7/')
+
+    if tf.gfile.Exists(logdir1):
+      tf.gfile.DeleteRecursively(logdir1)
+    if tf.gfile.Exists(logdir2):
+      tf.gfile.DeleteRecursively(logdir2)
+
+    multipliers = [1., 1000.]
+    number_of_steps = 10
+    losses = []
+    learning_rate = 0.001
+
+    # First, train the model with equivalently smaller learning rate.
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      train_op = self.create_train_op(
+          learning_rate=learning_rate,
+          gradient_multiplier=multipliers[0])
+
+      saver = tf.train.Saver()
+
+      loss = tf.contrib.training.train(
+          train_op, logdir1, hooks=[
+              tf.train.StopAtStepHook(num_steps=number_of_steps),
+              tf.train.CheckpointSaverHook(logdir1, save_steps=50, saver=saver),
+          ])
+
+      losses.append(loss)
+      self.assertGreater(loss, .5)
+
+    # Second, train the model with equivalently larger learning rate.
+    with tf.Graph().as_default():
+      tf.set_random_seed(0)
+      train_op = self.create_train_op(
+          learning_rate=learning_rate,
+          gradient_multiplier=multipliers[1])
+      saver = tf.train.Saver()
+
+      loss = tf.contrib.training.train(
+          train_op, logdir2, hooks=[
+              tf.train.StopAtStepHook(num_steps=number_of_steps),
+              tf.train.CheckpointSaverHook(logdir2, save_steps=50, saver=saver),
+          ])
+
+      losses.append(loss)
+      self.assertIsNotNone(loss)
+      self.assertLess(loss, .5)
+
+    # The loss of the model trained with larger learning rate should
+    # be smaller.
+    self.assertGreater(losses[0], losses[1])
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 79546ccd206..61db4ef42dd 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -221,13 +221,6 @@ cc_library(
     ],
 )
 
-cc_library(
-    name = "jpeg",
-    hdrs = ["lib/jpeg/jpeg_mem.h"],
-    visibility = ["//visibility:public"],
-    deps = [":jpeg_internal"],
-)
-
 # Test support library needed for all tests
 # This is currently public, but may be made internal in the
 # future.  Try to avoid depending on it.
@@ -699,9 +692,9 @@ filegroup(
             "platform/cuda.h",
             "platform/google/**/*",
             "platform/hadoop/**/*",
-            "platform/jpeg.*",
-            "platform/png.*",
-            "platform/gif.*",
+            "platform/gif.h",
+            "platform/jpeg.h",
+            "platform/png.h",
             "platform/stream_executor.*",
             "platform/windows/**/*",
             "user_ops/**/*.cu.cc",
@@ -981,7 +974,10 @@ cc_library(
             ],
             exclude = [
                 "**/*test*",
+                "lib/gif/**/*",
                 "lib/jpeg/**/*",
+                "platform/gif.h",
+                "platform/jpeg.h",
                 "platform/**/cuda.h",
                 "platform/**/stream_executor.h",
                 "platform/load_library.cc",
@@ -998,7 +994,10 @@ cc_library(
             ],
             exclude = [
                 "**/*test*",
+                "lib/gif/**/*",
                 "lib/jpeg/**/*",
+                "platform/gif.h",
+                "platform/jpeg.h",
                 "platform/**/cuda.h",
                 "platform/**/stream_executor.h",
             ],
@@ -1016,7 +1015,6 @@ cc_library(
     hdrs = tf_additional_lib_hdrs() + [
         "lib/core/blocking_counter.h",
         "lib/core/refcount.h",
-        "lib/gif/gif_io.h",
         "lib/gtl/edit_distance.h",
         "lib/gtl/int_type.h",
         "lib/gtl/iterator_range.h",
@@ -1060,18 +1058,32 @@ cc_library(
     ],
 )
 
+cc_library(
+    name = "gif_internal",
+    srcs = [
+        "lib/gif/gif_io.cc",
+        "platform/gif.h",
+    ],
+    hdrs = ["lib/gif/gif_io.h"],
+    copts = tf_copts(),
+    linkopts = ["-ldl"],
+    deps = [
+        ":lib",
+        "//tensorflow/core/platform/default/build_config:gif",
+    ],
+)
+
 cc_library(
     name = "jpeg_internal",
-    srcs = glob(
-        [
-            "lib/jpeg/*h",
-            "lib/jpeg/*.cc",
-        ],
-        exclude = [
-            "**/*test*",
-        ],
-    ),
-    hdrs = ["lib/jpeg/jpeg_handle.h"],
+    srcs = [
+        "lib/jpeg/jpeg_handle.cc",
+        "lib/jpeg/jpeg_mem.cc",
+        "platform/jpeg.h",
+    ],
+    hdrs = [
+        "lib/jpeg/jpeg_handle.h",
+        "lib/jpeg/jpeg_mem.h",
+    ],
     copts = tf_copts(),
     linkopts = ["-ldl"],
     deps = [
@@ -1541,7 +1553,6 @@ cc_test(
     srcs = ["lib/jpeg/jpeg_mem_unittest.cc"],
     data = glob(["lib/jpeg/testdata/*.jpg"]),
     deps = [
-        ":jpeg",
         ":jpeg_internal",
         ":lib",
         ":lib_internal",
diff --git a/tensorflow/core/common_runtime/device_factory.cc b/tensorflow/core/common_runtime/device_factory.cc
index 8104f446366..efbdf6bbb19 100644
--- a/tensorflow/core/common_runtime/device_factory.cc
+++ b/tensorflow/core/common_runtime/device_factory.cc
@@ -78,7 +78,7 @@ DeviceFactory* DeviceFactory::GetFactory(const string& device_type) {
 Status DeviceFactory::AddDevices(const SessionOptions& options,
                                  const string& name_prefix,
                                  std::vector<Device*>* devices) {
-  // CPU first.
+  // CPU first. A CPU device is required.
   auto cpu_factory = GetFactory("CPU");
   if (!cpu_factory) {
     return errors::NotFound(
@@ -90,18 +90,11 @@ Status DeviceFactory::AddDevices(const SessionOptions& options,
     return errors::NotFound("No CPU devices are available in this process");
   }
 
-  // Then GPU.
-  auto gpu_factory = GetFactory("GPU");
-  if (gpu_factory) {
-    TF_RETURN_IF_ERROR(
-        gpu_factory->CreateDevices(options, name_prefix, devices));
-  }
-
-  // Then the rest.
+  // Then the rest (including GPU).
   mutex_lock l(*get_device_factory_lock());
   for (auto& p : device_factories()) {
     auto factory = p.second.factory.get();
-    if (factory != cpu_factory && factory != gpu_factory) {
+    if (factory != cpu_factory) {
       TF_RETURN_IF_ERROR(factory->CreateDevices(options, name_prefix, devices));
     }
   }
diff --git a/tensorflow/core/distributed_runtime/master.cc b/tensorflow/core/distributed_runtime/master.cc
index cf9deaabd8c..741282be31b 100644
--- a/tensorflow/core/distributed_runtime/master.cc
+++ b/tensorflow/core/distributed_runtime/master.cc
@@ -282,6 +282,7 @@ void Master::ExtendSession(const ExtendSessionRequest* req,
     done(errors::Aborted("Session ", req->session_handle(), " is not found."));
     return;
   }
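+  // Release mu_ before scheduling the closure so other session operations
+  // are not blocked while the request runs.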
+  mu_.unlock();
 
   SchedClosure([session, req, resp, done]() {
     Status status = ValidateExternalGraphDefSyntax(req->graph_def());
@@ -290,7 +291,22 @@ void Master::ExtendSession(const ExtendSessionRequest* req,
     }
     done(status);
   });
+}
+
+void Master::PartialRunSetup(const PartialRunSetupRequest* req,
+                             PartialRunSetupResponse* resp, MyClosure done) {
+  mu_.lock();
+  MasterSession* session = gtl::FindPtrOrNull(sessions_, req->session_handle());
+  if (session == nullptr) {
+    mu_.unlock();
+    done(errors::Aborted("Session ", req->session_handle(), " is not found."));
+    return;
+  }
   mu_.unlock();
+
+  SchedClosure([this, session, req, resp, done]() {
+    done(session->PartialRunSetup(req, resp));
+  });
 }
 
 void Master::RunStep(CallOptions* opts, const RunStepRequest* req,
@@ -303,6 +319,7 @@ void Master::RunStep(CallOptions* opts, const RunStepRequest* req,
     done(errors::Aborted("Session ", req->session_handle(), " is not found."));
     return;
   }
+  mu_.unlock();
 
   SchedClosure([this, start_time, session, opts, req, resp, done]() {
     Status status = session->Run(opts, req, resp);
@@ -312,7 +329,6 @@ void Master::RunStep(CallOptions* opts, const RunStepRequest* req,
     last_1000_steps_.AddValue((done_time - start_time) / 1e9);
     ++step_count_;
   });
-  mu_.unlock();
 }
 
 void Master::CloseSession(const CloseSessionRequest* req,
diff --git a/tensorflow/core/distributed_runtime/master.h b/tensorflow/core/distributed_runtime/master.h
index a44ee6352a4..10875d80d40 100644
--- a/tensorflow/core/distributed_runtime/master.h
+++ b/tensorflow/core/distributed_runtime/master.h
@@ -46,6 +46,9 @@ class Master {
   void ExtendSession(const ExtendSessionRequest* req,
                      ExtendSessionResponse* resp, MyClosure done);
 
+  void PartialRunSetup(const PartialRunSetupRequest* req,
+                       PartialRunSetupResponse* resp, MyClosure done);
+
   void RunStep(CallOptions* opts, const RunStepRequest* req,
                RunStepResponse* resp, MyClosure done);
 
diff --git a/tensorflow/core/distributed_runtime/master_interface.h b/tensorflow/core/distributed_runtime/master_interface.h
index ec9218e2133..6b405f8eaa5 100644
--- a/tensorflow/core/distributed_runtime/master_interface.h
+++ b/tensorflow/core/distributed_runtime/master_interface.h
@@ -17,6 +17,7 @@ limitations under the License.
 #define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MASTER_INTERFACE_H_
 
 #include "tensorflow/core/distributed_runtime/call_options.h"
+#include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/protobuf/master.pb.h"
 
@@ -37,6 +38,12 @@ class MasterInterface {
                                const ExtendSessionRequest* request,
                                ExtendSessionResponse* response) = 0;
 
+  virtual Status PartialRunSetup(CallOptions* call_options,
+                                 const PartialRunSetupRequest* request,
+                                 PartialRunSetupResponse* response) {
+    return errors::Unimplemented("Partial run not implemented for this master");
+  }
+
   virtual Status RunStep(CallOptions* call_options,
                          const RunStepRequest* request,
                          RunStepResponse* response) = 0;
diff --git a/tensorflow/core/distributed_runtime/master_session.cc b/tensorflow/core/distributed_runtime/master_session.cc
index 6f3b7841785..cacaf838165 100644
--- a/tensorflow/core/distributed_runtime/master_session.cc
+++ b/tensorflow/core/distributed_runtime/master_session.cc
@@ -50,18 +50,6 @@ limitations under the License.
 
 namespace tensorflow {
 
-// A little bit of per-step state.
-struct PerStepState {
-  bool collect_costs = false;
-  bool collect_timeline = false;
-  bool collect_rpcs = false;
-  Microseconds start_micros = Microseconds(0);
-  Microseconds end_micros = Microseconds(0);
-  std::vector<StepStats> step_stats;  // per partition
-  StepStats rpc_stats;                // for RPC layer
-  CostGraphDef cost_graph;
-};
-
 // MasterSession wraps SimpleClientGraph in a reference counted object.
 // This way, MasterSession can clear up the cache mapping Run requests to
 // compiled graphs while the compiled graph is still being used.
@@ -72,15 +60,38 @@ class MasterSession::ReffedClientGraph : public core::RefCounted {
   ReffedClientGraph(const string& handle, const BuildGraphOptions& bopts,
                     std::unique_ptr<SimpleClientGraph> cg,
                     const SessionOptions& session_opts,
-                    StatsPublisherFactory stats_publisher_factory)
+                    StatsPublisherFactory stats_publisher_factory,
+                    SimpleGraphExecutionState* execution_state, bool is_partial)
       : session_handle_(handle),
         client_graph_(std::move(cg)),
         bopts_(bopts),
-        session_opts_(session_opts) {
+        session_opts_(session_opts),
+        is_partial_(is_partial) {
     VLOG(1) << "Created ReffedClientGraph for node with "
             << client_graph_->graph.num_node_ids();
 
     stats_publisher_ = stats_publisher_factory(handle, bopts, session_opts);
+
+    // If this is a partial run, we need to initialize a name-to-node map for
+    // testing that fetches are reachable.
+    if (is_partial) {
+      std::unordered_set<StringPiece, StringPiece::Hasher> names;
+      for (const string& input : bopts.feed_endpoints) {
+        TensorId id(ParseTensorName(input));
+        names.emplace(id.first);
+      }
+      for (const string& output : bopts.fetch_endpoints) {
+        TensorId id(ParseTensorName(output));
+        names.emplace(id.first);
+      }
+      // We use the graph from the execution_state because we want the graph
+      // nodes before they are rewritten by the graph rewriter.
+      for (Node* n : execution_state->full_graph()->nodes()) {
+        if (names.count(n->name()) > 0) {
+          name_to_node_.insert({n->name(), n});
+        }
+      }
+    }
   }
 
   ~ReffedClientGraph() override { DeregisterPartitions(); }
@@ -171,7 +182,7 @@ class MasterSession::ReffedClientGraph : public core::RefCounted {
                        SimpleGraphExecutionState* execution_state,
                        PerStepState* pss, CallOptions* opts,
                        const RunStepRequest& req, RunStepResponse* resp,
-                       CancellationManager* cm);
+                       CancellationManager* cm, const bool is_last_partial_run);
 
   // Calls workers to cleanup states for the step "step_id".  Calls
   // `done` when all cleanup RPCs have completed.
@@ -185,6 +196,9 @@ class MasterSession::ReffedClientGraph : public core::RefCounted {
   void ProcessDeviceStats(ProfileHandler* ph,
                           const SimpleGraphExecutionState* execution_state,
                           const DeviceStepStats& ds, bool is_rpc);
+  // Checks that the requested fetches can be computed from the provided feeds.
+  Status CheckFetches(const RunStepRequest& req, const RunState* run_state,
+                      SimpleGraphExecutionState* execution_state);
 
   string DetailText(const NodeDef& def, const NodeExecStats& ns) {
     int64 tot = 0;
@@ -209,6 +223,8 @@ class MasterSession::ReffedClientGraph : public core::RefCounted {
   std::unordered_set<const Node*> nodes_needing_input_mapping_;
   BuildGraphOptions bopts_;
   const SessionOptions session_opts_;
+  const bool is_partial_;
+  std::unordered_map<StringPiece, Node*, StringPiece::Hasher> name_to_node_;
 
   // Graph partitioned into per-location subgraphs.
   struct Part {
@@ -483,15 +499,14 @@ class RunManyGraphs {
   TF_DISALLOW_COPY_AND_ASSIGN(RunManyGraphs);
 };
 
-
 Status MasterSession::ReffedClientGraph::RunPartitions(
     const MasterEnv* env, int64 step_id, int64 execution_count,
     SimpleGraphExecutionState* execution_state, PerStepState* pss,
     CallOptions* call_opts, const RunStepRequest& req, RunStepResponse* resp,
-    CancellationManager* cm) {
+    CancellationManager* cm, const bool is_last_partial_run) {
   VLOG(2) << "RunPartitions step_id " << step_id << " execution_count "
           << execution_count;
-  // Builds an index for feeds provided by the client.
+  // Build an index for feeds provided by the client.
   std::unordered_map<StringPiece, const TensorProto*, StringPiece::Hasher>
       feeds(3);
 
@@ -524,26 +539,64 @@ Status MasterSession::ReffedClientGraph::RunPartitions(
   for (int i = 0; i < num; ++i) {
     const Part& part = partitions_[i];
     RunManyGraphs::Call* c = calls.get(i);
+    if (is_partial_) {
+      c->req.set_is_partial(is_partial_);
+      c->req.set_is_last_partial_run(is_last_partial_run);
+    }
     c->req.set_graph_handle(part.graph_handle);
     c->req.set_step_id(step_id);
     *c->req.mutable_exec_opts() = exec_opts;
     // If any feeds are provided, send the feed values together
     // in the RunGraph request.
-    for (const auto& feed_key : part.feed_key) {
-      const string& feed = feed_key.first;
-      const string& key = feed_key.second;
-      const TensorProto* val = feeds[feed];
-      if (val == nullptr) {
-        return errors::InvalidArgument("No feed is provided for feed=", feed,
-                                       ", key=", key);
+    // In the partial case, we only want to include feeds provided in the req.
+    // In the non-partial case, all feeds in the request are in the part.
+    // We keep these as separate paths for now, to ensure we aren't
+    // inadvertently slowing down the normal run path.
+    if (is_partial_) {
+      for (const auto& feed : req.feed()) {
+        const string& name = feed.name();
+        auto iter = part.feed_key.find(name);
+        if (iter == part.feed_key.end()) {
+          // The provided feed must be for a different partition.
+          continue;
+        }
+        const string& key = iter->second;
+        const TensorProto* val = feeds[name];
+        if (val == nullptr) {
+          return errors::InvalidArgument("No feed is provided for feed=", name,
+                                         ", key=", key);
+        }
+        auto* send = c->req.add_send();
+        send->set_key(key);
+        *(send->mutable_val()) = *val;  // TODO(mrry): make it faster if needed.
+      }
+      // TODO(suharshs): Make a map from fetch to fetch_key to make this faster.
+      // For now, we just iterate through partitions to find the matching key.
+      for (const auto& req_fetch : req.fetch()) {
+        for (const auto& key_fetch : part.key_fetch) {
+          if (key_fetch.second == req_fetch) {
+            c->req.add_recv_key(key_fetch.first);
+            break;
+          }
+        }
+      }
+    } else {
+      for (const auto& feed_key : part.feed_key) {
+        const string& feed = feed_key.first;
+        const string& key = feed_key.second;
+        const TensorProto* val = feeds[feed];
+        if (val == nullptr) {
+          return errors::InvalidArgument("No feed is provided for feed=", feed,
+                                         ", key=", key);
+        }
+        auto* send = c->req.add_send();
+        send->set_key(key);
+        *(send->mutable_val()) = *val;  // TODO(mrry): make it faster if needed.
+      }
+      for (const auto& key_fetch : part.key_fetch) {
+        const string& key = key_fetch.first;
+        c->req.add_recv_key(key);
       }
-      auto* send = c->req.add_send();
-      send->set_key(key);
-      *(send->mutable_val()) = *val;  // TODO(mrry): make it faster if needed.
-    }
-    for (const auto& key_fetch : part.key_fetch) {
-      const string& key = key_fetch.first;
-      c->req.add_recv_key(key);
     }
   }
 
@@ -762,6 +815,64 @@ void MasterSession::ReffedClientGraph::ProcessDeviceStats(
   }
 }
 
+// TODO(suharshs): Merge with CheckFetches in DirectSession.
+// TODO(suharsh,mrry): Build a map from fetch target to set of feeds it depends
+// on once at setup time to prevent us from computing the dependencies
+// every time.
+Status MasterSession::ReffedClientGraph::CheckFetches(
+    const RunStepRequest& req, const RunState* run_state,
+    SimpleGraphExecutionState* execution_state) {
+  // Build the set of pending feeds that we haven't seen.
+  std::unordered_set<TensorId, TensorId::Hasher> pending_feeds;
+  for (const string& feed : run_state->pending_inputs) {
+    TensorId id(ParseTensorName(feed));
+    auto it = name_to_node_.find(id.first);
+    if (it == name_to_node_.end()) {
+      return errors::NotFound("Feed ", feed, ": not found");
+    }
+    pending_feeds.insert(id);
+  }
+  for (const auto& feed : req.feed()) {
+    TensorId id(ParseTensorName(feed.name()));
+    pending_feeds.erase(id);
+  }
+
+  // Initialize the stack with the fetch nodes.
+  std::vector<const Node*> stack;
+  for (const string& fetch : req.fetch()) {
+    TensorId id(ParseTensorName(fetch));
+    auto it = name_to_node_.find(id.first);
+    if (it == name_to_node_.end()) {
+      return errors::NotFound("Fetch ", fetch, ": not found");
+    }
+    stack.push_back(it->second);
+  }
+
+  // No tensor needed by the fetches may depend on a feed that is still
+  // pending. We need to use the original full graph from the execution state.
+  const Graph* graph = execution_state->full_graph();
+  std::vector<bool> visited(graph->num_node_ids(), false);
+  while (!stack.empty()) {
+    const Node* n = stack.back();
+    stack.pop_back();
+
+    for (const Edge* in_edge : n->in_edges()) {
+      const Node* in_node = in_edge->src();
+      if (pending_feeds.count({in_node->name(), in_edge->src_output()}) > 0) {
+        return errors::InvalidArgument("Fetch ", in_node->name(), ":",
+                                       in_edge->src_output(),
+                                       " can't be computed from the feeds"
+                                       " that have been fed so far.");
+      }
+      if (!visited[in_node->id()]) {
+        visited[in_node->id()] = true;
+        stack.push_back(in_node);
+      }
+    }
+  }
+  return Status::OK();
+}
+
 // Asynchronously deregisters subgraphs on the workers, without waiting for the
 // result.
 void MasterSession::ReffedClientGraph::DeregisterPartitions() {
@@ -803,6 +914,23 @@ void BuildBuildGraphOptions(const RunStepRequest& req,
   std::sort(opts->fetch_endpoints.begin(), opts->fetch_endpoints.end());
 }
 
+void BuildBuildGraphOptions(const PartialRunSetupRequest& req,
+                            BuildGraphOptions* opts) {
+  for (const auto& feed : req.feed()) {
+    opts->feed_endpoints.push_back(feed);
+  }
+  for (const auto& fetch : req.fetch()) {
+    opts->fetch_endpoints.push_back(fetch);
+  }
+  for (const auto& target : req.target()) {
+    opts->target_nodes.push_back(target);
+  }
+
+  std::sort(opts->feed_endpoints.begin(), opts->feed_endpoints.end());
+  std::sort(opts->target_nodes.begin(), opts->target_nodes.end());
+  std::sort(opts->fetch_endpoints.begin(), opts->fetch_endpoints.end());
+}
+
 uint64 HashBuildGraphOptions(const BuildGraphOptions& opts) {
   uint64 h = 0x2b992ddfa23249d6ull;
   for (const string& name : opts.feed_endpoints) {
@@ -927,11 +1055,9 @@ Status MasterSession::Extend(const ExtendSessionRequest* req,
   return Status::OK();
 }
 
-Status MasterSession::StartStep(const RunStepRequest& req,
-                                BuildGraphOptions* opts, int64* count,
-                                ReffedClientGraph** rcg) {
-  BuildBuildGraphOptions(req, opts);
-  const uint64 hash = HashBuildGraphOptions(*opts);
+Status MasterSession::StartStep(const BuildGraphOptions& opts, int64* count,
+                                ReffedClientGraph** rcg, bool is_partial) {
+  const uint64 hash = HashBuildGraphOptions(opts);
   ReffedClientGraph* to_unref = nullptr;
   {
     mutex_lock l(mu_);
@@ -944,12 +1070,12 @@ Status MasterSession::StartStep(const RunStepRequest& req,
       // We have not seen this subgraph before. Build the subgraph and
       // cache it.
       VLOG(1) << "Unseen hash " << hash << " for "
-              << BuildGraphOptionsString(*opts);
+              << BuildGraphOptionsString(opts);
       std::unique_ptr<SimpleClientGraph> client_graph;
-      TF_RETURN_IF_ERROR(execution_state_->BuildGraph(*opts, &client_graph));
-      auto entry =
-          new ReffedClientGraph(handle_, *opts, std::move(client_graph),
-                                session_opts_, stats_publisher_factory_);
+      TF_RETURN_IF_ERROR(execution_state_->BuildGraph(opts, &client_graph));
+      auto entry = new ReffedClientGraph(
+          handle_, opts, std::move(client_graph), session_opts_,
+          stats_publisher_factory_, execution_state_.get(), is_partial);
       iter = runs_.insert({hash, entry}).first;
       auto obs_iter = obsolete_.find(hash);
       if (obs_iter != obsolete_.end()) {
@@ -979,6 +1105,47 @@ void MasterSession::ClearRunsTable(std::vector<ReffedClientGraph*>* to_unref,
   rcg_map->clear();
 }
 
+Status MasterSession::PartialRunSetup(const PartialRunSetupRequest* req,
+                                      PartialRunSetupResponse* resp) {
+  std::vector<string> inputs, outputs, targets;
+  for (const auto& feed : req->feed()) {
+    inputs.push_back(feed);
+  }
+  for (const auto& fetch : req->fetch()) {
+    outputs.push_back(fetch);
+  }
+  for (const auto& target : req->target()) {
+    targets.push_back(target);
+  }
+
+  string handle = std::to_string(partial_run_handle_counter_.fetch_add(1));
+
+  ReffedClientGraph* rcg = nullptr;
+  int64 count = 0;
+
+  // Prepare.
+  BuildGraphOptions opts;
+  BuildBuildGraphOptions(*req, &opts);
+  TF_RETURN_IF_ERROR(StartStep(opts, &count, &rcg, true));
+  // Keeps the highest 8 bits 0x01: we reserve some bits of the
+  // step_id for future use.
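+  // As a result, every step_id carries 0x01 in its highest byte.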
+  uint64 step_id = (random::New64() & ((1uLL << 56) - 1)) | (1uLL << 56);
+  TRACEPRINTF("stepid %llu", step_id);
+
+  rcg->Ref();
+  RunState* run_state = new RunState(inputs, outputs, rcg, step_id, count);
+  {
+    mutex_lock l(mu_);
+    partial_runs_.emplace(
+        std::make_pair(handle, std::unique_ptr<RunState>(run_state)));
+  }
+
+  TF_RETURN_IF_ERROR(BuildAndRegisterPartitions(rcg));
+
+  resp->set_partial_run_handle(handle);
+  return Status::OK();
+}
+
 Status MasterSession::Run(CallOptions* opts, const RunStepRequest* req,
                           RunStepResponse* resp) {
   UpdateLastAccessTime();
@@ -986,7 +1153,12 @@ Status MasterSession::Run(CallOptions* opts, const RunStepRequest* req,
     mutex_lock l(mu_);
     ++num_running_;
   }
-  Status status = DoRunWithLocalExecution(opts, req, resp);
+  Status status;
+  if (!req->partial_run_handle().empty()) {
+    status = DoPartialRun(opts, req, resp);
+  } else {
+    status = DoRunWithLocalExecution(opts, req, resp);
+  }
   {
     mutex_lock l(mu_);
     --num_running_;
@@ -997,23 +1169,7 @@ Status MasterSession::Run(CallOptions* opts, const RunStepRequest* req,
   return status;
 }
 
-Status MasterSession::DoRunWithLocalExecution(CallOptions* opts,
-                                              const RunStepRequest* req,
-                                              RunStepResponse* resp) {
-  VLOG(2) << "DoRunWithLocalExecution "
-          << "req: " << req->DebugString();
-  PerStepState pss;
-  pss.start_micros = Env::Default()->NowMicros();
-
-  // Prepare.
-  BuildGraphOptions bgopts;
-  ReffedClientGraph* rcg = nullptr;
-  int64 count = 0;
-  TF_RETURN_IF_ERROR(StartStep(*req, &bgopts, &count, &rcg));
-
-  // Unref "rcg" when out of scope.
-  core::ScopedUnref unref(rcg);
-
+Status MasterSession::BuildAndRegisterPartitions(ReffedClientGraph* rcg) {
   // Registers subgraphs if haven't done so.
   PartitionOptions popts;
   popts.node_to_loc = SplitByWorker;
@@ -1051,12 +1207,136 @@ Status MasterSession::DoRunWithLocalExecution(CallOptions* opts,
   TF_RETURN_IF_ERROR(rcg->RegisterPartitions(
       env_, popts, rcg->client_graph()->flib_def->ToProto()));
 
+  return Status::OK();
+}
+
+Status MasterSession::DoPartialRun(CallOptions* opts, const RunStepRequest* req,
+                                   RunStepResponse* resp) {
+  const string& prun_handle = req->partial_run_handle();
+  RunState* run_state = nullptr;
+  {
+    mutex_lock l(mu_);
+    auto it = partial_runs_.find(prun_handle);
+    if (it == partial_runs_.end()) {
+      return errors::InvalidArgument(
+          "Must run PartialRunSetup before performing partial runs");
+    }
+    run_state = it->second.get();
+  }
+
+  // If this is the first partial run, initialize the PerStepState.
+  if (!run_state->step_started) {
+    run_state->step_started = true;
+    PerStepState pss;
+
+    auto count = run_state->count;
+    pss.collect_timeline =
+        req->options().trace_level() == RunOptions::FULL_TRACE;
+
+    // Build the cost model every 'build_cost_model_every' steps after
+    // skipping an initial 'build_cost_model_after' steps.
+    const int64 build_cost_model_after =
+        session_opts_.config.graph_options().build_cost_model_after();
+    const int64 build_cost_model_every =
+        session_opts_.config.graph_options().build_cost_model();
+    pss.collect_costs =
+        build_cost_model_every > 0 &&
+        ((count + 1 - build_cost_model_after) % build_cost_model_every == 0);
+
+    std::unique_ptr<ProfileHandler> ph = run_state->rcg->GetProfileHandler(
+        run_state->step_id, count, req->options());
+    if (ph) {
+      pss.collect_timeline = true;
+      pss.collect_rpcs = ph->should_collect_rpcs();
+    }
+
+    run_state->pss = std::move(pss);
+    run_state->ph = std::move(ph);
+  }
+
+  // Make sure that this is a new set of feeds that are still pending.
+  for (const auto& feed : req->feed()) {
+    auto it = run_state->pending_inputs.find(feed.name());
+    if (it == run_state->pending_inputs.end()) {
+      return errors::InvalidArgument("The feed ", feed.name(),
+                                     " has already been fed.");
+    }
+  }
+  // Check that this is a new set of fetches that are still pending.
+  for (const auto& fetch : req->fetch()) {
+    auto it = run_state->pending_outputs.find(fetch);
+    if (it == run_state->pending_outputs.end()) {
+      return errors::InvalidArgument("The fetch ", fetch,
+                                     " has already been fetched.");
+    }
+  }
+
+  // Ensure that the requested fetches can be computed from the provided feeds.
+  TF_RETURN_IF_ERROR(
+      run_state->rcg->CheckFetches(*req, run_state, execution_state_.get()));
+
+  // Determine if this partial run satisfies all the pending inputs and outputs.
+  for (const auto& feed : req->feed()) {
+    run_state->pending_inputs.erase(feed.name());
+  }
+  for (const auto& fetch : req->fetch()) {
+    run_state->pending_outputs.erase(fetch);
+  }
+  bool is_last_partial_run =
+      (run_state->pending_inputs.empty() && run_state->pending_outputs.empty());
+
+  Status s = run_state->rcg->RunPartitions(
+      env_, run_state->step_id, run_state->count, execution_state_.get(),
+      &run_state->pss, opts, *req, resp, cancellation_manager_,
+      is_last_partial_run);
+
+  // Delete the run state if there is an error or all fetches are done.
+  if (!s.ok() || is_last_partial_run) {
+    ReffedClientGraph* rcg = run_state->rcg;
+    run_state->pss.end_micros = Env::Default()->NowMicros();
+    // Schedule post-processing and cleanup to be done asynchronously.
+    rcg->Ref();
+    rcg->ProcessStats(env_, run_state->step_id, &run_state->pss,
+                      execution_state_.get(), run_state->ph.get(), *req, resp);
+    rcg->CleanupPartitionsAsync(
+        run_state->step_id, [this, rcg, prun_handle](const Status& s) {
+          if (!s.ok()) {
+            LOG(ERROR) << "Cleanup partition error: " << s;
+          }
+          rcg->Unref();
+          mutex_lock l(mu_);
+          partial_runs_.erase(prun_handle);
+        });
+  }
+  return s;
+}
+
+Status MasterSession::DoRunWithLocalExecution(CallOptions* opts,
+                                              const RunStepRequest* req,
+                                              RunStepResponse* resp) {
+  VLOG(2) << "DoRunWithLocalExecution "
+          << "req: " << req->DebugString();
+  PerStepState pss;
+  pss.start_micros = Env::Default()->NowMicros();
+
+  // Prepare.
+  BuildGraphOptions bgopts;
+  BuildBuildGraphOptions(*req, &bgopts);
+  ReffedClientGraph* rcg = nullptr;
+  int64 count = 0;
+  TF_RETURN_IF_ERROR(StartStep(bgopts, &count, &rcg, false));
+
+  // Unref "rcg" when out of scope.
+  core::ScopedUnref unref(rcg);
+
+  TF_RETURN_IF_ERROR(BuildAndRegisterPartitions(rcg));
+
   // Keeps the highest 8 bits 0x01: we reserve some bits of the
   // step_id for future use.
   const uint64 step_id = (random::New64() & ((1uLL << 56) - 1)) | (1uLL << 56);
   TRACEPRINTF("stepid %llu", step_id);
 
-  std::unique_ptr<ProfileHandler> ph;
   pss.collect_timeline = req->options().trace_level() == RunOptions::FULL_TRACE;
 
   // Build the cost model every 'build_cost_model_every' steps after skipping an
@@ -1069,15 +1349,16 @@ Status MasterSession::DoRunWithLocalExecution(CallOptions* opts,
       build_cost_model_every > 0 &&
       ((count + 1 - build_cost_model_after) % build_cost_model_every == 0);
 
-  ph = rcg->GetProfileHandler(step_id, count, req->options());
+  std::unique_ptr<ProfileHandler> ph =
+      rcg->GetProfileHandler(step_id, count, req->options());
   if (ph) {
     pss.collect_timeline = true;
     pss.collect_rpcs = ph->should_collect_rpcs();
   }
 
-  TF_RETURN_IF_ERROR(rcg->RunPartitions(env_, step_id, count,
-                                        execution_state_.get(), &pss, opts,
-                                        *req, resp, cancellation_manager_));
+  TF_RETURN_IF_ERROR(
+      rcg->RunPartitions(env_, step_id, count, execution_state_.get(), &pss,
+                         opts, *req, resp, cancellation_manager_, false));
 
   pss.end_micros = Env::Default()->NowMicros();
 
@@ -1110,4 +1391,22 @@ Status MasterSession::Close() {
   return Status::OK();
 }
 
+MasterSession::RunState::RunState(const std::vector<string>& input_names,
+                                  const std::vector<string>& output_names,
+                                  ReffedClientGraph* rcg, const uint64 step_id,
+                                  const int64 count)
+    : rcg(rcg), step_id(step_id), count(count) {
+  // Initially all the feeds and fetches are pending.
+  for (auto& name : input_names) {
+    pending_inputs.emplace(name);
+  }
+  for (auto& name : output_names) {
+    pending_outputs.emplace(name);
+  }
+}
+
+MasterSession::RunState::~RunState() {
+  if (rcg) rcg->Unref();
+}
+
 }  // end namespace tensorflow
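
For reference, the call sequence a client of this change is expected to follow: declare every feed and fetch once via PartialRunSetup, then issue RunStep calls carrying the returned handle, each supplying a subset of the feeds and requesting a subset of the fetches. A sketch only: RPC plumbing and tensor values are elided, and it assumes "sum:0" is computable from "a:0" alone, since CheckFetches rejects fetches that still depend on a pending feed.

    Status DrivePartialRun(MasterInterface* master, CallOptions* opts) {
      // Declare everything this partial run will ever feed or fetch.
      PartialRunSetupRequest setup_req;
      setup_req.add_feed("a:0");
      setup_req.add_feed("b:0");
      setup_req.add_fetch("sum:0");
      setup_req.add_fetch("prod:0");
      PartialRunSetupResponse setup_resp;
      TF_RETURN_IF_ERROR(master->PartialRunSetup(opts, &setup_req, &setup_resp));

      // First step: feed "a:0", fetch "sum:0". "b:0" and "prod:0" stay
      // pending, so the master keeps the RunState alive.
      RunStepRequest step1;
      step1.set_partial_run_handle(setup_resp.partial_run_handle());
      step1.add_feed()->set_name("a:0");  // tensor value omitted in this sketch
      step1.add_fetch("sum:0");
      RunStepResponse resp1;
      TF_RETURN_IF_ERROR(master->RunStep(opts, &step1, &resp1));

      // Final step: consuming "b:0" and "prod:0" empties both pending sets,
      // so is_last_partial_run becomes true and cleanup is scheduled.
      RunStepRequest step2;
      step2.set_partial_run_handle(setup_resp.partial_run_handle());
      step2.add_feed()->set_name("b:0");
      step2.add_fetch("prod:0");
      RunStepResponse resp2;
      return master->RunStep(opts, &step2, &resp2);
    }
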
diff --git a/tensorflow/core/distributed_runtime/master_session.h b/tensorflow/core/distributed_runtime/master_session.h
index e17614c819d..96d759d9c8d 100644
--- a/tensorflow/core/distributed_runtime/master_session.h
+++ b/tensorflow/core/distributed_runtime/master_session.h
@@ -16,6 +16,7 @@ limitations under the License.
 #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MASTER_SESSION_H_
 #define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MASTER_SESSION_H_
 
+#include <atomic>
 #include <vector>
 
 #include "tensorflow/core/common_runtime/device_set.h"
@@ -72,6 +73,10 @@ class MasterSession {
   // Extend() may block the caller thread for a long time.
   Status Extend(const ExtendSessionRequest* req, ExtendSessionResponse* resp);
 
+  // Set up a partial run call.
+  Status PartialRunSetup(const PartialRunSetupRequest* req,
+                         PartialRunSetupResponse* resp);
+
   // Run one step.
   Status Run(CallOptions* opts, const RunStepRequest* req,
              RunStepResponse* resp);
@@ -101,6 +106,8 @@ class MasterSession {
 
   std::atomic_ulong last_access_time_usec_;
 
+  std::atomic<int64> partial_run_handle_counter_ = {0};
+
   mutex mu_;
   std::unique_ptr<SimpleGraphExecutionState> execution_state_;
   int64 graph_version_;
@@ -115,6 +122,36 @@ class MasterSession {
   RCGMap runs_ GUARDED_BY(mu_);
   RCGMap obsolete_ GUARDED_BY(mu_);
 
+  struct PerStepState {
+    bool collect_costs = false;
+    bool collect_timeline = false;
+    bool collect_rpcs = false;
+    Microseconds start_micros = Microseconds(0);
+    Microseconds end_micros = Microseconds(0);
+    std::vector<StepStats> step_stats;  // per partition
+    StepStats rpc_stats;                // for RPC layer
+    CostGraphDef cost_graph;
+  };
+
+  struct RunState {
+    std::unordered_set<string> pending_inputs;
+    std::unordered_set<string> pending_outputs;
+    ReffedClientGraph* rcg = nullptr;
+    uint64 step_id;
+    int64 count = 0;
+    PerStepState pss;
+    std::unique_ptr<ProfileHandler> ph;
+    bool step_started = false;
+
+    RunState(const std::vector<string>& input_names,
+             const std::vector<string>& output_names, ReffedClientGraph* rcg,
+             const uint64 step_id, const int64 count);
+
+    ~RunState();
+  };
+  std::unordered_map<string, std::unique_ptr<RunState>> partial_runs_
+      GUARDED_BY(mu_);
+
   // Active RunStep calls.
   condition_variable num_running_is_zero_;
   int32 num_running_ GUARDED_BY(mu_) = 0;
@@ -131,14 +168,18 @@ class MasterSession {
   // Private dtor. The client must call Close().
   virtual ~MasterSession();
 
-  Status StartStep(const RunStepRequest& req, BuildGraphOptions* opts,
-                   int64* count, ReffedClientGraph** graph);
+  Status StartStep(const BuildGraphOptions& opts, int64* count,
+                   ReffedClientGraph** graph, bool is_partial);
   void ClearRunsTable(std::vector<ReffedClientGraph*>* to_unref,
                       RCGMap* rcg_map) EXCLUSIVE_LOCKS_REQUIRED(mu_);
   Status DoRunWithLocalExecution(CallOptions* opts, const RunStepRequest* req,
                                  RunStepResponse* resp);
+  Status DoPartialRun(CallOptions* opts, const RunStepRequest* req,
+                      RunStepResponse* resp);
   void UpdateLastAccessTime();
 
+  Status BuildAndRegisterPartitions(ReffedClientGraph* rcg);
+
   TF_DISALLOW_COPY_AND_ASSIGN(MasterSession);
 };
 
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index 4c14918ea70..145b8c53154 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -131,7 +131,7 @@ class OpKernel {
   // We allow legacy scalars within Google up until GraphDef version 6.
   // TODO(irving): Remove when we can drop support for GraphDef version 5.
   bool allow_legacy_scalars() const {
-#if defined(PLATFORM_GOOGLE)
+#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_GOOGLE_ANDROID)
     return graph_def_version_ < 6;
 #else
     return false;
diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index b4f36034708..a84eaad3152 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -1136,8 +1136,9 @@ tf_kernel_libraries(
         ":eigen_helpers",
         ":image_resizer_state",
         "//tensorflow/core:framework",
+        "//tensorflow/core:gif_internal",
         "//tensorflow/core:image_ops_op_lib",
-        "//tensorflow/core:jpeg",
+        "//tensorflow/core:jpeg_internal",
         "//tensorflow/core:lib",
         "//tensorflow/core:lib_internal",
         "//tensorflow/core:protos_all_cc",
@@ -2099,11 +2100,13 @@ tf_kernel_libraries(
         "count_up_to_op",
         "dense_update_ops",
         "scatter_op",
+        "scatter_nd_op",
         "variable_ops",
     ],
     deps = [
         ":assign_op",
         ":bounds_check",
+        ":fill_functor",
         ":scatter_functor",
         "//tensorflow/core:framework",
         "//tensorflow/core:lib",
@@ -2117,6 +2120,7 @@ tf_cc_test(
     size = "small",
     srcs = ["scatter_op_test.cc"],
     deps = [
+        ":fill_functor",
         ":ops_testutil",
         ":ops_util",
         ":scatter_op",
@@ -2129,6 +2133,23 @@ tf_cc_test(
     ],
 )
 
+tf_cc_test(
+    name = "scatter_nd_op_test",
+    size = "small",
+    srcs = ["scatter_nd_op_test.cc"],
+    deps = [
+        ":ops_testutil",
+        ":ops_util",
+        ":scatter_nd_op",
+        "//tensorflow/core:framework",
+        "//tensorflow/core:lib",
+        "//tensorflow/core:protos_all_cc",
+        "//tensorflow/core:test",
+        "//tensorflow/core:test_main",
+        "//tensorflow/core:testlib",
+    ],
+)
+
 tf_kernel_libraries(
     name = "string",
     prefixes = [
@@ -2571,6 +2592,7 @@ filegroup(
             "debug_ops.*",
             # Ops excluded because they do not build correctly for Android.
             # See b/29213790
+            "scatter_nd_op*",
             "sparse_matmul_op.*",
         ],
     ),
diff --git a/tensorflow/core/kernels/immutable_constant_op_test.cc b/tensorflow/core/kernels/immutable_constant_op_test.cc
index 93d726a64d4..d822e316ead 100644
--- a/tensorflow/core/kernels/immutable_constant_op_test.cc
+++ b/tensorflow/core/kernels/immutable_constant_op_test.cc
@@ -64,7 +64,7 @@ class TestFileSystem : public NullFileSystem {
       std::unique_ptr<ReadOnlyMemoryRegion>* result) override {
     float val = 0;
     StringPiece scheme, host, path;
-    ParseURI(fname, &scheme, &host, &path);
+    io::ParseURI(fname, &scheme, &host, &path);
     // For the tests create in-memory regions with float values equal to the
     // region name.
     if (path == "/2") {
diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc
index 4d31edbb1a9..77c4b7a7299 100644
--- a/tensorflow/core/kernels/parameterized_truncated_normal_op.cc
+++ b/tensorflow/core/kernels/parameterized_truncated_normal_op.cc
@@ -46,25 +46,6 @@ namespace functor {
 using random::PhiloxRandom;
 using random::SingleSampleAdapter;
 
-// Sample a truncated normal random variable, with mean, stddev, minval, and
-// maxval parameters for each batch. Uses two rejection sampling algorithms
-// described in http://rd.springer.com/article/10.1007/BF00143942.
-//
-// Either minval may be -infinity, or maxval may be +infinity. If the interval
-// (minval, maxval) is empty, the result is NaN. Large intervals which include
-// both tails may have reduced accuracy.
-template <typename Device, typename T>
-struct TruncatedNormalFunctor {
-  void operator()(OpKernelContext* ctx, const Device& d, int64 num_batches,
-                  int64 samples_per_batch, int64 num_elements,
-                  typename TTypes<T>::ConstFlat means,
-                  typename TTypes<T>::ConstFlat stddevs,
-                  typename TTypes<T>::ConstFlat minvals,
-                  typename TTypes<T>::ConstFlat maxvals,
-                  const random::PhiloxRandom& gen,
-                  typename TTypes<T>::Flat output);
-};
-
 template <typename T>
 struct TruncatedNormalFunctor<CPUDevice, T> {
   static const int kMaxIterations = 100;
@@ -96,8 +77,8 @@ struct TruncatedNormalFunctor<CPUDevice, T> {
 
       // Vectorized intermediate calculations for uniform rejection sampling.
       // We always generate at most 4 samples.
-      tensorflow::random::Array<T, 4> z;
-      tensorflow::random::Array<T, 4> g;
+      Eigen::array<T, 4> z;
+      Eigen::array<T, 4> g;
 
       for (int64 b = start_batch; b < limit_batch; ++b) {
         // We are passed a flat array for each of the parameter tensors.
@@ -145,13 +126,7 @@ struct TruncatedNormalFunctor<CPUDevice, T> {
         if (diff < cutoff) {
           // Sample from a uniform distribution on [normMin, normMax].
 
-          T plusFactor;
-          if (normMin < T(0)) {
-            // normMax > 0 because it is flipped otherwise.
-            plusFactor = T(0);
-          } else {
-            plusFactor = normMin * normMin;
-          }
+          const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin;
 
           while (sample < limit_sample) {
             const auto rand = dist(&gen_copy);
@@ -395,4 +370,21 @@ TF_CALL_double(REGISTER);
 
 #undef REGISTER
 
+#if GOOGLE_CUDA
+
+#define REGISTER(TYPE)                                         \
+  REGISTER_KERNEL_BUILDER(Name("ParameterizedTruncatedNormal") \
+                              .Device(DEVICE_GPU)              \
+                              .HostMemory("shape")             \
+                              .TypeConstraint<TYPE>("dtype"),  \
+                          ParameterizedTruncatedNormalOp<GPUDevice, TYPE>)
+
+TF_CALL_half(REGISTER);
+TF_CALL_float(REGISTER);
+TF_CALL_double(REGISTER);
+
+#undef REGISTER
+
+#endif  // GOOGLE_CUDA
+
 }  // end namespace tensorflow
diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op.h b/tensorflow/core/kernels/parameterized_truncated_normal_op.h
index a46bb1c9fa6..cc801eb8109 100644
--- a/tensorflow/core/kernels/parameterized_truncated_normal_op.h
+++ b/tensorflow/core/kernels/parameterized_truncated_normal_op.h
@@ -16,14 +16,35 @@ limitations under the License.
 #ifndef TENSORFLOW_KERNELS_PARAMETERIZED_TRUNCATED_NORMAL_OP_H_
 #define TENSORFLOW_KERNELS_PARAMETERIZED_TRUNCATED_NORMAL_OP_H_
 
+#include "tensorflow/core/framework/tensor_types.h"
+#include "tensorflow/core/lib/random/random_distributions.h"
+
 namespace tensorflow {
 
 class OpKernelContext;
 
 namespace functor {
 
+// Sample a truncated normal random variable, with mean, stddev, minval, and
+// maxval parameters for each batch. Uses two rejection sampling algorithms
+// described in http://rd.springer.com/article/10.1007/BF00143942.
+//
+// Either minval may be -infinity, or maxval may be +infinity. If the interval
+// (minval, maxval) is empty, the result is NaN. Large intervals which include
+// both tails may have reduced accuracy.
 template <typename Device, typename T>
-struct TruncatedNormalFunctor;
+struct TruncatedNormalFunctor {
+  void operator()(OpKernelContext* ctx, const Device& d, int64 num_batches,
+                  int64 samples_per_batch, int64 num_elements,
+                  typename TTypes<T>::ConstFlat means,
+                  typename TTypes<T>::ConstFlat stddevs,
+                  typename TTypes<T>::ConstFlat minvals,
+                  typename TTypes<T>::ConstFlat maxvals,
+                  const random::PhiloxRandom& gen,
+                  typename TTypes<T>::Flat output);
+
+  static const int kMaxIterations = 100;
+};
 
 }  // namespace functor
 }  // namespace tensorflow
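
The two rejection regimes the relocated comment refers to are easier to read outside the kernel scaffolding. A standalone host-side sketch for a standard normal truncated to [a, b], under the simplifying assumptions that a is finite and a < b; the kernels additionally handle per-batch parameters, interval flipping, and a bounded iteration count:

    #include <cmath>
    #include <random>

    double TruncatedStdNormal(double a, double b, std::mt19937* rng) {
      std::uniform_real_distribution<double> uniform(0.0, 1.0);
      const double sqrtFactor = std::sqrt(a * a + 4.0);
      const double cutoff =
          2.0 * std::exp(0.5 + a * (a - sqrtFactor) / 4.0) / (a + sqrtFactor);
      if (b - a < cutoff) {
        // Narrow interval: uniform proposals on [a, b], accepted with
        // probability exp((plusFactor - z^2) / 2).
        const double plusFactor = (a < 0.0) ? 0.0 : a * a;
        while (true) {
          const double z = a + (b - a) * uniform(*rng);
          if (uniform(*rng) <= std::exp((plusFactor - z * z) / 2.0)) return z;
        }
      }
      // Wide interval: shifted exponential proposals with the
      // acceptance-maximizing rate alpha; reject anything past b.
      const double alpha = (a + sqrtFactor) / 2.0;
      std::exponential_distribution<double> expo(alpha);
      while (true) {
        const double z = a + expo(*rng);
        const double x = z - alpha;
        if (uniform(*rng) <= std::exp(-x * x / 2.0) && z < b) return z;
      }
    }

The cutoff picks between the two: narrow intervals favor uniform proposals, while a far-right interval favors the shifted exponential proposal.
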
diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc
new file mode 100644
index 00000000000..42d47440690
--- /dev/null
+++ b/tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc
@@ -0,0 +1,214 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#if GOOGLE_CUDA
+
+#define EIGEN_USE_GPU
+
+#include "tensorflow/core/kernels/parameterized_truncated_normal_op.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <cmath>
+
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+#include "tensorflow/core/framework/tensor_types.h"
+#include "tensorflow/core/lib/random/philox_random.h"
+#include "tensorflow/core/lib/random/random_distributions.h"
+#include "tensorflow/core/util/cuda_kernel_helper.h"
+
+#define UNROLL _Pragma("unroll")
+
+namespace tensorflow {
+
+class OpKernelContext;
+
+namespace functor {
+
+typedef Eigen::GpuDevice GPUDevice;
+
+template <typename T>
+__global__ void __launch_bounds__(1024)
+    TruncatedNormalKernel(random::PhiloxRandom gen, T* data, int64 num_batches,
+                          int64 samples_per_batch, int64 num_elements,
+                          const T* means, bool single_mean, const T* stddevs,
+                          bool single_stddev, const T* minvals,
+                          bool single_minval, const T* maxvals,
+                          bool single_maxval, int64 kMaxIterations) {
+  const int32 max_samples_per_item = 2 * kMaxIterations;
+  // Initial offset as given by CUDA_1D_KERNEL_LOOP.
+  const int32 initial_offset = blockIdx.x * blockDim.x + threadIdx.x;
+  gen.Skip(max_samples_per_item * initial_offset);
+  typedef random::UniformDistribution<random::PhiloxRandom, T> Uniform;
+  Uniform dist;
+  const int kDistSize = Uniform::kResultElementCount;
+  const T quietNaN = Eigen::NumTraits<T>::quiet_NaN();
+
+  // To produce deterministic results independent of the launch configuration,
+  // each element of the output array is assigned a fixed budget of
+  // max_samples_per_item samples from the generator. After finishing an
+  // element, a thread must therefore skip the budgets of the elements owned
+  // by every other thread to reach the next element it processes.
+  const int32 samples_between_processed_elements =
+      max_samples_per_item * (gridDim.x * blockDim.x);
+
+  CUDA_1D_KERNEL_LOOP(offset, num_elements) {
+    // Track how many more samples we need to skip before we process the next
+    // element.
+    int32 remaining_samples = samples_between_processed_elements;
+
+    const int64 batch_id = offset / samples_per_batch;
+    T mean = means[single_mean ? 0 : batch_id];
+    const T input_stddev = stddevs[single_stddev ? 0 : batch_id];
+    T minval = minvals[single_minval ? 0 : batch_id];
+    T maxval = maxvals[single_maxval ? 0 : batch_id];
+
+    // Flip the distribution if we can make the lower bound positive.
+    T stddev;
+    if (Eigen::numext::isinf(minval) || maxval < mean) {
+      // Reverse all calculations. normMin and normMax will be flipped.
+      // std::swap is a host function (not available in CUDA).
+      T temp = minval;
+      minval = maxval;
+      maxval = temp;
+      stddev = -input_stddev;
+    } else {
+      stddev = input_stddev;
+    }
+
+    // Calculate normalized samples, then scale them.
+    const T normMin = (minval - mean) / stddev;
+    const T normMax = (maxval - mean) / stddev;
+
+    // Determine the method to use.
+    const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4));
+    const T cutoff =
+        T(2) *
+        Eigen::numext::exp(T(0.5) + (normMin * (normMin - sqrtFactor)) / T(4)) /
+        (normMin + sqrtFactor);
+    const T diff = normMax - normMin;
+
+    // Validate the normalized min and max, because the originals may have been
+    // flipped already.
+    if (!(input_stddev > T(0) && normMin < normMax &&
+          (Eigen::numext::isfinite(normMin) ||
+           Eigen::numext::isfinite(normMax)))) {
+      data[offset] = quietNaN;
+    } else if (diff < cutoff) {
+      // Sample from a uniform distribution on [normMin, normMax].
+
+      // Vectorized intermediate calculations for uniform rejection sampling.
+      // We always generate at most 4 samples.
+      Eigen::array<T, 4> z;
+      Eigen::array<T, 4> g;
+
+      const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin;
+
+      int numIterations = 0;
+      while (numIterations < kMaxIterations) {
+        const auto rand = dist(&gen);
+        remaining_samples -= gen.kResultElementCount;
+        UNROLL for (int i = 0; i < kDistSize; i++) {
+          z[i] = rand[i] * diff + normMin;
+        }
+        UNROLL for (int i = 0; i < kDistSize; i++) {
+          g[i] = (plusFactor - z[i] * z[i]) / 2.0;
+        }
+
+        const auto u = dist(&gen);
+        remaining_samples -= gen.kResultElementCount;
+        UNROLL for (int i = 0; i < kDistSize; i++) {
+          if (u[i] <= Eigen::numext::exp(g[i]) ||
+              numIterations + 1 >= kMaxIterations) {
+            // Accept the sample z.
+            // If we run out of iterations, just use the current uniform
+            // sample. Empirically, the probability of accepting each sample
+            // is at least 50% for typical inputs, so we will always accept
+            // by 100 iterations.
+            // This introduces a slight inaccuracy when at least one bound
+            // is large, minval is negative and maxval is positive.
+            data[offset] = z[i] * stddev + mean;
+            // Break out of the nested loop by updating numIterations.
+            numIterations = kMaxIterations;
+            break;
+          } else {
+            numIterations++;
+          }
+        }
+      }
+    } else {
+      // Sample from an exponential distribution with alpha maximizing
+      // acceptance probability, offset by normMin from the origin.
+      // Accept only if less than normMax.
+      const T alpha =
+          (normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) / T(2);
+      int numIterations = 0;
+      while (numIterations < kMaxIterations) {
+        auto rand = dist(&gen);
+        remaining_samples -= gen.kResultElementCount;
+        UNROLL for (int i = 0; i < kDistSize; i += 2) {
+          const T z = -Eigen::numext::log(rand[i]) / alpha + normMin;
+          const T x = normMin < alpha ? alpha - z : normMin - alpha;
+          const T g = Eigen::numext::exp(-x * x / 2.0);
+          const T u = rand[i + 1];
+          if ((u <= g && z < normMax) || numIterations + 1 >= kMaxIterations) {
+            data[offset] = z * stddev + mean;
+            // Break out of the nested loop by updating numIterations.
+            numIterations = kMaxIterations;
+            break;
+          } else {
+            numIterations++;
+          }
+        }
+      }
+    }
+
+    gen.Skip(remaining_samples);
+  }
+}
+
+// Partial specialization for GPU
+template <typename T>
+struct TruncatedNormalFunctor<GPUDevice, T> {
+  static const int kMaxIterations = 100;
+
+  void operator()(OpKernelContext* ctx, const GPUDevice& d, int64 num_batches,
+                  int64 samples_per_batch, int64 num_elements,
+                  typename TTypes<T>::ConstFlat means,
+                  typename TTypes<T>::ConstFlat stddevs,
+                  typename TTypes<T>::ConstFlat minvals,
+                  typename TTypes<T>::ConstFlat maxvals,
+                  const random::PhiloxRandom& gen,
+                  typename TTypes<T>::Flat output) {
+    const auto config = GetCudaLaunchConfig(num_elements, d);
+
+    TruncatedNormalKernel<
+        T><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
+        gen, output.data(), num_batches, samples_per_batch, num_elements,
+        means.data(), means.dimension(0) == 1, stddevs.data(),
+        stddevs.dimension(0) == 1, minvals.data(), minvals.dimension(0) == 1,
+        maxvals.data(), maxvals.dimension(0) == 1, kMaxIterations);
+  };
+};
+
+// Explicit instantiation of the GPU distributions functors
+template struct TruncatedNormalFunctor<GPUDevice, Eigen::half>;
+template struct TruncatedNormalFunctor<GPUDevice, float>;
+template struct TruncatedNormalFunctor<GPUDevice, double>;
+
+}  // namespace functor
+}  // namespace tensorflow
+
+#endif  // GOOGLE_CUDA
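
The gen.Skip bookkeeping above is what makes the output independent of the launch configuration: every output element owns a fixed window of 2 * kMaxIterations samples in the Philox stream, and a thread burns whatever its element did not consume before moving to its next element. A standalone sketch of that indexing, with a toy counter standing in for random::PhiloxRandom:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct CounterGen {  // toy counter-based generator with a cheap Skip
      uint64_t counter = 0;
      void Skip(uint64_t n) { counter += n; }
      uint64_t Next() { return counter++; }
    };

    // Each element i draws from the window [budget * i, budget * (i + 1)),
    // no matter how many simulated "threads" process the array.
    std::vector<uint64_t> Fill(int n, int threads, uint64_t budget) {
      std::vector<uint64_t> out(n);
      for (int tid = 0; tid < threads; ++tid) {
        CounterGen gen;
        gen.Skip(budget * tid);  // initial per-thread offset
        for (int i = tid; i < n; i += threads) {
          uint64_t used = 0;
          out[i] = gen.Next();  // the element consumes some of its budget
          ++used;
          gen.Skip(budget * threads - used);  // burn the rest of the stride
        }
      }
      return out;
    }

    int main() {
      // Same output regardless of the thread count.
      std::cout << (Fill(8, 1, 4) == Fill(8, 3, 4)) << "\n";  // prints 1
    }
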
diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc
index 13d1187f926..07f2f75ca5a 100644
--- a/tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc
+++ b/tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc
@@ -131,5 +131,8 @@ static Graph* PTruncatedNormalOneTail(int num_batches, int samples_per_batch) {
 BM_PTruncatedNormalDev(cpu, 1000, 1000);
 BM_PTruncatedNormalDev_2SD(cpu, 10000, 100);
 BM_PTruncatedNormalDev_OneTail(cpu, 10000, 100);
+BM_PTruncatedNormalDev(gpu, 1000, 1000);
+BM_PTruncatedNormalDev_2SD(gpu, 10000, 100);
+BM_PTruncatedNormalDev_OneTail(gpu, 10000, 100);
 
 }  // namespace tensorflow
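
The new ScatterNd op that follows builds a zero-initialized tensor of the requested shape and writes each row of updates into the slice addressed by the corresponding row of indices. A standalone sketch of the ASSIGN semantics over flat row-major buffers, assuming ixdim >= 1 and valid inputs (illustrative only, no validation):

    #include <cstdint>
    #include <vector>

    // out_shape: full output shape; ixdim: indices.shape[-1];
    // indices holds N * ixdim coordinates; updates holds N * slice_size
    // values, where slice_size is the product of out_shape[ixdim:].
    std::vector<float> ScatterNdAssign(const std::vector<int64_t>& out_shape,
                                       int64_t ixdim,
                                       const std::vector<int64_t>& indices,
                                       const std::vector<float>& updates) {
      const int64_t rank = static_cast<int64_t>(out_shape.size());
      int64_t total = 1, slice_size = 1;
      for (int64_t d = 0; d < rank; ++d) total *= out_shape[d];
      for (int64_t d = ixdim; d < rank; ++d) slice_size *= out_shape[d];
      std::vector<float> out(total, 0.0f);  // the SetZeroFunctor step
      const int64_t n = static_cast<int64_t>(indices.size()) / ixdim;
      for (int64_t i = 0; i < n; ++i) {
        // Flatten the leading ixdim coordinates into a slice number.
        int64_t slice = 0;
        for (int64_t d = 0; d < ixdim; ++d) {
          slice = slice * out_shape[d] + indices[i * ixdim + d];
        }
        for (int64_t j = 0; j < slice_size; ++j) {
          out[slice * slice_size + j] = updates[i * slice_size + j];
        }
      }
      return out;
    }

For example, out_shape = {4, 2}, ixdim = 1, indices = {3, 1}, updates = {9, 9, 7, 7} produces {0, 0, 7, 7, 0, 0, 9, 9}.
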
diff --git a/tensorflow/core/kernels/scatter_nd_op.cc b/tensorflow/core/kernels/scatter_nd_op.cc
new file mode 100644
index 00000000000..83b38d73381
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op.cc
@@ -0,0 +1,402 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// See docs in ../ops/state_ops.cc.
+#define EIGEN_USE_THREADS
+
+#include "tensorflow/core/kernels/scatter_nd_op.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/register_types.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/kernels/bounds_check.h"
+#include "tensorflow/core/kernels/fill_functor.h"
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/util.h"
+
+namespace tensorflow {
+
+typedef Eigen::ThreadPoolDevice CPUDevice;
+typedef Eigen::GpuDevice GPUDevice;
+
+// Check whether updates.shape = indices.shape[0] + params.shape[IXDIM:]
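+// e.g. params_shape = [P0, P1, P2] with indices.shape = [N, 2] requires
+// updates.shape = [N, P2].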
+static bool ValidUpdateShape(const TensorShape& params_shape,
+                             const Tensor& indices, const Tensor& updates) {
+  int64 indices_nd = 1;
+  if (indices.dims() > 1) {
+    indices_nd = indices.dim_size(1);
+  }
+  for (int d = indices_nd; d < params_shape.dims(); d++) {
+    if (updates.dim_size(d - indices_nd + 1) != params_shape.dim_size(d)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template <typename Index>
+static void PrepareAndValidateInputs(OpKernelContext* c,
+                                     const TensorShape& params_shape,
+                                     const Tensor& indices,
+                                     const Tensor& updates, int64* indices_nd,
+                                     Index* num_updates, Index* slice_size) {
+  const TensorShape& indices_shape(indices.shape());
+  const TensorShape& updates_shape(updates.shape());
+
+  OP_REQUIRES(
+      c, TensorShapeUtils::IsVectorOrHigher(params_shape),
+      errors::InvalidArgument("Output must be at least 1-D, ", "got shape: ",
+                              params_shape.DebugString()));
+
+  OP_REQUIRES(c, params_shape.num_elements() >= 0 ||
+                     (indices.NumElements() == 0 && updates.NumElements() == 0),
+              errors::InvalidArgument(
+                  "Indices and updates specified for empty output", " shape"));
+
+  OP_REQUIRES(c, updates.dim_size(0) == indices.dim_size(0),
+              errors::InvalidArgument(
+                  "The outermost dimension of updates and indices ",
+                  "must match. Got indices.shape ", indices_shape.DebugString(),
+                  ", updates.shape ", updates_shape.DebugString()));
+  OP_REQUIRES(
+      c, ValidUpdateShape(params_shape, indices, updates),
+      errors::InvalidArgument(
+          "Must have updates.shape = indices.shape[0] + params_shape[IXDIM:], ",
+          "got updates.shape ", updates_shape.DebugString(), ", indices.shape ",
+          indices_shape.DebugString(), ", params_shape ",
+          params_shape.DebugString()));
+  // Check that we have enough index space
+  const int64 N_big = indices.NumElements();
+  OP_REQUIRES(c, N_big <= std::numeric_limits<Index>::max(),
+              errors::InvalidArgument(
+                  "indices has too many elements for ",
+                  DataTypeString(DataTypeToEnum<Index>::v()), " indexing: ",
+                  N_big, " > ", std::numeric_limits<Index>::max()));
+  OP_REQUIRES(
+      c, params_shape.dim_size(0) <= std::numeric_limits<Index>::max(),
+      errors::InvalidArgument("params_shape[0] too large for ",
+                              DataTypeString(DataTypeToEnum<Index>::v()),
+                              " indexing: ", params_shape.dim_size(0), " > ",
+                              std::numeric_limits<Index>::max()));
+
+  // Calculate the number of dimensions in indices
+  *indices_nd = 1;
+  if (indices_shape.dims() > 1) {
+    *indices_nd = indices_shape.dim_size(indices_shape.dims() - 1);
+  }
+
+  // Calculate the number of elements that make up each slice of our updated
+  // tensor. This allows us to work with flattened tensors and copy over whole
+  // slices at a time.
+  Index total_nd = params_shape.dims();
+
+  int64 slice_size_big = 1;
+  for (int64 i = *indices_nd; i < total_nd; ++i) {
+    slice_size_big *= params_shape.dim_size(i);
+  }
+
+  OP_REQUIRES(c, slice_size_big <= std::numeric_limits<Index>::max(),
+              errors::InvalidArgument("slice size is too large for indexing: ",
+                                      slice_size_big, " > ",
+                                      std::numeric_limits<Index>::max()));
+
+  *slice_size = static_cast<Index>(slice_size_big);
+
+  const int64 safe_indices_nd = (*indices_nd < 1) ? 1 : *indices_nd;
+  *num_updates = indices_shape.num_elements() / safe_indices_nd;
+}
+
+template <typename Device, typename T, typename Index>
+class ScatterNdOp : public OpKernel {
+ public:
+  explicit ScatterNdOp(OpKernelConstruction* c) : OpKernel(c) {
+    const DataType dt = DataTypeToEnum<T>::v();
+    const DataType index_t = DataTypeToEnum<Index>::v();
+    OP_REQUIRES_OK(c, c->MatchSignature({index_t, dt, index_t}, {dt}));
+  }
+
+  void Compute(OpKernelContext* c) override {
+    const Tensor& indices = c->input(0);
+    const Tensor& updates = c->input(1);
+    const Tensor& shape_input = c->input(2);
+
+    OP_REQUIRES(c, shape_input.dims() == 1,
+                errors::InvalidArgument("Shape must be a vector"));
+    auto vec = shape_input.flat<Index>();
+    TensorShape shape;
+    TensorShapeUtils::MakeShape(vec.data(), vec.size(), &shape);
+
+    int64 indices_nd;
+    Index num_updates;
+    Index slice_size;
+    PrepareAndValidateInputs<Index>(c, shape, indices, updates, &indices_nd,
+                                    &num_updates, &slice_size);
+    if (!c->status().ok()) return;
+
+    Tensor scratch;
+    OP_REQUIRES_OK(c, c->allocate_temp(DT_INT32, TensorShape(), &scratch));
+
+    auto scratch_scalar = scratch.scalar<Index>();
+    auto indices_flat = indices.flat_inner_dims<Index>();
+    auto updates_flat = updates.shaped<T, 2>({num_updates, slice_size});
+
+    Index bad_i = -1;
+    switch (indices_nd) {
+#define PARAMS_CASE(IXDIM)                                                   \
+  case IXDIM: {                                                              \
+    Tensor* out = nullptr;                                                   \
+    OP_REQUIRES_OK(c, c->allocate_output(0, shape, &out));                   \
+    functor::SetZeroFunctor<Device, T> fill;                                 \
+    fill(c->eigen_device<Device>(), out->flat<T>());                         \
+    if (shape.num_elements() > 0) {                                          \
+      auto output_flat = out->flat_outer_dims<T, (IXDIM) + 1>();             \
+      functor::ScatterNdFunctor<Device, T, Index,                            \
+                                scatter_nd_op::UpdateOp::ASSIGN, (IXDIM)>    \
+          functor;                                                           \
+      bad_i = functor(c->eigen_device<Device>(), slice_size, scratch_scalar, \
+                      output_flat, indices_flat, updates_flat, output_flat); \
+    }                                                                        \
+  } break
+      PARAMS_CASE(0);
+      PARAMS_CASE(1);
+      PARAMS_CASE(2);
+      PARAMS_CASE(3);
+      PARAMS_CASE(4);
+      PARAMS_CASE(5);
+#undef PARAMS_CASE
+      default:
+        OP_REQUIRES(c, false,
+                    errors::InvalidArgument(
+                        "Only indices.shape[-1] values between 0 and 5 "
+                        "are currently supported.  Requested rank: ",
+                        indices_nd));
+    }
+    OP_REQUIRES(
+        c, bad_i < 0,
+        errors::InvalidArgument(
+            "Invalid indices: ", SliceDebugString(indices.shape(), bad_i),
+            " = [", str_util::Join(gtl::ArraySlice<Index>(
+                                       &indices_flat(bad_i, 0), indices_nd),
+                                   ", "),
+            "] does not index into ", shape.DebugString()));
+  }
+};
+
+template <typename Device, typename T, typename Index,
+          scatter_nd_op::UpdateOp op>
+class ScatterNdUpdateOp : public OpKernel {
+ public:
+  explicit ScatterNdUpdateOp(OpKernelConstruction* c) : OpKernel(c) {
+    const DataType dt = DataTypeToEnum<T>::v();
+    const DataType dt_ref = DataTypeToEnum<T>::ref();
+    const DataType index_t = DataTypeToEnum<Index>::v();
+    OP_REQUIRES_OK(c, c->MatchSignature({dt_ref, index_t, dt}, {dt_ref}));
+    OP_REQUIRES_OK(c, c->GetAttr("use_locking", &use_exclusive_lock_));
+  }
+
+  void Compute(OpKernelContext* c) override {
+    if (use_exclusive_lock_) {
+      // Hold mutex while we apply updates
+      mutex_lock l(*c->input_ref_mutex(0));
+      DoCompute(c);
+    } else {
+      DoCompute(c);
+    }
+  }
+
+ private:
+  bool use_exclusive_lock_;
+
+  void DoCompute(OpKernelContext* c) {
+    Tensor params = c->mutable_input(0, use_exclusive_lock_);
+    const Tensor& indices = c->input(1);
+    const Tensor& updates = c->input(2);
+    const TensorShape& params_shape(params.shape());
+
+    int64 indices_nd;
+    Index num_updates;
+    Index slice_size;
+
+    OP_REQUIRES(c, params.IsInitialized(),
+                errors::FailedPrecondition("Null ref for params"));
+    PrepareAndValidateInputs<Index>(c, params_shape, indices, updates,
+                                    &indices_nd, &num_updates, &slice_size);
+    if (!c->status().ok()) return;
+
+    Tensor scratch;
+    OP_REQUIRES_OK(c, c->allocate_temp(DT_INT32, TensorShape(), &scratch));
+
+    auto scratch_scalar = scratch.scalar<Index>();
+    auto indices_flat = indices.flat_inner_dims<Index>();
+    auto updates_flat = updates.shaped<T, 2>({num_updates, slice_size});
+
+    Index bad_i = -1;
+    c->forward_ref_input_to_ref_output(0, 0);
+    switch (indices_nd) {
+#define PARAMS_CASE(IXDIM)                                                 \
+  case IXDIM: {                                                            \
+    auto params_flat = params.flat_outer_dims<T, (IXDIM) + 1>();           \
+    functor::ScatterNdFunctor<Device, T, Index, op, IXDIM> functor;        \
+    bad_i = functor(c->eigen_device<Device>(), slice_size, scratch_scalar, \
+                    params_flat, indices_flat, updates_flat, params_flat); \
+  } break
+      PARAMS_CASE(0);
+      PARAMS_CASE(1);
+      PARAMS_CASE(2);
+      PARAMS_CASE(3);
+      PARAMS_CASE(4);
+      PARAMS_CASE(5);
+#undef PARAMS_CASE
+      default:
+        OP_REQUIRES(c, false,
+                    errors::InvalidArgument(
+                        "Only indices.shape[-1] values between 0 and 5 "
+                        "are currently supported.  Requested rank: ",
+                        indices_nd));
+    }
+    OP_REQUIRES(
+        c, bad_i < 0,
+        errors::InvalidArgument(
+            "Invalid indices: ", SliceDebugString(indices.shape(), bad_i),
+            " = [", str_util::Join(gtl::ArraySlice<Index>(
+                                       &indices_flat(bad_i, 0), indices_nd),
+                                   ", "),
+            "] is not in [0, ", params.dim_size(0), ")"));
+  }
+};
+
+#define REGISTER_SCATTER_ND_KERNEL_INDEX(type, index_type, dev, name)  \
+  REGISTER_KERNEL_BUILDER(Name(name)                                   \
+                              .Device(DEVICE_##dev)                    \
+                              .TypeConstraint<type>("T")               \
+                              .TypeConstraint<index_type>("Tindices"), \
+                          ScatterNdOp<dev##Device, type, index_type>)
+
+#define REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX(type, index_type, dev, name, \
+                                                op)                          \
+  REGISTER_KERNEL_BUILDER(                                                   \
+      Name(name)                                                             \
+          .Device(DEVICE_##dev)                                              \
+          .TypeConstraint<type>("T")                                         \
+          .TypeConstraint<index_type>("Tindices"),                           \
+      ScatterNdUpdateOp<dev##Device, type, index_type, op>)
+
+#define REGISTER_SCATTER_ND_KERNEL(type, dev, name)         \
+  REGISTER_SCATTER_ND_KERNEL_INDEX(type, int32, dev, name); \
+  REGISTER_SCATTER_ND_KERNEL_INDEX(type, int64, dev, name)
+
+#define REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, name, op)         \
+  REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX(type, int32, dev, name, op); \
+  REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX(type, int64, dev, name, op)
+
+#define REGISTER_SCATTER_ND_ADD_SUB(type, dev)                     \
+  REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdAdd",     \
+                                    scatter_nd_op::UpdateOp::ADD); \
+  REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdSub",     \
+                                    scatter_nd_op::UpdateOp::SUB); \
+  REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdMul",     \
+                                    scatter_nd_op::UpdateOp::MUL); \
+  REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdDiv",     \
+                                    scatter_nd_op::UpdateOp::DIV);
+
+#define REGISTER_SCATTER_ND(type, dev) \
+  REGISTER_SCATTER_ND_KERNEL(type, dev, "ScatterNd");
+
+#define REGISTER_SCATTER_ND_UPDATE(type, dev)                     \
+  REGISTER_SCATTER_ND_UPDATE_KERNEL(type, dev, "ScatterNdUpdate", \
+                                    scatter_nd_op::UpdateOp::ASSIGN);
+
+// Registers CPU kernels.
+#define REGISTER_SCATTER_ND_ADD_SUB_CPU(type) \
+  REGISTER_SCATTER_ND_ADD_SUB(type, CPU);
+
+#define REGISTER_SCATTER_ND_UPDATE_CPU(type) \
+  REGISTER_SCATTER_ND_UPDATE(type, CPU);
+
+#define REGISTER_SCATTER_ND_CPU(type) REGISTER_SCATTER_ND(type, CPU);
+
+TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_ADD_SUB_CPU);
+TF_CALL_ALL_TYPES(REGISTER_SCATTER_ND_UPDATE_CPU);
+TF_CALL_ALL_TYPES(REGISTER_SCATTER_ND_CPU);
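+
+// As a concrete example of the macro chain above,
+// REGISTER_SCATTER_ND_CPU(float) ultimately expands to registrations
+// equivalent to:
+//
+//   REGISTER_KERNEL_BUILDER(Name("ScatterNd")
+//                               .Device(DEVICE_CPU)
+//                               .TypeConstraint<float>("T")
+//                               .TypeConstraint<int32>("Tindices"),
+//                           ScatterNdOp<CPUDevice, float, int32>);
+//
+// plus the matching int64 index registration.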
+
+// Registers GPU kernels.
+#if GOOGLE_CUDA
+#define REGISTER_SCATTER_ND_ADD_SUB_GPU(type) \
+  REGISTER_SCATTER_ND_ADD_SUB(type, GPU);
+
+#define REGISTER_SCATTER_ND_UPDATE_GPU(type) \
+  REGISTER_SCATTER_ND_UPDATE(type, GPU);
+
+// TODO(simister): Re-enable when GPU support is working.
+// TF_CALL_GPU_NUMBER_TYPES_NO_HALF(REGISTER_SCATTER_ND_ADD_SUB_GPU);
+// TF_CALL_GPU_NUMBER_TYPES_NO_HALF(REGISTER_SCATTER_ND_UPDATE_GPU);
+
+#endif  // GOOGLE_CUDA
+
+#undef REGISTER_SCATTER_ND
+#undef REGISTER_SCATTER_ND_CPU
+#undef REGISTER_SCATTER_ND_ADD_SUB
+#undef REGISTER_SCATTER_ND_ADD_SUB_CPU
+#undef REGISTER_SCATTER_ND_ADD_SUB_GPU
+#undef REGISTER_SCATTER_ND_UPDATE
+#undef REGISTER_SCATTER_ND_UPDATE_CPU
+#undef REGISTER_SCATTER_ND_UPDATE_GPU
+#undef REGISTER_SCATTER_ND_UPDATE_KERNEL
+#undef REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX
+#undef REGISTER_SCATTER_ND_KERNEL
+#undef REGISTER_SCATTER_ND_KERNEL_INDEX
+
+#if GOOGLE_CUDA
+// Forward declarations of the functor specializations for GPU.
+namespace functor {
+
+#define DECLARE_GPU_SPECS_OP(T, Index, op, NDIM)                         \
+  template <>                                                            \
+  Index ScatterNdFunctor<GPUDevice, T, Index, op, NDIM>::operator()(     \
+      const GPUDevice& d, const Index slice_size,                        \
+      typename TTypes<Index>::Scalar Tscratch,                           \
+      typename TTypes<T, NDIM + 1>::Tensor Tparams,                      \
+      typename TTypes<Index, 2>::ConstTensor Tindices,                   \
+      typename TTypes<T, 2>::ConstTensor Tupdates,                       \
+      typename TTypes<T, NDIM + 1>::Tensor Toutput);                     \
+  extern template struct ScatterNdFunctor<GPUDevice, T, Index, op, NDIM>;
+
+#define DECLARE_GPU_SPECS_OPS(T, Index, op) \
+  DECLARE_GPU_SPECS_OP(T, Index, op, 0);    \
+  DECLARE_GPU_SPECS_OP(T, Index, op, 1);    \
+  DECLARE_GPU_SPECS_OP(T, Index, op, 2);    \
+  DECLARE_GPU_SPECS_OP(T, Index, op, 3);    \
+  DECLARE_GPU_SPECS_OP(T, Index, op, 4);    \
+  DECLARE_GPU_SPECS_OP(T, Index, op, 5)
+
+#define DECLARE_GPU_SPECS_INDEX(T, Index)                           \
+  DECLARE_GPU_SPECS_OPS(T, Index, scatter_nd_op::UpdateOp::ASSIGN); \
+  DECLARE_GPU_SPECS_OPS(T, Index, scatter_nd_op::UpdateOp::ADD);    \
+  DECLARE_GPU_SPECS_OPS(T, Index, scatter_nd_op::UpdateOp::SUB);    \
+  DECLARE_GPU_SPECS_OPS(T, Index, scatter_nd_op::UpdateOp::MUL);    \
+  DECLARE_GPU_SPECS_OPS(T, Index, scatter_nd_op::UpdateOp::DIV);
+
+#define DECLARE_GPU_SPECS(T)         \
+  DECLARE_GPU_SPECS_INDEX(T, int32); \
+  DECLARE_GPU_SPECS_INDEX(T, int64);
+
+// TODO(simister): Re-enable when GPU support is working.
+// TF_CALL_GPU_NUMBER_TYPES_NO_HALF(DECLARE_GPU_SPECS);
+
+#undef DECLARE_GPU_SPECS
+#undef DECLARE_GPU_SPECS_INDEX
+#undef DECLARE_GPU_SPECS_OPS
+#undef DECLARE_GPU_SPECS_OP
+
+}  // namespace functor
+#endif  // GOOGLE_CUDA
+
+}  // namespace tensorflow
diff --git a/tensorflow/core/kernels/scatter_nd_op.h b/tensorflow/core/kernels/scatter_nd_op.h
new file mode 100644
index 00000000000..51917b5a0de
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op.h
@@ -0,0 +1,62 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_KERNELS_SCATTER_ND_OP_H_
+#define TENSORFLOW_KERNELS_SCATTER_ND_OP_H_
+
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/register_types.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/kernels/bounds_check.h"
+#include "tensorflow/core/kernels/fill_functor.h"
+#include "tensorflow/core/kernels/scatter_nd_op.h"
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/util.h"
+
+namespace tensorflow {
+
+typedef Eigen::ThreadPoolDevice CPUDevice;
+
+class OpKernelContext;
+
+namespace scatter_nd_op {
+
+enum class UpdateOp { ASSIGN, ADD, SUB, MUL, DIV };
+
+}  // namespace scatter_nd_op
+
+namespace functor {
+
+// Functor used by ScatterNdOp to do the computations.
+template <typename Device, typename T, typename Index,
+          scatter_nd_op::UpdateOp op, int IXDIM>
+struct ScatterNdFunctor {
+  // Returns -1 on success or a nonnegative i s.t. indices[i] is a bad index.
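+  // IXDIM is the index depth (the size of indices' innermost dimension);
+  // Tparams and Toutput view the output as IXDIM outer dimensions plus one
+  // flattened slice dimension of length slice_size.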
+  Index operator()(const Device& d, const Index slice_size,
+                   typename TTypes<Index>::Scalar Tscratch,
+                   typename TTypes<T, IXDIM + 1>::Tensor Tparams,
+                   typename TTypes<Index, 2>::ConstTensor Tindices,
+                   typename TTypes<T, 2>::ConstTensor Tupdates,
+                   typename TTypes<T, IXDIM + 1>::Tensor Toutput);
+};
+
+}  // namespace functor
+}  // namespace tensorflow
+
+#endif  // TENSORFLOW_KERNELS_SCATTER_ND_OP_H_
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h b/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
new file mode 100644
index 00000000000..d2a7746c35e
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
@@ -0,0 +1,224 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SCATTER_ND_OP_CPU_IMPL_H_
+#define THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SCATTER_ND_OP_CPU_IMPL_H_
+
+// CPU functor definitions for ScatterNd ops.
+
+#define EIGEN_USE_THREADS
+
+#include <atomic>
+
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/register_types.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/kernels/bounds_check.h"
+#include "tensorflow/core/kernels/fill_functor.h"
+#include "tensorflow/core/kernels/scatter_nd_op.h"
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/util.h"
+
+namespace tensorflow {
+
+typedef Eigen::ThreadPoolDevice CPUDevice;
+
+class OpKernelContext;
+
+// CPU specializations of UpdateExecutor, one for each
+// scatter_nd_op::UpdateOp.
+namespace generator {
+
+template <typename T, typename Index, scatter_nd_op::UpdateOp op>
+class UpdateExecutor {
+ public:
+  static void Update(T* input, const T* updates, T* output, Index slice_size);
+};
+
+template <typename T, typename Index>
+class UpdateExecutor<T, Index, scatter_nd_op::UpdateOp::ASSIGN> {
+ public:
+  static void Update(T* /* unused */, const T* updates, T* output,
+                     Index slice_size) {
+    std::copy_n(updates, slice_size, output);
+  }
+};
+
+template <typename T, typename Index>
+class UpdateExecutor<T, Index, scatter_nd_op::UpdateOp::ADD> {
+ public:
+  static void Update(T* input, const T* updates, T* output, Index slice_size) {
+    std::transform(input, input + slice_size, updates, output, std::plus<T>());
+  }
+};
+
+template <typename T, typename Index>
+class UpdateExecutor<T, Index, scatter_nd_op::UpdateOp::SUB> {
+ public:
+  static void Update(T* input, const T* updates, T* output, Index slice_size) {
+    std::transform(input, input + slice_size, updates, output, std::minus<T>());
+  }
+};
+
+template <typename T, typename Index>
+class UpdateExecutor<T, Index, scatter_nd_op::UpdateOp::MUL> {
+ public:
+  static void Update(T* input, const T* updates, T* output, Index slice_size) {
+    std::transform(input, input + slice_size, updates, output,
+                   std::multiplies<T>());
+  }
+};
+
+template <typename T, typename Index>
+class UpdateExecutor<T, Index, scatter_nd_op::UpdateOp::DIV> {
+ public:
+  static void Update(T* input, const T* updates, T* output, Index slice_size) {
+    std::transform(input, input + slice_size, updates, output,
+                   std::divides<T>());
+  }
+};
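+
+// As an illustrative (hypothetical) use of the executors above, applying an
+// ADD update to a slice of length 3 behaves like:
+//
+//   float in[3] = {1, 2, 3}, up[3] = {10, 20, 30}, out[3];
+//   UpdateExecutor<float, int32, scatter_nd_op::UpdateOp::ADD>::Update(
+//       in, up, out, 3);
+//   // out is now {11, 22, 33}.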
+
+template <typename T, typename Index, scatter_nd_op::UpdateOp op, int IXDIM>
+class ScatterNdSliceGenerator {
+ public:
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE ScatterNdSliceGenerator(
+      const Index slice_size, typename TTypes<T, IXDIM + 1>::Tensor Tparams,
+      typename TTypes<Index, 2>::ConstTensor Tindices,
+      typename TTypes<T, 2>::ConstTensor Tupdates,
+      typename TTypes<T, IXDIM + 1>::Tensor Toutput,
+      std::atomic<Index>* error_loc)
+      : slice_size_(slice_size),
+        Tparams_(Tparams),
+        Tindices_(Tindices),
+        Tupdates_(Tupdates),
+        Toutput_(Toutput),
+        error_loc_(error_loc) {}
+
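+  // Copies the IXDIM coordinates of update row `loc` from Tindices into *ix
+  // (setting the trailing slice coordinate to 0) and returns true iff any
+  // coordinate is out of bounds for the corresponding dimension of Tparams.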
+  EIGEN_DEVICE_FUNC bool GenerateIndices(
+      const Index loc, Eigen::array<Eigen::DenseIndex, IXDIM + 1>* ix) const {
+    (*ix)[IXDIM] = 0;
+    bool out_of_bounds = false;
+    for (int i = 0; i < IXDIM; ++i) {
+      const Index ix_i = internal::SubtleMustCopy(Tindices_(loc, i));
+      (*ix)[i] = ix_i;
+      out_of_bounds |= !FastBoundsCheck(ix_i, Tparams_.dimension(i));
+    }
+    return out_of_bounds;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int32
+  operator()(const Eigen::array<Eigen::DenseIndex, 1>& loc_array) const {
+    auto loc = loc_array[0];
+    Eigen::array<Eigen::DenseIndex, IXDIM + 1> ix_params;
+    Eigen::array<Eigen::DenseIndex, 2> ix_updates;
+    ix_updates[0] = loc;
+    ix_updates[1] = 0;
+    const bool out_of_bounds = GenerateIndices(loc, &ix_params);
+    if (TF_PREDICT_FALSE(out_of_bounds)) {
+      error_loc_->store(loc);
+    } else {
+      UpdateExecutor<T, Index, op>::Update(&Tparams_(ix_params),
+                                           &Tupdates_(ix_updates),
+                                           &Toutput_(ix_params), slice_size_);
+    }
+    // The generated value is unused; this generator runs purely for its side
+    // effects on Toutput_ and error_loc_.
+    return static_cast<int32>(0);
+  }
+
+ protected:
+  const Index slice_size_;
+  mutable typename TTypes<T, IXDIM + 1>::Tensor Tparams_;
+  const typename TTypes<Index, 2>::ConstTensor Tindices_;
+  const typename TTypes<T, 2>::ConstTensor Tupdates_;
+  mutable typename TTypes<T, IXDIM + 1>::Tensor Toutput_;
+  std::atomic<Index>* error_loc_;
+};
+
+}  // namespace generator
+
+namespace functor {
+
+// Implementation of update functor for CPU.
+template <typename T, typename Index, scatter_nd_op::UpdateOp op, int IXDIM>
+struct ScatterNdFunctor<CPUDevice, T, Index, op, IXDIM> {
+  Index operator()(const CPUDevice& d, const Index slice_size,
+                   typename TTypes<Index>::Scalar Tscratch,
+                   typename TTypes<T, IXDIM + 1>::Tensor Tparams,
+                   typename TTypes<Index, 2>::ConstTensor Tindices,
+                   typename TTypes<T, 2>::ConstTensor Tupdates,
+                   typename TTypes<T, IXDIM + 1>::Tensor Toutput) {
+    std::atomic<Index> error_loc(-1);
+
+    const Eigen::DenseIndex batch_size = Tindices.dimension(0);
+#if !defined(EIGEN_HAS_INDEX_LIST)
+    Eigen::Tensor<Eigen::DenseIndex, 1>::Dimensions reshape_dims{{ 1 }};
+    Eigen::array<Eigen::DenseIndex, 1> broadcast_dims{{ batch_size }};
+#else
+    Eigen::IndexList<Eigen::type2index<1> > reshape_dims;
+    Eigen::IndexList<Eigen::DenseIndex> broadcast_dims;
+    broadcast_dims.set(0, batch_size);
+#endif
+
+    generator::ScatterNdSliceGenerator<T, Index, op, IXDIM> generator(
+        slice_size, Tparams, Tindices, Tupdates, Toutput, &error_loc);
+    Tscratch.device(d) = Tscratch.reshape(reshape_dims)
+                             .broadcast(broadcast_dims)
+                             .generate(generator)
+                             .sum();
+
+    // error_loc is -1 if there was no out-of-bounds index; otherwise it
+    // holds the row of an out-of-bounds index in Tindices.
+    return error_loc.load();
+  }
+};
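+
+// Note on the implementation above: the scratch scalar is reshaped to a
+// vector of length 1 and broadcast to one element per update row, so the
+// generator's operator() runs once per row of Tindices. The updates are
+// applied as side effects, and the final sum() into Tscratch simply forces
+// evaluation on the device.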
+
+#define REGISTER_SCATTER_ND_FULL(T, Index, op)                               \
+  template Index                                                             \
+  ScatterNdFunctor<CPUDevice, T, Index, op, CPU_PROVIDED_IXDIM>::operator()( \
+      const CPUDevice& d, const Index slice_size,                            \
+      typename TTypes<Index>::Scalar Tscratch,                               \
+      typename TTypes<T, CPU_PROVIDED_IXDIM + 1>::Tensor Tparams,            \
+      typename TTypes<Index, 2>::ConstTensor Tindices,                       \
+      typename TTypes<T, 2>::ConstTensor Tupdates,                           \
+      typename TTypes<T, CPU_PROVIDED_IXDIM + 1>::Tensor Toutput)
+
+#define REGISTER_SCATTER_ND_INDEX(type, op)  \
+  REGISTER_SCATTER_ND_FULL(type, int32, op); \
+  REGISTER_SCATTER_ND_FULL(type, int64, op)
+
+#define REGISTER_SCATTER_ND_UPDATE(type) \
+  REGISTER_SCATTER_ND_INDEX(type, scatter_nd_op::UpdateOp::ASSIGN);
+
+#define REGISTER_SCATTER_ND_MATH(type)                           \
+  REGISTER_SCATTER_ND_INDEX(type, scatter_nd_op::UpdateOp::ADD); \
+  REGISTER_SCATTER_ND_INDEX(type, scatter_nd_op::UpdateOp::SUB); \
+  REGISTER_SCATTER_ND_INDEX(type, scatter_nd_op::UpdateOp::MUL); \
+  REGISTER_SCATTER_ND_INDEX(type, scatter_nd_op::UpdateOp::DIV);
+
+TF_CALL_ALL_TYPES(REGISTER_SCATTER_ND_UPDATE);
+TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_MATH);
+
+#undef REGISTER_SCATTER_ND_MATH
+#undef REGISTER_SCATTER_ND_UPDATE
+#undef REGISTER_SCATTER_ND_INDEX
+#undef REGISTER_SCATTER_ND_FULL
+
+}  // namespace functor
+
+}  // namespace tensorflow
+
+#endif  // THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SCATTER_ND_OP_CPU_IMPL_H_
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl_0.cc b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_0.cc
new file mode 100644
index 00000000000..e978c5c348a
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_0.cc
@@ -0,0 +1,18 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
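+// Each scatter_nd_op_cpu_impl_N.cc shard defines CPU_PROVIDED_IXDIM before
+// including scatter_nd_op_cpu_impl.h, so the functor instantiations for each
+// index depth are compiled in a separate translation unit.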
+#define CPU_PROVIDED_IXDIM 0
+#include "tensorflow/core/kernels/scatter_nd_op_cpu_impl.h"
+#undef CPU_PROVIDED_IXDIM
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl_1.cc b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_1.cc
new file mode 100644
index 00000000000..1c7867a1a2e
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_1.cc
@@ -0,0 +1,18 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#define CPU_PROVIDED_IXDIM 1
+#include "tensorflow/core/kernels/scatter_nd_op_cpu_impl.h"
+#undef CPU_PROVIDED_IXDIM
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl_2.cc b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_2.cc
new file mode 100644
index 00000000000..fe094c5e6b0
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_2.cc
@@ -0,0 +1,18 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#define CPU_PROVIDED_IXDIM 2
+#include "tensorflow/core/kernels/scatter_nd_op_cpu_impl.h"
+#undef CPU_PROVIDED_IXDIM
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl_3.cc b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_3.cc
new file mode 100644
index 00000000000..a8b0e32bda5
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_3.cc
@@ -0,0 +1,18 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#define CPU_PROVIDED_IXDIM 3
+#include "tensorflow/core/kernels/scatter_nd_op_cpu_impl.h"
+#undef CPU_PROVIDED_IXDIM
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl_4.cc b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_4.cc
new file mode 100644
index 00000000000..2cae469fcae
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_4.cc
@@ -0,0 +1,18 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#define CPU_PROVIDED_IXDIM 4
+#include "tensorflow/core/kernels/scatter_nd_op_cpu_impl.h"
+#undef CPU_PROVIDED_IXDIM
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl_5.cc b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_5.cc
new file mode 100644
index 00000000000..c6031fd7818
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl_5.cc
@@ -0,0 +1,19 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#define CPU_PROVIDED_IXDIM 5
+#include "tensorflow/core/kernels/scatter_nd_op_cpu_impl.h"
+#undef CPU_PROVIDED_IXDIM
diff --git a/tensorflow/core/kernels/scatter_nd_op_test.cc b/tensorflow/core/kernels/scatter_nd_op_test.cc
new file mode 100644
index 00000000000..d6743a68674
--- /dev/null
+++ b/tensorflow/core/kernels/scatter_nd_op_test.cc
@@ -0,0 +1,320 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/framework/fake_input.h"
+#include "tensorflow/core/framework/graph.pb.h"
+#include "tensorflow/core/framework/node_def_builder.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/framework/types.pb.h"
+#include "tensorflow/core/kernels/ops_testutil.h"
+#include "tensorflow/core/kernels/ops_util.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/random/simple_philox.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/test.h"
+#include "tensorflow/core/platform/test_benchmark.h"
+
+namespace tensorflow {
+namespace {
+
+class ScatterNdUpdateOpTest : public OpsTestBase {
+ protected:
+  void MakeOp(DataType variable_ref_type, DataType index_type) {
+    TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate")
+                     .Input(FakeInput(variable_ref_type))
+                     .Input(FakeInput(index_type))
+                     .Input(FakeInput(RemoveRefType(variable_ref_type)))
+                     .Finalize(node_def()));
+    TF_ASSERT_OK(InitOp());
+  }
+};
+
+TEST_F(ScatterNdUpdateOpTest, Simple_StringType) {
+  MakeOp(DT_STRING_REF, DT_INT32);
+  AddInputFromArray<string>(TensorShape({1}), {"Brain"});
+  AddInputFromArray<int32>(TensorShape({1}), {0});
+  AddInputFromArray<string>(TensorShape({1}), {"TensorFlow"});
+  TF_ASSERT_OK(RunOpKernel());
+  // Check the new state of the input
+  Tensor params_tensor = *mutable_input(0).tensor;
+  Tensor expected(allocator(), DT_STRING, TensorShape({1}));
+  test::FillValues<string>(&expected, {"TensorFlow"});
+  test::ExpectTensorEqual<string>(expected, params_tensor);
+}
+
+TEST_F(ScatterNdUpdateOpTest, Simple_BoolType) {
+  MakeOp(DT_BOOL_REF, DT_INT32);
+  AddInputFromArray<bool>(TensorShape({1}), {false});
+  AddInputFromArray<int32>(TensorShape({1}), {0});
+  AddInputFromArray<bool>(TensorShape({1}), {true});
+  TF_ASSERT_OK(RunOpKernel());
+  // Check the new state of the input
+  Tensor params_tensor = *mutable_input(0).tensor;
+  Tensor expected(allocator(), DT_BOOL, TensorShape({1}));
+  test::FillValues<bool>(&expected, {true});
+  test::ExpectTensorEqual<bool>(expected, params_tensor);
+}
+
+TEST_F(ScatterNdUpdateOpTest, Simple_TwoD32) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({5, 3}),
+                           {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
+  AddInputFromArray<float>(TensorShape({3, 3}),
+                           {100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
+  TF_ASSERT_OK(RunOpKernel());
+
+  // Check the new state of the input
+  Tensor params_tensor = *mutable_input(0).tensor;
+  Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
+  test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
+                                      10002, 0, 0, 0, 777, 778, 779});
+  test::ExpectTensorEqual<float>(expected, params_tensor);
+}
+
+TEST_F(ScatterNdUpdateOpTest, Simple_Two64) {
+  MakeOp(DT_FLOAT_REF, DT_INT64);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({5, 3}),
+                           {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
+  AddInputFromArray<int64>(TensorShape({3, 1}), {0, 4, 2});
+  AddInputFromArray<float>(TensorShape({3, 3}),
+                           {100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
+  TF_ASSERT_OK(RunOpKernel());
+
+  // Check the new state of the input
+  Tensor params_tensor = *mutable_input(0).tensor;
+  Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
+  test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
+                                      10002, 0, 0, 0, 777, 778, 779});
+  test::ExpectTensorEqual<float>(expected, params_tensor);
+}
+
+/*TEST_F(ScatterNdUpdateOpTest, Simple_ZeroElements) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({0}), {});
+  AddInputFromArray<int32>(TensorShape({0}), {});
+  AddInputFromArray<float>(TensorShape({0}), {});
+  Status s = RunOpKernel();
+  EXPECT_TRUE(StringPiece(s.ToString())
+                  .contains("Output must not have 0 elements, got shape: "))
+      << s;
+}*/
+
+TEST_F(ScatterNdUpdateOpTest, Simple_ZeroD) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({1}), {3});
+  AddInputFromArray<float>(TensorShape({1}), {101});
+  TF_ASSERT_OK(RunOpKernel());
+
+  // Check the new state of the input
+  Tensor params_tensor = *mutable_input(0).tensor;
+  Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
+  test::FillValues<float>(&expected, {0, 0, 0, 101, 0});
+  test::ExpectTensorEqual<float>(expected, params_tensor);
+}
+
+TEST_F(ScatterNdUpdateOpTest, Simple_OneD) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
+  AddInputFromArray<float>(TensorShape({3}), {100, 101, 102});
+  TF_ASSERT_OK(RunOpKernel());
+
+  // Check the new state of the input
+  Tensor params_tensor = *mutable_input(0).tensor;
+  Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
+  test::FillValues<float>(&expected, {100, 0, 102, 0, 101});
+  test::ExpectTensorEqual<float>(expected, params_tensor);
+}
+
+TEST_F(ScatterNdUpdateOpTest, HigherRank) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({2, 3, 1}), {0, 4, 2, 1, 3, 6});
+  AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60});
+  TF_ASSERT_OK(RunOpKernel());
+
+  // Check the new state of the input
+  Tensor params_tensor = *mutable_input(0).tensor;
+  Tensor expected(allocator(), DT_FLOAT, TensorShape({8}));
+  test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0});
+  test::ExpectTensorEqual<float>(expected, params_tensor);
+}
+
+TEST_F(ScatterNdUpdateOpTest, Error_IndexOutOfRange) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({5, 3}),
+                           {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 99});
+  AddInputFromArray<float>(TensorShape({3, 3}),
+                           {100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
+  Status s = RunOpKernel();
+  EXPECT_TRUE(StringPiece(s.ToString())
+                  .contains("Invalid indices: [2,0] = [99] is not in [0, 5)"))
+      << s;
+}
+
+TEST_F(ScatterNdUpdateOpTest, Error_WrongDimsIndices) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({1, 3, 1}), {0, 4, 99});
+  AddInputFromArray<float>(TensorShape({3, 3}),
+                           {100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
+  Status s = RunOpKernel();
+  EXPECT_TRUE(StringPiece(s.ToString())
+                  .contains("The outermost dimension of updates and indices "
+                            "must match. Got indices.shape [1,3,1], "
+                            "updates.shape [3,3]"))
+      << s;
+}
+
+TEST_F(ScatterNdUpdateOpTest, Error_MismatchedParamsAndUpdateDimensions) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({5, 3}),
+                           {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
+  AddInputFromArray<float>(
+      TensorShape({3, 4}),
+      {100, 101, 102, 103, 777, 778, 779, 780, 10000, 10001, 10002, 10004});
+  Status s = RunOpKernel();
+  EXPECT_TRUE(StringPiece(s.ToString())
+                  .contains("Must have updates.shape = indices.shape[0] + "
+                            "params_shape[IXDIM:], got"))
+
+      << s;
+}
+
+TEST_F(ScatterNdUpdateOpTest, Error_MismatchedIndicesAndUpdateDimensions) {
+  MakeOp(DT_FLOAT_REF, DT_INT32);
+
+  // Feed and run
+  AddInputFromArray<float>(TensorShape({5, 3}),
+                           {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
+  AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
+  AddInputFromArray<float>(TensorShape({2, 3}),
+                           {100, 101, 102, 10000, 10001, 10002});
+  Status s = RunOpKernel();
+  EXPECT_TRUE(StringPiece(s.ToString())
+                  .contains("The outermost dimension of updates and indices "
+                            "must match. Got "))
+      << s;
+}
+
+class ScatterNdUpdateBM : public ScatterNdUpdateOpTest {
+ public:
+  virtual void TestBody() {}
+  void MakeBenchmarkOp(const char* op, DataType index_type) {
+    TF_ASSERT_OK(NodeDefBuilder("myop", op)
+                     .Input(FakeInput(DT_FLOAT_REF))
+                     .Input(FakeInput(index_type))
+                     .Input(FakeInput(DT_FLOAT))
+                     .Finalize(node_def()));
+    TF_CHECK_OK(InitOp());
+  }
+};
+
+template <typename Index>
+static void BM_ScatterNdHelper(int iters, int embedding_size, const char* op) {
+  testing::StopTiming();
+  const int kRows = 10000000 / embedding_size;
+  std::vector<float> values;
+  values.reserve(kRows * embedding_size);
+  for (int i = 0; i < kRows * embedding_size; i++) {
+    values.push_back(i);
+  }
+  const int kNumUpdates = 1000;
+  random::PhiloxRandom philox(301, 17);
+  random::SimplePhilox rnd(&philox);
+  std::vector<Index> indices;
+  std::vector<float> updates;
+  for (int i = 0; i < kNumUpdates; i++) {
+    indices.push_back(rnd.Uniform(kRows));
+    for (int j = 0; j < embedding_size; j++) {
+      updates.push_back(i * 10 + j);
+    }
+  }
+
+  ScatterNdUpdateBM bm;
+  bm.MakeBenchmarkOp(op, DataTypeToEnum<Index>::v());
+  bm.AddInputFromArray<float>(TensorShape({kRows, embedding_size}), values);
+  bm.AddInputFromArray<Index>(TensorShape({kNumUpdates}), indices);
+  bm.AddInputFromArray<float>(TensorShape({kNumUpdates, embedding_size}),
+                              updates);
+  testing::ItemsProcessed((static_cast<int64>(kNumUpdates) * embedding_size) *
+                          iters);
+  testing::StartTiming();
+  while (iters-- > 0) {
+    Status s = bm.RunOpKernel();
+  }
+  testing::StopTiming();
+}
+
+static void BM_ScatterNdUpdateInt32(int iters, int embedding_size) {
+  BM_ScatterNdHelper<int32>(iters, embedding_size, "ScatterNdUpdate");
+}
+static void BM_ScatterNdUpdateInt64(int iters, int embedding_size) {
+  BM_ScatterNdHelper<int64>(iters, embedding_size, "ScatterNdUpdate");
+}
+
+static void BM_ScatterNdAddInt32(int iters, int embedding_size) {
+  BM_ScatterNdHelper<int32>(iters, embedding_size, "ScatterNdAdd");
+}
+static void BM_ScatterNdAddInt64(int iters, int embedding_size) {
+  BM_ScatterNdHelper<int64>(iters, embedding_size, "ScatterNdAdd");
+}
+
+BENCHMARK(BM_ScatterNdUpdateInt32)
+    ->Arg(1)
+    ->Arg(10)
+    ->Arg(64)
+    ->Arg(256)
+    ->Arg(1024);
+BENCHMARK(BM_ScatterNdUpdateInt64)
+    ->Arg(1)
+    ->Arg(10)
+    ->Arg(64)
+    ->Arg(256)
+    ->Arg(1024);
+
+BENCHMARK(BM_ScatterNdAddInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
+BENCHMARK(BM_ScatterNdAddInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
+
+}  // namespace
+}  // namespace tensorflow
diff --git a/tensorflow/core/lib/core/threadpool.cc b/tensorflow/core/lib/core/threadpool.cc
index c3704da0b12..534ef902fb9 100644
--- a/tensorflow/core/lib/core/threadpool.cc
+++ b/tensorflow/core/lib/core/threadpool.cc
@@ -88,16 +88,12 @@ struct ThreadPool::Impl : Eigen::ThreadPoolTempl<EigenEnvironment> {
 
   void ParallelFor(int64 total, int64 cost_per_unit,
                    std::function<void(int64, int64)> fn) {
-#ifdef EIGEN_USE_NONBLOCKING_THREAD_POOL
     CHECK_GE(total, 0);
     CHECK_EQ(total, (int64)(Eigen::Index)total);
     Eigen::ThreadPoolDevice device(this, this->NumThreads());
     device.parallelFor(
         total, Eigen::TensorOpCost(0, 0, cost_per_unit),
         [&fn](Eigen::Index first, Eigen::Index last) { fn(first, last); });
-#else
-    CHECK(0);  // should not be used with the old thread pool
-#endif
   }
 };
 
diff --git a/tensorflow/core/lib/core/threadpool_test.cc b/tensorflow/core/lib/core/threadpool_test.cc
index cf8926b54d1..c7d8db51364 100644
--- a/tensorflow/core/lib/core/threadpool_test.cc
+++ b/tensorflow/core/lib/core/threadpool_test.cc
@@ -57,7 +57,6 @@ TEST(ThreadPool, DoWork) {
   }
 }
 
-#ifdef EIGEN_USE_NONBLOCKING_THREAD_POOL
 TEST(ThreadPool, ParallelFor) {
   // Make ParallelFor use as many threads as possible.
   int64 kHugeCost = 1 << 30;
@@ -80,7 +79,6 @@ TEST(ThreadPool, ParallelFor) {
     }
   }
 }
-#endif
 
 static void BM_Sequential(int iters) {
   ThreadPool pool(Env::Default(), "test", kNumThreads);
diff --git a/tensorflow/core/lib/io/path.cc b/tensorflow/core/lib/io/path.cc
index de49d07d62b..31397722fe6 100644
--- a/tensorflow/core/lib/io/path.cc
+++ b/tensorflow/core/lib/io/path.cc
@@ -14,6 +14,7 @@ limitations under the License.
 ==============================================================================*/
 
 #include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/scanner.h"
 #include "tensorflow/core/lib/strings/strcat.h"
 
 namespace tensorflow {
@@ -49,11 +50,14 @@ string JoinPathImpl(std::initializer_list<StringPiece> paths) {
   return result;
 }
 
-// Return the parts of the path, split on the final "/".  If there is no
-// "/" in the path, the first part of the output is empty and the second
-// is the input. If the only "/" in the path is the first character, it is
-// the first part of the output.
-std::pair<StringPiece, StringPiece> SplitPath(StringPiece path) {
+// Return the parts of the URI, split on the final "/" in the path. If there is
+// no "/" in the path, the first part of the output is the scheme and host, and
+// the second is the path. If the only "/" in the path is the first character,
+// it is included in the first part of the output.
+std::pair<StringPiece, StringPiece> SplitPath(StringPiece uri) {
+  StringPiece scheme, host, path;
+  ParseURI(uri, &scheme, &host, &path);
+
   auto pos = path.rfind('/');
 #ifdef PLATFORM_WINDOWS
   if (pos == StringPiece::npos)
@@ -61,15 +65,17 @@ std::pair<StringPiece, StringPiece> SplitPath(StringPiece path) {
 #endif
   // Handle the case with no '/' in 'path'.
   if (pos == StringPiece::npos)
-    return std::make_pair(StringPiece(path.data(), 0), path);
+    return std::make_pair(StringPiece(uri.begin(), host.end() - uri.begin()),
+                          path);
 
   // Handle the case with a single leading '/' in 'path'.
   if (pos == 0)
-    return std::make_pair(StringPiece(path.data(), 1),
-                          StringPiece(path.data() + 1, path.size() - 1));
+    return std::make_pair(
+        StringPiece(uri.begin(), path.begin() + 1 - uri.begin()),
+        StringPiece(path.data() + 1, path.size() - 1));
 
   return std::make_pair(
-      StringPiece(path.data(), pos),
+      StringPiece(uri.begin(), path.begin() + pos - uri.begin()),
       StringPiece(path.data() + pos + 1, path.size() - (pos + 1)));
 }
 
@@ -185,5 +191,42 @@ string CleanPath(StringPiece unclean_path) {
   return path;
 }
 
+void ParseURI(StringPiece remaining, StringPiece* scheme, StringPiece* host,
+              StringPiece* path) {
+  // 0. Parse scheme
+  // Make sure scheme matches [a-zA-Z][0-9a-zA-Z.]*
+  // TODO(keveman): Allow "+" and "-" in the scheme.
+  if (!strings::Scanner(remaining)
+           .One(strings::Scanner::LETTER)
+           .Many(strings::Scanner::LETTER_DIGIT_DOT)
+           .StopCapture()
+           .OneLiteral("://")
+           .GetResult(&remaining, scheme)) {
+    // If there's no scheme, assume the entire string is a path.
+    *scheme = StringPiece(remaining.begin(), 0);
+    *host = StringPiece(remaining.begin(), 0);
+    *path = remaining;
+    return;
+  }
+
+  // 1. Parse host
+  if (!strings::Scanner(remaining).ScanUntil('/').GetResult(&remaining, host)) {
+    // No path, so the rest of the URI is the host.
+    *host = remaining;
+    *path = StringPiece(remaining.end(), 0);
+    return;
+  }
+
+  // 2. The rest is the path
+  *path = remaining;
+}
+
+string CreateURI(StringPiece scheme, StringPiece host, StringPiece path) {
+  if (scheme.empty()) {
+    return path.ToString();
+  }
+  return strings::StrCat(scheme, "://", host, path);
+}
+
 }  // namespace io
 }  // namespace tensorflow
diff --git a/tensorflow/core/lib/io/path.h b/tensorflow/core/lib/io/path.h
index 64165f857fe..955098f5b5e 100644
--- a/tensorflow/core/lib/io/path.h
+++ b/tensorflow/core/lib/io/path.h
@@ -74,6 +74,21 @@ StringPiece Extension(StringPiece path);
 // string manipulation, completely independent of process state.
 string CleanPath(StringPiece path);
 
+// Populates the scheme, host, and path from a URI. scheme, host, and path are
+// guaranteed by this function to point into the contents of uri, even if
+// empty.
+//
+// Corner cases:
+// - If the URI is invalid, scheme and host are set to empty strings and the
+//   passed string is assumed to be a path
+// - If the URI omits the path (e.g. file://host), then the path is left empty.
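+//
+// For example (exercised by the round-trip tests in path_test.cc):
+//   StringPiece scheme, host, path;
+//   ParseURI("hdfs://localhost:8020/path/to/file", &scheme, &host, &path);
+//   // scheme == "hdfs", host == "localhost:8020", path == "/path/to/file"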
+void ParseURI(StringPiece uri, StringPiece* scheme, StringPiece* host,
+              StringPiece* path);
+
+// Creates a URI from a scheme, host, and path. If the scheme is empty, we just
+// return the path.
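+//
+// For example, CreateURI("hdfs", "localhost:8020", "/path/to/file") returns
+// "hdfs://localhost:8020/path/to/file", and CreateURI("", "", "/usr/local")
+// returns "/usr/local".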
+string CreateURI(StringPiece scheme, StringPiece host, StringPiece path);
+
 }  // namespace io
 }  // namespace tensorflow
 
diff --git a/tensorflow/core/lib/io/path_test.cc b/tensorflow/core/lib/io/path_test.cc
index f3f3d245d5d..e3275b93b68 100644
--- a/tensorflow/core/lib/io/path_test.cc
+++ b/tensorflow/core/lib/io/path_test.cc
@@ -45,6 +45,8 @@ TEST(PathTest, IsAbsolutePath) {
 }
 
 TEST(PathTest, Dirname) {
+  EXPECT_EQ("hdfs://127.0.0.1:9000/",
+            Dirname("hdfs://127.0.0.1:9000/train.csv.tfrecords"));
   EXPECT_EQ("/hello", Dirname("/hello/"));
   EXPECT_EQ("/", Dirname("/hello"));
   EXPECT_EQ("hello", Dirname("hello/world"));
@@ -97,5 +99,47 @@ TEST(PathTest, CleanPath) {
   EXPECT_EQ("../../bar", CleanPath("foo/../../../bar"));
 }
 
+#define EXPECT_PARSE_URI(uri, scheme, host, path)  \
+  do {                                             \
+    StringPiece u(uri);                            \
+    StringPiece s, h, p;                           \
+    ParseURI(u, &s, &h, &p);                       \
+    EXPECT_EQ(scheme, s.ToString());               \
+    EXPECT_EQ(host, h.ToString());                 \
+    EXPECT_EQ(path, p.ToString());                 \
+    EXPECT_EQ(uri, CreateURI(scheme, host, path)); \
+    EXPECT_LE(u.begin(), s.begin());               \
+    EXPECT_GE(u.end(), s.begin());                 \
+    EXPECT_LE(u.begin(), s.end());                 \
+    EXPECT_GE(u.end(), s.end());                   \
+    EXPECT_LE(u.begin(), h.begin());               \
+    EXPECT_GE(u.end(), h.begin());                 \
+    EXPECT_LE(u.begin(), h.end());                 \
+    EXPECT_GE(u.end(), h.end());                   \
+    EXPECT_LE(u.begin(), p.begin());               \
+    EXPECT_GE(u.end(), p.begin());                 \
+    EXPECT_LE(u.begin(), p.end());                 \
+    EXPECT_GE(u.end(), p.end());                   \
+  } while (0)
+
+TEST(PathTest, CreateParseURI) {
+  EXPECT_PARSE_URI("http://foo", "http", "foo", "");
+  EXPECT_PARSE_URI("/encrypted/://foo", "", "", "/encrypted/://foo");
+  EXPECT_PARSE_URI("/usr/local/foo", "", "", "/usr/local/foo");
+  EXPECT_PARSE_URI("file:///usr/local/foo", "file", "", "/usr/local/foo");
+  EXPECT_PARSE_URI("local.file:///usr/local/foo", "local.file", "",
+                   "/usr/local/foo");
+  EXPECT_PARSE_URI("a-b:///foo", "", "", "a-b:///foo");
+  EXPECT_PARSE_URI(":///foo", "", "", ":///foo");
+  EXPECT_PARSE_URI("9dfd:///foo", "", "", "9dfd:///foo");
+  EXPECT_PARSE_URI("file:", "", "", "file:");
+  EXPECT_PARSE_URI("file:/", "", "", "file:/");
+  EXPECT_PARSE_URI("hdfs://localhost:8020/path/to/file", "hdfs",
+                   "localhost:8020", "/path/to/file");
+  EXPECT_PARSE_URI("hdfs://localhost:8020", "hdfs", "localhost:8020", "");
+  EXPECT_PARSE_URI("hdfs://localhost:8020/", "hdfs", "localhost:8020", "/");
+}
+#undef EXPECT_PARSE_URI
+
 }  // namespace io
 }  // namespace tensorflow
diff --git a/tensorflow/core/ops/array_ops.cc b/tensorflow/core/ops/array_ops.cc
index 0d3e6fa94ad..ce1f76503c8 100644
--- a/tensorflow/core/ops/array_ops.cc
+++ b/tensorflow/core/ops/array_ops.cc
@@ -4387,6 +4387,83 @@ output_min: This value is copied from input_min.
 output_max: This value is copied from input_max.
 )Doc");
 
+REGISTER_OP("ScatterNd")
+    .Input("indices: Tindices")
+    .Input("updates: T")
+    .Input("shape: Tindices")
+    .Output("output: T")
+    .Attr("T: type")
+    .Attr("Tindices: {int32, int64}")
+    .Doc(
+        R"doc(Creates a new tensor by applying sparse `updates` to individual values or slices within a zero tensor of the given `shape`, according to `indices`.
+This operator is the inverse of the [tf.gather_nd](#gather_nd) operator which extracts values or slices from a given tensor.
+
+TODO(simister): Add a link to Variable.__getitem__ documentation on slice syntax.
+
+`shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `shape`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `shape`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
+```
+
+The simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/ScatterNd1.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    shape = tf.constant([8])
+    scatter = tf.scatter_nd(indices, updates, shape)
+    with tf.Session() as sess:
+      print sess.run(scatter)
+
+The resulting tensor would look like this:
+
+    [0, 11, 0, 10, 9, 0, 0, 12]
+
+We can also insert entire slices of a higher rank tensor all at once. For example, we can insert two slices in the first dimension of a rank-3 tensor with two matrices of new values.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/ScatterNd2.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+    indices = tf.constant([[0], [2]])
+    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+                            [7, 7, 7, 7], [8, 8, 8, 8]],
+                           [[5, 5, 5, 5], [6, 6, 6, 6],
+                            [7, 7, 7, 7], [8, 8, 8, 8]]])
+    shape = tf.constant([4, 4, 4])
+    scatter = tf.scatter_nd(indices, updates, shape)
+    with tf.Session() as sess:
+      print sess.run(scatter)
+
+The resulting tensor would look like this:
+
+    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+
+indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into the output tensor.
+updates: A Tensor. Must have the same type as the output. A tensor of updated values to scatter into the output.
+shape: A vector. The shape of the resulting tensor.
+output: A new tensor with the given shape and updates applied according to the indices.)doc");
+
 REGISTER_OP("FakeQuantWithMinMaxArgs")
     .Attr("min: float = -6.0")
     .Attr("max: float = 6.0")
@@ -4409,6 +4486,7 @@ REGISTER_OP("FakeQuantWithMinMaxArgsGradient")
     .Input("gradients: float")
     .Input("inputs: float")
     .Output("backprops: float")
+    .SetShapeFn(shape_inference::UnchangedShape)
     .Doc(R"doc(
 Compute gradients for a FakeQuantWithMinMaxArgs operation.
 
@@ -4450,6 +4528,21 @@ REGISTER_OP("FakeQuantWithMinMaxVarsGradient")
     .Output("backprops_wrt_input: float")
     .Output("backprop_wrt_min: float")
     .Output("backprop_wrt_max: float")
+    .SetShapeFn([](InferenceContext* c) {
+      // gradients and inputs are same size.
+      ShapeHandle inputs;
+      TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(1), &inputs));
+
+      // min and max are scalars
+      ShapeHandle min_max;
+      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &min_max));
+      TF_RETURN_IF_ERROR(c->Merge(min_max, c->input(3), &min_max));
+
+      c->set_output(0, inputs);
+      c->set_output(1, min_max);
+      c->set_output(2, min_max);
+      return Status::OK();
+    })
     .Doc(R"doc(
 Compute gradients for a FakeQuantWithMinMaxVars operation.
 
@@ -4503,6 +4596,24 @@ REGISTER_OP("FakeQuantWithMinMaxVarsPerChannelGradient")
     .Output("backprops_wrt_input: float")
     .Output("backprop_wrt_min: float")
     .Output("backprop_wrt_max: float")
+    .SetShapeFn([](InferenceContext* c) {
+      ShapeHandle inputs;
+      TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &inputs));
+      TF_RETURN_IF_ERROR(c->WithRankAtMost(inputs, 4, &inputs));
+      TF_RETURN_IF_ERROR(c->Merge(inputs, c->input(1), &inputs));
+
+      ShapeHandle last_dim = c->Vector(c->Dim(inputs, -1));
+
+      ShapeHandle min_max;
+      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &min_max));
+      TF_RETURN_IF_ERROR(c->Merge(min_max, last_dim, &min_max));
+      TF_RETURN_IF_ERROR(c->Merge(c->input(3), min_max, &min_max));
+
+      c->set_output(0, inputs);
+      c->set_output(1, min_max);
+      c->set_output(2, min_max);
+      return Status::OK();
+    })
     .Doc(R"doc(
 Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
 
diff --git a/tensorflow/core/ops/array_ops_test.cc b/tensorflow/core/ops/array_ops_test.cc
index 4e10d72816e..691380fd265 100644
--- a/tensorflow/core/ops/array_ops_test.cc
+++ b/tensorflow/core/ops/array_ops_test.cc
@@ -1533,4 +1533,23 @@ TEST(ArrayOpsTest, FakeQuantWithMinMaxVarsPerChannel) {
   INFER_ERROR("must be equal", op, "[5];[4];[?]");
 }
 
+TEST(ArrayOpsTest, FakeQuantWithMinMaxVarsPerChannelGradient) {
+  ShapeInferenceTestOp op("FakeQuantWithMinMaxVarsPerChannelGradient");
+
+  INFER_OK(op, "?;?;?;?", "?;[?];[?]");
+  INFER_OK(op, "[3];[3];[3];[3]", "in0;in3;in3");
+  INFER_OK(op, "[1,3];[1,3];[3];[3]", "in0;in3;in3");
+  INFER_OK(op, "[1,2,3,4];[1,2,3,4];[4];[4]", "in0;in3;in3");
+
+  // Rank check vectors.
+  INFER_ERROR("be equal rank", op, "[1,?,3];[1,?,3];[3];[]");
+  INFER_ERROR("be rank 1", op, "[1,?,3];[1,?,3];[];[3]");
+  INFER_ERROR("be at least rank 1", op, "[];[];[1];[1]");
+  INFER_ERROR("be at most rank 4", op, "[1,2,3,4,5];[1,2,3,4,5];[1];[1]");
+
+  // Vectors must match each other, and match last dim of input.
+  INFER_ERROR("must be equal", op, "[1,3];[1,3];[2];[3]");
+  INFER_ERROR("must be equal", op, "[1,3];[1,3];[3];[2]");
+}
+
 }  // end namespace tensorflow
diff --git a/tensorflow/core/ops/compat/ops_history.v0.pbtxt b/tensorflow/core/ops/compat/ops_history.v0.pbtxt
index b7a7c3e73fd..9859a763d3e 100644
--- a/tensorflow/core/ops/compat/ops_history.v0.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history.v0.pbtxt
@@ -24732,6 +24732,321 @@ op {
     }
   }
 }
+op {
+  name: "ScatterNd"
+  input_arg {
+    name: "indices"
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    type_attr: "T"
+  }
+  input_arg {
+    name: "shape"
+    type_attr: "Tindices"
+  }
+  output_arg {
+    name: "output"
+    type_attr: "T"
+  }
+  attr {
+    name: "T"
+    type: "type"
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+}
+op {
+  name: "ScatterNdAdd"
+  input_arg {
+    name: "ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+  }
+}
+op {
+  name: "ScatterNdDiv"
+  input_arg {
+    name: "ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+  }
+}
+op {
+  name: "ScatterNdMul"
+  input_arg {
+    name: "ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+  }
+}
+op {
+  name: "ScatterNdSub"
+  input_arg {
+    name: "ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+  }
+}
+op {
+  name: "ScatterNdUpdate"
+  input_arg {
+    name: "ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: true
+    }
+  }
+}
 op {
   name: "ScatterSub"
   input_arg {
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 512b3dcd666..44f109a33cb 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -15427,6 +15427,362 @@ op {
   summary: "Multiplies sparse updates into a variable reference."
   description: "This operation computes\n\n    # Scalar indices\n    ref[indices, ...] *= updates[...]\n\n    # Vector indices (for each i)\n    ref[indices[i], ...] *= updates[i, ...]\n\n    # High rank indices (for each i, ..., j)\n    ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]`."
 }
+op {
+  name: "ScatterNd"
+  input_arg {
+    name: "indices"
+    description: "A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref."
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    description: "A Tensor. Must have the same type as tensor. A tensor of updated values to store in ref."
+    type_attr: "T"
+  }
+  input_arg {
+    name: "shape"
+    description: "A vector. The shape of the resulting tensor."
+    type_attr: "Tindices"
+  }
+  output_arg {
+    name: "output"
+    description: "A new tensor with the given shape and updates applied according to the indices."
+    type_attr: "T"
+  }
+  attr {
+    name: "T"
+    type: "type"
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  summary: "Creates a new tensor by applying sparse `updates` to individual values or slices within a zero tensor of the given `shape` tensor according to indices."
+  description: "This operator is the inverse of the [tf.gather_nd](#gather_nd) operator which extracts values or slices from a given tensor.\n\nTODO(simister): Add a link to Variable.__getitem__ documentation on slice syntax.\n\n`shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `shape`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `shape`.\n\n`updates` is Tensor of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].\n```\n\nThe simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.\n\n<div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n<img style=\"width:100%\" src=\"../../images/ScatterNd1.png\" alt>\n</div>\n\nIn Python, this scatter operation would look like this:\n\n    indices = tf.constant([[4], [3], [1], [7]])\n    updates = tf.constant([9, 10, 11, 12])\n    shape = tf.constant([8])\n    scatter = tf.scatter_nd(indices, updates, shape)\n    with tf.Session() as sess:\n      print sess.run(scatter)\n\nThe resulting tensor would look like this:\n\n    [0, 11, 0, 10, 9, 0, 0, 12]\n\nWe can also, insert entire slices of a higher rank tensor all at once. For example, if we wanted to insert two slices in the first dimension of a rank-3 tensor with two matrices of new values.\n\n<div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n<img style=\"width:100%\" src=\"../../images/ScatterNd2.png\" alt>\n</div>\n\nIn Python, this scatter operation would look like this:\n\n    indices = tf.constant([[0], [2]])\n    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n                            [7, 7, 7, 7], [8, 8, 8, 8]],\n                           [[5, 5, 5, 5], [6, 6, 6, 6],\n                            [7, 7, 7, 7], [8, 8, 8, 8]]])\n    shape = tf.constant([4, 4, 4])\n    scatter = tf.scatter_nd(indices, updates, shape)\n    with tf.Session() as sess:\n      print sess.run(scatter)\n\nThe resulting tensor would look like this:\n\n    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]"
+}
+op {
+  name: "ScatterNdAdd"
+  input_arg {
+    name: "ref"
+    description: "A mutable Tensor. Should be from a Variable node."
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    description: "A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref."
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    description: "A Tensor. Must have the same type as ref. A tensor of updated values to add to ref."
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    description: "Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done."
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+    description: "An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention."
+  }
+  summary: "Applies sparse addition between `updates` and individual values or slices within a given variable according to `indices`."
+  description: "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that addition would look like this:\n\n    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n    indices = tf.constant([[4], [3], [1], [7]])\n    updates = tf.constant([9, 10, 11, 12])\n    add = tf.scatter_nd_add(ref, indices, updates)\n    with tf.Session() as sess:\n      print sess.run(add)\n\nThe resulting update to ref would look like this:\n\n    [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices."
+}
+op {
+  name: "ScatterNdDiv"
+  input_arg {
+    name: "ref"
+    description: "A mutable Tensor. Should be from a Variable node."
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    description: "A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref."
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    description: "A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref."
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    description: "Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done."
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+    description: "An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention."
+  }
+  summary: "Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`."
+  description: "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to divide a rank-1 tensor with 8 elements by 4 scattered elements. In Python, that division would look like this:\n\n    ref = tf.Variable([10, 20, 30, 40, 50, 60, 70, 80])\n    indices = tf.constant([[4], [3], [1], [7]])\n    updates = tf.constant([2, 3, 4, 5])\n    sub = tf.scatter_nd_div(ref, indices, updates)\n    with tf.Session() as sess:\n      print sess.run(sub)\n\nThe resulting update to ref would look like this:\n\n    [10, 5, 30, 13, 25, 60, 70, 16]\n\nSee [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices."
+}
+op {
+  name: "ScatterNdMul"
+  input_arg {
+    name: "ref"
+    description: "A mutable Tensor. Should be from a Variable node."
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    description: "A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref."
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    description: "A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref."
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    description: "Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done."
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+    description: "An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention."
+  }
+  summary: "Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`."
+  description: "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to multiply 4 scattered elements with a rank-1 tensor with 8 elements. In Python, that multiplication would look like this:\n\n    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n    indices = tf.constant([[4], [3], [1], [7]])\n    updates = tf.constant([9, 10, 11, 12])\n    sub = tf.scatter_nd_mul(ref, indices, updates)\n    with tf.Session() as sess:\n      print sess.run(sub)\n\nThe resulting update to ref would look like this:\n\n    [1, 22, 3, 40, 45, 6, 7, 96]\n\nSee [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices."
+}
+op {
+  name: "ScatterNdSub"
+  input_arg {
+    name: "ref"
+    description: "A mutable Tensor. Should be from a Variable node."
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    description: "A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref."
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    description: "A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref."
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    description: "Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done."
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_FLOAT
+        type: DT_DOUBLE
+        type: DT_INT64
+        type: DT_INT32
+        type: DT_UINT8
+        type: DT_UINT16
+        type: DT_INT16
+        type: DT_INT8
+        type: DT_COMPLEX64
+        type: DT_COMPLEX128
+        type: DT_QINT8
+        type: DT_QUINT8
+        type: DT_QINT32
+        type: DT_HALF
+      }
+    }
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: false
+    }
+    description: "An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention."
+  }
+  summary: "Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`."
+  description: "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:\n\n    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n    indices = tf.constant([[4], [3], [1], [7]])\n    updates = tf.constant([9, 10, 11, 12])\n    sub = tf.scatter_nd_sub(ref, indices, updates)\n    with tf.Session() as sess:\n      print sess.run(sub)\n\nThe resulting update to ref would look like this:\n\n    [1, -9, 3, -6, -4, 6, 7, -4]\n\nSee [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices."
+}
+op {
+  name: "ScatterNdUpdate"
+  input_arg {
+    name: "ref"
+    description: "A mutable Tensor. Should be from a Variable node."
+    type_attr: "T"
+    is_ref: true
+  }
+  input_arg {
+    name: "indices"
+    description: "A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref."
+    type_attr: "Tindices"
+  }
+  input_arg {
+    name: "updates"
+    description: "A Tensor. Must have the same type as ref. A tensor of updated values to add to ref."
+    type_attr: "T"
+  }
+  output_arg {
+    name: "output_ref"
+    description: "Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done."
+    type_attr: "T"
+    is_ref: true
+  }
+  attr {
+    name: "T"
+    type: "type"
+  }
+  attr {
+    name: "Tindices"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_INT32
+        type: DT_INT64
+      }
+    }
+  }
+  attr {
+    name: "use_locking"
+    type: "bool"
+    default_value {
+      b: true
+    }
+    description: "An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention."
+  }
+  summary: "Applies sparse `updates` to individual values or slices within a given variable according to `indices`."
+  description: "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to update 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this:\n\n    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n    indices = tf.constant([[4], [3], [1] ,[7]])\n    updates = tf.constant([9, 10, 11, 12])\n    update = tf.scatter_nd_update(ref, indices, updates)\n    with tf.Session() as sess:\n      print sess.run(update)\n\nThe resulting update to ref would look like this:\n\n    [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices."
+}
 op {
   name: "ScatterSub"
   input_arg {
diff --git a/tensorflow/core/ops/state_ops.cc b/tensorflow/core/ops/state_ops.cc
index b9ac8b16ffb..9339b9b8214 100644
--- a/tensorflow/core/ops/state_ops.cc
+++ b/tensorflow/core/ops/state_ops.cc
@@ -445,6 +445,241 @@ use_locking: If True, the operation will be protected by a lock;
   otherwise the behavior is undefined, but may exhibit less contention.
 )doc");
 
+REGISTER_OP("ScatterNdUpdate")
+    .Input("ref: Ref(T)")
+    .Input("indices: Tindices")
+    .Input("updates: T")
+    .Output("output_ref: Ref(T)")
+    .Attr("T: type")
+    .Attr("Tindices: {int32, int64}")
+    .Attr("use_locking: bool = true")
+    .Doc(
+        R"doc(Applies sparse `updates` to individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
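+As a quick check of the rank arithmetic above (a worked example, not part of
+the op semantics): with `ref` of shape `[4, 4, 4]` (`P = 3`) and `indices` of
+shape `[2, 1]` (`Q = 2`, `K = 1`), `updates` must have rank `Q-1+P-K = 3` and
+shape `[2, 4, 4]`.
+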
+For example, say we want to update 4 scattered elements in a rank-1 tensor with 8 elements. In Python, that update would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    update = tf.scatter_nd_update(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(update)
+
+The resulting update to ref would look like this:
+
+    [1, 11, 3, 10, 9, 6, 7, 12]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+ref: A mutable Tensor. Should be from a Variable node.
+indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+updates: A Tensor. Must have the same type as ref. A tensor of updated values to store in ref.
+use_locking: An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.)doc");
+
+REGISTER_OP("ScatterNdAdd")
+    .Input("ref: Ref(T)")
+    .Input("indices: Tindices")
+    .Input("updates: T")
+    .Output("output_ref: Ref(T)")
+    .Attr("T: numbertype")
+    .Attr("Tindices: {int32, int64}")
+    .Attr("use_locking: bool = false")
+    .Doc(
+        R"doc(Applies sparse addition between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    add = tf.scatter_nd_add(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(add)
+
+The resulting update to ref would look like this:
+
+    [1, 13, 3, 14, 14, 6, 7, 20]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+ref: A mutable Tensor. Should be from a Variable node.
+indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+updates: A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.
+use_locking: An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.)doc");
+
+REGISTER_OP("ScatterNdSub")
+    .Input("ref: Ref(T)")
+    .Input("indices: Tindices")
+    .Input("updates: T")
+    .Output("output_ref: Ref(T)")
+    .Attr("T: numbertype")
+    .Attr("Tindices: {int32, int64}")
+    .Attr("use_locking: bool = false")
+    .Doc(
+        R"doc(Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    sub = tf.scatter_nd_sub(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(sub)
+
+The resulting update to ref would look like this:
+
+    [1, -9, 3, -6, -4, 6, 7, -4]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+ref: A mutable Tensor. Should be from a Variable node.
+indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+updates: A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref.
+use_locking: An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.)doc");
+
+REGISTER_OP("ScatterNdMul")
+    .Input("ref: Ref(T)")
+    .Input("indices: Tindices")
+    .Input("updates: T")
+    .Output("output_ref: Ref(T)")
+    .Attr("T: numbertype")
+    .Attr("Tindices: {int32, int64}")
+    .Attr("use_locking: bool = false")
+    .Doc(
+        R"doc(Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to multiply 4 scattered elements in a rank-1 tensor with 8 elements. In Python, that multiplication would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    mul = tf.scatter_nd_mul(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(mul)
+
+The resulting update to ref would look like this:
+
+    [1, 22, 3, 40, 45, 6, 7, 96]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+ref: A mutable Tensor. Should be from a Variable node.
+indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+updates: A Tensor. Must have the same type as ref. A tensor of updated values to multiply ref by.
+use_locking: An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.)doc");
+
+REGISTER_OP("ScatterNdDiv")
+    .Input("ref: Ref(T)")
+    .Input("indices: Tindices")
+    .Input("updates: T")
+    .Output("output_ref: Ref(T)")
+    .Attr("T: numbertype")
+    .Attr("Tindices: {int32, int64}")
+    .Attr("use_locking: bool = false")
+    .Doc(
+        R"doc(Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to divide a rank-1 tensor with 8 elements by 4 scattered elements. In Python, that division would look like this:
+
+    ref = tf.Variable([10, 20, 30, 40, 50, 60, 70, 80])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([2, 3, 4, 5])
+    div = tf.scatter_nd_div(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(div)
+
+The resulting update to ref would look like this:
+
+    [10, 5, 30, 13, 25, 60, 70, 16]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+ref: A mutable Tensor. Should be from a Variable node.
+indices: A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+updates: A Tensor. Must have the same type as ref. A tensor of updated values to divide ref by.
+use_locking: An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+output_ref: Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.)doc");
+
 REGISTER_OP("CountUpTo")
     .Input("ref: Ref(T)")
     .Output("output: T")
diff --git a/tensorflow/core/platform/cloud/gcs_file_system.cc b/tensorflow/core/platform/cloud/gcs_file_system.cc
index 867acc7d8a3..39228ed8698 100644
--- a/tensorflow/core/platform/cloud/gcs_file_system.cc
+++ b/tensorflow/core/platform/cloud/gcs_file_system.cc
@@ -81,7 +81,7 @@ Status ParseGcsPath(StringPiece fname, bool empty_object_ok, string* bucket,
     return errors::Internal("bucket and object cannot be null.");
   }
   StringPiece scheme, bucketp, objectp;
-  ParseURI(fname, &scheme, &bucketp, &objectp);
+  io::ParseURI(fname, &scheme, &bucketp, &objectp);
   if (scheme != "gs") {
     return errors::InvalidArgument("GCS path doesn't start with 'gs://': ",
                                    fname);
diff --git a/tensorflow/core/platform/default/build_config/BUILD b/tensorflow/core/platform/default/build_config/BUILD
index a63aa4d7a97..f4ff341a48a 100644
--- a/tensorflow/core/platform/default/build_config/BUILD
+++ b/tensorflow/core/platform/default/build_config/BUILD
@@ -76,16 +76,24 @@ cc_library(
     name = "platformlib",
     copts = tf_copts(),
     deps = [
+        ":gif",
+        ":jpeg",
         "//tensorflow/core:protos_cc",
         "@com_googlesource_code_re2//:re2",
         "@farmhash_archive//:farmhash",
-        "@gif_archive//:gif",
         "@highwayhash//:sip_hash",
-        "@jpeg_archive//:jpeg",
         "@png_archive//:png",
     ],
 )
 
+cc_library(
+    name = "gif",
+    copts = tf_copts(),
+    deps = [
+        "@gif_archive//:gif",
+    ],
+)
+
 cc_library(
     name = "jpeg",
     copts = tf_copts(),
diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc
index a5dd7b45c4a..5a09fded9bf 100644
--- a/tensorflow/core/platform/env.cc
+++ b/tensorflow/core/platform/env.cc
@@ -70,7 +70,7 @@ Env::Env() : file_system_registry_(new FileSystemRegistryImpl) {}
 
 Status Env::GetFileSystemForFile(const string& fname, FileSystem** result) {
   StringPiece scheme, host, path;
-  ParseURI(fname, &scheme, &host, &path);
+  io::ParseURI(fname, &scheme, &host, &path);
   FileSystem* file_system = file_system_registry_->Lookup(scheme.ToString());
   if (!file_system) {
     return errors::Unimplemented("File system scheme ", scheme,
diff --git a/tensorflow/core/platform/env_test.cc b/tensorflow/core/platform/env_test.cc
index dbff7e25310..f6fa27327a6 100644
--- a/tensorflow/core/platform/env_test.cc
+++ b/tensorflow/core/platform/env_test.cc
@@ -229,35 +229,6 @@ TEST_F(DefaultEnvTest, LocalFileSystem) {
   }
 }
 
-#define EXPECT_PARSE_URI(uri, scheme, host, path)  \
-  do {                                             \
-    StringPiece s, h, p;                           \
-    ParseURI(uri, &s, &h, &p);                     \
-    EXPECT_EQ(scheme, s.ToString());               \
-    EXPECT_EQ(host, h.ToString());                 \
-    EXPECT_EQ(path, p.ToString());                 \
-    EXPECT_EQ(uri, CreateURI(scheme, host, path)); \
-  } while (0)
-
-TEST_F(DefaultEnvTest, CreateParseURI) {
-  EXPECT_PARSE_URI("http://foo", "http", "foo", "");
-  EXPECT_PARSE_URI("/encrypted/://foo", "", "", "/encrypted/://foo");
-  EXPECT_PARSE_URI("/usr/local/foo", "", "", "/usr/local/foo");
-  EXPECT_PARSE_URI("file:///usr/local/foo", "file", "", "/usr/local/foo");
-  EXPECT_PARSE_URI("local.file:///usr/local/foo", "local.file", "",
-                   "/usr/local/foo");
-  EXPECT_PARSE_URI("a-b:///foo", "", "", "a-b:///foo");
-  EXPECT_PARSE_URI(":///foo", "", "", ":///foo");
-  EXPECT_PARSE_URI("9dfd:///foo", "", "", "9dfd:///foo");
-  EXPECT_PARSE_URI("file:", "", "", "file:");
-  EXPECT_PARSE_URI("file:/", "", "", "file:/");
-  EXPECT_PARSE_URI("hdfs://localhost:8020/path/to/file", "hdfs",
-                   "localhost:8020", "/path/to/file");
-  EXPECT_PARSE_URI("hdfs://localhost:8020", "hdfs", "localhost:8020", "");
-  EXPECT_PARSE_URI("hdfs://localhost:8020/", "hdfs", "localhost:8020", "/");
-}
-#undef EXPECT_PARSE_URI
-
 TEST_F(DefaultEnvTest, SleepForMicroseconds) {
   const int64 start = env_->NowMicros();
   const int64 sleep_time = 1e6 + 5e5;
@@ -274,14 +245,14 @@ class TmpDirFileSystem : public NullFileSystem {
  public:
   bool FileExists(const string& dir) override {
     StringPiece scheme, host, path;
-    ParseURI(dir, &scheme, &host, &path);
+    io::ParseURI(dir, &scheme, &host, &path);
     if (path.empty()) return false;
     return Env::Default()->FileExists(io::JoinPath(BaseDir(), path));
   }
 
   Status CreateDir(const string& dir) override {
     StringPiece scheme, host, path;
-    ParseURI(dir, &scheme, &host, &path);
+    io::ParseURI(dir, &scheme, &host, &path);
     if (scheme != "tmpdirfs") {
       return errors::FailedPrecondition("scheme must be tmpdirfs");
     }
diff --git a/tensorflow/core/platform/file_system.cc b/tensorflow/core/platform/file_system.cc
index d71ff80143d..400835aa07e 100644
--- a/tensorflow/core/platform/file_system.cc
+++ b/tensorflow/core/platform/file_system.cc
@@ -22,7 +22,6 @@ limitations under the License.
 #include "tensorflow/core/lib/gtl/map_util.h"
 #include "tensorflow/core/lib/gtl/stl_util.h"
 #include "tensorflow/core/lib/io/path.h"
-#include "tensorflow/core/lib/strings/scanner.h"
 #include "tensorflow/core/lib/strings/str_util.h"
 #include "tensorflow/core/lib/strings/strcat.h"
 #include "tensorflow/core/platform/env.h"
@@ -79,43 +78,6 @@ WritableFile::~WritableFile() {}
 
 FileSystemRegistry::~FileSystemRegistry() {}
 
-void ParseURI(StringPiece remaining, StringPiece* scheme, StringPiece* host,
-              StringPiece* path) {
-  // 0. Parse scheme
-  // Make sure scheme matches [a-zA-Z][0-9a-zA-Z.]*
-  // TODO(keveman): Allow "+" and "-" in the scheme.
-  if (!strings::Scanner(remaining)
-           .One(strings::Scanner::LETTER)
-           .Many(strings::Scanner::LETTER_DIGIT_DOT)
-           .StopCapture()
-           .OneLiteral("://")
-           .GetResult(&remaining, scheme)) {
-    // If there's no scheme, assume the entire string is a path.
-    scheme->clear();
-    host->clear();
-    *path = remaining;
-    return;
-  }
-
-  // 1. Parse host
-  if (!strings::Scanner(remaining).ScanUntil('/').GetResult(&remaining, host)) {
-    // No path, so the rest of the URI is the host.
-    *host = remaining;
-    path->clear();
-    return;
-  }
-
-  // 2. The rest is the path
-  *path = remaining;
-}
-
-string CreateURI(StringPiece scheme, StringPiece host, StringPiece path) {
-  if (scheme.empty()) {
-    return path.ToString();
-  }
-  return strings::StrCat(scheme, "://", host, path);
-}
-
 Status FileSystem::GetMatchingPaths(const string& pattern,
                                     std::vector<string>* results) {
   results->clear();
@@ -237,9 +199,9 @@ Status FileSystem::DeleteRecursively(const string& dirname,
 
 Status FileSystem::RecursivelyCreateDir(const string& dirname) {
   StringPiece scheme, host, remaining_dir;
-  ParseURI(dirname, &scheme, &host, &remaining_dir);
+  io::ParseURI(dirname, &scheme, &host, &remaining_dir);
   std::vector<StringPiece> sub_dirs;
-  while (!FileExists(CreateURI(scheme, host, remaining_dir)) &&
+  while (!FileExists(io::CreateURI(scheme, host, remaining_dir)) &&
          !remaining_dir.empty()) {
     // Basename returns "" for / ending dirs.
     if (!remaining_dir.ends_with("/")) {
@@ -255,7 +217,7 @@ Status FileSystem::RecursivelyCreateDir(const string& dirname) {
   string built_path = remaining_dir.ToString();
   for (const StringPiece sub_dir : sub_dirs) {
     built_path = io::JoinPath(built_path, sub_dir);
-    TF_RETURN_IF_ERROR(CreateDir(CreateURI(scheme, host, built_path)));
+    TF_RETURN_IF_ERROR(CreateDir(io::CreateURI(scheme, host, built_path)));
   }
   return Status::OK();
 }
diff --git a/tensorflow/core/platform/file_system.h b/tensorflow/core/platform/file_system.h
index 4456e3f3e98..dfaf75be667 100644
--- a/tensorflow/core/platform/file_system.h
+++ b/tensorflow/core/platform/file_system.h
@@ -287,19 +287,6 @@ class FileSystemRegistry {
       std::vector<string>* schemes) = 0;
 };
 
-// Populates the scheme, host, and path from a URI.
-//
-// Corner cases:
-// - If the URI is invalid, scheme and host are set to empty strings and the
-//   passed string is assumed to be a path
-// - If the URI omits the path (e.g. file://host), then the path is left empty.
-void ParseURI(StringPiece uri, StringPiece* scheme, StringPiece* host,
-              StringPiece* path);
-
-// Creates a URI from a scheme, host, and path. If the scheme is empty, we just
-// return the path.
-string CreateURI(StringPiece scheme, StringPiece host, StringPiece path);
-
 }  // namespace tensorflow
 
 #endif  // TENSORFLOW_CORE_PLATFORM_FILE_SYSTEM_H_
diff --git a/tensorflow/core/platform/file_system_test.cc b/tensorflow/core/platform/file_system_test.cc
index 600af91206b..8cdabdc8bcd 100644
--- a/tensorflow/core/platform/file_system_test.cc
+++ b/tensorflow/core/platform/file_system_test.cc
@@ -112,7 +112,7 @@ class InterPlanetaryFileSystem : public NullFileSystem {
 
   void ParsePath(const string& name, string* parsed_path) {
     StringPiece scheme, host, path;
-    ParseURI(name, &scheme, &host, &path);
+    io::ParseURI(name, &scheme, &host, &path);
     ASSERT_EQ(scheme, "ipfs");
     ASSERT_EQ(host, "solarsystem");
     path.Consume("/");
diff --git a/tensorflow/core/platform/hadoop/hadoop_file_system.cc b/tensorflow/core/platform/hadoop/hadoop_file_system.cc
index d5792e82cdd..749d9e1fcda 100644
--- a/tensorflow/core/platform/hadoop/hadoop_file_system.cc
+++ b/tensorflow/core/platform/hadoop/hadoop_file_system.cc
@@ -126,7 +126,7 @@ Status HadoopFileSystem::Connect(StringPiece fname, hdfsFS* fs) {
   TF_RETURN_IF_ERROR(hdfs_->status());
 
   StringPiece scheme, namenode, path;
-  ParseURI(fname, &scheme, &namenode, &path);
+  io::ParseURI(fname, &scheme, &namenode, &path);
   const string nn = namenode.ToString();
 
   hdfsBuilder* builder = hdfs_->hdfsNewBuilder();
@@ -144,7 +144,7 @@ Status HadoopFileSystem::Connect(StringPiece fname, hdfsFS* fs) {
 
 string HadoopFileSystem::TranslateName(const string& name) const {
   StringPiece scheme, namenode, path;
-  ParseURI(name, &scheme, &namenode, &path);
+  io::ParseURI(name, &scheme, &namenode, &path);
   return path.ToString();
 }
 
diff --git a/tensorflow/core/platform/posix/env.cc b/tensorflow/core/platform/posix/env.cc
index 2f9c8e4b2f0..f353fb1c924 100644
--- a/tensorflow/core/platform/posix/env.cc
+++ b/tensorflow/core/platform/posix/env.cc
@@ -120,7 +120,8 @@ class PosixEnv : public Env {
                                                       symbol);
   }
 
-  string FormatLibraryFileName(const string& name, const string& version) {
+  string FormatLibraryFileName(const string& name,
+                               const string& version) override {
     return tensorflow::internal::FormatLibraryFileName(name, version);
   }
 };
diff --git a/tensorflow/core/platform/posix/posix_file_system.h b/tensorflow/core/platform/posix/posix_file_system.h
index 07bb8c9a6ff..ccff70cb56f 100644
--- a/tensorflow/core/platform/posix/posix_file_system.h
+++ b/tensorflow/core/platform/posix/posix_file_system.h
@@ -16,6 +16,7 @@ limitations under the License.
 #ifndef TENSORFLOW_CORE_PLATFORM_POSIX_POSIX_FILE_SYSTEM_H_
 #define TENSORFLOW_CORE_PLATFORM_POSIX_POSIX_FILE_SYSTEM_H_
 
+#include "tensorflow/core/lib/io/path.h"
 #include "tensorflow/core/platform/env.h"
 
 namespace tensorflow {
@@ -63,7 +64,7 @@ class LocalPosixFileSystem : public PosixFileSystem {
  public:
   string TranslateName(const string& name) const override {
     StringPiece scheme, host, path;
-    ParseURI(name, &scheme, &host, &path);
+    io::ParseURI(name, &scheme, &host, &path);
     return path.ToString();
   }
 };
diff --git a/tensorflow/core/platform/windows/windows_file_system.h b/tensorflow/core/platform/windows/windows_file_system.h
index 12b579bc86a..64da239d96d 100644
--- a/tensorflow/core/platform/windows/windows_file_system.h
+++ b/tensorflow/core/platform/windows/windows_file_system.h
@@ -16,6 +16,7 @@ limitations under the License.
 #ifndef TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_
 #define TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_
 
+#include "tensorflow/core/lib/io/path.h"
 #include "tensorflow/core/platform/file_system.h"
 
 #ifdef PLATFORM_WINDOWS
@@ -68,7 +69,7 @@ class LocalWinFileSystem : public WindowsFileSystem {
 public:
     string TranslateName(const string& name) const override {
       StringPiece scheme, host, path;
-      ParseURI(name, &scheme, &host, &path);
+      io::ParseURI(name, &scheme, &host, &path);
       return path.ToString();
     }
 };
diff --git a/tensorflow/core/protobuf/master.proto b/tensorflow/core/protobuf/master.proto
index 1dc5e0271e3..d22a68d89c5 100644
--- a/tensorflow/core/protobuf/master.proto
+++ b/tensorflow/core/protobuf/master.proto
@@ -122,6 +122,10 @@ message RunStepRequest {
 
   // Options for the run call.
   RunOptions options = 5;
+
+  // Partial run handle (optional). If specified, this step is executed as
+  // part of a partial run, running only up to the specified fetches.
+  string partial_run_handle = 6;
 }
 
 message RunStepResponse {
@@ -133,6 +137,42 @@ message RunStepResponse {
   RunMetadata metadata = 2;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+//
+// PartialRunSetup method request/response protos.
+//
+// The caller should provide the future partial run feeds, fetches, and targets.
+// Then the caller can make partial run calls by setting the returned
+// partial_run_handle on RunStepRequest.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+message PartialRunSetupRequest {
+  // REQUIRED: session_handle must be returned by a CreateSession call
+  // to the same master service.
+  string session_handle = 1;
+
+  // Tensors to be fed in future steps.
+  repeated string feed = 2;
+
+  // Fetches. A list of tensor names. The caller expects a tensor to be returned
+  // for each fetch[i] (see RunStepResponse.tensor), for corresponding partial
+  // RunStepRequests. The order of specified fetches does not change the
+  // execution order.
+  repeated string fetch = 3;
+
+  // Target Nodes. A list of node names. The named nodes will be run in future
+  // steps, but their outputs will not be fetched.
+  repeated string target = 4;
+}
+
+message PartialRunSetupResponse {
+  // The unique handle corresponding to the ongoing partial run call set up by
+  // the invocation to PartialRunSetup. This handle may be passed to
+  // RunStepRequest to send and receive tensors for this partial run.
+  string partial_run_handle = 1;
+}
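+
+// Illustrative call sequence (pseudocode, not a normative API):
+//
+//   setup_resp = PartialRunSetup({session_handle, feeds, fetches, targets})
+//   loop:
+//     RunStep({session_handle,
+//              partial_run_handle: setup_resp.partial_run_handle,
+//              feed subset, fetch subset})
+//
+// Each RunStep call may feed and fetch a subset of the tensors declared in
+// PartialRunSetup, reusing the same partial_run_handle.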
+
 ////////////////////////////////////////////////////////////////////////////////
 //
 // CloseSession method request/response protos.
diff --git a/tensorflow/core/protobuf/master_service.proto b/tensorflow/core/protobuf/master_service.proto
index 4deb63e400b..7475491845c 100644
--- a/tensorflow/core/protobuf/master_service.proto
+++ b/tensorflow/core/protobuf/master_service.proto
@@ -91,6 +91,9 @@ service MasterService {
   // Extends a session.
   rpc ExtendSession(ExtendSessionRequest) returns (ExtendSessionResponse);
 
+  // Prepares future partial run calls.
+  rpc PartialRunSetup(PartialRunSetupRequest) returns (PartialRunSetupResponse);
+
   // Drives the graph computation.
   rpc RunStep(RunStepRequest) returns (RunStepResponse);
 
diff --git a/tensorflow/core/util/tensor_bundle/tensor_bundle.cc b/tensorflow/core/util/tensor_bundle/tensor_bundle.cc
index 61a69a3840f..4b1a01277c8 100644
--- a/tensorflow/core/util/tensor_bundle/tensor_bundle.cc
+++ b/tensorflow/core/util/tensor_bundle/tensor_bundle.cc
@@ -343,7 +343,11 @@ Status BundleWriter::Finish() {
   status_ = env_->NewWritableFile(MetaFilename(prefix_), &file);
   if (!status_.ok()) return status_;
   {
-    table::TableBuilder builder(table::Options(), file.get());
+    // N.B.: the default use of Snappy compression may not be supported on all
+    // platforms (e.g. Android).  The metadata file is small, so this is fine.
+    table::Options options;
+    options.compression = table::kNoCompression;
+    table::TableBuilder builder(options, file.get());
     // Header entry.
     BundleHeaderProto header;
     header.set_num_shards(1);
diff --git a/tensorflow/core/util/work_sharder.cc b/tensorflow/core/util/work_sharder.cc
index 6cede8d461e..7922fc9224e 100644
--- a/tensorflow/core/util/work_sharder.cc
+++ b/tensorflow/core/util/work_sharder.cc
@@ -31,12 +31,10 @@ void Shard(int max_parallelism, thread::ThreadPool* workers, int64 total,
     work(0, total);
     return;
   }
-#ifdef EIGEN_USE_NONBLOCKING_THREAD_POOL
   if (max_parallelism >= workers->NumThreads()) {
     workers->ParallelFor(total, cost_per_unit, work);
     return;
   }
-#endif
   cost_per_unit = std::max(1LL, cost_per_unit);
   // We shard [0, total) into "num_shards" shards.
   //   1 <= num_shards <= num worker threads
diff --git a/tensorflow/examples/android/res/layout-land/camera_connection_fragment.xml b/tensorflow/examples/android/res/layout-land/camera_connection_fragment.xml
deleted file mode 100644
index 543e5358e68..00000000000
--- a/tensorflow/examples/android/res/layout-land/camera_connection_fragment.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?><!--
- Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
-    android:layout_width="match_parent"
-    android:layout_height="match_parent">
-
-    <org.tensorflow.demo.AutoFitTextureView
-        android:id="@+id/texture"
-        android:layout_width="wrap_content"
-        android:layout_height="wrap_content"
-        android:layout_alignParentBottom="true"
-        android:layout_alignParentStart="true"
-        android:layout_alignParentTop="true" />
-
-    <org.tensorflow.demo.RecognitionScoreView
-        android:id="@+id/results"
-        android:layout_width="match_parent"
-        android:layout_height="112dp"
-        android:layout_alignParentTop="true" />
-        
-</RelativeLayout>
diff --git a/tensorflow/examples/android/res/layout/camera_connection_fragment.xml b/tensorflow/examples/android/res/layout/camera_connection_fragment.xml
index fcf08bf8835..420b69b5e3d 100644
--- a/tensorflow/examples/android/res/layout/camera_connection_fragment.xml
+++ b/tensorflow/examples/android/res/layout/camera_connection_fragment.xml
@@ -22,11 +22,17 @@
         android:layout_width="wrap_content"
         android:layout_height="wrap_content"
         android:layout_alignParentBottom="true" />
-    
+
     <org.tensorflow.demo.RecognitionScoreView
         android:id="@+id/results"
         android:layout_width="match_parent"
         android:layout_height="112dp"
         android:layout_alignParentTop="true" />
-        
+
+    <org.tensorflow.demo.OverlayView
+        android:id="@+id/overlay"
+        android:layout_width="match_parent"
+        android:layout_height="match_parent"
+        android:layout_alignParentBottom="true" />
+
 </RelativeLayout>
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
index ede3af1467f..e8bbb999a7f 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
@@ -20,32 +20,72 @@ import android.Manifest;
 import android.app.Activity;
 import android.app.Fragment;
 import android.content.pm.PackageManager;
+import android.media.Image.Plane;
+import android.media.ImageReader.OnImageAvailableListener;
 import android.os.Build;
 import android.os.Bundle;
+import android.os.Handler;
+import android.os.HandlerThread;
+import android.util.Size;
+import android.view.MotionEvent;
 import android.view.WindowManager;
 import android.widget.Toast;
+import java.nio.ByteBuffer;
+import org.tensorflow.demo.env.Logger;
+
+public abstract class CameraActivity extends Activity implements OnImageAvailableListener {
+  private static final Logger LOGGER = new Logger();
 
-public abstract class CameraActivity extends Activity {
   private static final int PERMISSIONS_REQUEST = 1;
 
   private static final String PERMISSION_CAMERA = Manifest.permission.CAMERA;
   private static final String PERMISSION_STORAGE = Manifest.permission.WRITE_EXTERNAL_STORAGE;
 
+  private boolean debug = false;
+
+  private Handler handler;
+  private HandlerThread handlerThread;
+
   @Override
   protected void onCreate(final Bundle savedInstanceState) {
-    super.onCreate(savedInstanceState);
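+    // Deliberately pass null so the framework does not restore a previously
+    // saved Fragment; setFragment() below always installs a fresh one.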
+    super.onCreate(null);
     getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
 
     setContentView(R.layout.activity_camera);
 
     if (hasPermission()) {
-      if (null == savedInstanceState) {
-        setFragment();
-      }
+      setFragment();
     } else {
       requestPermission();
     }
+  }
 
+  @Override
+  public synchronized void onResume() {
+    super.onResume();
+
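+    // Run inference on a dedicated background thread so camera callbacks are
+    // not blocked; runInBackground() posts work to this handler.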
+    handlerThread = new HandlerThread("inference");
+    handlerThread.start();
+    handler = new Handler(handlerThread.getLooper());
+  }
+
+  @Override
+  public synchronized void onPause() {
+    super.onPause();
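+    // Shut down the inference thread and block until it exits so no work is
+    // posted to a dead handler.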
+    handlerThread.quitSafely();
+    try {
+      handlerThread.join();
+      handlerThread = null;
+      handler = null;
+    } catch (final InterruptedException e) {
+      LOGGER.e(e, "Exception!");
+    }
+  }
+
+  protected synchronized void runInBackground(final Runnable r) {
+    if (handler != null) {
+      handler.post(r);
+    }
   }
 
   @Override
@@ -82,11 +122,47 @@ public abstract class CameraActivity extends Activity {
   }
 
   protected void setFragment() {
+    final Fragment fragment = CameraConnectionFragment.newInstance(
+        new CameraConnectionFragment.ConnectionCallback() {
+          @Override
+          public void onPreviewSizeChosen(final Size size, final int rotation) {
+            CameraActivity.this.onPreviewSizeChosen(size, rotation);
+          }
+        },
+        this, getLayoutId(), getDesiredPreviewFrameSize());
+
     getFragmentManager()
         .beginTransaction()
-        .replace(R.id.container, createFragment())
+        .replace(R.id.container, fragment)
         .commit();
   }
 
-  protected abstract Fragment createFragment();
+  protected void fillBytes(final Plane[] planes, final byte[][] yuvBytes) {
+    // Because of the variable row stride it's not possible to know in
+    // advance the actual necessary dimensions of the yuv planes.
+    for (int i = 0; i < planes.length; ++i) {
+      final ByteBuffer buffer = planes[i].getBuffer();
+      if (yuvBytes[i] == null) {
+        LOGGER.i("Initializing buffer %d at size %d", i, buffer.capacity());
+        yuvBytes[i] = new byte[buffer.capacity()];
+      }
+      buffer.get(yuvBytes[i]);
+    }
+  }
+
+  @Override
+  public boolean onTouchEvent(final MotionEvent event) {
+    if (event.getAction() == MotionEvent.ACTION_DOWN) {
+      debug = !debug;
+    }
+    return false;
+  }
+
+  public boolean isDebug() {
+    return debug;
+  }
+
+  protected abstract void onPreviewSizeChosen(final Size size, final int rotation);
+  protected abstract int getLayoutId();
+  protected abstract int getDesiredPreviewFrameSize();
 }
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java b/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java
index 0bd963b39ef..2e09e78b8a4 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java
@@ -38,6 +38,7 @@ import android.hardware.camera2.CaptureResult;
 import android.hardware.camera2.TotalCaptureResult;
 import android.hardware.camera2.params.StreamConfigurationMap;
 import android.media.ImageReader;
+import android.media.ImageReader.OnImageAvailableListener;
 import android.os.Bundle;
 import android.os.Handler;
 import android.os.HandlerThread;
@@ -49,9 +50,6 @@ import android.view.TextureView;
 import android.view.View;
 import android.view.ViewGroup;
 import android.widget.Toast;
-
-import org.tensorflow.demo.env.Logger;
-
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -59,6 +57,7 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
+import org.tensorflow.demo.env.Logger;
 
 public class CameraConnectionFragment extends Fragment {
   private static final Logger LOGGER = new Logger();
@@ -69,8 +68,6 @@ public class CameraConnectionFragment extends Fragment {
    */
   private static final int MINIMUM_PREVIEW_SIZE = 320;
 
-  private ResultsView resultsView;
-
   /**
    * Conversion from screen rotation to JPEG orientation.
    */
@@ -111,6 +108,14 @@ public class CameraConnectionFragment extends Fragment {
         public void onSurfaceTextureUpdated(final SurfaceTexture texture) {}
       };
 
+  /**
+   * Callback for Activities to use to initialize their data once the
+   * selected preview size is known.
+   */
+  public interface ConnectionCallback {
+    void onPreviewSizeChosen(Size size, int cameraRotation);
+  }
+
   /**
    * ID of the current {@link CameraDevice}.
    */
@@ -184,16 +189,6 @@ public class CameraConnectionFragment extends Fragment {
    */
   private Handler backgroundHandler;
 
-  /**
-   * An additional thread for running inference so as not to block the camera.
-   */
-  private HandlerThread inferenceThread;
-
-  /**
-   * A {@link Handler} for running tasks in the background.
-   */
-  private Handler inferenceHandler;
-
   /**
    * An {@link ImageReader} that handles preview frame capture.
    */
@@ -215,9 +210,10 @@ public class CameraConnectionFragment extends Fragment {
   private final Semaphore cameraOpenCloseLock = new Semaphore(1);
 
   /**
-   * A {@link Classifier} object wrapping TensorFlow to pass frames to.
+   * A {@link OnImageAvailableListener} to receive frames as they are available.
    */
-  private final Classifier classifier;
+  private final OnImageAvailableListener imageListener;
+
   /**
    * The input size in pixels desired by TensorFlow (width and height of a square bitmap).
    */
@@ -228,9 +224,15 @@ public class CameraConnectionFragment extends Fragment {
    */
   private final int layout;
 
+  private final ConnectionCallback cameraConnectionCallback;
+
   private CameraConnectionFragment(
-      final Classifier classifier, final int layout, final int inputSize) {
-    this.classifier = classifier;
+      final ConnectionCallback connectionCallback,
+      final OnImageAvailableListener imageListener,
+      final int layout, final int inputSize) {
+    this.cameraConnectionCallback = connectionCallback;
+    this.imageListener = imageListener;
     this.layout = layout;
     this.inputSize = inputSize;
   }
@@ -268,8 +270,12 @@ public class CameraConnectionFragment extends Fragment {
       final Size[] choices, final int width, final int height, final Size aspectRatio) {
     // Collect the supported resolutions that are at least as big as the preview Surface
     final List<Size> bigEnough = new ArrayList<Size>();
+
+    final int minWidth = Math.max(width, MINIMUM_PREVIEW_SIZE);
+    final int minHeight = Math.max(height, MINIMUM_PREVIEW_SIZE);
+
     for (final Size option : choices) {
-      if (option.getHeight() >= MINIMUM_PREVIEW_SIZE && option.getWidth() >= MINIMUM_PREVIEW_SIZE) {
+      if (option.getHeight() >= minHeight && option.getWidth() >= minWidth) {
         LOGGER.i("Adding size: " + option.getWidth() + "x" + option.getHeight());
         bigEnough.add(option);
       } else {
@@ -289,8 +295,9 @@ public class CameraConnectionFragment extends Fragment {
   }
 
   public static CameraConnectionFragment newInstance(
-      final Classifier classifier, final int layout, final int inputSize) {
-    return new CameraConnectionFragment(classifier, layout, inputSize);
+      final ConnectionCallback callback,
+      final OnImageAvailableListener imageListener, final int layout, final int inputSize) {
+    return new CameraConnectionFragment(callback, imageListener, layout, inputSize);
   }
 
   @Override
@@ -302,7 +309,6 @@ public class CameraConnectionFragment extends Fragment {
   @Override
   public void onViewCreated(final View view, final Bundle savedInstanceState) {
     textureView = (AutoFitTextureView) view.findViewById(R.id.texture);
-    resultsView = (ResultsView) view.findViewById(R.id.results);
   }
 
   @Override
@@ -371,7 +377,8 @@ public class CameraConnectionFragment extends Fragment {
         // bus' bandwidth limitation, resulting in gorgeous previews but the storage of
         // garbage capture data.
         previewSize =
-            chooseOptimalSize(map.getOutputSizes(SurfaceTexture.class), width, height, largest);
+            chooseOptimalSize(map.getOutputSizes(SurfaceTexture.class),
+                inputSize, inputSize, largest);
 
         // We fit the aspect ratio of TextureView to the size of preview we picked.
         final int orientation = getResources().getConfiguration().orientation;
@@ -382,6 +389,8 @@ public class CameraConnectionFragment extends Fragment {
         }
 
         CameraConnectionFragment.this.cameraId = cameraId;
+
+        cameraConnectionCallback.onPreviewSizeChosen(previewSize, sensorOrientation);
         return;
       }
     } catch (final CameraAccessException e) {
@@ -446,10 +455,6 @@ public class CameraConnectionFragment extends Fragment {
     backgroundThread = new HandlerThread("ImageListener");
     backgroundThread.start();
     backgroundHandler = new Handler(backgroundThread.getLooper());
-
-    inferenceThread = new HandlerThread("InferenceThread");
-    inferenceThread.start();
-    inferenceHandler = new Handler(inferenceThread.getLooper());
   }
 
   /**
@@ -457,22 +462,15 @@ public class CameraConnectionFragment extends Fragment {
    */
   private void stopBackgroundThread() {
     backgroundThread.quitSafely();
-    inferenceThread.quitSafely();
     try {
       backgroundThread.join();
       backgroundThread = null;
       backgroundHandler = null;
-
-      inferenceThread.join();
-      inferenceThread = null;
-      inferenceThread = null;
     } catch (final InterruptedException e) {
       LOGGER.e(e, "Exception!");
     }
   }
 
-  private final TensorFlowImageListener tfPreviewListener = new TensorFlowImageListener();
-
   private final CameraCaptureSession.CaptureCallback captureCallback =
       new CameraCaptureSession.CaptureCallback() {
         @Override
@@ -513,7 +511,7 @@ public class CameraConnectionFragment extends Fragment {
           ImageReader.newInstance(
               previewSize.getWidth(), previewSize.getHeight(), ImageFormat.YUV_420_888, 2);
 
-      previewReader.setOnImageAvailableListener(tfPreviewListener, backgroundHandler);
+      previewReader.setOnImageAvailableListener(imageListener, backgroundHandler);
       previewRequestBuilder.addTarget(previewReader.getSurface());
 
       // Here, we create a CameraCaptureSession for camera preview.
@@ -557,11 +555,6 @@ public class CameraConnectionFragment extends Fragment {
     } catch (final CameraAccessException e) {
       LOGGER.e(e, "Exception!");
     }
-
-    LOGGER.i("Getting assets.");
-    tfPreviewListener.initialize(
-        classifier, resultsView, inputSize, inferenceHandler, sensorOrientation);
-    LOGGER.i("TensorFlow initialized.");
   }
 
   /**
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java
index 104ffbbd088..6f695dd7667 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java
@@ -16,12 +16,29 @@
 
 package org.tensorflow.demo;
 
+import android.graphics.Bitmap;
+import android.graphics.Bitmap.Config;
+import android.graphics.Canvas;
+import android.graphics.Matrix;
+import android.graphics.Paint;
+import android.media.Image;
+import android.media.Image.Plane;
+import android.media.ImageReader;
+import android.media.ImageReader.OnImageAvailableListener;
+import android.os.SystemClock;
+import android.os.Trace;
+import android.util.Size;
+import android.util.TypedValue;
+import android.view.Display;
 import java.io.IOException;
-
-import android.app.Fragment;
+import java.util.List;
+import java.util.Vector;
+import org.tensorflow.demo.OverlayView.DrawCallback;
+import org.tensorflow.demo.env.BorderedText;
+import org.tensorflow.demo.env.ImageUtils;
 import org.tensorflow.demo.env.Logger;
 
-public class ClassifierActivity extends CameraActivity {
+public class ClassifierActivity extends CameraActivity implements OnImageAvailableListener {
   private static final Logger LOGGER = new Logger();
 
   // These are the settings for the original v1 Inception model. If you want to
@@ -41,9 +58,58 @@ public class ClassifierActivity extends CameraActivity {
   private static final String LABEL_FILE =
       "file:///android_asset/imagenet_comp_graph_label_strings.txt";
 
+  private static final boolean SAVE_PREVIEW_BITMAP = false;
+
+  private static final boolean MAINTAIN_ASPECT = true;
+
+  private TensorFlowImageClassifier classifier;
+
+  private Integer sensorOrientation;
+
+  private int previewWidth = 0;
+  private int previewHeight = 0;
+  private byte[][] yuvBytes;
+  private int[] rgbBytes = null;
+  private Bitmap rgbFrameBitmap = null;
+  private Bitmap croppedBitmap = null;
+
+  private Bitmap cropCopyBitmap;
+
+  private boolean computing = false;
+
+  private long timestamp = 0;
+
+  private Matrix frameToCropTransform;
+  private Matrix cropToFrameTransform;
+
+  private ResultsView resultsView;
+
+  private OverlayView overlayView;
+
+  private BorderedText borderedText;
+
+  private long lastProcessingTimeMs;
+
   @Override
-  protected Fragment createFragment() {
-    final TensorFlowImageClassifier classifier = new TensorFlowImageClassifier();
+  protected int getLayoutId() {
+    return R.layout.camera_connection_fragment;
+  }
+
+  @Override
+  protected int getDesiredPreviewFrameSize() {
+    return INPUT_SIZE;
+  }
+
+  private static final float TEXT_SIZE_DIP = 18;
+
+  @Override
+  public void onPreviewSizeChosen(final Size size, final int rotation) {
+    final float textSizePx = TypedValue.applyDimension(
+        TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP,
+        getResources().getDisplayMetrics());
+    borderedText = new BorderedText(textSizePx);
+
+    classifier = new TensorFlowImageClassifier();
     try {
       classifier.initializeTensorFlow(
         getAssets(), MODEL_FILE, LABEL_FILE, NUM_CLASSES, INPUT_SIZE, IMAGE_MEAN, IMAGE_STD,
@@ -52,7 +118,151 @@ public class ClassifierActivity extends CameraActivity {
       LOGGER.e(e, "Exception!");
     }
 
-    return CameraConnectionFragment.newInstance(
-        classifier, R.layout.camera_connection_fragment, INPUT_SIZE);
+    overlayView = (OverlayView) findViewById(R.id.overlay);
+    resultsView = (ResultsView) findViewById(R.id.results);
+    previewWidth = size.getWidth();
+    previewHeight = size.getHeight();
+
+    final Display display = getWindowManager().getDefaultDisplay();
+    final int screenOrientation = display.getRotation();
+
+    LOGGER.i("Sensor orientation: %d, Screen orientation: %d",
+        rotation, screenOrientation);
+
+    sensorOrientation = rotation + screenOrientation;
+
+    if (sensorOrientation % 180 == 90) {
+      overlayView.setAspectRatio(size.getHeight(), size.getWidth());
+    } else {
+      overlayView.setAspectRatio(size.getWidth(), size.getHeight());
+    }
+
+    LOGGER.i("Initializing at size %dx%d", previewWidth, previewHeight);
+    rgbBytes = new int[previewWidth * previewHeight];
+    rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
+    croppedBitmap = Bitmap.createBitmap(INPUT_SIZE, INPUT_SIZE, Config.ARGB_8888);
+
+    frameToCropTransform = ImageUtils.getTransformationMatrix(
+        previewWidth, previewHeight,
+        INPUT_SIZE, INPUT_SIZE,
+        sensorOrientation, MAINTAIN_ASPECT);
+
+    cropToFrameTransform = new Matrix();
+    frameToCropTransform.invert(cropToFrameTransform);
+
+    yuvBytes = new byte[3][];
+
+    overlayView.addCallback(new DrawCallback() {
+      @Override
+      public void drawCallback(final Canvas canvas) {
+        renderDebug(canvas);
+      }
+    });
+  }
+
+  @Override
+  public void onImageAvailable(final ImageReader reader) {
+    Image image = null;
+
+    ++timestamp;
+
+    try {
+      image = reader.acquireLatestImage();
+
+      if (image == null) {
+        return;
+      }
+
+      if (computing) {
+        image.close();
+        return;
+      }
+      computing = true;
+
+      Trace.beginSection("imageAvailable");
+
+      final Plane[] planes = image.getPlanes();
+      fillBytes(planes, yuvBytes);
+
+      final int yRowStride = planes[0].getRowStride();
+      final int uvRowStride = planes[1].getRowStride();
+      final int uvPixelStride = planes[1].getPixelStride();
+      ImageUtils.convertYUV420ToARGB8888(
+          yuvBytes[0],
+          yuvBytes[1],
+          yuvBytes[2],
+          rgbBytes,
+          previewWidth,
+          previewHeight,
+          yRowStride,
+          uvRowStride,
+          uvPixelStride,
+          false);
+
+      image.close();
+    } catch (final Exception e) {
+      if (image != null) {
+        image.close();
+      }
+      LOGGER.e(e, "Exception!");
+      Trace.endSection();
+      return;
+    }
+
+    rgbFrameBitmap.setPixels(rgbBytes, 0, previewWidth, 0, 0, previewWidth, previewHeight);
+    final Canvas canvas = new Canvas(croppedBitmap);
+    canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);
+
+    // For examining the actual TF input.
+    if (SAVE_PREVIEW_BITMAP) {
+      ImageUtils.saveBitmap(croppedBitmap);
+    }
+
+    runInBackground(
+        new Runnable() {
+          @Override
+          public void run() {
+            final long startTime = SystemClock.uptimeMillis();
+            final List<Classifier.Recognition> results = classifier.recognizeImage(croppedBitmap);
+            lastProcessingTimeMs = SystemClock.uptimeMillis() - startTime;
+
+            cropCopyBitmap = Bitmap.createBitmap(croppedBitmap);
+            resultsView.setResults(results);
+            overlayView.postInvalidate();
+            computing = false;
+          }
+        });
+
+    Trace.endSection();
+  }
+
+  private void renderDebug(Canvas canvas) {
+    if (!isDebug()) {
+      return;
+    }
+    final Bitmap copy = cropCopyBitmap;
+    if (copy != null) {
+      final Matrix matrix = new Matrix();
+      final float scaleFactor = 2;
+      matrix.postScale(scaleFactor, scaleFactor);
+      matrix.postTranslate(
+          canvas.getWidth() - copy.getWidth() * scaleFactor,
+          canvas.getHeight() - copy.getHeight() * scaleFactor);
+      canvas.drawBitmap(copy, matrix, new Paint());
+
+      final Vector<String> lines = new Vector<String>();
+      lines.add("Frame: " + previewWidth + "x" + previewHeight);
+      lines.add("Crop: " + copy.getWidth() + "x" + copy.getHeight());
+      lines.add("View: " + canvas.getWidth() + "x" + canvas.getHeight());
+      lines.add("Rotation: " + sensorOrientation);
+      lines.add("Inference time: " + lastProcessingTimeMs + "ms");
+
+      int lineNum = 0;
+      for (final String line : lines) {
+        borderedText.drawText(canvas, 10,
+            canvas.getHeight() - 10 - borderedText.getTextSize() * lineNum, line);
+        ++lineNum;
+      }
+    }
   }
 }
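For reference, the fillBytes() helper called from onImageAvailable() above is
defined in the shared CameraActivity base class rather than in this diff. A
minimal sketch of what it needs to do, assuming the three-plane YUV_420_888
format requested from the ImageReader:

    // Hypothetical sketch; the real helper lives in CameraActivity (not shown).
    // Copies each image plane into a reusable byte array, allocating lazily.
    // Requires: import java.nio.ByteBuffer;
    protected void fillBytes(final Plane[] planes, final byte[][] yuvBytes) {
      for (int i = 0; i < planes.length; ++i) {
        final ByteBuffer buffer = planes[i].getBuffer();
        if (yuvBytes[i] == null) {
          yuvBytes[i] = new byte[buffer.capacity()];
        }
        buffer.get(yuvBytes[i]);
      }
    }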
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/OverlayView.java b/tensorflow/examples/android/src/org/tensorflow/demo/OverlayView.java
new file mode 100644
index 00000000000..b874bb07380
--- /dev/null
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/OverlayView.java
@@ -0,0 +1,94 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow.demo;
+
+import android.content.Context;
+import android.graphics.Canvas;
+import android.util.AttributeSet;
+import android.view.MotionEvent;
+import android.view.View;
+import android.view.View.MeasureSpec;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * A simple View providing a render callback to other classes.
+ */
+public class OverlayView extends View {
+  public OverlayView(final Context context, final AttributeSet attrs) {
+    super(context, attrs);
+  }
+
+  /**
+   * Interface defining the callback for client classes.
+   */
+  public interface DrawCallback {
+    public void drawCallback(final Canvas canvas);
+  }
+
+  private int ratioWidth;
+  private int ratioHeight;
+
+  private boolean debug;
+
+  private final List<DrawCallback> callbacks = new LinkedList<DrawCallback>();
+
+  @Override
+  public boolean onTouchEvent(final MotionEvent e) {
+    super.onTouchEvent(e);
+    if (e.getAction() == MotionEvent.ACTION_DOWN) {
+      debug = !debug;
+    }
+    return false;
+  }
+
+  public void addCallback(final DrawCallback callback) {
+    callbacks.add(callback);
+  }
+
+  @Override
+  public synchronized void draw(final Canvas canvas) {
+    for (final DrawCallback callback : callbacks) {
+      callback.drawCallback(canvas);
+    }
+  }
+
+  public void setAspectRatio(final int width, final int height) {
+    if (width < 0 || height < 0) {
+      throw new IllegalArgumentException("Size cannot be negative.");
+    }
+    ratioWidth = width;
+    ratioHeight = height;
+    requestLayout();
+  }
+
+  @Override
+  protected void onMeasure(final int widthMeasureSpec, final int heightMeasureSpec) {
+    super.onMeasure(widthMeasureSpec, heightMeasureSpec);
+    final int width = MeasureSpec.getSize(widthMeasureSpec);
+    final int height = MeasureSpec.getSize(heightMeasureSpec);
+    if (0 == ratioWidth || 0 == ratioHeight) {
+      setMeasuredDimension(width, height);
+    } else {
+      if (width < height * ratioWidth / ratioHeight) {
+        setMeasuredDimension(width, width * ratioHeight / ratioWidth);
+      } else {
+        setMeasuredDimension(height * ratioWidth / ratioHeight, height);
+      }
+    }
+  }
+
+}
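A usage sketch for the new view (names assumed from ClassifierActivity above:
an OverlayView with id R.id.overlay in the fragment layout). Callbacks run on
every draw() pass, so postInvalidate() is all that is needed to refresh:

    final OverlayView overlay = (OverlayView) findViewById(R.id.overlay);
    overlay.addCallback(new OverlayView.DrawCallback() {
      @Override
      public void drawCallback(final Canvas canvas) {
        canvas.drawText("overlay ready", 10, 10, new Paint());
      }
    });
    overlay.postInvalidate();  // schedules draw(), which runs each callback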
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/env/BorderedText.java b/tensorflow/examples/android/src/org/tensorflow/demo/env/BorderedText.java
new file mode 100644
index 00000000000..e4b13bb7abf
--- /dev/null
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/env/BorderedText.java
@@ -0,0 +1,119 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow.demo.env;
+
+import android.graphics.Canvas;
+import android.graphics.Color;
+import android.graphics.Paint;
+import android.graphics.Paint.Align;
+import android.graphics.Paint.Style;
+import android.graphics.Rect;
+
+/**
+ * A class that encapsulates the tedious bits of rendering legible, bordered text onto a canvas.
+ */
+public class BorderedText {
+  private final Paint interiorPaint;
+  private final Paint exteriorPaint;
+
+  private final float textSize;
+
+  /**
+   * Creates a left-aligned bordered text object with a white interior, and a black exterior with
+   * the specified text size.
+   *
+   * @param textSize text size in pixels
+   */
+  public BorderedText(final float textSize) {
+    this(Color.WHITE, Color.BLACK, textSize);
+  }
+
+  /**
+   * Create a bordered text object with the specified interior and exterior colors, text size and
+   * alignment.
+   *
+   * @param interiorColor the interior text color
+   * @param exteriorColor the exterior text color
+   * @param textSize text size in pixels
+   */
+  public BorderedText(final int interiorColor, final int exteriorColor, final float textSize) {
+    interiorPaint = new Paint();
+    interiorPaint.setTextSize(textSize);
+    interiorPaint.setColor(interiorColor);
+    interiorPaint.setStyle(Style.FILL);
+    interiorPaint.setAntiAlias(false);
+    interiorPaint.setAlpha(255);
+
+    exteriorPaint = new Paint();
+    exteriorPaint.setTextSize(textSize);
+    exteriorPaint.setColor(exteriorColor);
+    exteriorPaint.setStyle(Style.FILL_AND_STROKE);
+    exteriorPaint.setStrokeWidth(textSize / 8);
+    exteriorPaint.setAntiAlias(false);
+    exteriorPaint.setAlpha(255);
+
+    this.textSize = textSize;
+  }
+
+  public void drawText(final Canvas canvas, final float posX, final float posY, final String text) {
+    canvas.drawText(text, posX, posY, exteriorPaint);
+    canvas.drawText(text, posX, posY, interiorPaint);
+  }
+
+  public void setInteriorColor(final int color) {
+    interiorPaint.setColor(color);
+  }
+
+  public void setExteriorColor(final int color) {
+    exteriorPaint.setColor(color);
+  }
+
+  public float getTextSize() {
+    return textSize;
+  }
+
+  public void setAlpha(final int alpha) {
+    interiorPaint.setAlpha(alpha);
+    exteriorPaint.setAlpha(alpha);
+  }
+
+  public void getTextBounds(
+      final String line, final int index, final int count, final Rect lineBounds) {
+    interiorPaint.getTextBounds(line, index, count, lineBounds);
+  }
+
+  public void setTextAlign(final Align align) {
+    interiorPaint.setTextAlign(align);
+    exteriorPaint.setTextAlign(align);
+  }
+}
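A minimal usage sketch, mirroring how onPreviewSizeChosen() above converts a
dp value to pixels before constructing the renderer:

    // Inside an Activity, with a Canvas available from a draw callback.
    final float textSizePx = TypedValue.applyDimension(
        TypedValue.COMPLEX_UNIT_DIP, 18, getResources().getDisplayMetrics());
    final BorderedText borderedText = new BorderedText(textSizePx);
    borderedText.drawText(canvas, 10, canvas.getHeight() - 10, "FPS: 12");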
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/env/ImageUtils.java b/tensorflow/examples/android/src/org/tensorflow/demo/env/ImageUtils.java
index a6a8c583190..6f957d1abd5 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/env/ImageUtils.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/env/ImageUtils.java
@@ -16,8 +16,8 @@ limitations under the License.
 package org.tensorflow.demo.env;
 
 import android.graphics.Bitmap;
+import android.graphics.Matrix;
 import android.os.Environment;
-
 import java.io.File;
 import java.io.FileOutputStream;
 
@@ -49,6 +49,16 @@ public class ImageUtils {
    * @param bitmap The bitmap to save.
    */
   public static void saveBitmap(final Bitmap bitmap) {
+    saveBitmap(bitmap, "preview.png");
+  }
+
+  /**
+   * Saves a Bitmap object to disk for analysis.
+   *
+   * @param bitmap The bitmap to save.
+   * @param filename The location to save the bitmap to.
+   */
+  public static void saveBitmap(final Bitmap bitmap, final String filename) {
     final String root =
         Environment.getExternalStorageDirectory().getAbsolutePath() + File.separator + "tensorflow";
     LOGGER.i("Saving %dx%d bitmap to %s.", bitmap.getWidth(), bitmap.getHeight(), root);
@@ -58,7 +68,7 @@ public class ImageUtils {
       LOGGER.i("Make dir failed");
     }
 
-    final String fname = "preview.png";
+    final String fname = filename;
     final File file = new File(myDir, fname);
     if (file.exists()) {
       file.delete();
@@ -151,4 +161,66 @@ public class ImageUtils {
    */
   public static native void convertRGB565ToYUV420SP(
       byte[] input, byte[] output, int width, int height);
+
+  /**
+   * Returns a transformation matrix from one reference frame into another.
+   * Handles cropping (if maintaining aspect ratio is desired) and rotation.
+   *
+   * @param srcWidth Width of source frame.
+   * @param srcHeight Height of source frame.
+   * @param dstWidth Width of destination frame.
+   * @param dstHeight Height of destination frame.
+   * @param applyRotation Amount of rotation to apply from one frame to another.
+   *  Must be a multiple of 90.
+   * @param maintainAspectRatio If true, will ensure that scaling in x and y remains constant,
+   * cropping the image if necessary.
+   * @return The transformation fulfilling the desired requirements.
+   */
+  public static Matrix getTransformationMatrix(
+      final int srcWidth,
+      final int srcHeight,
+      final int dstWidth,
+      final int dstHeight,
+      final int applyRotation,
+      final boolean maintainAspectRatio) {
+    final Matrix matrix = new Matrix();
+
+    if (applyRotation != 0) {
+      // Translate so center of image is at origin.
+      matrix.postTranslate(-srcWidth / 2.0f, -srcHeight / 2.0f);
+
+      // Rotate around origin.
+      matrix.postRotate(applyRotation);
+    }
+
+    // Account for the already applied rotation, if any, and then determine how
+    // much scaling is needed for each axis.
+    final boolean transpose = (Math.abs(applyRotation) + 90) % 180 == 0;
+
+    final int inWidth = transpose ? srcHeight : srcWidth;
+    final int inHeight = transpose ? srcWidth : srcHeight;
+
+    // Apply scaling if necessary.
+    if (inWidth != dstWidth || inHeight != dstHeight) {
+      final float scaleFactorX = dstWidth / (float) inWidth;
+      final float scaleFactorY = dstHeight / (float) inHeight;
+
+      if (maintainAspectRatio) {
+        // Scale by the larger factor so that dst is filled completely while
+        // maintaining the aspect ratio; some of the image may be cropped.
+        final float scaleFactor = Math.max(scaleFactorX, scaleFactorY);
+        matrix.postScale(scaleFactor, scaleFactor);
+      } else {
+        // Scale exactly to fill dst from src.
+        matrix.postScale(scaleFactorX, scaleFactorY);
+      }
+    }
+
+    if (applyRotation != 0) {
+      // Translate back from origin centered reference to destination frame.
+      matrix.postTranslate(dstWidth / 2.0f, dstHeight / 2.0f);
+    }
+
+    return matrix;
+  }
 }
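A worked example of the new helper: mapping a 640x480 camera frame, rotated
90 degrees, onto a 224x224 model input with maintainAspectRatio=true. The
rotation transposes the effective source to 480x640, giving scale factors
224/480 (~0.467) and 224/640 (0.35); the larger one wins, so the short axis
fills the crop exactly and 160 of the 640 source pixels on the long axis
fall outside it:

    final Matrix frameToCrop = ImageUtils.getTransformationMatrix(
        640, 480, 224, 224, 90, true);
    final Matrix cropToFrame = new Matrix();
    frameToCrop.invert(cropToFrame);  // maps model outputs back to the frame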
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/env/Size.java b/tensorflow/examples/android/src/org/tensorflow/demo/env/Size.java
new file mode 100644
index 00000000000..ef15d14daa8
--- /dev/null
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/env/Size.java
@@ -0,0 +1,143 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow.demo.env;
+
+import android.graphics.Bitmap;
+import android.text.TextUtils;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Size class independent of a Camera object.
+ */
+public class Size implements Comparable<Size>, Serializable {
+
+  // 1.4 went out with this UID so we'll need to maintain it to preserve pending queries when
+  // upgrading.
+  public static final long serialVersionUID = 7689808733290872361L;
+
+  public final int width;
+  public final int height;
+
+  public Size(final int width, final int height) {
+    this.width = width;
+    this.height = height;
+  }
+
+  public Size(final Bitmap bmp) {
+    this.width = bmp.getWidth();
+    this.height = bmp.getHeight();
+  }
+
+  /**
+   * Rotate a size by the given number of degrees.
+   * @param size Size to rotate.
+   * @param rotation Degrees {0, 90, 180, 270} to rotate the size.
+   * @return Rotated size.
+   */
+  public static Size getRotatedSize(final Size size, final int rotation) {
+    if (rotation % 180 != 0) {
+      // The phone is in portrait, so the camera is sideways and the frame should be rotated.
+      return new Size(size.height, size.width);
+    }
+    return size;
+  }
+
+  public static Size parseFromString(String sizeString) {
+    if (TextUtils.isEmpty(sizeString)) {
+      return null;
+    }
+
+    sizeString = sizeString.trim();
+
+    // The expected format is "<width>x<height>".
+    final String[] components = sizeString.split("x");
+    if (components.length == 2) {
+      try {
+        final int width = Integer.parseInt(components[0]);
+        final int height = Integer.parseInt(components[1]);
+        return new Size(width, height);
+      } catch (final NumberFormatException e) {
+        return null;
+      }
+    } else {
+      return null;
+    }
+  }
+
+  public static List<Size> sizeStringToList(final String sizes) {
+    final List<Size> sizeList = new ArrayList<Size>();
+    if (sizes != null) {
+      final String[] pairs = sizes.split(",");
+      for (final String pair : pairs) {
+        final Size size = Size.parseFromString(pair);
+        if (size != null) {
+          sizeList.add(size);
+        }
+      }
+    }
+    return sizeList;
+  }
+
+  public static String sizeListToString(final List<Size> sizes) {
+    String sizesString = "";
+    if (sizes != null && sizes.size() > 0) {
+      sizesString = sizes.get(0).toString();
+      for (int i = 1; i < sizes.size(); i++) {
+        sizesString += "," + sizes.get(i).toString();
+      }
+    }
+    return sizesString;
+  }
+
+  public final float aspectRatio() {
+    return (float) width / (float) height;
+  }
+
+  @Override
+  public int compareTo(final Size other) {
+    return width * height - other.width * other.height;
+  }
+
+  @Override
+  public boolean equals(final Object other) {
+    if (other == null) {
+      return false;
+    }
+
+    if (!(other instanceof Size)) {
+      return false;
+    }
+
+    final Size otherSize = (Size) other;
+    return (width == otherSize.width && height == otherSize.height);
+  }
+
+  @Override
+  public int hashCode() {
+    return width * 32713 + height;
+  }
+
+  @Override
+  public String toString() {
+    return dimensionsAsString(width, height);
+  }
+
+  public static final String dimensionsAsString(final int width, final int height) {
+    return width + "x" + height;
+  }
+}
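Round-trip usage of the parsing helpers, for illustration:

    final Size size = Size.parseFromString("640x480");    // width 640, height 480
    final List<Size> sizes = Size.sizeStringToList("640x480,320x240");
    final String joined = Size.sizeListToString(sizes);   // "640x480,320x240"
    final Size rotated = Size.getRotatedSize(size, 90);   // 480x640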
diff --git a/tensorflow/examples/image_retraining/retrain.py b/tensorflow/examples/image_retraining/retrain.py
index 392f0176d37..74c1de8fd7d 100644
--- a/tensorflow/examples/image_retraining/retrain.py
+++ b/tensorflow/examples/image_retraining/retrain.py
@@ -66,7 +66,6 @@ from __future__ import print_function
 
 import argparse
 from datetime import datetime
-import glob
 import hashlib
 import os.path
 import random
@@ -131,7 +130,7 @@ def create_image_lists(image_dir, testing_percentage, validation_percentage):
     print("Image directory '" + image_dir + "' not found.")
     return None
   result = {}
-  sub_dirs = [x[0] for x in os.walk(image_dir)]
+  sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
   # The root directory comes first, so skip it.
   is_root_dir = True
   for sub_dir in sub_dirs:
@@ -146,7 +145,7 @@ def create_image_lists(image_dir, testing_percentage, validation_percentage):
     print("Looking for images in '" + dir_name + "'")
     for extension in extensions:
       file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
-      file_list.extend(glob.glob(file_glob))
+      file_list.extend(gfile.Glob(file_glob))
     if not file_list:
       print('No files found')
       continue
diff --git a/tensorflow/examples/learn/BUILD b/tensorflow/examples/learn/BUILD
index b7eb2ce1cba..169cd8d8670 100644
--- a/tensorflow/examples/learn/BUILD
+++ b/tensorflow/examples/learn/BUILD
@@ -156,6 +156,25 @@ py_binary(
     ],
 )
 
+py_binary(
+    name = "mnist",
+    srcs = ["mnist.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        "//tensorflow:tensorflow_py",
+        "//tensorflow/examples/tutorials/mnist:input_data",
+    ],
+)
+
+py_binary(
+    name = "multiple_gpu",
+    srcs = ["multiple_gpu.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        "//tensorflow:tensorflow_py",
+    ],
+)
+
 sh_test(
     name = "examples_test",
     size = "large",
diff --git a/tensorflow/examples/learn/examples_test.sh b/tensorflow/examples/learn/examples_test.sh
index 317e70d5745..8942720271c 100755
--- a/tensorflow/examples/learn/examples_test.sh
+++ b/tensorflow/examples/learn/examples_test.sh
@@ -46,16 +46,16 @@ function test() {
 
 test boston
 test iris
-test iris_custom_model
 test iris_custom_decay_dnn
+test iris_custom_model
 test iris_run_config
 test iris_val_based_early_stopping
 test iris_with_pipeline
+test random_forest_mnist
 test resnet
 test text_classification --test_with_fake_data
 test text_classification_builtin_rnn_model --test_with_fake_data
-test text_classification_cnn --test_with_fake_data
 test text_classification_character_cnn --test_with_fake_data
 test text_classification_character_rnn --test_with_fake_data
-test random_forest_mnist
+test text_classification_cnn --test_with_fake_data
 test wide_n_deep_tutorial
diff --git a/tensorflow/examples/learn/iris_custom_model.py b/tensorflow/examples/learn/iris_custom_model.py
index 149ee47fa75..bfe5238eba5 100644
--- a/tensorflow/examples/learn/iris_custom_model.py
+++ b/tensorflow/examples/learn/iris_custom_model.py
@@ -38,17 +38,18 @@ def my_model(features, target):
                           normalizer_fn=normalizer_fn,
                           normalizer_params=normalizer_params)
 
-  # Create two tensors respectively for prediction and loss.
-  prediction, loss = (
-      tf.contrib.learn.models.logistic_regression(features, target)
-  )
+  # Compute logits (1 per class) and compute loss.
+  logits = layers.fully_connected(features, 3, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
 
   # Create a tensor for training op.
   train_op = tf.contrib.layers.optimize_loss(
       loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
       learning_rate=0.1)
 
-  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
+  return ({
+      'class': tf.argmax(logits, 1),
+      'prob': tf.nn.softmax(logits)}, loss, train_op)
 
 
 def main(unused_argv):
diff --git a/tensorflow/examples/learn/iris_with_pipeline.py b/tensorflow/examples/learn/iris_with_pipeline.py
index c548387f388..94cfbceee0f 100644
--- a/tensorflow/examples/learn/iris_with_pipeline.py
+++ b/tensorflow/examples/learn/iris_with_pipeline.py
@@ -47,7 +47,7 @@ def main(unused_argv):
 
   pipeline.fit(x_train, y_train, DNNclassifier__steps=200)
 
-  score = accuracy_score(y_test, pipeline.predict(x_test))
+  score = accuracy_score(y_test, list(pipeline.predict(x_test)))
   print('Accuracy: {0:f}'.format(score))
 
 
diff --git a/tensorflow/examples/learn/mnist.py b/tensorflow/examples/learn/mnist.py
new file mode 100644
index 00000000000..8b416373ba0
--- /dev/null
+++ b/tensorflow/examples/learn/mnist.py
@@ -0,0 +1,104 @@
+#  Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""This showcases how simple it is to build image classification networks.
+
+It follows the description in this TensorFlow tutorial:
+    https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+from sklearn import metrics
+import tensorflow as tf
+from tensorflow.contrib import layers
+from tensorflow.contrib import learn
+
+
+def max_pool_2x2(tensor_in):
+  return tf.nn.max_pool(
+      tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
+
+
+def conv_model(feature, target, mode):
+  """2-layer convolution model."""
+  # Convert the target to a one-hot tensor of shape (batch_size, 10), with
+  # an on-value of 1 for each one-hot vector of length 10.
+  target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
+
+  # Reshape feature to a 4d tensor with the 2nd and 3rd dimensions being
+  # image width and height, and the final dimension being the number of
+  # color channels.
+  feature = tf.reshape(feature, [-1, 28, 28, 1])
+
+  # First conv layer will compute 32 features for each 5x5 patch
+  with tf.variable_scope('conv_layer1'):
+    h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
+                                 activation_fn=tf.nn.relu)
+    h_pool1 = max_pool_2x2(h_conv1)
+
+  # Second conv layer will compute 64 features for each 5x5 patch.
+  with tf.variable_scope('conv_layer2'):
+    h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
+                                 activation_fn=tf.nn.relu)
+    h_pool2 = max_pool_2x2(h_conv2)
+    # reshape tensor into a batch of vectors
+    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
+
+  # Densely connected layer with 1024 neurons.
+  h_fc1 = layers.dropout(
+      layers.fully_connected(
+          h_pool2_flat, 1024, activation_fn=tf.nn.relu), keep_prob=0.5,
+      is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
+
+  # Compute logits (1 per class) and compute loss.
+  logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+
+  # Create a tensor for training op.
+  train_op = layers.optimize_loss(
+      loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
+      learning_rate=0.001)
+
+  return tf.argmax(logits, 1), loss, train_op
+
+
+def main(unused_args):
+  ### Download and load MNIST dataset.
+  mnist = learn.datasets.load_dataset('mnist')
+
+  ### Linear classifier.
+  feature_columns = learn.infer_real_valued_columns_from_input(
+      mnist.train.images)
+  classifier = learn.LinearClassifier(
+      feature_columns=feature_columns, n_classes=10)
+  classifier.fit(mnist.train.images, mnist.train.labels.astype(np.int32),
+                 batch_size=100, steps=1000)
+  score = metrics.accuracy_score(
+      mnist.test.labels, list(classifier.predict(mnist.test.images)))
+  print('Accuracy: {0:f}'.format(score))
+
+  ### Convolutional network
+  classifier = learn.Estimator(model_fn=conv_model)
+  classifier.fit(mnist.train.images, mnist.train.labels,
+                 batch_size=100, steps=20000)
+  score = metrics.accuracy_score(
+      mnist.test.labels, list(classifier.predict(mnist.test.images)))
+  print('Accuracy: {0:f}'.format(score))
+
+
+if __name__ == '__main__':
+  tf.app.run()
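The 7 * 7 * 64 flattening in conv_model above follows from the two pooling
steps: each 2x2 max pool with stride 2 and SAME padding halves the 28x28
spatial dimensions (28 -> 14 -> 7), leaving 64 filters per position. A quick
standalone check:

    size = 28
    for _ in range(2):          # two conv + 2x2/stride-2 pool blocks
      size = (size + 1) // 2    # SAME padding pools to ceil(size / 2)
    assert size == 7            # so h_pool2 is [-1, 7, 7, 64]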
diff --git a/tensorflow/examples/learn/multiple_gpu.py b/tensorflow/examples/learn/multiple_gpu.py
new file mode 100644
index 00000000000..6647ec3d42e
--- /dev/null
+++ b/tensorflow/examples/learn/multiple_gpu.py
@@ -0,0 +1,87 @@
+#  Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""Example of using Estimator with multiple GPUs to distribute one model.
+
+This example only runs if you have multiple GPUs to assign ops to.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from sklearn import cross_validation
+from sklearn import datasets
+from sklearn import metrics
+import tensorflow as tf
+from tensorflow.contrib import layers
+from tensorflow.contrib import learn
+
+
+def my_model(features, target):
+  """DNN with three hidden layers, and dropout of 0.1 probability.
+
+  Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
+  CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
+
+  Args:
+    features: `Tensor` of input features.
+    target: `Tensor` of targets.
+
+  Returns:
+    Tuple of predictions, loss and training op.
+  """
+  # Convert the target to a one-hot tensor of shape (length of features, 3),
+  # with an on-value of 1 for each one-hot vector of length 3.
+  target = tf.one_hot(target, 3, 1, 0)
+
+  # Create three fully connected layers of size 10, 20, and 10 respectively,
+  # with each layer having a dropout probability of 0.5.
+  normalizer_fn = layers.dropout
+  normalizer_params = {'keep_prob': 0.5}
+  with tf.device('/gpu:1'):
+    features = layers.stack(features, layers.fully_connected, [10, 20, 10],
+                            normalizer_fn=normalizer_fn,
+                            normalizer_params=normalizer_params)
+
+  with tf.device('/gpu:2'):
+    # Compute logits (1 per class) and compute loss.
+    logits = layers.fully_connected(features, 3, activation_fn=None)
+    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+
+    # Create a tensor for training op.
+    train_op = tf.contrib.layers.optimize_loss(
+        loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
+        learning_rate=0.1)
+
+  return ({
+      'class': tf.argmax(logits, 1),
+      'prob': tf.nn.softmax(logits)}, loss, train_op)
+
+
+def main(unused_argv):
+  iris = datasets.load_iris()
+  x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+      iris.data, iris.target, test_size=0.2, random_state=42)
+
+  classifier = learn.Estimator(model_fn=my_model)
+  classifier.fit(x_train, y_train, steps=1000)
+
+  y_predicted = [
+      p['class'] for p in classifier.predict(x_test, as_iterable=True)]
+  score = metrics.accuracy_score(y_test, y_predicted)
+  print('Accuracy: {0:f}'.format(score))
+
+
+if __name__ == '__main__':
+  tf.app.run()
diff --git a/tensorflow/examples/learn/resnet.py b/tensorflow/examples/learn/resnet.py
index 3e9b579b357..fe1a07ccfa1 100755
--- a/tensorflow/examples/learn/resnet.py
+++ b/tensorflow/examples/learn/resnet.py
@@ -132,7 +132,10 @@ def res_net(x, y, activation=tf.nn.relu):
   net = tf.reshape(net, [-1, net_shape[1] * net_shape[2] * net_shape[3]])
 
   target = tf.one_hot(y, depth=10, dtype=tf.float32)
-  return learn.models.logistic_regression(net, target)
+  logits = tf.contrib.layers.fully_connected(net, 10, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
+  return tf.nn.softmax(logits), loss
+
 
 def res_net_model(x, y):
   prediction, loss = res_net(x, y)
diff --git a/tensorflow/examples/learn/text_classification.py b/tensorflow/examples/learn/text_classification.py
index 87a23831f35..7ad77787abb 100644
--- a/tensorflow/examples/learn/text_classification.py
+++ b/tensorflow/examples/learn/text_classification.py
@@ -34,28 +34,29 @@ EMBEDDING_SIZE = 50
 n_words = 0
 
 
-def bag_of_words_model(x, y):
+def bag_of_words_model(features, target):
   """A bag-of-words model. Note it disregards the word order in the text."""
-  target = tf.one_hot(y, 15, 1, 0)
-  word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
-      embedding_size=EMBEDDING_SIZE, name='words')
-  features = tf.reduce_max(word_vectors, reduction_indices=1)
-  prediction, loss = learn.models.logistic_regression(features, target)
+  target = tf.one_hot(target, 15, 1, 0)
+  features = tf.contrib.layers.bow_encoder(
+      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
+  logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
   train_op = tf.contrib.layers.optimize_loss(
       loss, tf.contrib.framework.get_global_step(),
       optimizer='Adam', learning_rate=0.01)
-  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
+  return (
+      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
+      loss, train_op)
 
 
-def rnn_model(x, y):
-  """Recurrent neural network model to predict from sequence of words
-  to a class."""
+def rnn_model(features, target):
+  """RNN model to predict from sequence of words to a class."""
   # Convert indexes of words into embeddings.
   # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
   # maps word indexes of the sequence into [batch_size, sequence_length,
   # EMBEDDING_SIZE].
-  word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
-      embedding_size=EMBEDDING_SIZE, name='words')
+  word_vectors = tf.contrib.layers.embed_sequence(
+      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
 
   # Split into list of embedding per word, while removing doc length dim.
   # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
@@ -71,15 +72,18 @@ def rnn_model(x, y):
   # Given encoding of RNN, take encoding of last step (e.g hidden size of the
   # neural network of last step) and pass it as features for logistic
   # regression over output classes.
-  target = tf.one_hot(y, 15, 1, 0)
-  prediction, loss = learn.models.logistic_regression(encoding, target)
+  target = tf.one_hot(target, 15, 1, 0)
+  logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
 
   # Create a training op.
   train_op = tf.contrib.layers.optimize_loss(
       loss, tf.contrib.framework.get_global_step(),
       optimizer='Adam', learning_rate=0.01)
 
-  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
+  return (
+      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
+      loss, train_op)
 
 
 def main(unused_argv):
@@ -100,7 +104,11 @@ def main(unused_argv):
   print('Total words: %d' % n_words)
 
   # Build model
-  classifier = learn.Estimator(model_fn=bag_of_words_model)
+  # Switch between rnn_model and bag_of_words_model to test different models.
+  model_fn = rnn_model
+  if FLAGS.bow_model:
+    model_fn = bag_of_words_model
+  classifier = learn.Estimator(model_fn=model_fn)
 
   # Train and predict
   classifier.fit(x_train, y_train, steps=100)
@@ -118,5 +126,11 @@ if __name__ == '__main__':
       help='Test the example code with fake data.',
       action='store_true'
   )
+  parser.add_argument(
+      '--bow_model',
+      default=False,
+      help='Run with BOW model instead of RNN.',
+      action='store_true'
+  )
   FLAGS, unparsed = parser.parse_known_args()
   tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
diff --git a/tensorflow/examples/learn/text_classification_builtin_rnn_model.py b/tensorflow/examples/learn/text_classification_builtin_rnn_model.py
index 6a1c05b86b1..79654eb9021 100644
--- a/tensorflow/examples/learn/text_classification_builtin_rnn_model.py
+++ b/tensorflow/examples/learn/text_classification_builtin_rnn_model.py
@@ -32,14 +32,14 @@ EMBEDDING_SIZE = 50
 n_words = 0
 
 
-def input_op_fn(x):
+def input_op_fn(features):
   """Customized function to transform batched x into embeddings."""
   # Convert indexes of words into embeddings.
   # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
   # maps word indexes of the sequence into [batch_size, sequence_length,
   # EMBEDDING_SIZE].
-  word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
-      embedding_size=EMBEDDING_SIZE, name='words')
+  word_vectors = tf.contrib.layers.embed_sequence(
+      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
   # Split into list of embedding per word, while removing doc length dim.
   # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
   word_list = tf.unpack(word_vectors, axis=1)
diff --git a/tensorflow/examples/learn/text_classification_character_cnn.py b/tensorflow/examples/learn/text_classification_character_cnn.py
index e84790471b5..ffb5a51ad4d 100644
--- a/tensorflow/examples/learn/text_classification_character_cnn.py
+++ b/tensorflow/examples/learn/text_classification_character_cnn.py
@@ -48,15 +48,15 @@ POOLING_WINDOW = 4
 POOLING_STRIDE = 2
 
 
-def char_cnn_model(x, y):
+def char_cnn_model(features, target):
   """Character level convolutional neural network model to predict classes."""
-  y = tf.one_hot(y, 15, 1, 0)
-  byte_list = tf.reshape(learn.ops.one_hot_matrix(x, 256),
+  target = tf.one_hot(target, 15, 1, 0)
+  byte_list = tf.reshape(tf.one_hot(features, 256, 1, 0),
                          [-1, MAX_DOCUMENT_LENGTH, 256, 1])
   with tf.variable_scope('CNN_Layer1'):
     # Apply Convolution filtering on input sequence.
-    conv1 = tf.contrib.layers.convolution2d(byte_list, N_FILTERS,
-                             FILTER_SHAPE1, padding='VALID')
+    conv1 = tf.contrib.layers.convolution2d(
+        byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
     # Add a RELU for non linearity.
     conv1 = tf.nn.relu(conv1)
     # Max pooling across output of Convolution+Relu.
@@ -66,20 +66,22 @@ def char_cnn_model(x, y):
     pool1 = tf.transpose(pool1, [0, 1, 3, 2])
   with tf.variable_scope('CNN_Layer2'):
     # Second level of convolution filtering.
-    conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
-                                            FILTER_SHAPE2,
-                                            padding='VALID')
+    conv2 = tf.contrib.layers.convolution2d(
+        pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
     # Max across each filter to get useful features for classification.
     pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
 
   # Apply regular WX + B and classification.
-  prediction, loss = learn.models.logistic_regression(pool2, y)
+  logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
 
   train_op = tf.contrib.layers.optimize_loss(
       loss, tf.contrib.framework.get_global_step(),
       optimizer='Adam', learning_rate=0.01)
 
-  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
+  return (
+      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
+      loss, train_op)
 
 
 def main(unused_argv):
diff --git a/tensorflow/examples/learn/text_classification_character_rnn.py b/tensorflow/examples/learn/text_classification_character_rnn.py
index e62663aa8af..bca3df4c04f 100644
--- a/tensorflow/examples/learn/text_classification_character_rnn.py
+++ b/tensorflow/examples/learn/text_classification_character_rnn.py
@@ -44,22 +44,25 @@ MAX_DOCUMENT_LENGTH = 100
 HIDDEN_SIZE = 20
 
 
-def char_rnn_model(x, y):
+def char_rnn_model(features, target):
   """Character level recurrent neural network model to predict classes."""
-  y = tf.one_hot(y, 15, 1, 0)
-  byte_list = learn.ops.one_hot_matrix(x, 256)
+  target = tf.one_hot(target, 15, 1, 0)
+  byte_list = tf.one_hot(features, 256, 1, 0)
   byte_list = tf.unpack(byte_list, axis=1)
 
   cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
   _, encoding = tf.nn.rnn(cell, byte_list, dtype=tf.float32)
 
-  prediction, loss = learn.models.logistic_regression(encoding, y)
+  logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
 
   train_op = tf.contrib.layers.optimize_loss(
       loss, tf.contrib.framework.get_global_step(),
       optimizer='Adam', learning_rate=0.01)
 
-  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
+  return (
+      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
+      loss, train_op)
 
 
 def main(unused_argv):
diff --git a/tensorflow/examples/learn/text_classification_cnn.py b/tensorflow/examples/learn/text_classification_cnn.py
index f71df272ead..cb17ae46ae5 100644
--- a/tensorflow/examples/learn/text_classification_cnn.py
+++ b/tensorflow/examples/learn/text_classification_cnn.py
@@ -40,16 +40,15 @@ POOLING_STRIDE = 2
 n_words = 0
 
 
-def cnn_model(x, y):
-  """2 layer Convolutional network to predict from sequence of words
-  to a class."""
+def cnn_model(features, target):
+  """2 layer ConvNet to predict from sequence of words to a class."""
   # Convert indexes of words into embeddings.
   # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
   # maps word indexes of the sequence into [batch_size, sequence_length,
   # EMBEDDING_SIZE].
-  y = tf.one_hot(y, 15, 1, 0)
-  word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
-      embedding_size=EMBEDDING_SIZE, name='words')
+  target = tf.one_hot(target, 15, 1, 0)
+  word_vectors = tf.contrib.layers.embed_sequence(
+      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
   word_vectors = tf.expand_dims(word_vectors, 3)
   with tf.variable_scope('CNN_Layer1'):
     # Apply Convolution filtering on input sequence.
@@ -58,7 +57,8 @@ def cnn_model(x, y):
     # Add a RELU for non linearity.
     conv1 = tf.nn.relu(conv1)
     # Max pooling across output of Convolution+Relu.
-    pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
+    pool1 = tf.nn.max_pool(
+        conv1, ksize=[1, POOLING_WINDOW, 1, 1],
         strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
     # Transpose matrix so that n_filters from convolution becomes width.
     pool1 = tf.transpose(pool1, [0, 1, 3, 2])
@@ -70,13 +70,16 @@ def cnn_model(x, y):
     pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
 
   # Apply regular WX + B and classification.
-  prediction, loss = learn.models.logistic_regression(pool2, y)
+  logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
+  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
 
   train_op = tf.contrib.layers.optimize_loss(
       loss, tf.contrib.framework.get_global_step(),
       optimizer='Adam', learning_rate=0.01)
 
-  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
+  return (
+      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
+      loss, train_op)
 
 
 def main(unused_argv):
diff --git a/tensorflow/examples/skflow/BUILD b/tensorflow/examples/skflow/BUILD
deleted file mode 100644
index e18e9b1c1c7..00000000000
--- a/tensorflow/examples/skflow/BUILD
+++ /dev/null
@@ -1,38 +0,0 @@
-# Example models (using skflow).
-
-package(default_visibility = ["//tensorflow:internal"])
-
-licenses(["notice"])  # Apache 2.0
-
-exports_files(["LICENSE"])
-
-py_binary(
-    name = "mnist",
-    srcs = ["mnist.py"],
-    srcs_version = "PY2AND3",
-    deps = [
-        "//tensorflow:tensorflow_py",
-        "//tensorflow/examples/tutorials/mnist:input_data",
-    ],
-)
-
-py_binary(
-    name = "multiple_gpu",
-    srcs = ["multiple_gpu.py"],
-    srcs_version = "PY2AND3",
-    deps = [
-        "//tensorflow:tensorflow_py",
-    ],
-)
-
-filegroup(
-    name = "all_files",
-    srcs = glob(
-        ["**/*"],
-        exclude = [
-            "**/METADATA",
-            "**/OWNERS",
-        ],
-    ),
-    visibility = ["//tensorflow:__subpackages__"],
-)
diff --git a/tensorflow/examples/skflow/mnist.py b/tensorflow/examples/skflow/mnist.py
deleted file mode 100644
index e2b78883e4e..00000000000
--- a/tensorflow/examples/skflow/mnist.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#  Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
-"""This showcases how simple it is to build image classification networks.
-
-It follows description from this TensorFlow tutorial:
-    https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from sklearn import metrics
-import tensorflow as tf
-from tensorflow.contrib import learn
-
-### Download and load MNIST data.
-
-mnist = learn.datasets.load_dataset('mnist')
-
-### Linear classifier.
-
-feature_columns = learn.infer_real_valued_columns_from_input(mnist.train.images)
-classifier = learn.LinearClassifier(
-    feature_columns=feature_columns, n_classes=10)
-classifier.fit(mnist.train.images, mnist.train.labels, batch_size=100,
-               steps=1000)
-score = metrics.accuracy_score(
-    mnist.test.labels, classifier.predict(mnist.test.images))
-print('Accuracy: {0:f}'.format(score))
-
-### Convolutional network
-
-
-def max_pool_2x2(tensor_in):
-  return tf.nn.max_pool(
-      tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
-
-
-def conv_model(X, y):
-  # pylint: disable=invalid-name,missing-docstring
-  # reshape X to 4d tensor with 2nd and 3rd dimensions being image width and
-  # height final dimension being the number of color channels.
-  X = tf.reshape(X, [-1, 28, 28, 1])
-  # first conv layer will compute 32 features for each 5x5 patch
-  with tf.variable_scope('conv_layer1'):
-    h_conv1 = learn.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
-                               bias=True, activation=tf.nn.relu)
-    h_pool1 = max_pool_2x2(h_conv1)
-  # second conv layer will compute 64 features for each 5x5 patch.
-  with tf.variable_scope('conv_layer2'):
-    h_conv2 = learn.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
-                               bias=True, activation=tf.nn.relu)
-    h_pool2 = max_pool_2x2(h_conv2)
-    # reshape tensor into a batch of vectors
-    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
-  # densely connected layer with 1024 neurons.
-  h_fc1 = tf.contrib.layers.dropout(
-      tf.contrib.layers.legacy_fully_connected(
-          h_pool2_flat, 1024, weight_init=None, activation_fn=tf.nn.relu))
-  return learn.models.logistic_regression(h_fc1, y)
-
-# Training and predicting.
-classifier = learn.TensorFlowEstimator(
-    model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
-    learning_rate=0.001)
-classifier.fit(mnist.train.images, mnist.train.labels)
-score = metrics.accuracy_score(
-    mnist.test.labels, classifier.predict(mnist.test.images))
-print('Accuracy: {0:f}'.format(score))
diff --git a/tensorflow/examples/skflow/multiple_gpu.py b/tensorflow/examples/skflow/multiple_gpu.py
deleted file mode 100644
index 5bb647e54ee..00000000000
--- a/tensorflow/examples/skflow/multiple_gpu.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#  Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from sklearn import datasets, metrics, cross_validation
-import tensorflow as tf
-from tensorflow.contrib import learn
-
-iris = datasets.load_iris()
-X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
-    test_size=0.2, random_state=42)
-
-def my_model(X, y):
-  """
-  This is DNN with 10, 20, 10 hidden layers, and dropout of 0.5 probability.
-
-  Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
-  CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
-  """
-  with tf.device('/gpu:1'):
-    dnn = lambda inputs, num_outputs, scope: tf.contrib.layers.dropout(
-        tf.contrib.layers.legacy_fully_connected(
-            inputs, num_outputs, weight_init=None, activation_fn=tf.nn.relu))
-    layers = tf.contrib.layers.stack(X, dnn, [10, 20, 10])
-  with tf.device('/gpu:2'):
-    return learn.models.logistic_regression(layers, y)
-
-classifier = learn.TensorFlowEstimator(model_fn=my_model, n_classes=3)
-classifier.fit(X_train, y_train)
-score = metrics.accuracy_score(y_test, classifier.predict(X_test))
-print('Accuracy: {0:f}'.format(score))
diff --git a/tensorflow/g3doc/api_docs/python/array_ops.md b/tensorflow/g3doc/api_docs/python/array_ops.md
index adc18589110..83176a7d193 100644
--- a/tensorflow/g3doc/api_docs/python/array_ops.md
+++ b/tensorflow/g3doc/api_docs/python/array_ops.md
@@ -2240,6 +2240,92 @@ count ==> [2, 1, 3, 1, 2]
 *  <b>`count`</b>: A `Tensor` of type `out_idx`. 1-D.
 
 
+- - -
+
+### `tf.scatter_nd(indices, updates, shape, name=None)` {#scatter_nd}
+
+Creates a new tensor by applying sparse `updates` to individual values or slices within a zero tensor of the given `shape`, according to `indices`.
+
+This operator is the inverse of the [tf.gather_nd](#gather_nd) operator, which extracts values or slices from a given tensor.
+
+TODO(simister): Add a link to Variable.__getitem__ documentation on slice syntax.
+
+`shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `shape`.
+It must have shape `[d_0, ..., d_{Q-2}, K]`, where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `shape`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
+```
+
+The simplest form of scatter is to insert individual elements into a tensor by index. For example, say we want to insert 4 scattered elements into a rank-1 tensor with 8 elements.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/ScatterNd1.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    shape = tf.constant([8])
+    scatter = tf.scatter_nd(indices, updates, shape)
+    with tf.Session() as sess:
+      print(sess.run(scatter))
+
+The resulting tensor would look like this:
+
+    [0, 11, 0, 10, 9, 0, 0, 12]
+
+We can also insert entire slices of a higher-rank tensor all at once. For example, say we want to insert two slices into the first dimension of a rank-3 tensor, each containing a matrix of new values.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/ScatterNd2.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+    indices = tf.constant([[0], [2]])
+    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+                            [7, 7, 7, 7], [8, 8, 8, 8]],
+                           [[5, 5, 5, 5], [6, 6, 6, 6],
+                            [7, 7, 7, 7], [8, 8, 8, 8]]])
+    shape = tf.constant([4, 4, 4])
+    scatter = tf.scatter_nd(indices, updates, shape)
+    with tf.Session() as sess:
+      print(sess.run(scatter))
+
+The resulting tensor would look like this:
+
+    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+
+##### Args:
+
+
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A tensor of indices into the output tensor.
+*  <b>`updates`</b>: A `Tensor`.
+    A tensor of updated values to scatter into the output tensor.
+*  <b>`shape`</b>: A `Tensor`. Must have the same type as `indices`.
+    A 1-D vector giving the shape of the resulting tensor.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor`. Has the same type as `updates`.
+  A new tensor with the given shape and updates applied according to the indices.
+
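+As a quick check of the inverse relationship with `tf.gather_nd` (a minimal
+sketch, reusing the first example above): gathering at the same `indices`
+recovers `updates`.
+
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    scatter = tf.scatter_nd(indices, updates, tf.constant([8]))
+    gathered = tf.gather_nd(scatter, indices)
+    with tf.Session() as sess:
+      print(sess.run(gathered))  # ==> [9, 10, 11, 12]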
+
 - - -
 
 ### `tf.dynamic_partition(data, partitions, num_partitions, name=None)` {#dynamic_partition}
@@ -2758,3 +2844,175 @@ Returns the difference between the `x` and `y` treated as sets.
   A `Tensor` that is of type `index_dtype` representing indices from .
 
 
+
+## Fake quantization
+Operations used to help train for better quantization accuracy.
+
+- - -
+
+### `tf.fake_quant_with_min_max_args(inputs, min=None, max=None, name=None)` {#fake_quant_with_min_max_args}
+
+Fake-quantize the 'inputs' tensor of type float, producing an 'outputs' tensor of the same type.
+
+Attributes [min; max] define the clamping range for the 'inputs' data.  The op
+divides this range into 255 steps (a total of 256 values), then replaces each
+'inputs' value with the closest of the quantized step values.
+
+Quantization is called fake since the output is still in floating point.
+
+##### Args:
+
+
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+*  <b>`min`</b>: An optional `float`. Defaults to `-6`.
+*  <b>`max`</b>: An optional `float`. Defaults to `6`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
+
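+For instance, a minimal sketch of the clamp-and-round behavior (the exact
+outputs are the nearest points on the 255-step grid over [min; max]):
+
+    inputs = tf.constant([-8.0, -1.0, 0.0, 2.5, 8.0])
+    outputs = tf.fake_quant_with_min_max_args(inputs, min=-6, max=6)
+    with tf.Session() as sess:
+      print(sess.run(outputs))  # -8.0 and 8.0 are clamped into [-6; 6]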
+
+- - -
+
+### `tf.fake_quant_with_min_max_args_gradient(gradients, inputs, min=None, max=None, name=None)` {#fake_quant_with_min_max_args_gradient}
+
+Compute gradients for a FakeQuantWithMinMaxArgs operation.
+
+##### Args:
+
+
+*  <b>`gradients`</b>: A `Tensor` of type `float32`.
+    Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+    Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
+*  <b>`min`</b>: An optional `float`. Defaults to `-6`.
+*  <b>`max`</b>: An optional `float`. Defaults to `6`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
+  Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
+  `gradients * (inputs >= min && inputs <= max)`.
+
+
+- - -
+
+### `tf.fake_quant_with_min_max_vars(inputs, min, max, name=None)` {#fake_quant_with_min_max_vars}
+
+Fake-quantize the 'inputs' tensor of type float and shape `[b, h, w, d]` via
+global float scalars `min` and `max`, producing an 'outputs' tensor of the same
+shape as `inputs`.
+
+[min; max] is the clamping range for the 'inputs' data.  The op divides this
+range into 255 steps (a total of 256 values), then replaces each 'inputs' value
+with the closest of the quantized step values.
+
+This operation has a gradient and thus allows for training `min` and `max` values.
+
+##### Args:
+
+
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+*  <b>`min`</b>: A `Tensor` of type `float32`.
+*  <b>`max`</b>: A `Tensor` of type `float32`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
+
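+Because gradients flow to `min` and `max`, they can be ordinary variables
+trained alongside the model. A minimal sketch (the initial range of [-6; 6] is
+an arbitrary choice here):
+
+    inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 64])
+    min_var = tf.Variable(-6.0)
+    max_var = tf.Variable(6.0)
+    outputs = tf.fake_quant_with_min_max_vars(inputs, min_var, max_var)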
+
+- - -
+
+### `tf.fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max, name=None)` {#fake_quant_with_min_max_vars_gradient}
+
+Compute gradients for a FakeQuantWithMinMaxVars operation.
+
+##### Args:
+
+
+*  <b>`gradients`</b>: A `Tensor` of type `float32`.
+    Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+    Values passed as inputs to the FakeQuantWithMinMaxVars operation.
+*  <b>`min`</b>: A `Tensor` of type `float32`.
+    The lower end of the quantization interval, a scalar float.
+*  <b>`max`</b>: A `Tensor` of type `float32`.
+    The upper end of the quantization interval, a scalar float.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).
+
+*  <b>`backprops_wrt_input`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. inputs:
+    `gradients * (inputs >= min && inputs <= max)`.
+*  <b>`backprop_wrt_min`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. min parameter:
+    `sum(gradients * (inputs < min))`.
+*  <b>`backprop_wrt_max`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. max parameter:
+    `sum(gradients * (inputs > max))`.
+
+
+- - -
+
+### `tf.fake_quant_with_min_max_vars_per_channel(inputs, min, max, name=None)` {#fake_quant_with_min_max_vars_per_channel}
+
+Fake-quantize the 'inputs' tensor of type float, with one of the shapes `[d]`,
+`[b, d]`, or `[b, h, w, d]`, via per-channel floats `min` and `max` of shape
+`[d]`, producing an 'outputs' tensor of the same shape as `inputs`.
+
+[min; max] is the clamping range for the 'inputs' data in the corresponding
+depth channel.  The op divides this range into 255 steps (a total of 256
+values), then replaces each 'inputs' value with the closest of the quantized
+step values.
+
+This operation has a gradient and thus allows for training `min` and `max` values.
+
+##### Args:
+
+
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+*  <b>`min`</b>: A `Tensor` of type `float32`.
+*  <b>`max`</b>: A `Tensor` of type `float32`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
+
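+Per-channel ranges are supplied as vectors over the last dimension. A minimal
+sketch, assuming a `[b, h, w, d]` input with `d = 3`:
+
+    inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])
+    min_vars = tf.Variable([-6.0, -4.0, -2.0])  # one min per channel
+    max_vars = tf.Variable([6.0, 4.0, 2.0])     # one max per channel
+    outputs = tf.fake_quant_with_min_max_vars_per_channel(
+        inputs, min_vars, max_vars)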
+
+- - -
+
+### `tf.fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max, name=None)` {#fake_quant_with_min_max_vars_per_channel_gradient}
+
+Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
+
+##### Args:
+
+
+*  <b>`gradients`</b>: A `Tensor` of type `float32`.
+    Backpropagated gradients above the FakeQuantWithMinMaxVarsPerChannel
+    operation, with shape one of `[d]`, `[b, d]`, or `[b, h, w, d]`.
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+    Values passed as inputs to the FakeQuantWithMinMaxVarsPerChannel
+    operation, with the same shape as `gradients`.
+*  <b>`min`</b>: A `Tensor` of type `float32`.
+    The lower end of the quantization interval, a float of shape `[d]`.
+*  <b>`max`</b>: A `Tensor` of type `float32`.
+    The upper end of the quantization interval, a float of shape `[d]`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).
+
+*  <b>`backprops_wrt_input`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. inputs,
+    shape same as `inputs`: `gradients * (inputs >= min && inputs <= max)`.
+*  <b>`backprop_wrt_min`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. min parameter, shape `[d]`:
+    `sum_per_d(gradients * (inputs < min))`.
+*  <b>`backprop_wrt_max`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. max parameter, shape `[d]`:
+    `sum_per_d(gradients * (inputs > max))`.
+
+
diff --git a/tensorflow/g3doc/api_docs/python/contrib.bayesflow.stochastic_tensor.md b/tensorflow/g3doc/api_docs/python/contrib.bayesflow.stochastic_tensor.md
index 98881fc13ad..04f2e5ae7e6 100644
--- a/tensorflow/g3doc/api_docs/python/contrib.bayesflow.stochastic_tensor.md
+++ b/tensorflow/g3doc/api_docs/python/contrib.bayesflow.stochastic_tensor.md
@@ -41,13 +41,6 @@ Base Class for Tensor-like objects that emit stochastic values.
 
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.input_dict` {#BaseStochasticTensor.input_dict}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.loss(sample_loss)` {#BaseStochasticTensor.loss}
@@ -92,14 +85,14 @@ constant with respect to the input for purposes of the gradient.
 StochasticTensor is a BaseStochasticTensor backed by a distribution.
 - - -
 
-#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.__init__(dist_cls, name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#StochasticTensor.__init__}
+#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.__init__(dist, name='StochasticTensor', dist_value_type=None, loss_fn=score_function)` {#StochasticTensor.__init__}
 
 Construct a `StochasticTensor`.
 
-`StochasticTensor` will instantiate a distribution from `dist_cls` and
-`dist_args` and its `value` method will return the same value each time
-it is called. What `value` is returned is controlled by the
-`dist_value_type` (defaults to `SampleAndReshapeValue`).
+`StochasticTensor` is backed by the `dist` distribution and its `value`
+method will return the same value each time it is called. What `value` is
+returned is controlled by the `dist_value_type` (defaults to
+`SampleAndReshapeValue`).
 
 Some distributions' sample functions are not differentiable (e.g. a sample
 from a discrete distribution like a Bernoulli) and so to differentiate
@@ -117,34 +110,26 @@ reparameterized distributions; it will also return None if the value type is
 ##### Args:
 
 
-*  <b>`dist_cls`</b>: a `Distribution` class.
+*  <b>`dist`</b>: an instance of `Distribution`.
 *  <b>`name`</b>: a name for this `StochasticTensor` and its ops.
 *  <b>`dist_value_type`</b>: a `_StochasticValueType`, which will determine what the
       `value` of this `StochasticTensor` will be. If not provided, the
       value type set with the `value_type` context manager will be used.
-*  <b>`loss_fn`</b>: callable that takes `(st, st.value(), influenced_loss)`, where
+*  <b>`loss_fn`</b>: callable that takes
+      `(st, st.value(), influenced_loss)`, where
       `st` is this `StochasticTensor`, and returns a `Tensor` loss. By
       default, `loss_fn` is the `score_function`, or more precisely, the
       integral of the score function, such that when the gradient is taken,
       the score function results. See the `stochastic_gradient_estimators`
       module for additional loss functions and baselines.
-*  <b>`**dist_args`</b>: keyword arguments to be passed through to `dist_cls` on
-      construction.
 
 ##### Raises:
 
 
-*  <b>`TypeError`</b>: if `dist_cls` is not a `Distribution`.
+*  <b>`TypeError`</b>: if `dist` is not an instance of `Distribution`.
 *  <b>`TypeError`</b>: if `loss_fn` is not `callable`.
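+
+For example, construction with the new instance-based API looks like this (a
+minimal sketch, assuming `tf.contrib.distributions.Normal` as the backing
+distribution):
+
+```python
+from tensorflow.contrib import distributions
+from tensorflow.contrib.bayesflow import stochastic_tensor
+
+# Previously: StochasticTensor(distributions.Normal, mu=mu, sigma=sigma, ...)
+dist = distributions.Normal(mu=0.0, sigma=1.0)
+st = stochastic_tensor.StochasticTensor(dist)
+```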
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.clone(name=None, **dist_args)` {#StochasticTensor.clone}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.distribution` {#StochasticTensor.distribution}
@@ -173,13 +158,6 @@ reparameterized distributions; it will also return None if the value type is
 
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.input_dict` {#StochasticTensor.input_dict}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.loss(final_loss, name='Loss')` {#StochasticTensor.loss}
@@ -359,8 +337,8 @@ with sg.value_type(sg.SampleAndReshapeValue(n=2)):
 st_value = st.value()
 assertEqual(st_value.get_shape(), (4, 3))
 
-dt_value_val = sess.run([st_value])[0]  # or e.g. run([tf.identity(st)])[0]
-assertEqual(dt_value_val.shape, (4, 3))
+st_value_val = sess.run([st_value])[0]  # or e.g. run([tf.identity(st)])[0]
+assertEqual(st_value_val.shape, (4, 3))
 ```
 - - -
 
@@ -458,3340 +436,6 @@ in a `stop_gradients` call to disable any possible backpropagation.
 
 
 
-
-## Automatically Generated StochasticTensors
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor` {#BernoulliTensor}
-
-`BernoulliTensor` is a `StochasticTensor` backed by the distribution `Bernoulli`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BernoulliTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.clone(name=None, **dist_args)` {#BernoulliTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.distribution` {#BernoulliTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.dtype` {#BernoulliTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.entropy(name='entropy')` {#BernoulliTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.graph` {#BernoulliTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.input_dict` {#BernoulliTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.loss(final_loss, name='Loss')` {#BernoulliTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.mean(name='mean')` {#BernoulliTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.name` {#BernoulliTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.value(name='value')` {#BernoulliTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.value_type` {#BernoulliTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor` {#BernoulliWithSigmoidPTensor}
-
-`BernoulliWithSigmoidPTensor` is a `StochasticTensor` backed by the distribution `BernoulliWithSigmoidP`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BernoulliWithSigmoidPTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.clone(name=None, **dist_args)` {#BernoulliWithSigmoidPTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.distribution` {#BernoulliWithSigmoidPTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.dtype` {#BernoulliWithSigmoidPTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.entropy(name='entropy')` {#BernoulliWithSigmoidPTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.graph` {#BernoulliWithSigmoidPTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.input_dict` {#BernoulliWithSigmoidPTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.loss(final_loss, name='Loss')` {#BernoulliWithSigmoidPTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.mean(name='mean')` {#BernoulliWithSigmoidPTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.name` {#BernoulliWithSigmoidPTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.value(name='value')` {#BernoulliWithSigmoidPTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.value_type` {#BernoulliWithSigmoidPTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.BetaTensor` {#BetaTensor}
-
-`BetaTensor` is a `StochasticTensor` backed by the distribution `Beta`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BetaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.clone(name=None, **dist_args)` {#BetaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.distribution` {#BetaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.dtype` {#BetaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.entropy(name='entropy')` {#BetaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.graph` {#BetaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.input_dict` {#BetaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.loss(final_loss, name='Loss')` {#BetaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.mean(name='mean')` {#BetaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.name` {#BetaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.value(name='value')` {#BetaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.value_type` {#BetaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor` {#BetaWithSoftplusABTensor}
-
-`BetaWithSoftplusABTensor` is a `StochasticTensor` backed by the distribution `BetaWithSoftplusAB`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BetaWithSoftplusABTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.clone(name=None, **dist_args)` {#BetaWithSoftplusABTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.distribution` {#BetaWithSoftplusABTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.dtype` {#BetaWithSoftplusABTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.entropy(name='entropy')` {#BetaWithSoftplusABTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.graph` {#BetaWithSoftplusABTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.input_dict` {#BetaWithSoftplusABTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.loss(final_loss, name='Loss')` {#BetaWithSoftplusABTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.mean(name='mean')` {#BetaWithSoftplusABTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.name` {#BetaWithSoftplusABTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.value(name='value')` {#BetaWithSoftplusABTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.value_type` {#BetaWithSoftplusABTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.BinomialTensor` {#BinomialTensor}
-
-`BinomialTensor` is a `StochasticTensor` backed by the distribution `Binomial`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BinomialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.clone(name=None, **dist_args)` {#BinomialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.distribution` {#BinomialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.dtype` {#BinomialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.entropy(name='entropy')` {#BinomialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.graph` {#BinomialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.input_dict` {#BinomialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.loss(final_loss, name='Loss')` {#BinomialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.mean(name='mean')` {#BinomialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.name` {#BinomialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.value(name='value')` {#BinomialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.value_type` {#BinomialTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor` {#CategoricalTensor}
-
-`CategoricalTensor` is a `StochasticTensor` backed by the distribution `Categorical`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#CategoricalTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.clone(name=None, **dist_args)` {#CategoricalTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.distribution` {#CategoricalTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.dtype` {#CategoricalTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.entropy(name='entropy')` {#CategoricalTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.graph` {#CategoricalTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.input_dict` {#CategoricalTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.loss(final_loss, name='Loss')` {#CategoricalTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.mean(name='mean')` {#CategoricalTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.name` {#CategoricalTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.value(name='value')` {#CategoricalTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.value_type` {#CategoricalTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor` {#Chi2Tensor}
-
-`Chi2Tensor` is a `StochasticTensor` backed by the distribution `Chi2`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#Chi2Tensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.clone(name=None, **dist_args)` {#Chi2Tensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.distribution` {#Chi2Tensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.dtype` {#Chi2Tensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.entropy(name='entropy')` {#Chi2Tensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.graph` {#Chi2Tensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.input_dict` {#Chi2Tensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.loss(final_loss, name='Loss')` {#Chi2Tensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.mean(name='mean')` {#Chi2Tensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.name` {#Chi2Tensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.value(name='value')` {#Chi2Tensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.value_type` {#Chi2Tensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor` {#Chi2WithAbsDfTensor}
-
-`Chi2WithAbsDfTensor` is a `StochasticTensor` backed by the distribution `Chi2WithAbsDf`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#Chi2WithAbsDfTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.clone(name=None, **dist_args)` {#Chi2WithAbsDfTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.distribution` {#Chi2WithAbsDfTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.dtype` {#Chi2WithAbsDfTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.entropy(name='entropy')` {#Chi2WithAbsDfTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.graph` {#Chi2WithAbsDfTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.input_dict` {#Chi2WithAbsDfTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.loss(final_loss, name='Loss')` {#Chi2WithAbsDfTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.mean(name='mean')` {#Chi2WithAbsDfTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.name` {#Chi2WithAbsDfTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.value(name='value')` {#Chi2WithAbsDfTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.value_type` {#Chi2WithAbsDfTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.DirichletTensor` {#DirichletTensor}
-
-`DirichletTensor` is a `StochasticTensor` backed by the distribution `Dirichlet`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#DirichletTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.clone(name=None, **dist_args)` {#DirichletTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.distribution` {#DirichletTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.dtype` {#DirichletTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.entropy(name='entropy')` {#DirichletTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.graph` {#DirichletTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.input_dict` {#DirichletTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.loss(final_loss, name='Loss')` {#DirichletTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.mean(name='mean')` {#DirichletTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.name` {#DirichletTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.value(name='value')` {#DirichletTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.value_type` {#DirichletTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor` {#DirichletMultinomialTensor}
-
-`DirichletMultinomialTensor` is a `StochasticTensor` backed by the distribution `DirichletMultinomial`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#DirichletMultinomialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.clone(name=None, **dist_args)` {#DirichletMultinomialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.distribution` {#DirichletMultinomialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.dtype` {#DirichletMultinomialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.entropy(name='entropy')` {#DirichletMultinomialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.graph` {#DirichletMultinomialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.input_dict` {#DirichletMultinomialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.loss(final_loss, name='Loss')` {#DirichletMultinomialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.mean(name='mean')` {#DirichletMultinomialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.name` {#DirichletMultinomialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.value(name='value')` {#DirichletMultinomialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.value_type` {#DirichletMultinomialTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor` {#ExponentialTensor}
-
-`ExponentialTensor` is a `StochasticTensor` backed by the distribution `Exponential`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#ExponentialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.clone(name=None, **dist_args)` {#ExponentialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.distribution` {#ExponentialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.dtype` {#ExponentialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.entropy(name='entropy')` {#ExponentialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.graph` {#ExponentialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.input_dict` {#ExponentialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.loss(final_loss, name='Loss')` {#ExponentialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.mean(name='mean')` {#ExponentialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.name` {#ExponentialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.value(name='value')` {#ExponentialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.value_type` {#ExponentialTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor` {#ExponentialWithSoftplusLamTensor}
-
-`ExponentialWithSoftplusLamTensor` is a `StochasticTensor` backed by the distribution `ExponentialWithSoftplusLam`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#ExponentialWithSoftplusLamTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.clone(name=None, **dist_args)` {#ExponentialWithSoftplusLamTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.distribution` {#ExponentialWithSoftplusLamTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.dtype` {#ExponentialWithSoftplusLamTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.entropy(name='entropy')` {#ExponentialWithSoftplusLamTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.graph` {#ExponentialWithSoftplusLamTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.input_dict` {#ExponentialWithSoftplusLamTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.loss(final_loss, name='Loss')` {#ExponentialWithSoftplusLamTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.mean(name='mean')` {#ExponentialWithSoftplusLamTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.name` {#ExponentialWithSoftplusLamTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.value(name='value')` {#ExponentialWithSoftplusLamTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.value_type` {#ExponentialWithSoftplusLamTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.GammaTensor` {#GammaTensor}
-
-`GammaTensor` is a `StochasticTensor` backed by the distribution `Gamma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#GammaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.clone(name=None, **dist_args)` {#GammaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.distribution` {#GammaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.dtype` {#GammaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.entropy(name='entropy')` {#GammaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.graph` {#GammaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.input_dict` {#GammaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.loss(final_loss, name='Loss')` {#GammaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.mean(name='mean')` {#GammaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.name` {#GammaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.value(name='value')` {#GammaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.value_type` {#GammaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor` {#GammaWithSoftplusAlphaBetaTensor}
-
-`GammaWithSoftplusAlphaBetaTensor` is a `StochasticTensor` backed by the distribution `GammaWithSoftplusAlphaBeta`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#GammaWithSoftplusAlphaBetaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.clone(name=None, **dist_args)` {#GammaWithSoftplusAlphaBetaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.distribution` {#GammaWithSoftplusAlphaBetaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.dtype` {#GammaWithSoftplusAlphaBetaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.entropy(name='entropy')` {#GammaWithSoftplusAlphaBetaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.graph` {#GammaWithSoftplusAlphaBetaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.input_dict` {#GammaWithSoftplusAlphaBetaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.loss(final_loss, name='Loss')` {#GammaWithSoftplusAlphaBetaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.mean(name='mean')` {#GammaWithSoftplusAlphaBetaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.name` {#GammaWithSoftplusAlphaBetaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.value(name='value')` {#GammaWithSoftplusAlphaBetaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.value_type` {#GammaWithSoftplusAlphaBetaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor` {#InverseGammaTensor}
-
-`InverseGammaTensor` is a `StochasticTensor` backed by the distribution `InverseGamma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#InverseGammaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.clone(name=None, **dist_args)` {#InverseGammaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.distribution` {#InverseGammaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.dtype` {#InverseGammaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.entropy(name='entropy')` {#InverseGammaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.graph` {#InverseGammaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.input_dict` {#InverseGammaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.loss(final_loss, name='Loss')` {#InverseGammaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.mean(name='mean')` {#InverseGammaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.name` {#InverseGammaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.value(name='value')` {#InverseGammaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.value_type` {#InverseGammaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor` {#InverseGammaWithSoftplusAlphaBetaTensor}
-
-`InverseGammaWithSoftplusAlphaBetaTensor` is a `StochasticTensor` backed by the distribution `InverseGammaWithSoftplusAlphaBeta`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#InverseGammaWithSoftplusAlphaBetaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.clone(name=None, **dist_args)` {#InverseGammaWithSoftplusAlphaBetaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.distribution` {#InverseGammaWithSoftplusAlphaBetaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.dtype` {#InverseGammaWithSoftplusAlphaBetaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.entropy(name='entropy')` {#InverseGammaWithSoftplusAlphaBetaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.graph` {#InverseGammaWithSoftplusAlphaBetaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.input_dict` {#InverseGammaWithSoftplusAlphaBetaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.loss(final_loss, name='Loss')` {#InverseGammaWithSoftplusAlphaBetaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.mean(name='mean')` {#InverseGammaWithSoftplusAlphaBetaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.name` {#InverseGammaWithSoftplusAlphaBetaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.value(name='value')` {#InverseGammaWithSoftplusAlphaBetaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.value_type` {#InverseGammaWithSoftplusAlphaBetaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor` {#LaplaceTensor}
-
-`LaplaceTensor` is a `StochasticTensor` backed by the distribution `Laplace`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#LaplaceTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.clone(name=None, **dist_args)` {#LaplaceTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.distribution` {#LaplaceTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.dtype` {#LaplaceTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.entropy(name='entropy')` {#LaplaceTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.graph` {#LaplaceTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.input_dict` {#LaplaceTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.loss(final_loss, name='Loss')` {#LaplaceTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.mean(name='mean')` {#LaplaceTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.name` {#LaplaceTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.value(name='value')` {#LaplaceTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.value_type` {#LaplaceTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor` {#LaplaceWithSoftplusScaleTensor}
-
-`LaplaceWithSoftplusScaleTensor` is a `StochasticTensor` backed by the distribution `LaplaceWithSoftplusScale`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#LaplaceWithSoftplusScaleTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.clone(name=None, **dist_args)` {#LaplaceWithSoftplusScaleTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.distribution` {#LaplaceWithSoftplusScaleTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.dtype` {#LaplaceWithSoftplusScaleTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.entropy(name='entropy')` {#LaplaceWithSoftplusScaleTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.graph` {#LaplaceWithSoftplusScaleTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.input_dict` {#LaplaceWithSoftplusScaleTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.loss(final_loss, name='Loss')` {#LaplaceWithSoftplusScaleTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.mean(name='mean')` {#LaplaceWithSoftplusScaleTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.name` {#LaplaceWithSoftplusScaleTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.value(name='value')` {#LaplaceWithSoftplusScaleTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.value_type` {#LaplaceWithSoftplusScaleTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.MixtureTensor` {#MixtureTensor}
-
-`MixtureTensor` is a `StochasticTensor` backed by the distribution `Mixture`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MixtureTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.clone(name=None, **dist_args)` {#MixtureTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.distribution` {#MixtureTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.dtype` {#MixtureTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.entropy(name='entropy')` {#MixtureTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.graph` {#MixtureTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.input_dict` {#MixtureTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.loss(final_loss, name='Loss')` {#MixtureTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.mean(name='mean')` {#MixtureTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.name` {#MixtureTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.value(name='value')` {#MixtureTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.value_type` {#MixtureTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor` {#MultinomialTensor}
-
-`MultinomialTensor` is a `StochasticTensor` backed by the distribution `Multinomial`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultinomialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.clone(name=None, **dist_args)` {#MultinomialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.distribution` {#MultinomialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.dtype` {#MultinomialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.entropy(name='entropy')` {#MultinomialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.graph` {#MultinomialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.input_dict` {#MultinomialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.loss(final_loss, name='Loss')` {#MultinomialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.mean(name='mean')` {#MultinomialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.name` {#MultinomialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.value(name='value')` {#MultinomialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.value_type` {#MultinomialTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor` {#MultivariateNormalCholeskyTensor}
-
-`MultivariateNormalCholeskyTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalCholesky`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalCholeskyTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.clone(name=None, **dist_args)` {#MultivariateNormalCholeskyTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.distribution` {#MultivariateNormalCholeskyTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.dtype` {#MultivariateNormalCholeskyTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.entropy(name='entropy')` {#MultivariateNormalCholeskyTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.graph` {#MultivariateNormalCholeskyTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.input_dict` {#MultivariateNormalCholeskyTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.loss(final_loss, name='Loss')` {#MultivariateNormalCholeskyTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.mean(name='mean')` {#MultivariateNormalCholeskyTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.name` {#MultivariateNormalCholeskyTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.value(name='value')` {#MultivariateNormalCholeskyTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.value_type` {#MultivariateNormalCholeskyTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor` {#MultivariateNormalDiagTensor}
-
-`MultivariateNormalDiagTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalDiag`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalDiagTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.clone(name=None, **dist_args)` {#MultivariateNormalDiagTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.distribution` {#MultivariateNormalDiagTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.dtype` {#MultivariateNormalDiagTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.entropy(name='entropy')` {#MultivariateNormalDiagTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.graph` {#MultivariateNormalDiagTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.input_dict` {#MultivariateNormalDiagTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.loss(final_loss, name='Loss')` {#MultivariateNormalDiagTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.mean(name='mean')` {#MultivariateNormalDiagTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.name` {#MultivariateNormalDiagTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.value(name='value')` {#MultivariateNormalDiagTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.value_type` {#MultivariateNormalDiagTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor` {#MultivariateNormalDiagPlusVDVTTensor}
-
-`MultivariateNormalDiagPlusVDVTTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalDiagPlusVDVT`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalDiagPlusVDVTTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.clone(name=None, **dist_args)` {#MultivariateNormalDiagPlusVDVTTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.distribution` {#MultivariateNormalDiagPlusVDVTTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.dtype` {#MultivariateNormalDiagPlusVDVTTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.entropy(name='entropy')` {#MultivariateNormalDiagPlusVDVTTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.graph` {#MultivariateNormalDiagPlusVDVTTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.input_dict` {#MultivariateNormalDiagPlusVDVTTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.loss(final_loss, name='Loss')` {#MultivariateNormalDiagPlusVDVTTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.mean(name='mean')` {#MultivariateNormalDiagPlusVDVTTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.name` {#MultivariateNormalDiagPlusVDVTTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.value(name='value')` {#MultivariateNormalDiagPlusVDVTTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.value_type` {#MultivariateNormalDiagPlusVDVTTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor` {#MultivariateNormalDiagWithSoftplusStDevTensor}
-
-`MultivariateNormalDiagWithSoftplusStDevTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalDiagWithSoftplusStDev`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalDiagWithSoftplusStDevTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.clone(name=None, **dist_args)` {#MultivariateNormalDiagWithSoftplusStDevTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.distribution` {#MultivariateNormalDiagWithSoftplusStDevTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.dtype` {#MultivariateNormalDiagWithSoftplusStDevTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.entropy(name='entropy')` {#MultivariateNormalDiagWithSoftplusStDevTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.graph` {#MultivariateNormalDiagWithSoftplusStDevTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.input_dict` {#MultivariateNormalDiagWithSoftplusStDevTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.loss(final_loss, name='Loss')` {#MultivariateNormalDiagWithSoftplusStDevTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.mean(name='mean')` {#MultivariateNormalDiagWithSoftplusStDevTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.name` {#MultivariateNormalDiagWithSoftplusStDevTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.value(name='value')` {#MultivariateNormalDiagWithSoftplusStDevTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.value_type` {#MultivariateNormalDiagWithSoftplusStDevTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor` {#MultivariateNormalFullTensor}
-
-`MultivariateNormalFullTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalFull`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalFullTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.clone(name=None, **dist_args)` {#MultivariateNormalFullTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.distribution` {#MultivariateNormalFullTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.dtype` {#MultivariateNormalFullTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.entropy(name='entropy')` {#MultivariateNormalFullTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.graph` {#MultivariateNormalFullTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.input_dict` {#MultivariateNormalFullTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.loss(final_loss, name='Loss')` {#MultivariateNormalFullTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.mean(name='mean')` {#MultivariateNormalFullTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.name` {#MultivariateNormalFullTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.value(name='value')` {#MultivariateNormalFullTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.value_type` {#MultivariateNormalFullTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.NormalTensor` {#NormalTensor}
-
-`NormalTensor` is a `StochasticTensor` backed by the distribution `Normal`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#NormalTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.clone(name=None, **dist_args)` {#NormalTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.distribution` {#NormalTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.dtype` {#NormalTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.entropy(name='entropy')` {#NormalTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.graph` {#NormalTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.input_dict` {#NormalTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.loss(final_loss, name='Loss')` {#NormalTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.mean(name='mean')` {#NormalTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.name` {#NormalTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.value(name='value')` {#NormalTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.value_type` {#NormalTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor` {#NormalWithSoftplusSigmaTensor}
-
-`NormalWithSoftplusSigmaTensor` is a `StochasticTensor` backed by the distribution `NormalWithSoftplusSigma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#NormalWithSoftplusSigmaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.clone(name=None, **dist_args)` {#NormalWithSoftplusSigmaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.distribution` {#NormalWithSoftplusSigmaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.dtype` {#NormalWithSoftplusSigmaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.entropy(name='entropy')` {#NormalWithSoftplusSigmaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.graph` {#NormalWithSoftplusSigmaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.input_dict` {#NormalWithSoftplusSigmaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.loss(final_loss, name='Loss')` {#NormalWithSoftplusSigmaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.mean(name='mean')` {#NormalWithSoftplusSigmaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.name` {#NormalWithSoftplusSigmaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.value(name='value')` {#NormalWithSoftplusSigmaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.value_type` {#NormalWithSoftplusSigmaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.PoissonTensor` {#PoissonTensor}
-
-`PoissonTensor` is a `StochasticTensor` backed by the distribution `Poisson`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#PoissonTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.clone(name=None, **dist_args)` {#PoissonTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.distribution` {#PoissonTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.dtype` {#PoissonTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.entropy(name='entropy')` {#PoissonTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.graph` {#PoissonTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.input_dict` {#PoissonTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.loss(final_loss, name='Loss')` {#PoissonTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.mean(name='mean')` {#PoissonTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.name` {#PoissonTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.value(name='value')` {#PoissonTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.value_type` {#PoissonTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor` {#QuantizedDistributionTensor}
-
-`QuantizedDistributionTensor` is a `StochasticTensor` backed by the distribution `QuantizedDistribution`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#QuantizedDistributionTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.clone(name=None, **dist_args)` {#QuantizedDistributionTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.distribution` {#QuantizedDistributionTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.dtype` {#QuantizedDistributionTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.entropy(name='entropy')` {#QuantizedDistributionTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.graph` {#QuantizedDistributionTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.input_dict` {#QuantizedDistributionTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.loss(final_loss, name='Loss')` {#QuantizedDistributionTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.mean(name='mean')` {#QuantizedDistributionTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.name` {#QuantizedDistributionTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.value(name='value')` {#QuantizedDistributionTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.value_type` {#QuantizedDistributionTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.StudentTTensor` {#StudentTTensor}
-
-`StudentTTensor` is a `StochasticTensor` backed by the distribution `StudentT`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#StudentTTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.clone(name=None, **dist_args)` {#StudentTTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.distribution` {#StudentTTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.dtype` {#StudentTTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.entropy(name='entropy')` {#StudentTTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.graph` {#StudentTTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.input_dict` {#StudentTTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.loss(final_loss, name='Loss')` {#StudentTTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.mean(name='mean')` {#StudentTTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.name` {#StudentTTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.value(name='value')` {#StudentTTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.value_type` {#StudentTTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor` {#StudentTWithAbsDfSoftplusSigmaTensor}
-
-`StudentTWithAbsDfSoftplusSigmaTensor` is a `StochasticTensor` backed by the distribution `StudentTWithAbsDfSoftplusSigma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#StudentTWithAbsDfSoftplusSigmaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.clone(name=None, **dist_args)` {#StudentTWithAbsDfSoftplusSigmaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.distribution` {#StudentTWithAbsDfSoftplusSigmaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.dtype` {#StudentTWithAbsDfSoftplusSigmaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.entropy(name='entropy')` {#StudentTWithAbsDfSoftplusSigmaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.graph` {#StudentTWithAbsDfSoftplusSigmaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.input_dict` {#StudentTWithAbsDfSoftplusSigmaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.loss(final_loss, name='Loss')` {#StudentTWithAbsDfSoftplusSigmaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.mean(name='mean')` {#StudentTWithAbsDfSoftplusSigmaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.name` {#StudentTWithAbsDfSoftplusSigmaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.value(name='value')` {#StudentTWithAbsDfSoftplusSigmaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.value_type` {#StudentTWithAbsDfSoftplusSigmaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor` {#TransformedDistributionTensor}
-
-`TransformedDistributionTensor` is a `StochasticTensor` backed by the distribution `TransformedDistribution`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#TransformedDistributionTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.clone(name=None, **dist_args)` {#TransformedDistributionTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.distribution` {#TransformedDistributionTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.dtype` {#TransformedDistributionTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.entropy(name='entropy')` {#TransformedDistributionTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.graph` {#TransformedDistributionTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.input_dict` {#TransformedDistributionTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.loss(final_loss, name='Loss')` {#TransformedDistributionTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.mean(name='mean')` {#TransformedDistributionTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.name` {#TransformedDistributionTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.value(name='value')` {#TransformedDistributionTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.value_type` {#TransformedDistributionTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.UniformTensor` {#UniformTensor}
-
-`UniformTensor` is a `StochasticTensor` backed by the distribution `Uniform`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#UniformTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.clone(name=None, **dist_args)` {#UniformTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.distribution` {#UniformTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.dtype` {#UniformTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.entropy(name='entropy')` {#UniformTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.graph` {#UniformTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.input_dict` {#UniformTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.loss(final_loss, name='Loss')` {#UniformTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.mean(name='mean')` {#UniformTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.name` {#UniformTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.value(name='value')` {#UniformTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.value_type` {#UniformTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor` {#WishartCholeskyTensor}
-
-`WishartCholeskyTensor` is a `StochasticTensor` backed by the distribution `WishartCholesky`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#WishartCholeskyTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.clone(name=None, **dist_args)` {#WishartCholeskyTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.distribution` {#WishartCholeskyTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.dtype` {#WishartCholeskyTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.entropy(name='entropy')` {#WishartCholeskyTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.graph` {#WishartCholeskyTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.input_dict` {#WishartCholeskyTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.loss(final_loss, name='Loss')` {#WishartCholeskyTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.mean(name='mean')` {#WishartCholeskyTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.name` {#WishartCholeskyTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.value(name='value')` {#WishartCholeskyTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.value_type` {#WishartCholeskyTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor` {#WishartFullTensor}
-
-`WishartFullTensor` is a `StochasticTensor` backed by the distribution `WishartFull`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#WishartFullTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.clone(name=None, **dist_args)` {#WishartFullTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.distribution` {#WishartFullTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.dtype` {#WishartFullTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.entropy(name='entropy')` {#WishartFullTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.graph` {#WishartFullTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.input_dict` {#WishartFullTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.loss(final_loss, name='Loss')` {#WishartFullTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.mean(name='mean')` {#WishartFullTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.name` {#WishartFullTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.value(name='value')` {#WishartFullTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.value_type` {#WishartFullTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor` {#beta_aaTensor}
-
-`beta_aaTensor` is a `StochasticTensor` backed by the distribution `beta_aa`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#beta_aaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.clone(name=None, **dist_args)` {#beta_aaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.distribution` {#beta_aaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.dtype` {#beta_aaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.entropy(name='entropy')` {#beta_aaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.graph` {#beta_aaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.input_dict` {#beta_aaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.loss(final_loss, name='Loss')` {#beta_aaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.mean(name='mean')` {#beta_aaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.name` {#beta_aaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.value(name='value')` {#beta_aaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.value_type` {#beta_aaTensor.value_type}
-
-
-
-
-
-- - -
-
-### `class tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor` {#beta_bbTensor}
-
-`beta_bbTensor` is a `StochasticTensor` backed by the distribution `beta_bb`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#beta_bbTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.clone(name=None, **dist_args)` {#beta_bbTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.distribution` {#beta_bbTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.dtype` {#beta_bbTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.entropy(name='entropy')` {#beta_bbTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.graph` {#beta_bbTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.input_dict` {#beta_bbTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.loss(final_loss, name='Loss')` {#beta_bbTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.mean(name='mean')` {#beta_bbTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.name` {#beta_bbTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.value(name='value')` {#beta_bbTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.value_type` {#beta_bbTensor.value_type}
-
-
-
-
-
-
 ## Other Functions and Classes
 - - -
 
@@ -3800,38 +444,29 @@ in a `stop_gradients` call to disable any possible backpropagation.
 A StochasticTensor with an observed value.
 - - -
 
-#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.__init__(dist_cls, value, name=None, **dist_args)` {#ObservedStochasticTensor.__init__}
+#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.__init__(dist, value, name=None)` {#ObservedStochasticTensor.__init__}
 
 Construct an `ObservedStochasticTensor`.
 
-`ObservedStochasticTensor` will instantiate a distribution from `dist_cls`
-and `dist_args` but use the provided value instead of sampling from the
-distribution. The provided value argument must be appropriately shaped
-to have come from the constructed distribution.
+`ObservedStochasticTensor` is backed by the distribution `dist` and uses the
+provided value instead of drawing a value from the distribution according to
+the current value type. The provided `value` argument must be appropriately
+shaped to have come from the distribution.
 
 ##### Args:
 
 
-*  <b>`dist_cls`</b>: a `Distribution` class.
+*  <b>`dist`</b>: an instance of `Distribution`.
 *  <b>`value`</b>: a Tensor containing the observed value
 *  <b>`name`</b>: a name for this `ObservedStochasticTensor` and its ops.
-*  <b>`**dist_args`</b>: keyword arguments to be passed through to `dist_cls` on
-      construction.
 
 ##### Raises:
 
 
-*  <b>`TypeError`</b>: if `dist_cls` is not a `Distribution`.
+*  <b>`TypeError`</b>: if `dist` is not an instance of `Distribution`.
 *  <b>`ValueError`</b>: if `value` is not compatible with the distribution.
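+
+For example, a minimal sketch of the new constructor (it assumes the
+`tf.contrib.distributions.Normal` class with `mu`/`sigma` constructor
+arguments; any `Distribution` instance works):
+
+```python
+# Normal(mu=..., sigma=...) is assumed here for illustration.
+dist = tf.contrib.distributions.Normal(mu=0.0, sigma=1.0)
+# The observed value must be shaped as if it were sampled from `dist`.
+st = tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor(
+    dist, value=tf.constant(0.5))
+```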
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.clone(name=None, **dist_args)` {#ObservedStochasticTensor.clone}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.distribution` {#ObservedStochasticTensor.distribution}
@@ -3860,13 +495,6 @@ to have come from the constructed distribution.
 
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.input_dict` {#ObservedStochasticTensor.input_dict}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.loss(final_loss, name=None)` {#ObservedStochasticTensor.loss}
diff --git a/tensorflow/g3doc/api_docs/python/contrib.integrate.md b/tensorflow/g3doc/api_docs/python/contrib.integrate.md
new file mode 100644
index 00000000000..dc2c16a0dac
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/contrib.integrate.md
@@ -0,0 +1,135 @@
+<!-- This file is machine generated: DO NOT EDIT! -->
+
+# Integrate (contrib)
+[TOC]
+
+Integration and ODE solvers for TensorFlow.
+
+## Example: Lorenz attractor
+
+We can use `odeint` to solve the
+[Lorenz system](https://en.wikipedia.org/wiki/Lorenz_system) of ordinary
+differential equations, a prototypical example of chaotic dynamics:
+
+```python
+import numpy as np
+import tensorflow as tf
+import matplotlib.pyplot as plt
+
+rho = 28.0
+sigma = 10.0
+beta = 8.0/3.0
+
+def lorenz_equation(state, t):
+  x, y, z = tf.unpack(state)
+  dx = sigma * (y - x)
+  dy = x * (rho - z) - y
+  dz = x * y - beta * z
+  return tf.pack([dx, dy, dz])
+
+init_state = tf.constant([0, 2, 20], dtype=tf.float64)
+t = np.linspace(0, 50, num=5000)
+tensor_state, tensor_info = tf.contrib.integrate.odeint(
+    lorenz_equation, init_state, t, full_output=True)
+
+sess = tf.Session()
+state, info = sess.run([tensor_state, tensor_info])
+x, y, z = state.T
+plt.plot(x, z)
+```
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/lorenz_attractor.png" alt>
+</div>
+
+## Ops
+
+- - -
+
+### `tf.contrib.integrate.odeint(func, y0, t, rtol=1e-06, atol=1e-12, method=None, options=None, full_output=False, name=None)` {#odeint}
+
+Integrate a system of ordinary differential equations.
+
+Solves the initial value problem for a non-stiff system of first-order ODEs:
+
+  ```
+  dy/dt = func(y, t), y(t[0]) = y0
+  ```
+
+where y is a Tensor of any shape.
+
+For example:
+
+  ```
+  # solve `dy/dt = -y`, corresponding to exponential decay
+  tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
+  => [1, exp(-1), exp(-2)]
+  ```
+
+Output dtypes and numerical precision are based on the dtypes of the inputs
+`y0` and `t`.
+
+Currently, `odeint` implements 5th order Runge-Kutta with adaptive step size
+control and dense output, using the Dormand-Prince method. It is similar to
+the 'dopri5' method of `scipy.integrate.ode` and MATLAB's `ode45`.
+
+Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
+Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
+doi:10.2307/2008219
+
+##### Args:
+
+
+*  <b>`func`</b>: Function that maps a Tensor holding the state `y` and a scalar Tensor
+    `t` into a Tensor of state derivatives with respect to time.
+*  <b>`y0`</b>: N-D Tensor giving starting value of `y` at time point `t[0]`. May
+    have any floating point or complex dtype.
+*  <b>`t`</b>: 1-D Tensor holding a sequence of time points for which to solve for
+    `y`. The initial time point should be the first element of this sequence,
+    and each time must be larger than the previous time. May have any floating
+    point dtype. If not provided as a Tensor, converted to a Tensor with
+    float64 dtype.
+*  <b>`rtol`</b>: optional float64 Tensor specifying an upper bound on relative error,
+    per element of `y`.
+*  <b>`atol`</b>: optional float64 Tensor specifying an upper bound on absolute error,
+    per element of `y`.
+*  <b>`method`</b>: optional string indicating the integration method to use. Currently,
+    the only valid option is `'dopri5'`.
+*  <b>`options`</b>: optional dict of configuration options for the indicated integration
+    method. Can only be provided if a `method` is explicitly set. For
+    `'dopri5'`, valid options include:
+    * first_step: an initial guess for the size of the first integration step
+      (current default: 1.0, but may later be changed to use heuristics based
+      on the gradient).
+    * safety: safety factor for adaptive step control, generally a constant
+      in the range 0.8-1 (default: 0.9).
+    * ifactor: maximum factor by which the adaptive step may be increased
+      (default: 10.0).
+    * dfactor: maximum factor by which the adaptive step may be decreased
+      (default: 0.2).
+    * max_num_steps: integer maximum number of integration steps between time
+      points in `t` (default: 1000).
+*  <b>`full_output`</b>: optional boolean. If True, `odeint` returns a tuple
+    `(y, info_dict)` describing the integration process.
+*  <b>`name`</b>: Optional name for this operation.
+
+##### Returns:
+
+
+*  <b>`y`</b>: (N+1)-D tensor, where the first dimension corresponds to different
+    time points. Contains the solved value of y for each desired time point in
+    `t`, with the initial value `y0` being the first element along the first
+    dimension.
+*  <b>`info_dict`</b>: only if `full_output == True`. A dict with the following values:
+    * num_func_evals: integer Tensor counting the number of function
+      evaluations.
+    * integrate_points: 1D float64 Tensor with the upper bound of each
+      integration time step.
+    * error_ratio: 1D float Tensor with the estimated ratio of the integration
+      error to the error tolerance at each integration step. A ratio greater
+      than 1 corresponds to rejected steps.
+
+##### Raises:
+
+
+*  <b>`ValueError`</b>: if an invalid `method` is provided.
+*  <b>`TypeError`</b>: if `options` is supplied without `method`, or if `t` or `y0` has
+    an invalid dtype.
+
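+As a usage sketch, combining the documented `'dopri5'` method with two of the
+options listed above (the particular values are illustrative only):
+
+```python
+# Solve dy/dt = -y with explicit solver options; 5000 and 0.8 are
+# illustrative choices, not recommended defaults.
+y = tf.contrib.integrate.odeint(
+    lambda y, _: -y, 1.0, [0, 1, 2],
+    method='dopri5',
+    options={'max_num_steps': 5000, 'safety': 0.8})
+```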
+
diff --git a/tensorflow/g3doc/api_docs/python/contrib.learn.md b/tensorflow/g3doc/api_docs/python/contrib.learn.md
index 8d55f807aed..30f6bbee6d4 100644
--- a/tensorflow/g3doc/api_docs/python/contrib.learn.md
+++ b/tensorflow/g3doc/api_docs/python/contrib.learn.md
@@ -1265,21 +1265,18 @@ classes. When number of possible classes is 2, this is binary classification.
 Example:
 
 ```python
-education = sparse_column_with_hash_bucket(column_name="education",
-                                           hash_bucket_size=1000)
-occupation = sparse_column_with_hash_bucket(column_name="occupation",
-                                            hash_bucket_size=1000)
+sparse_column_a = sparse_column_with_hash_bucket(...)
+sparse_column_b = sparse_column_with_hash_bucket(...)
 
-education_x_occupation = crossed_column(columns=[education, occupation],
-                                        hash_bucket_size=10000)
+sparse_feature_a_x_sparse_feature_b = crossed_column(...)
 
 # Estimator using the default optimizer.
 estimator = LinearClassifier(
-    feature_columns=[occupation, education_x_occupation])
+    feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
 
 # Or estimator using the FTRL optimizer with regularization.
 estimator = LinearClassifier(
-    feature_columns=[occupation, education_x_occupation],
+    feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
     optimizer=tf.train.FtrlOptimizer(
       learning_rate=0.1,
       l1_regularization_strength=0.001
@@ -1287,7 +1284,7 @@ estimator = LinearClassifier(
 
 # Or estimator using the SDCAOptimizer.
 estimator = LinearClassifier(
-   feature_columns=[occupation, education_x_occupation],
+   feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
    optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
      example_id_column='example_id',
      num_loss_partitions=...,
@@ -1483,16 +1480,13 @@ feature values.
 Example:
 
 ```python
-education = sparse_column_with_hash_bucket(column_name="education",
-                                           hash_bucket_size=1000)
-occupation = sparse_column_with_hash_bucket(column_name="occupation",
-                                            hash_bucket_size=1000)
+sparse_column_a = sparse_column_with_hash_bucket(...)
+sparse_column_b = sparse_column_with_hash_bucket(...)
 
-education_x_occupation = crossed_column(columns=[education, occupation],
-                                        hash_bucket_size=10000)
+sparse_feature_a_x_sparse_feature_b = crossed_column(...)
 
 estimator = LinearRegressor(
-    feature_columns=[occupation, education_x_occupation])
+    feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
 
 # Input builders
 def input_fn_train: # returns x, y
diff --git a/tensorflow/g3doc/api_docs/python/contrib.metrics.md b/tensorflow/g3doc/api_docs/python/contrib.metrics.md
index 326a90b2c40..856be448660 100644
--- a/tensorflow/g3doc/api_docs/python/contrib.metrics.md
+++ b/tensorflow/g3doc/api_docs/python/contrib.metrics.md
@@ -300,7 +300,12 @@ This value is ultimately returned as `auc`, an idempotent operation that
 computes the area under a discretized curve of precision versus recall values
 (computed using the aforementioned variables). The `num_thresholds` variable
 controls the degree of discretization with larger numbers of thresholds more
-closely approximating the true AUC.
+closely approximating the true AUC. The quality of the approximation may vary
+dramatically depending on `num_thresholds`.
+
+For best results, `predictions` should be distributed approximately uniformly
+in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
+approximation may be poor if this is not the case.
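+
+As a rough illustration (a sketch assuming this module's `streaming_auc` op,
+whose `num_thresholds` keyword is described above):
+
+```python
+# A larger num_thresholds gives a finer discretization of the curve;
+# 500 here is an illustrative choice, not a recommendation.
+predictions = tf.random_uniform([100])  # scores in [0, 1]
+labels = tf.cast(tf.random_uniform([100]) > 0.5, tf.float32)
+auc, update_op = tf.contrib.metrics.streaming_auc(
+    predictions, labels, num_thresholds=500)
+```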
 
 For estimation of the metric over a stream of data, the function creates an
 `update_op` operation that updates these variables and returns the `auc`.
diff --git a/tensorflow/g3doc/api_docs/python/framework.md b/tensorflow/g3doc/api_docs/python/framework.md
index 498e7d999f3..86de039e7bd 100644
--- a/tensorflow/g3doc/api_docs/python/framework.md
+++ b/tensorflow/g3doc/api_docs/python/framework.md
@@ -2845,10 +2845,18 @@ variables.
 
 The following standard keys are defined:
 
-* `VARIABLES`: the `Variable` objects that comprise a model, and
-  must be saved and restored together. See
-  [`tf.all_variables()`](../../api_docs/python/state_ops.md#all_variables)
+* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
+  across a distributed environment (model variables are a subset of these). See
+  [`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables)
   for more details.
+  Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
+  and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
+* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
+  machine. Usually used for temporary variables, like counters.
+  Note: use `tf.contrib.framework.local_variable` to add to this collection.
+* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
+  model for inference (feed forward). Note: use
+  `tf.contrib.framework.model_variable` to add to this collection.
 * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
   be trained by an optimizer. See
   [`tf.trainable_variables()`](../../api_docs/python/state_ops.md#trainable_variables)
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.md
deleted file mode 100644
index 380b9d2c7f2..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`BetaWithSoftplusABTensor` is a `StochasticTensor` backed by the distribution `BetaWithSoftplusAB`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BetaWithSoftplusABTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.clone(name=None, **dist_args)` {#BetaWithSoftplusABTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.distribution` {#BetaWithSoftplusABTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.dtype` {#BetaWithSoftplusABTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.entropy(name='entropy')` {#BetaWithSoftplusABTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.graph` {#BetaWithSoftplusABTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.input_dict` {#BetaWithSoftplusABTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.loss(final_loss, name='Loss')` {#BetaWithSoftplusABTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.mean(name='mean')` {#BetaWithSoftplusABTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.name` {#BetaWithSoftplusABTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.value(name='value')` {#BetaWithSoftplusABTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaWithSoftplusABTensor.value_type` {#BetaWithSoftplusABTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.md
deleted file mode 100644
index 874892f110f..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`GammaWithSoftplusAlphaBetaTensor` is a `StochasticTensor` backed by the distribution `GammaWithSoftplusAlphaBeta`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#GammaWithSoftplusAlphaBetaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.clone(name=None, **dist_args)` {#GammaWithSoftplusAlphaBetaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.distribution` {#GammaWithSoftplusAlphaBetaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.dtype` {#GammaWithSoftplusAlphaBetaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.entropy(name='entropy')` {#GammaWithSoftplusAlphaBetaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.graph` {#GammaWithSoftplusAlphaBetaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.input_dict` {#GammaWithSoftplusAlphaBetaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.loss(final_loss, name='Loss')` {#GammaWithSoftplusAlphaBetaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.mean(name='mean')` {#GammaWithSoftplusAlphaBetaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.name` {#GammaWithSoftplusAlphaBetaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.value(name='value')` {#GammaWithSoftplusAlphaBetaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaWithSoftplusAlphaBetaTensor.value_type` {#GammaWithSoftplusAlphaBetaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.md
deleted file mode 100644
index 7be941bb9a5..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`MultivariateNormalDiagPlusVDVTTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalDiagPlusVDVT`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalDiagPlusVDVTTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.clone(name=None, **dist_args)` {#MultivariateNormalDiagPlusVDVTTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.distribution` {#MultivariateNormalDiagPlusVDVTTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.dtype` {#MultivariateNormalDiagPlusVDVTTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.entropy(name='entropy')` {#MultivariateNormalDiagPlusVDVTTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.graph` {#MultivariateNormalDiagPlusVDVTTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.input_dict` {#MultivariateNormalDiagPlusVDVTTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.loss(final_loss, name='Loss')` {#MultivariateNormalDiagPlusVDVTTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.mean(name='mean')` {#MultivariateNormalDiagPlusVDVTTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.name` {#MultivariateNormalDiagPlusVDVTTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.value(name='value')` {#MultivariateNormalDiagPlusVDVTTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagPlusVDVTTensor.value_type` {#MultivariateNormalDiagPlusVDVTTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.md
index dc1975bf123..da7082ffb65 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.md
@@ -1,38 +1,29 @@
 A StochasticTensor with an observed value.
 - - -
 
-#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.__init__(dist_cls, value, name=None, **dist_args)` {#ObservedStochasticTensor.__init__}
+#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.__init__(dist, value, name=None)` {#ObservedStochasticTensor.__init__}
 
 Construct an `ObservedStochasticTensor`.
 
-`ObservedStochasticTensor` will instantiate a distribution from `dist_cls`
-and `dist_args` but use the provided value instead of sampling from the
-distribution. The provided value argument must be appropriately shaped
-to have come from the constructed distribution.
+`ObservedStochasticTensor` is backed by the distribution `dist` and uses the
+provided value instead of drawing a value from the distribution according to
+the current value type. The provided `value` argument must be appropriately
+shaped to have come from the distribution.
 
 ##### Args:
 
 
-*  <b>`dist_cls`</b>: a `Distribution` class.
+*  <b>`dist`</b>: an instance of `Distribution`.
 *  <b>`value`</b>: a Tensor containing the observed value
 *  <b>`name`</b>: a name for this `ObservedStochasticTensor` and its ops.
-*  <b>`**dist_args`</b>: keyword arguments to be passed through to `dist_cls` on
-      construction.
 
 ##### Raises:
 
 
-*  <b>`TypeError`</b>: if `dist_cls` is not a `Distribution`.
+*  <b>`TypeError`</b>: if `dist` is not an instance of `Distribution`.
 *  <b>`ValueError`</b>: if `value` is not compatible with the distribution.
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.clone(name=None, **dist_args)` {#ObservedStochasticTensor.clone}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.distribution` {#ObservedStochasticTensor.distribution}
@@ -61,13 +52,6 @@ to have come from the constructed distribution.
 
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.input_dict` {#ObservedStochasticTensor.input_dict}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.ObservedStochasticTensor.loss(final_loss, name=None)` {#ObservedStochasticTensor.loss}
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.SampleAndReshapeValue.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.SampleAndReshapeValue.md
index 6d12b859ffe..6b564c36b9f 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.SampleAndReshapeValue.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.SampleAndReshapeValue.md
@@ -23,8 +23,8 @@ with sg.value_type(sg.SampleAndReshapeValue(n=2)):
 st_value = st.value()
 assertEqual(st_value.get_shape(), (4, 3))
 
-dt_value_val = sess.run([st_value])[0]  # or e.g. run([tf.identity(st)])[0]
-assertEqual(dt_value_val.shape, (4, 3))
+st_value_val = sess.run([st_value])[0]  # or e.g. run([tf.identity(st)])[0]
+assertEqual(st_value_val.shape, (4, 3))
 ```
 - - -
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.md
deleted file mode 100644
index 52493288601..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`TransformedDistributionTensor` is a `StochasticTensor` backed by the distribution `TransformedDistribution`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#TransformedDistributionTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.clone(name=None, **dist_args)` {#TransformedDistributionTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.distribution` {#TransformedDistributionTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.dtype` {#TransformedDistributionTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.entropy(name='entropy')` {#TransformedDistributionTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.graph` {#TransformedDistributionTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.input_dict` {#TransformedDistributionTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.loss(final_loss, name='Loss')` {#TransformedDistributionTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.mean(name='mean')` {#TransformedDistributionTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.name` {#TransformedDistributionTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.value(name='value')` {#TransformedDistributionTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.TransformedDistributionTensor.value_type` {#TransformedDistributionTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md
index bdeab9de13c..2352b13897a 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md
@@ -6,16 +6,13 @@ feature values.
 Example:
 
 ```python
-education = sparse_column_with_hash_bucket(column_name="education",
-                                           hash_bucket_size=1000)
-occupation = sparse_column_with_hash_bucket(column_name="occupation",
-                                            hash_bucket_size=1000)
+sparse_column_a = sparse_column_with_hash_bucket(...)
+sparse_column_b = sparse_column_with_hash_bucket(...)
 
-education_x_occupation = crossed_column(columns=[education, occupation],
-                                        hash_bucket_size=10000)
+sparse_feature_a_x_sparse_feature_b = crossed_column(...)
 
 estimator = LinearRegressor(
-    feature_columns=[occupation, education_x_occupation])
+    feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
 
 # Input builders
 def input_fn_train: # returns x, y
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.fake_quant_with_min_max_vars_gradient.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.fake_quant_with_min_max_vars_gradient.md
new file mode 100644
index 00000000000..b363afe7cef
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.fake_quant_with_min_max_vars_gradient.md
@@ -0,0 +1,27 @@
+### `tf.fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max, name=None)` {#fake_quant_with_min_max_vars_gradient}
+
+Compute gradients for a FakeQuantWithMinMaxVars operation.
+
+##### Args:
+
+
+*  <b>`gradients`</b>: A `Tensor` of type `float32`.
+    Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+    Values passed as inputs to the FakeQuantWithMinMaxVars operation.
+*  <b>`min`</b>: A `Tensor` of type `float32`. The lower bound of the
+    quantization interval, a scalar float.
+*  <b>`max`</b>: A `Tensor` of type `float32`. The upper bound of the
+    quantization interval, a scalar float.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).
+
+*  <b>`backprops_wrt_input`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. inputs:
+    `gradients * (inputs >= min && inputs <= max)`.
+*  <b>`backprop_wrt_min`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. min parameter:
+    `sum(gradients * (inputs < min))`.
+*  <b>`backprop_wrt_max`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. max parameter:
+    `sum(gradients * (inputs > max))`.
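+
+As a rough NumPy sketch (an illustration of the documented formulas above,
+not the TensorFlow kernel), the three results can be computed as:
+
+```python
+import numpy as np
+
+def fake_quant_with_min_max_vars_grad(gradients, inputs, min_val, max_val):
+  # Gradients pass through only where inputs lie inside [min, max].
+  inside = (inputs >= min_val) & (inputs <= max_val)
+  backprops_wrt_input = gradients * inside
+  # Out-of-range inputs route their gradients to the interval bounds.
+  backprop_wrt_min = np.sum(gradients * (inputs < min_val))
+  backprop_wrt_max = np.sum(gradients * (inputs > max_val))
+  return backprops_wrt_input, backprop_wrt_min, backprop_wrt_max
+```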
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.fake_quant_with_min_max_vars_per_channel_gradient.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.fake_quant_with_min_max_vars_per_channel_gradient.md
new file mode 100644
index 00000000000..a7a62e29b31
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.fake_quant_with_min_max_vars_per_channel_gradient.md
@@ -0,0 +1,30 @@
+### `tf.fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max, name=None)` {#fake_quant_with_min_max_vars_per_channel_gradient}
+
+Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
+
+##### Args:
+
+
+*  <b>`gradients`</b>: A `Tensor` of type `float32`.
+    Backpropagated gradients above the FakeQuantWithMinMaxVarsPerChannel
+    operation, shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+    Values passed as inputs to the FakeQuantWithMinMaxVarsPerChannel
+    operation, shape same as `gradients`.
+*  <b>`min`</b>: A `Tensor` of type `float32`. The lower bound of the
+    quantization interval, a float of shape `[d]`.
+*  <b>`max`</b>: A `Tensor` of type `float32`. The upper bound of the
+    quantization interval, a float of shape `[d]`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).
+
+*  <b>`backprops_wrt_input`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. inputs, shape same as
+    `inputs`:
+      `gradients * (inputs >= min && inputs <= max)`.
+*  <b>`backprop_wrt_min`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. min parameter, shape `[d]`:
+    `sum_per_d(gradients * (inputs < min))`.
+*  <b>`backprop_wrt_max`</b>: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. max parameter, shape `[d]`:
+    `sum_per_d(gradients * (inputs > max))`.
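+
+As with the scalar variant, a rough NumPy sketch (an illustration, not the
+TensorFlow kernel) makes the `sum_per_d` reductions concrete: the sums run
+over every axis except the last (channel) axis.
+
+```python
+import numpy as np
+
+def fake_quant_per_channel_grad(gradients, inputs, min_val, max_val):
+  # min_val/max_val have shape [d] and broadcast against inputs.
+  inside = (inputs >= min_val) & (inputs <= max_val)
+  reduce_axes = tuple(range(inputs.ndim - 1))  # all axes but the channel axis
+  backprops_wrt_input = gradients * inside
+  backprop_wrt_min = np.sum(gradients * (inputs < min_val), axis=reduce_axes)
+  backprop_wrt_max = np.sum(gradients * (inputs > max_val), axis=reduce_axes)
+  return backprops_wrt_input, backprop_wrt_min, backprop_wrt_max
+```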
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md
index 904b99f321a..a64640478ff 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md
@@ -1,12 +1,8 @@
-### `tf.all_variables()` {#all_variables}
+### `tf.all_variables(*args, **kwargs)` {#all_variables}
 
-Returns all variables that must be saved/restored.
+See `tf.global_variables`. (deprecated)
 
-The `Variable()` constructor automatically adds new variables to the graph
-collection `GraphKeys.VARIABLES`. This convenience function returns the
-contents of that collection.
-
-##### Returns:
-
-  A list of `Variable` objects.
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Please use tf.global_variables instead.
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.md
index 8e1c5a98864..0bee637f4d2 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.md
@@ -20,13 +20,6 @@ Base Class for Tensor-like objects that emit stochastic values.
 
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.input_dict` {#BaseStochasticTensor.input_dict}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.BaseStochasticTensor.loss(sample_loss)` {#BaseStochasticTensor.loss}
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.BetaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.BetaTensor.md
deleted file mode 100644
index 12f015573d2..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.BetaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`BetaTensor` is a `StochasticTensor` backed by the distribution `Beta`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BetaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.clone(name=None, **dist_args)` {#BetaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.distribution` {#BetaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.dtype` {#BetaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.entropy(name='entropy')` {#BetaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.graph` {#BetaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.input_dict` {#BetaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.loss(final_loss, name='Loss')` {#BetaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.mean(name='mean')` {#BetaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.name` {#BetaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.value(name='value')` {#BetaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BetaTensor.value_type` {#BetaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.md
deleted file mode 100644
index 3354de85c9c..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`LaplaceWithSoftplusScaleTensor` is a `StochasticTensor` backed by the distribution `LaplaceWithSoftplusScale`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#LaplaceWithSoftplusScaleTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.clone(name=None, **dist_args)` {#LaplaceWithSoftplusScaleTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.distribution` {#LaplaceWithSoftplusScaleTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.dtype` {#LaplaceWithSoftplusScaleTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.entropy(name='entropy')` {#LaplaceWithSoftplusScaleTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.graph` {#LaplaceWithSoftplusScaleTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.input_dict` {#LaplaceWithSoftplusScaleTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.loss(final_loss, name='Loss')` {#LaplaceWithSoftplusScaleTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.mean(name='mean')` {#LaplaceWithSoftplusScaleTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.name` {#LaplaceWithSoftplusScaleTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.value(name='value')` {#LaplaceWithSoftplusScaleTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceWithSoftplusScaleTensor.value_type` {#LaplaceWithSoftplusScaleTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.integrate.odeint.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.integrate.odeint.md
new file mode 100644
index 00000000000..25b2709be88
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.integrate.odeint.md
@@ -0,0 +1,90 @@
+### `tf.contrib.integrate.odeint(func, y0, t, rtol=1e-06, atol=1e-12, method=None, options=None, full_output=False, name=None)` {#odeint}
+
+Integrate a system of ordinary differential equations.
+
+Solves the initial value problem for a non-stiff system of first-order ODEs:
+
+  ```
+  dy/dt = func(y, t), y(t[0]) = y0
+  ```
+
+where y is a Tensor of any shape.
+
+For example:
+
+  ```
+  # solve `dy/dt = -y`, corresponding to exponential decay
+  tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
+  => [1, exp(-1), exp(-2)]
+  ```
+
+Output dtypes and numerical precision are based on the dtypes of the inputs
+`y0` and `t`.
+
+Currently, `odeint` implements 5th-order Runge-Kutta with adaptive step size
+control and dense output, using the Dormand-Prince method. It is similar to
+the 'dopri5' method of `scipy.integrate.ode` and MATLAB's `ode45`.
+
+Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
+Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
+doi:10.2307/2008219
+
+##### Args:
+
+
+*  <b>`func`</b>: Function that maps a Tensor holding the state `y` and a scalar Tensor
+    `t` into a Tensor of state derivatives with respect to time.
+*  <b>`y0`</b>: N-D Tensor giving starting value of `y` at time point `t[0]`. May
+    have any floating point or complex dtype.
+*  <b>`t`</b>: 1-D Tensor holding a sequence of time points for which to solve for
+    `y`. The initial time point should be the first element of this sequence,
+    and each time must be larger than the previous time. May have any floating
+    point dtype. If not provided as a Tensor, converted to a Tensor with
+    float64 dtype.
+*  <b>`rtol`</b>: optional float64 Tensor specifying an upper bound on relative error,
+    per element of `y`.
+*  <b>`atol`</b>: optional float64 Tensor specifying an upper bound on absolute error,
+    per element of `y`.
+*  <b>`method`</b>: optional string indicating the integration method to use. Currently,
+    the only valid option is `'dopri5'`.
+*  <b>`options`</b>: optional dict of configuration options for the indicated integration
+    method. Can only be provided if a `method` is explicitly set. For
+    `'dopri5'`, valid options include:
+    * first_step: an initial guess for the size of the first integration step
+      (current default: 1.0, but may later be changed to use heuristics based
+      on the gradient).
+    * safety: safety factor for adaptive step control, generally a constant
+      in the range 0.8-1 (default: 0.9).
+    * ifactor: maximum factor by which the adaptive step may be increased
+      (default: 10.0).
+    * dfactor: maximum factor by which the adaptive step may be decreased
+      (default: 0.2).
+    * max_num_steps: integer maximum number of integration steps between time
+      points in `t` (default: 1000).
+*  <b>`full_output`</b>: optional boolean. If True, `odeint` returns a tuple
+    `(y, info_dict)` describing the integration process.
+*  <b>`name`</b>: Optional name for this operation.
+
+##### Returns:
+
+
+*  <b>`y`</b>: (N+1)-D tensor, where the first dimension corresponds to different
+    time points. Contains the solved value of y for each desired time point in
+    `t`, with the initial value `y0` being the first element along the first
+    dimension.
+*  <b>`info_dict`</b>: only if `full_output == True`. A dict with the following values:
+    * num_func_evals: integer Tensor counting the number of function
+      evaluations.
+    * integrate_points: 1D float64 Tensor with the upper bound of each
+      integration time step.
+    * error_ratio: 1D float Tensor with the estimated ratio of the integration
+      error to the error tolerance at each integration step. A ratio greater
+      than 1 corresponds to rejected steps.
+
+##### Raises:
+
+
+*  <b>`ValueError`</b>: if an invalid `method` is provided.
+*  <b>`TypeError`</b>: if `options` is supplied without `method`, or if `t` or `y0` has
+    an invalid dtype.
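+
+As a minimal end-to-end sketch (assuming the default `'dopri5'` method), the
+result is an ordinary tensor that can be evaluated in a session:
+
+```python
+import numpy as np
+import tensorflow as tf
+
+t = np.linspace(0.0, 2.0, num=11)  # converted to a float64 Tensor
+y = tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, t)
+
+with tf.Session() as sess:
+  y_val = sess.run(y)  # approximately np.exp(-t)
+```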
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.learn.LinearClassifier.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.learn.LinearClassifier.md
index 9327ccc2c1f..3f6584c1f82 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.learn.LinearClassifier.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.contrib.learn.LinearClassifier.md
@@ -6,21 +6,18 @@ classes. When number of possible classes is 2, this is binary classification.
 Example:
 
 ```python
-education = sparse_column_with_hash_bucket(column_name="education",
-                                           hash_bucket_size=1000)
-occupation = sparse_column_with_hash_bucket(column_name="occupation",
-                                            hash_bucket_size=1000)
+sparse_column_a = sparse_column_with_hash_bucket(...)
+sparse_column_b = sparse_column_with_hash_bucket(...)
 
-education_x_occupation = crossed_column(columns=[education, occupation],
-                                        hash_bucket_size=10000)
+sparse_feature_a_x_sparse_feature_b = crossed_column(...)
 
 # Estimator using the default optimizer.
 estimator = LinearClassifier(
-    feature_columns=[occupation, education_x_occupation])
+    feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
 
 # Or estimator using the FTRL optimizer with regularization.
 estimator = LinearClassifier(
-    feature_columns=[occupation, education_x_occupation],
+    feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
     optimizer=tf.train.FtrlOptimizer(
       learning_rate=0.1,
       l1_regularization_strength=0.001
@@ -28,7 +25,7 @@ estimator = LinearClassifier(
 
 # Or estimator using the SDCAOptimizer.
 estimator = LinearClassifier(
-   feature_columns=[occupation, education_x_occupation],
+   feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
    optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
      example_id_column='example_id',
      num_loss_partitions=...,
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.fake_quant_with_min_max_args_gradient.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.fake_quant_with_min_max_args_gradient.md
new file mode 100644
index 00000000000..5c93c3e0468
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.fake_quant_with_min_max_args_gradient.md
@@ -0,0 +1,21 @@
+### `tf.fake_quant_with_min_max_args_gradient(gradients, inputs, min=None, max=None, name=None)` {#fake_quant_with_min_max_args_gradient}
+
+Compute gradients for a FakeQuantWithMinMaxArgs operation.
+
+##### Args:
+
+
+*  <b>`gradients`</b>: A `Tensor` of type `float32`.
+    Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+    Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
+*  <b>`min`</b>: An optional `float`. Defaults to `-6`.
+*  <b>`max`</b>: An optional `float`. Defaults to `6`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
+  Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
+  `gradients * (inputs >= min && inputs <= max)`.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.md
deleted file mode 100644
index 9628ddbb8bb..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`MultivariateNormalDiagTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalDiag`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalDiagTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.clone(name=None, **dist_args)` {#MultivariateNormalDiagTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.distribution` {#MultivariateNormalDiagTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.dtype` {#MultivariateNormalDiagTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.entropy(name='entropy')` {#MultivariateNormalDiagTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.graph` {#MultivariateNormalDiagTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.input_dict` {#MultivariateNormalDiagTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.loss(final_loss, name='Loss')` {#MultivariateNormalDiagTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.mean(name='mean')` {#MultivariateNormalDiagTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.name` {#MultivariateNormalDiagTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.value(name='value')` {#MultivariateNormalDiagTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagTensor.value_type` {#MultivariateNormalDiagTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.md
deleted file mode 100644
index 9e831a2b508..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`MultivariateNormalDiagWithSoftplusStDevTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalDiagWithSoftplusStDev`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalDiagWithSoftplusStDevTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.clone(name=None, **dist_args)` {#MultivariateNormalDiagWithSoftplusStDevTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.distribution` {#MultivariateNormalDiagWithSoftplusStDevTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.dtype` {#MultivariateNormalDiagWithSoftplusStDevTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.entropy(name='entropy')` {#MultivariateNormalDiagWithSoftplusStDevTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.graph` {#MultivariateNormalDiagWithSoftplusStDevTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.input_dict` {#MultivariateNormalDiagWithSoftplusStDevTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.loss(final_loss, name='Loss')` {#MultivariateNormalDiagWithSoftplusStDevTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.mean(name='mean')` {#MultivariateNormalDiagWithSoftplusStDevTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.name` {#MultivariateNormalDiagWithSoftplusStDevTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.value(name='value')` {#MultivariateNormalDiagWithSoftplusStDevTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalDiagWithSoftplusStDevTensor.value_type` {#MultivariateNormalDiagWithSoftplusStDevTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.md
deleted file mode 100644
index fdf5e0f7f6e..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`PoissonTensor` is a `StochasticTensor` backed by the distribution `Poisson`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#PoissonTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.clone(name=None, **dist_args)` {#PoissonTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.distribution` {#PoissonTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.dtype` {#PoissonTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.entropy(name='entropy')` {#PoissonTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.graph` {#PoissonTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.input_dict` {#PoissonTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.loss(final_loss, name='Loss')` {#PoissonTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.mean(name='mean')` {#PoissonTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.name` {#PoissonTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.value(name='value')` {#PoissonTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.PoissonTensor.value_type` {#PoissonTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.md
deleted file mode 100644
index 460980870ba..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`WishartFullTensor` is a `StochasticTensor` backed by the distribution `WishartFull`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#WishartFullTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.clone(name=None, **dist_args)` {#WishartFullTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.distribution` {#WishartFullTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.dtype` {#WishartFullTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.entropy(name='entropy')` {#WishartFullTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.graph` {#WishartFullTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.input_dict` {#WishartFullTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.loss(final_loss, name='Loss')` {#WishartFullTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.mean(name='mean')` {#WishartFullTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.name` {#WishartFullTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.value(name='value')` {#WishartFullTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartFullTensor.value_type` {#WishartFullTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.metrics.streaming_auc.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.metrics.streaming_auc.md
index 8912eee3d40..aba3101a9f7 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.metrics.streaming_auc.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.metrics.streaming_auc.md
@@ -14,7 +14,12 @@ This value is ultimately returned as `auc`, an idempotent operation that
 computes the area under a discretized curve of precision versus recall values
 (computed using the aforementioned variables). The `num_thresholds` variable
 controls the degree of discretization with larger numbers of thresholds more
-closely approximating the true AUC.
+closely approximating the true AUC. The quality of the approximation may vary
+dramatically depending on `num_thresholds`.
+
+For best results, `predictions` should be distributed approximately uniformly
+in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
+approximation may be poor if this is not the case.
 
 For estimation of the metric over a stream of data, the function creates an
 `update_op` operation that updates these variables and returns the `auc`.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.global_variables_initializer.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.global_variables_initializer.md
new file mode 100644
index 00000000000..b1ebdcc3270
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.global_variables_initializer.md
@@ -0,0 +1,10 @@
+### `tf.global_variables_initializer()` {#global_variables_initializer}
+
+Returns an Op that initializes global variables.
+
+This is just a shortcut for `variables_initializer(global_variables())`.
+
+##### Returns:
+
+  An Op that initializes global variables in the graph.
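+
+For example (a minimal sketch), the returned Op is typically run once before
+any variables are read:
+
+```python
+import tensorflow as tf
+
+v = tf.Variable(tf.zeros([3]))
+with tf.Session() as sess:
+  sess.run(tf.global_variables_initializer())
+  print(sess.run(v))  # => [ 0.  0.  0.]
+```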
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_all_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_all_variables.md
index 9a0e5d8261b..ec240fc6088 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_all_variables.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_all_variables.md
@@ -1,10 +1,8 @@
-### `tf.initialize_all_variables()` {#initialize_all_variables}
+### `tf.initialize_all_variables(*args, **kwargs)` {#initialize_all_variables}
 
-Returns an Op that initializes all variables.
+See `tf.global_variables_initializer`. (deprecated)
 
-This is just a shortcut for `initialize_variables(all_variables())`
-
-##### Returns:
-
-  An Op that initializes all variables in the graph.
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Use `tf.global_variables_initializer` instead.
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_local_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_local_variables.md
index 2a56dbb9d69..a6c1395e918 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_local_variables.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.initialize_local_variables.md
@@ -1,10 +1,8 @@
-### `tf.initialize_local_variables()` {#initialize_local_variables}
+### `tf.initialize_local_variables(*args, **kwargs)` {#initialize_local_variables}
 
-Returns an Op that initializes all local variables.
+See `tf.local_variables_initializer`. (deprecated)
 
-This is just a shortcut for `initialize_variables(local_variables())`
-
-##### Returns:
-
-  An Op that initializes all local variables in the graph.
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Use `tf.local_variables_initializer` instead.
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.local_variables_initializer.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.local_variables_initializer.md
new file mode 100644
index 00000000000..3f726bdf7ad
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.local_variables_initializer.md
@@ -0,0 +1,10 @@
+### `tf.local_variables_initializer()` {#local_variables_initializer}
+
+Returns an Op that initializes all local variables.
+
+This is just a shortcut for `variables_initializer(local_variables())`.
+
+##### Returns:
+
+  An Op that initializes all local variables in the graph.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.report_uninitialized_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.report_uninitialized_variables.md
index 59c1394a4aa..e3ecdf7733b 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.report_uninitialized_variables.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.report_uninitialized_variables.md
@@ -9,7 +9,7 @@ variables if there are any, or an empty array if there are none.
 
 
 *  <b>`var_list`</b>: List of `Variable` objects to check. Defaults to the
-    value of `all_variables() + local_variables()`
+    value of `global_variables() + local_variables()`
 *  <b>`name`</b>: Optional name of the `Operation`.
 
 ##### Returns:
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.scatter_nd_sub.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.scatter_nd_sub.md
new file mode 100644
index 00000000000..c64e3793754
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.scatter_nd_sub.md
@@ -0,0 +1,52 @@
+### `tf.scatter_nd_sub(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_sub}
+
+Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    sub = tf.scatter_nd_sub(ref, indices, updates)
+    with tf.Session() as sess:
+      sess.run(tf.global_variables_initializer())
+      print(sess.run(sub))
+
+The resulting update to ref would look like this:
+
+    [1, -9, 3, -6, -4, 6, 7, -4]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    Should be from a `Variable` node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A tensor of indices into `ref`.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A tensor of updated values to subtract from `ref`.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    If `True`, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.variables_initializer.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.variables_initializer.md
new file mode 100644
index 00000000000..ec779e79f66
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.variables_initializer.md
@@ -0,0 +1,24 @@
+### `tf.variables_initializer(var_list, name='init')` {#variables_initializer}
+
+Returns an Op that initializes a list of variables.
+
+After you launch the graph in a session, you can run the returned Op to
+initialize all the variables in `var_list`. This Op runs all the
+initializers of the variables in `var_list` in parallel.
+
+Calling `variables_initializer()` is equivalent to passing the list of
+initializers to `Group()`.
+
+If `var_list` is empty, however, the function still returns an Op that can
+be run. That Op just has no effect.
+
+##### Args:
+
+
+*  <b>`var_list`</b>: List of `Variable` objects to initialize.
+*  <b>`name`</b>: Optional name for the returned operation.
+
+##### Returns:
+
+  An Op that runs the initializers of all the specified variables.
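+
+For example, a minimal sketch initializing only two specific variables:
+
+```python
+import tensorflow as tf
+
+v = tf.Variable(tf.zeros([3]), name='v')
+w = tf.Variable(tf.ones([3]), name='w')
+init_vw = tf.variables_initializer([v, w])
+
+with tf.Session() as sess:
+  sess.run(init_vw)  # runs both initializers in parallel
+```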
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.md
deleted file mode 100644
index 3280f5a9448..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`MixtureTensor` is a `StochasticTensor` backed by the distribution `Mixture`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MixtureTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.clone(name=None, **dist_args)` {#MixtureTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.distribution` {#MixtureTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.dtype` {#MixtureTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.entropy(name='entropy')` {#MixtureTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.graph` {#MixtureTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.input_dict` {#MixtureTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.loss(final_loss, name='Loss')` {#MixtureTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.mean(name='mean')` {#MixtureTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.name` {#MixtureTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.value(name='value')` {#MixtureTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MixtureTensor.value_type` {#MixtureTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.md
deleted file mode 100644
index 3f29186182b..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`MultivariateNormalCholeskyTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalCholesky`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalCholeskyTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.clone(name=None, **dist_args)` {#MultivariateNormalCholeskyTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.distribution` {#MultivariateNormalCholeskyTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.dtype` {#MultivariateNormalCholeskyTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.entropy(name='entropy')` {#MultivariateNormalCholeskyTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.graph` {#MultivariateNormalCholeskyTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.input_dict` {#MultivariateNormalCholeskyTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.loss(final_loss, name='Loss')` {#MultivariateNormalCholeskyTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.mean(name='mean')` {#MultivariateNormalCholeskyTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.name` {#MultivariateNormalCholeskyTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.value(name='value')` {#MultivariateNormalCholeskyTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalCholeskyTensor.value_type` {#MultivariateNormalCholeskyTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.md
deleted file mode 100644
index 60976c962bb..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`NormalWithSoftplusSigmaTensor` is a `StochasticTensor` backed by the distribution `NormalWithSoftplusSigma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#NormalWithSoftplusSigmaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.clone(name=None, **dist_args)` {#NormalWithSoftplusSigmaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.distribution` {#NormalWithSoftplusSigmaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.dtype` {#NormalWithSoftplusSigmaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.entropy(name='entropy')` {#NormalWithSoftplusSigmaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.graph` {#NormalWithSoftplusSigmaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.input_dict` {#NormalWithSoftplusSigmaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.loss(final_loss, name='Loss')` {#NormalWithSoftplusSigmaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.mean(name='mean')` {#NormalWithSoftplusSigmaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.name` {#NormalWithSoftplusSigmaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.value(name='value')` {#NormalWithSoftplusSigmaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalWithSoftplusSigmaTensor.value_type` {#NormalWithSoftplusSigmaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.fake_quant_with_min_max_vars.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.fake_quant_with_min_max_vars.md
new file mode 100644
index 00000000000..d7815f04146
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.fake_quant_with_min_max_vars.md
@@ -0,0 +1,25 @@
+### `tf.fake_quant_with_min_max_vars(inputs, min, max, name=None)` {#fake_quant_with_min_max_vars}
+
+Fake-quantize the 'inputs' tensor of type float and shape `[b, h, w, d]` via
+global float scalars `min` and `max` to 'outputs' tensor of same shape as
+`inputs`.
+
+[min; max] is the clamping range for the 'inputs' data.  Op divides this range
+into 255 steps (total of 256 values), then replaces each 'inputs' value with the
+closest of the quantized step values.
+
+This operation has a gradient and thus allows for training `min` and `max` values.
+
+##### Args:
+
+
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+*  <b>`min`</b>: A `Tensor` of type `float32`.
+*  <b>`max`</b>: A `Tensor` of type `float32`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
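+
+As a rough NumPy sketch of the quantization described above (an illustration,
+not the TensorFlow kernel), assuming `min < max`:
+
+```python
+import numpy as np
+
+def fake_quant(inputs, min_val, max_val, steps=255):
+  # Clamp to [min, max], then snap each value to the nearest of the
+  # 256 evenly spaced quantized step values.
+  scale = (max_val - min_val) / steps
+  clamped = np.clip(inputs, min_val, max_val)
+  return np.round((clamped - min_val) / scale) * scale + min_val
+```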
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.fake_quant_with_min_max_vars_per_channel.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.fake_quant_with_min_max_vars_per_channel.md
new file mode 100644
index 00000000000..bc39cf9570a
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.fake_quant_with_min_max_vars_per_channel.md
@@ -0,0 +1,25 @@
+### `tf.fake_quant_with_min_max_vars_per_channel(inputs, min, max, name=None)` {#fake_quant_with_min_max_vars_per_channel}
+
+Fake-quantize the 'inputs' tensor of type float and one of the shapes `[d]`,
+`[b, d]`, or `[b, h, w, d]` via per-channel floats `min` and `max` of shape
+`[d]` to 'outputs' tensor of same shape as `inputs`.
+
+[min; max] is the clamping range for the 'inputs' data in the corresponding
+depth channel.  Op divides this range into 255 steps (total of 256 values), then
+replaces each 'inputs' value with the closest of the quantized step values.
+
+This operation has a gradient and thus allows for training `min` and `max` values.
+
+##### Args:
+
+
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+*  <b>`min`</b>: A `Tensor` of type `float32`.
+*  <b>`max`</b>: A `Tensor` of type `float32`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.global_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.global_variables.md
new file mode 100644
index 00000000000..1939f422248
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.global_variables.md
@@ -0,0 +1,17 @@
+### `tf.global_variables()` {#global_variables}
+
+Returns global variables.
+
+Global variables are variables that are shared across machines in a
+distributed environment. The `Variable()` constructor or `get_variable()`
+automatically adds new variables to the graph collection
+`GraphKeys.GLOBAL_VARIABLES`.
+This convenience function returns the contents of that collection.
+
+An alternative to global variables is local variables. See
+[`tf.local_variables()`](../../api_docs/python/state_ops.md#local_variables)
+
+##### Returns:
+
+  A list of `Variable` objects.
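+
+For example (a minimal sketch), both construction paths populate the
+collection that this function reads:
+
+```python
+import tensorflow as tf
+
+a = tf.Variable(1.0, name='a')       # Variable() adds to GraphKeys.GLOBAL_VARIABLES
+b = tf.get_variable('b', shape=[2])  # get_variable() does as well
+assert a in tf.global_variables()
+assert b in tf.global_variables()
+```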
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.local_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.local_variables.md
index b3612c7cbf3..26b4d127af5 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.local_variables.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.local_variables.md
@@ -1,8 +1,19 @@
 ### `tf.local_variables()` {#local_variables}
 
-Returns all variables created with collection=[LOCAL_VARIABLES].
+Returns local variables.
+
+Local variables are per-process variables, usually not saved or restored to
+a checkpoint, and used for temporary or intermediate values.
+For example, they can be used as counters for metrics computation or the
+number of epochs this machine has read data.
+`local_variable()` automatically adds a new variable to
+`GraphKeys.LOCAL_VARIABLES`.
+This convenience function returns the contents of that collection.
+
+An alternative to local variables is global variables. See
+[`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables)
 
 ##### Returns:
 
-  A list of local Variable objects.
+  A list of local `Variable` objects.
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scatter_nd_add.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scatter_nd_add.md
new file mode 100644
index 00000000000..59eb4dcb020
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scatter_nd_add.md
@@ -0,0 +1,52 @@
+### `tf.scatter_nd_add(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_add}
+
+Applies sparse addition between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    add = tf.scatter_nd_add(ref, indices, updates)
+    with tf.Session() as sess:
+      sess.run(tf.global_variables_initializer())
+      print(sess.run(add))
+
+The resulting update to ref would look like this:
+
+    [1, 13, 3, 14, 14, 6, 7, 20]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    Should be from a `Variable` node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A tensor of indices into `ref`.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A tensor of updated values to add to `ref`.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    If `True`, the addition will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scatter_nd_div.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scatter_nd_div.md
new file mode 100644
index 00000000000..803dcbdb820
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scatter_nd_div.md
@@ -0,0 +1,52 @@
+### `tf.scatter_nd_div(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_div}
+
+Applies sparse division between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to divide a rank-1 tensor with 8 elements by 4 scattered elements. In Python, that division would look like this:
+
+    ref = tf.Variable([10, 20, 30, 40, 50, 60, 70, 80])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([2, 3, 4, 5])
+    div = tf.scatter_nd_div(ref, indices, updates)
+    with tf.Session() as sess:
+      sess.run(tf.global_variables_initializer())
+      print(sess.run(div))
+
+The resulting update to ref would look like this:
+
+    [10, 5, 30, 13, 25, 60, 70, 16]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    Should be from a `Variable` node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A tensor of indices into `ref`.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A tensor of values that `ref` is divided by.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    If `True`, the division will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.svd.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.svd.md
index a11df39a136..b985bd7e581 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.svd.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.svd.md
@@ -1,4 +1,4 @@
-### `tf.svd(tensor, compute_uv=True, full_matrices=False, name=None)` {#svd}
+### `tf.svd(tensor, full_matrices=False, compute_uv=True, name=None)` {#svd}
 
 Computes the singular value decompositions of one or more matrices.
 
@@ -20,12 +20,12 @@ s = svd(a, compute_uv=False)
 
 *  <b>`matrix`</b>: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
     `N`.
-*  <b>`compute_uv`</b>: If `True` then left and right singular vectors will be
-    computed and returned in `u` and `v`, respectively. Otherwise, only the
-    singular values will be computed, which can be significantly faster.
 *  <b>`full_matrices`</b>: If true, compute full-sized `u` and `v`. If false
     (the default), compute only the leading `P` singular vectors.
     Ignored if `compute_uv` is `False`.
+*  <b>`compute_uv`</b>: If `True` then left and right singular vectors will be
+    computed and returned in `u` and `v`, respectively. Otherwise, only the
+    singular values will be computed, which can be significantly faster.
 *  <b>`name`</b>: string, optional name of the operation.
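+
+Because the argument order changed, callers that passed these flags
+positionally should switch to keyword arguments (a sketch):
+
+    s, u, v = tf.svd(a, full_matrices=False, compute_uv=True)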
 
 ##### Returns:
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.md
deleted file mode 100644
index df77e8c03a6..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`BinomialTensor` is a `StochasticTensor` backed by the distribution `Binomial`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BinomialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.clone(name=None, **dist_args)` {#BinomialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.distribution` {#BinomialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.dtype` {#BinomialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.entropy(name='entropy')` {#BinomialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.graph` {#BinomialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.input_dict` {#BinomialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.loss(final_loss, name='Loss')` {#BinomialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.mean(name='mean')` {#BinomialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.name` {#BinomialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.value(name='value')` {#BinomialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BinomialTensor.value_type` {#BinomialTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.md
deleted file mode 100644
index 233ece8e1fe..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`DirichletMultinomialTensor` is a `StochasticTensor` backed by the distribution `DirichletMultinomial`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#DirichletMultinomialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.clone(name=None, **dist_args)` {#DirichletMultinomialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.distribution` {#DirichletMultinomialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.dtype` {#DirichletMultinomialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.entropy(name='entropy')` {#DirichletMultinomialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.graph` {#DirichletMultinomialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.input_dict` {#DirichletMultinomialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.loss(final_loss, name='Loss')` {#DirichletMultinomialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.mean(name='mean')` {#DirichletMultinomialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.name` {#DirichletMultinomialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.value(name='value')` {#DirichletMultinomialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletMultinomialTensor.value_type` {#DirichletMultinomialTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.md
deleted file mode 100644
index bbc0f007b4e..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`InverseGammaTensor` is a `StochasticTensor` backed by the distribution `InverseGamma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#InverseGammaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.clone(name=None, **dist_args)` {#InverseGammaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.distribution` {#InverseGammaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.dtype` {#InverseGammaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.entropy(name='entropy')` {#InverseGammaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.graph` {#InverseGammaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.input_dict` {#InverseGammaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.loss(final_loss, name='Loss')` {#InverseGammaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.mean(name='mean')` {#InverseGammaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.name` {#InverseGammaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.value(name='value')` {#InverseGammaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaTensor.value_type` {#InverseGammaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.md
index d92f209f877..71d1af5add2 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.md
@@ -1,14 +1,14 @@
 StochasticTensor is a BaseStochasticTensor backed by a distribution.
 - - -
 
-#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.__init__(dist_cls, name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#StochasticTensor.__init__}
+#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.__init__(dist, name='StochasticTensor', dist_value_type=None, loss_fn=score_function)` {#StochasticTensor.__init__}
 
 Construct a `StochasticTensor`.
 
-`StochasticTensor` will instantiate a distribution from `dist_cls` and
-`dist_args` and its `value` method will return the same value each time
-it is called. What `value` is returned is controlled by the
-`dist_value_type` (defaults to `SampleAndReshapeValue`).
+`StochasticTensor` is backed by the `dist` distribution and its `value`
+method will return the same value each time it is called. What `value` is
+returned is controlled by the `dist_value_type` (defaults to
+`SampleAndReshapeValue`).
 
 Some distributions' sample functions are not differentiable (e.g. a sample
 from a discrete distribution like a Bernoulli) and so to differentiate
@@ -26,34 +26,26 @@ reparameterized distributions; it will also return None if the value type is
 ##### Args:
 
 
-*  <b>`dist_cls`</b>: a `Distribution` class.
+*  <b>`dist`</b>: an instance of `Distribution`.
 *  <b>`name`</b>: a name for this `StochasticTensor` and its ops.
 *  <b>`dist_value_type`</b>: a `_StochasticValueType`, which will determine what the
       `value` of this `StochasticTensor` will be. If not provided, the
       value type set with the `value_type` context manager will be used.
-*  <b>`loss_fn`</b>: callable that takes `(st, st.value(), influenced_loss)`, where
+*  <b>`loss_fn`</b>: callable that takes
+      `(st, st.value(), influenced_loss)`, where
       `st` is this `StochasticTensor`, and returns a `Tensor` loss. By
       default, `loss_fn` is the `score_function`, or more precisely, the
       integral of the score function, such that when the gradient is taken,
       the score function results. See the `stochastic_gradient_estimators`
       module for additional loss functions and baselines.
-*  <b>`**dist_args`</b>: keyword arguments to be passed through to `dist_cls` on
-      construction.
 
 ##### Raises:
 
 
-*  <b>`TypeError`</b>: if `dist_cls` is not a `Distribution`.
+*  <b>`TypeError`</b>: if `dist` is not an instance of `Distribution`.
 *  <b>`TypeError`</b>: if `loss_fn` is not `callable`.
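+
+With this change, code that previously passed a distribution class plus
+keyword arguments now passes a constructed `Distribution` instance. A sketch,
+assuming `st` aliases `tf.contrib.bayesflow.stochastic_tensor` and
+`distributions` aliases `tf.contrib.distributions`:
+
+    # Before: s = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
+    s = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma))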
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.clone(name=None, **dist_args)` {#StochasticTensor.clone}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.distribution` {#StochasticTensor.distribution}
@@ -82,13 +74,6 @@ reparameterized distributions; it will also return None if the value type is
 
 
 
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.input_dict` {#StochasticTensor.input_dict}
-
-
-
-
 - - -
 
 #### `tf.contrib.bayesflow.stochastic_tensor.StochasticTensor.loss(final_loss, name='Loss')` {#StochasticTensor.loss}
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.md
deleted file mode 100644
index 855391fdcfa..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`beta_aaTensor` is a `StochasticTensor` backed by the distribution `beta_aa`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#beta_aaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.clone(name=None, **dist_args)` {#beta_aaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.distribution` {#beta_aaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.dtype` {#beta_aaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.entropy(name='entropy')` {#beta_aaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.graph` {#beta_aaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.input_dict` {#beta_aaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.loss(final_loss, name='Loss')` {#beta_aaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.mean(name='mean')` {#beta_aaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.name` {#beta_aaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.value(name='value')` {#beta_aaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_aaTensor.value_type` {#beta_aaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.initialize_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.initialize_variables.md
index 8941ab48535..3ab51c4b3c6 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.initialize_variables.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.initialize_variables.md
@@ -1,24 +1,8 @@
-### `tf.initialize_variables(var_list, name='init')` {#initialize_variables}
+### `tf.initialize_variables(*args, **kwargs)` {#initialize_variables}
 
-Returns an Op that initializes a list of variables.
+See `tf.variables_initializer`. (deprecated)
 
-After you launch the graph in a session, you can run the returned Op to
-initialize all the variables in `var_list`. This Op runs all the
-initializers of the variables in `var_list` in parallel.
-
-Calling `initialize_variables()` is equivalent to passing the list of
-initializers to `Group()`.
-
-If `var_list` is empty, however, the function still returns an Op that can
-be run. That Op just has no effect.
-
-##### Args:
-
-
-*  <b>`var_list`</b>: List of `Variable` objects to initialize.
-*  <b>`name`</b>: Optional name for the returned operation.
-
-##### Returns:
-
-  An Op that run the initializers of all the specified variables.
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Use `tf.variables_initializer` instead.
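+
+A minimal migration sketch (assuming two variables `v` and `w`):
+
+    v = tf.Variable([1, 2])
+    w = tf.Variable([3, 4])
+    init_op = tf.variables_initializer([v, w])  # was: tf.initialize_variables([v, w])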
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.md
deleted file mode 100644
index b4e5a7b8d88..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`BernoulliWithSigmoidPTensor` is a `StochasticTensor` backed by the distribution `BernoulliWithSigmoidP`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BernoulliWithSigmoidPTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.clone(name=None, **dist_args)` {#BernoulliWithSigmoidPTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.distribution` {#BernoulliWithSigmoidPTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.dtype` {#BernoulliWithSigmoidPTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.entropy(name='entropy')` {#BernoulliWithSigmoidPTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.graph` {#BernoulliWithSigmoidPTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.input_dict` {#BernoulliWithSigmoidPTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.loss(final_loss, name='Loss')` {#BernoulliWithSigmoidPTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.mean(name='mean')` {#BernoulliWithSigmoidPTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.name` {#BernoulliWithSigmoidPTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.value(name='value')` {#BernoulliWithSigmoidPTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliWithSigmoidPTensor.value_type` {#BernoulliWithSigmoidPTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.md
deleted file mode 100644
index 0e3bf1e8af4..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`Chi2Tensor` is a `StochasticTensor` backed by the distribution `Chi2`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#Chi2Tensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.clone(name=None, **dist_args)` {#Chi2Tensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.distribution` {#Chi2Tensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.dtype` {#Chi2Tensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.entropy(name='entropy')` {#Chi2Tensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.graph` {#Chi2Tensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.input_dict` {#Chi2Tensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.loss(final_loss, name='Loss')` {#Chi2Tensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.mean(name='mean')` {#Chi2Tensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.name` {#Chi2Tensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.value(name='value')` {#Chi2Tensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2Tensor.value_type` {#Chi2Tensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.md
deleted file mode 100644
index 6647401eca6..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`LaplaceTensor` is a `StochasticTensor` backed by the distribution `Laplace`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#LaplaceTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.clone(name=None, **dist_args)` {#LaplaceTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.distribution` {#LaplaceTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.dtype` {#LaplaceTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.entropy(name='entropy')` {#LaplaceTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.graph` {#LaplaceTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.input_dict` {#LaplaceTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.loss(final_loss, name='Loss')` {#LaplaceTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.mean(name='mean')` {#LaplaceTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.name` {#LaplaceTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.value(name='value')` {#LaplaceTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.LaplaceTensor.value_type` {#LaplaceTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.scatter_nd.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.scatter_nd.md
new file mode 100644
index 00000000000..c169dab876f
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.scatter_nd.md
@@ -0,0 +1,83 @@
+### `tf.scatter_nd(indices, updates, shape, name=None)` {#scatter_nd}
+
+Creates a new tensor by applying sparse `updates` to individual values or slices within a zero tensor of the given `shape`, according to `indices`.
+
+This operator is the inverse of the [tf.gather_nd](#gather_nd) operator, which extracts values or slices from a given tensor.
+
+TODO(simister): Add a link to Variable.__getitem__ documentation on slice syntax.
+
+`shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into the output tensor of shape `shape`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `shape`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
+```
+
+The simplest form of scatter is to insert individual elements in a tensor by index. For example, say we want to insert 4 scattered elements in a rank-1 tensor with 8 elements.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/ScatterNd1.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    shape = tf.constant([8])
+    scatter = tf.scatter_nd(indices, updates, shape)
+    with tf.Session() as sess:
+      print sess.run(scatter)
+
+The resulting tensor would look like this:
+
+    [0, 11, 0, 10, 9, 0, 0, 12]
+
+We can also insert entire slices of a higher rank tensor all at once. For example, we can insert two slices into the first dimension of a rank-3 tensor with two matrices of new values.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="../../images/ScatterNd2.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+    indices = tf.constant([[0], [2]])
+    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+                            [7, 7, 7, 7], [8, 8, 8, 8]],
+                           [[5, 5, 5, 5], [6, 6, 6, 6],
+                            [7, 7, 7, 7], [8, 8, 8, 8]]])
+    shape = tf.constant([4, 4, 4])
+    scatter = tf.scatter_nd(indices, updates, shape)
+    with tf.Session() as sess:
+      print sess.run(scatter)
+
+The resulting tensor would look like this:
+
+    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+
+##### Args:
+
+
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into the output tensor.
+*  <b>`updates`</b>: A `Tensor`.
+    A Tensor. A tensor of updated values to scatter into the output tensor.
+*  <b>`shape`</b>: A `Tensor`. Must have the same type as `indices`.
+    A vector. The shape of the resulting tensor.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor`. Has the same type as `updates`.
+  A new tensor with the given shape and updates applied according to the indices.
+
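+As a rough illustration of the inverse relationship with `tf.gather_nd` noted
+above, gathering with the same `indices` recovers `updates` (when the indices
+do not repeat):
+
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    scatter = tf.scatter_nd(indices, updates, tf.constant([8]))
+    recovered = tf.gather_nd(scatter, indices)
+    with tf.Session() as sess:
+      print(sess.run(recovered))  # [ 9 10 11 12]
+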
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.train.Saver.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.train.Saver.md
index e5be71361a5..cacfed94d2e 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.train.Saver.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.train.Saver.md
@@ -72,7 +72,7 @@ protocol buffer file in the call to `save()`.
 
 - - -
 
-#### `tf.train.Saver.__init__(var_list=None, reshape=False, sharded=False, max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None, restore_sequentially=False, saver_def=None, builder=None, defer_build=False, allow_empty=False, write_version=1, pad_step_number=False)` {#Saver.__init__}
+#### `tf.train.Saver.__init__(var_list=None, reshape=False, sharded=False, max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None, restore_sequentially=False, saver_def=None, builder=None, defer_build=False, allow_empty=False, write_version=2, pad_step_number=False)` {#Saver.__init__}
 
 Creates a `Saver`.
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.md
deleted file mode 100644
index cf1bd14d49a..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`ExponentialWithSoftplusLamTensor` is a `StochasticTensor` backed by the distribution `ExponentialWithSoftplusLam`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#ExponentialWithSoftplusLamTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.clone(name=None, **dist_args)` {#ExponentialWithSoftplusLamTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.distribution` {#ExponentialWithSoftplusLamTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.dtype` {#ExponentialWithSoftplusLamTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.entropy(name='entropy')` {#ExponentialWithSoftplusLamTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.graph` {#ExponentialWithSoftplusLamTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.input_dict` {#ExponentialWithSoftplusLamTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.loss(final_loss, name='Loss')` {#ExponentialWithSoftplusLamTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.mean(name='mean')` {#ExponentialWithSoftplusLamTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.name` {#ExponentialWithSoftplusLamTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.value(name='value')` {#ExponentialWithSoftplusLamTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialWithSoftplusLamTensor.value_type` {#ExponentialWithSoftplusLamTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.GammaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.GammaTensor.md
deleted file mode 100644
index a6a8a6d88ab..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.GammaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`GammaTensor` is a `StochasticTensor` backed by the distribution `Gamma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#GammaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.clone(name=None, **dist_args)` {#GammaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.distribution` {#GammaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.dtype` {#GammaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.entropy(name='entropy')` {#GammaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.graph` {#GammaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.input_dict` {#GammaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.loss(final_loss, name='Loss')` {#GammaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.mean(name='mean')` {#GammaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.name` {#GammaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.value(name='value')` {#GammaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.GammaTensor.value_type` {#GammaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.md
deleted file mode 100644
index 03f39cd501a..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`InverseGammaWithSoftplusAlphaBetaTensor` is a `StochasticTensor` backed by the distribution `InverseGammaWithSoftplusAlphaBeta`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#InverseGammaWithSoftplusAlphaBetaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.clone(name=None, **dist_args)` {#InverseGammaWithSoftplusAlphaBetaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.distribution` {#InverseGammaWithSoftplusAlphaBetaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.dtype` {#InverseGammaWithSoftplusAlphaBetaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.entropy(name='entropy')` {#InverseGammaWithSoftplusAlphaBetaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.graph` {#InverseGammaWithSoftplusAlphaBetaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.input_dict` {#InverseGammaWithSoftplusAlphaBetaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.loss(final_loss, name='Loss')` {#InverseGammaWithSoftplusAlphaBetaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.mean(name='mean')` {#InverseGammaWithSoftplusAlphaBetaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.name` {#InverseGammaWithSoftplusAlphaBetaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.value(name='value')` {#InverseGammaWithSoftplusAlphaBetaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.InverseGammaWithSoftplusAlphaBetaTensor.value_type` {#InverseGammaWithSoftplusAlphaBetaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.md
deleted file mode 100644
index 7ee10de96a0..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`MultivariateNormalFullTensor` is a `StochasticTensor` backed by the distribution `MultivariateNormalFull`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultivariateNormalFullTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.clone(name=None, **dist_args)` {#MultivariateNormalFullTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.distribution` {#MultivariateNormalFullTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.dtype` {#MultivariateNormalFullTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.entropy(name='entropy')` {#MultivariateNormalFullTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.graph` {#MultivariateNormalFullTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.input_dict` {#MultivariateNormalFullTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.loss(final_loss, name='Loss')` {#MultivariateNormalFullTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.mean(name='mean')` {#MultivariateNormalFullTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.name` {#MultivariateNormalFullTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.value(name='value')` {#MultivariateNormalFullTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultivariateNormalFullTensor.value_type` {#MultivariateNormalFullTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.NormalTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.NormalTensor.md
deleted file mode 100644
index 7140c36d319..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.NormalTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`NormalTensor` is a `StochasticTensor` backed by the distribution `Normal`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#NormalTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.clone(name=None, **dist_args)` {#NormalTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.distribution` {#NormalTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.dtype` {#NormalTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.entropy(name='entropy')` {#NormalTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.graph` {#NormalTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.input_dict` {#NormalTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.loss(final_loss, name='Loss')` {#NormalTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.mean(name='mean')` {#NormalTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.name` {#NormalTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.value(name='value')` {#NormalTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.NormalTensor.value_type` {#NormalTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.md
deleted file mode 100644
index 30b3c95e02a..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`StudentTTensor` is a `StochasticTensor` backed by the distribution `StudentT`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#StudentTTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.clone(name=None, **dist_args)` {#StudentTTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.distribution` {#StudentTTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.dtype` {#StudentTTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.entropy(name='entropy')` {#StudentTTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.graph` {#StudentTTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.input_dict` {#StudentTTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.loss(final_loss, name='Loss')` {#StudentTTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.mean(name='mean')` {#StudentTTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.name` {#StudentTTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.value(name='value')` {#StudentTTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTTensor.value_type` {#StudentTTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.md
deleted file mode 100644
index 8bdef9250e0..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`beta_bbTensor` is a `StochasticTensor` backed by the distribution `beta_bb`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#beta_bbTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.clone(name=None, **dist_args)` {#beta_bbTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.distribution` {#beta_bbTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.dtype` {#beta_bbTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.entropy(name='entropy')` {#beta_bbTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.graph` {#beta_bbTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.input_dict` {#beta_bbTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.loss(final_loss, name='Loss')` {#beta_bbTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.mean(name='mean')` {#beta_bbTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.name` {#beta_bbTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.value(name='value')` {#beta_bbTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.beta_bbTensor.value_type` {#beta_bbTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.fake_quant_with_min_max_args.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.fake_quant_with_min_max_args.md
new file mode 100644
index 00000000000..fcad8cb5001
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.fake_quant_with_min_max_args.md
@@ -0,0 +1,22 @@
+### `tf.fake_quant_with_min_max_args(inputs, min=None, max=None, name=None)` {#fake_quant_with_min_max_args}
+
+Fake-quantizes the 'inputs' tensor of type float into an 'outputs' tensor of the same type.
+
+Attributes `[min, max]` define the clamping range for the 'inputs' data. The
+op divides this range into 255 steps (a total of 256 values), then replaces
+each 'inputs' value with the closest of the quantized step values.
+
+Quantization is called fake since the output is still in floating point.
+
+##### Args:
+
+
+*  <b>`inputs`</b>: A `Tensor` of type `float32`.
+*  <b>`min`</b>: An optional `float`. Defaults to `-6`.
+*  <b>`max`</b>: An optional `float`. Defaults to `6`.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A `Tensor` of type `float32`.
+
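+As a rough worked example with the defaults `min=-6`, `max=6`: the step size
+is `(6 - (-6)) / 255 ≈ 0.0471`, so an input of `1.0` maps to the nearest step,
+`round(1.0 / 0.0471) * 0.0471 ≈ 0.988` (the op may nudge the range slightly so
+that zero is exactly representable):
+
+    x = tf.constant([1.0])
+    y = tf.fake_quant_with_min_max_args(x)  # min=-6, max=6 by default
+    with tf.Session() as sess:
+      print(sess.run(y))  # approximately [0.988]
+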
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.assert_variables_initialized.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.assert_variables_initialized.md
index ef61848aa87..ac8604579de 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.assert_variables_initialized.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.assert_variables_initialized.md
@@ -16,7 +16,7 @@ logged by the C++ runtime. This is expected.
 
 
 *  <b>`var_list`</b>: List of `Variable` objects to check. Defaults to the
-    value of `all_variables().`
+    value of `global_variables().`
 
 ##### Returns:
 
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.md
deleted file mode 100644
index 68ff0573cfd..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`StudentTWithAbsDfSoftplusSigmaTensor` is a `StochasticTensor` backed by the distribution `StudentTWithAbsDfSoftplusSigma`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#StudentTWithAbsDfSoftplusSigmaTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.clone(name=None, **dist_args)` {#StudentTWithAbsDfSoftplusSigmaTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.distribution` {#StudentTWithAbsDfSoftplusSigmaTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.dtype` {#StudentTWithAbsDfSoftplusSigmaTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.entropy(name='entropy')` {#StudentTWithAbsDfSoftplusSigmaTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.graph` {#StudentTWithAbsDfSoftplusSigmaTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.input_dict` {#StudentTWithAbsDfSoftplusSigmaTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.loss(final_loss, name='Loss')` {#StudentTWithAbsDfSoftplusSigmaTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.mean(name='mean')` {#StudentTWithAbsDfSoftplusSigmaTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.name` {#StudentTWithAbsDfSoftplusSigmaTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.value(name='value')` {#StudentTWithAbsDfSoftplusSigmaTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.StudentTWithAbsDfSoftplusSigmaTensor.value_type` {#StudentTWithAbsDfSoftplusSigmaTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.scatter_nd_update.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.scatter_nd_update.md
new file mode 100644
index 00000000000..ab1e83ae145
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.scatter_nd_update.md
@@ -0,0 +1,51 @@
+### `tf.scatter_nd_update(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_update}
+
+Applies sparse `updates` to individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to update 4 scattered elements in a rank-1 tensor with 8 elements. In Python, that update would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    update = tf.scatter_nd_update(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(update)
+
+The resulting update to ref would look like this:
+
+    [1, 11, 3, 10, 9, 6, 7, 12]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. A mutable Tensor. Should be from a Variable node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A Tensor. Must have the same type as ref. A tensor of updated values to store in ref.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `True`.
+    An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
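+Slices can be updated the same way. A small sketch where each index selects a
+whole row of a rank-2 variable (`K = 1 < P = 2`):
+
+    ref = tf.Variable([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
+    indices = tf.constant([[0], [2]])
+    updates = tf.constant([[9, 9, 9], [7, 7, 7]])
+    update = tf.scatter_nd_update(ref, indices, updates)
+    with tf.Session() as sess:
+      sess.run(tf.global_variables_initializer())
+      print(sess.run(update))  # [[9 9 9] [2 2 2] [7 7 7]]
+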
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.GraphKeys.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.GraphKeys.md
index 1d656f40180..965097f2b00 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.GraphKeys.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.GraphKeys.md
@@ -9,10 +9,18 @@ variables.
 
 The following standard keys are defined:
 
-* `VARIABLES`: the `Variable` objects that comprise a model, and
-  must be saved and restored together. See
-  [`tf.all_variables()`](../../api_docs/python/state_ops.md#all_variables)
+* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
+  across distributed environments (model variables are a subset of these). See
+  [`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables)
   for more details.
+  Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
+  and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
+* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
+  machine. Usually used for temporary variables, like counters.
+  Note: use `tf.contrib.framework.local_variable` to add to this collection.
+* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
+  model for inference (feed forward). Note: use
+  `tf.contrib.framework.model_variable` to add to this collection.
 * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
   be trained by an optimizer. See
   [`tf.trainable_variables()`](../../api_docs/python/state_ops.md#trainable_variables)
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Variable.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Variable.md
index 44fac742c80..777d56be50d 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Variable.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Variable.md
@@ -51,16 +51,16 @@ with tf.Session() as sess:
 ```
 
 The most common initialization pattern is to use the convenience function
-`initialize_all_variables()` to add an Op to the graph that initializes
+`global_variables_initializer()` to add an Op to the graph that initializes
 all the variables. You then run that Op after launching the graph.
 
 ```python
-# Add an Op to initialize all variables.
-init_op = tf.initialize_all_variables()
+# Add an Op to initialize global variables.
+init_op = tf.global_variables_initializer()
 
 # Launch the graph in a session.
 with tf.Session() as sess:
-    # Run the Op that initializes all variables.
+    # Run the Op that initializes global variables.
     sess.run(init_op)
     # ...you can now run any Op that uses variable values...
 ```
@@ -71,8 +71,8 @@ variables are initialized in the right order.
 
 All variables are automatically collected in the graph where they are
 created. By default, the constructor adds the new variable to the graph
-collection `GraphKeys.VARIABLES`. The convenience function
-`all_variables()` returns the contents of that collection.
+collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
+`global_variables()` returns the contents of that collection.
 
 When building a machine learning model it is often convenient to distinguish
 between variables holding the trainable model parameters and other variables
@@ -94,7 +94,7 @@ Creating a variable.
 Creates a new variable with value `initial_value`.
 
 The new variable is added to the graph collections listed in `collections`,
-which defaults to `[GraphKeys.VARIABLES]`.
+which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
 
 If `trainable` is `True` the variable is also added to the graph collection
 `GraphKeys.TRAINABLE_VARIABLES`.
@@ -115,7 +115,7 @@ variable to its initial value.
     collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
     the default list of variables to use by the `Optimizer` classes.
 *  <b>`collections`</b>: List of graph collections keys. The new variable is added to
-    these collections. Defaults to `[GraphKeys.VARIABLES]`.
+    these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
 *  <b>`validate_shape`</b>: If `False`, allows the variable to be initialized with a
     value of unknown shape. If `True`, the default, the shape of
     `initial_value` must be known.
@@ -301,7 +301,7 @@ more information on launching a graph and on sessions.
 
 ```python
 v = tf.Variable([1, 2])
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 with tf.Session() as sess:
     sess.run(init)
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.md
deleted file mode 100644
index 8386eea649e..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`ExponentialTensor` is a `StochasticTensor` backed by the distribution `Exponential`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#ExponentialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.clone(name=None, **dist_args)` {#ExponentialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.distribution` {#ExponentialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.dtype` {#ExponentialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.entropy(name='entropy')` {#ExponentialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.graph` {#ExponentialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.input_dict` {#ExponentialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.loss(final_loss, name='Loss')` {#ExponentialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.mean(name='mean')` {#ExponentialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.name` {#ExponentialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.value(name='value')` {#ExponentialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.ExponentialTensor.value_type` {#ExponentialTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.md
deleted file mode 100644
index 2f00dfa7d30..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`MultinomialTensor` is a `StochasticTensor` backed by the distribution `Multinomial`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#MultinomialTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.clone(name=None, **dist_args)` {#MultinomialTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.distribution` {#MultinomialTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.dtype` {#MultinomialTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.entropy(name='entropy')` {#MultinomialTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.graph` {#MultinomialTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.input_dict` {#MultinomialTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.loss(final_loss, name='Loss')` {#MultinomialTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.mean(name='mean')` {#MultinomialTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.name` {#MultinomialTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.value(name='value')` {#MultinomialTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.MultinomialTensor.value_type` {#MultinomialTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.md
deleted file mode 100644
index f35b05859c1..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`QuantizedDistributionTensor` is a `StochasticTensor` backed by the distribution `QuantizedDistribution`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#QuantizedDistributionTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.clone(name=None, **dist_args)` {#QuantizedDistributionTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.distribution` {#QuantizedDistributionTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.dtype` {#QuantizedDistributionTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.entropy(name='entropy')` {#QuantizedDistributionTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.graph` {#QuantizedDistributionTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.input_dict` {#QuantizedDistributionTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.loss(final_loss, name='Loss')` {#QuantizedDistributionTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.mean(name='mean')` {#QuantizedDistributionTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.name` {#QuantizedDistributionTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.value(name='value')` {#QuantizedDistributionTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.QuantizedDistributionTensor.value_type` {#QuantizedDistributionTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.scatter_nd_mul.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.scatter_nd_mul.md
new file mode 100644
index 00000000000..734dfa8d1b7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.scatter_nd_mul.md
@@ -0,0 +1,52 @@
+### `tf.scatter_nd_mul(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_mul}
+
+Applies sparse multiplication between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to multiply 4 scattered elements of a rank-1 tensor with 8 elements. In Python, that multiplication would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    mul = tf.scatter_nd_mul(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(mul)
+
+The resulting update to ref would look like this:
+
+    [1, 22, 3, 40, 45, 6, 7, 96]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    A mutable Tensor. Should be from a Variable node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A Tensor. Must have the same type as ref. A tensor of updated values to multiply with ref.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.md
deleted file mode 100644
index 328f8e79833..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`BernoulliTensor` is a `StochasticTensor` backed by the distribution `Bernoulli`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#BernoulliTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.clone(name=None, **dist_args)` {#BernoulliTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.distribution` {#BernoulliTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.dtype` {#BernoulliTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.entropy(name='entropy')` {#BernoulliTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.graph` {#BernoulliTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.input_dict` {#BernoulliTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.loss(final_loss, name='Loss')` {#BernoulliTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.mean(name='mean')` {#BernoulliTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.name` {#BernoulliTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.value(name='value')` {#BernoulliTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.BernoulliTensor.value_type` {#BernoulliTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.md
deleted file mode 100644
index 2e1217b686f..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`CategoricalTensor` is a `StochasticTensor` backed by the distribution `Categorical`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#CategoricalTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.clone(name=None, **dist_args)` {#CategoricalTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.distribution` {#CategoricalTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.dtype` {#CategoricalTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.entropy(name='entropy')` {#CategoricalTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.graph` {#CategoricalTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.input_dict` {#CategoricalTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.loss(final_loss, name='Loss')` {#CategoricalTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.mean(name='mean')` {#CategoricalTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.name` {#CategoricalTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.value(name='value')` {#CategoricalTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.CategoricalTensor.value_type` {#CategoricalTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.md
deleted file mode 100644
index bec5b175269..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`Chi2WithAbsDfTensor` is a `StochasticTensor` backed by the distribution `Chi2WithAbsDf`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#Chi2WithAbsDfTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.clone(name=None, **dist_args)` {#Chi2WithAbsDfTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.distribution` {#Chi2WithAbsDfTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.dtype` {#Chi2WithAbsDfTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.entropy(name='entropy')` {#Chi2WithAbsDfTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.graph` {#Chi2WithAbsDfTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.input_dict` {#Chi2WithAbsDfTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.loss(final_loss, name='Loss')` {#Chi2WithAbsDfTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.mean(name='mean')` {#Chi2WithAbsDfTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.name` {#Chi2WithAbsDfTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.value(name='value')` {#Chi2WithAbsDfTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.Chi2WithAbsDfTensor.value_type` {#Chi2WithAbsDfTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.md
deleted file mode 100644
index 0d6792f7538..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`DirichletTensor` is a `StochasticTensor` backed by the distribution `Dirichlet`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#DirichletTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.clone(name=None, **dist_args)` {#DirichletTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.distribution` {#DirichletTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.dtype` {#DirichletTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.entropy(name='entropy')` {#DirichletTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.graph` {#DirichletTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.input_dict` {#DirichletTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.loss(final_loss, name='Loss')` {#DirichletTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.mean(name='mean')` {#DirichletTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.name` {#DirichletTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.value(name='value')` {#DirichletTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.DirichletTensor.value_type` {#DirichletTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.UniformTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.UniformTensor.md
deleted file mode 100644
index a09a7ea3ae6..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.UniformTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`UniformTensor` is a `StochasticTensor` backed by the distribution `Uniform`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#UniformTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.clone(name=None, **dist_args)` {#UniformTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.distribution` {#UniformTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.dtype` {#UniformTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.entropy(name='entropy')` {#UniformTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.graph` {#UniformTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.input_dict` {#UniformTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.loss(final_loss, name='Loss')` {#UniformTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.mean(name='mean')` {#UniformTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.name` {#UniformTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.value(name='value')` {#UniformTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.UniformTensor.value_type` {#UniformTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.md
deleted file mode 100644
index 2284a252691..00000000000
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.md
+++ /dev/null
@@ -1,85 +0,0 @@
-`WishartCholeskyTensor` is a `StochasticTensor` backed by the distribution `WishartCholesky`.
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.__init__(name=None, dist_value_type=None, loss_fn=score_function, **dist_args)` {#WishartCholeskyTensor.__init__}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.clone(name=None, **dist_args)` {#WishartCholeskyTensor.clone}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.distribution` {#WishartCholeskyTensor.distribution}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.dtype` {#WishartCholeskyTensor.dtype}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.entropy(name='entropy')` {#WishartCholeskyTensor.entropy}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.graph` {#WishartCholeskyTensor.graph}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.input_dict` {#WishartCholeskyTensor.input_dict}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.loss(final_loss, name='Loss')` {#WishartCholeskyTensor.loss}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.mean(name='mean')` {#WishartCholeskyTensor.mean}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.name` {#WishartCholeskyTensor.name}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.value(name='value')` {#WishartCholeskyTensor.value}
-
-
-
-
-- - -
-
-#### `tf.contrib.bayesflow.stochastic_tensor.WishartCholeskyTensor.value_type` {#WishartCholeskyTensor.value_type}
-
-
-
-
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.nn.raw_rnn.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.nn.raw_rnn.md
index 9d892d1ecb6..8c0d9bd027b 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.nn.raw_rnn.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.nn.raw_rnn.md
@@ -29,8 +29,8 @@ while not all(finished):
       time=time + 1, cell_output=output, cell_state=cell_state,
       loop_state=loop_state)
   # Emit zeros and copy forward state for minibatch entries that are finished.
-  state = tf.select(finished, state, next_state)
-  emit = tf.select(finished, tf.zeros_like(emit), emit)
+  state = tf.where(finished, state, next_state)
+  emit = tf.where(finished, tf.zeros_like(emit), emit)
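+  # Note: tf.where(cond, x, y) is the element-wise selection op that replaces
+  # the deprecated tf.select: it picks entries of x where cond is True and
+  # entries of y where cond is False.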
   emit_ta = emit_ta.write(time, emit)
   # If any new minibatch entries are marked as finished, mark these.
   finished = tf.logical_or(finished, next_finished)
diff --git a/tensorflow/g3doc/api_docs/python/index.md b/tensorflow/g3doc/api_docs/python/index.md
index 3cac5108281..3e5f2807658 100644
--- a/tensorflow/g3doc/api_docs/python/index.md
+++ b/tensorflow/g3doc/api_docs/python/index.md
@@ -85,6 +85,8 @@
   * [`get_checkpoint_state`](../../api_docs/python/state_ops.md#get_checkpoint_state)
   * [`get_variable`](../../api_docs/python/state_ops.md#get_variable)
   * [`get_variable_scope`](../../api_docs/python/state_ops.md#get_variable_scope)
+  * [`global_variables`](../../api_docs/python/state_ops.md#global_variables)
+  * [`global_variables_initializer`](../../api_docs/python/state_ops.md#global_variables_initializer)
   * [`import_meta_graph`](../../api_docs/python/state_ops.md#import_meta_graph)
   * [`IndexedSlices`](../../api_docs/python/state_ops.md#IndexedSlices)
   * [`initialize_all_tables`](../../api_docs/python/state_ops.md#initialize_all_tables)
@@ -94,6 +96,7 @@
   * [`is_variable_initialized`](../../api_docs/python/state_ops.md#is_variable_initialized)
   * [`latest_checkpoint`](../../api_docs/python/state_ops.md#latest_checkpoint)
   * [`local_variables`](../../api_docs/python/state_ops.md#local_variables)
+  * [`local_variables_initializer`](../../api_docs/python/state_ops.md#local_variables_initializer)
   * [`make_template`](../../api_docs/python/state_ops.md#make_template)
   * [`min_max_variable_partitioner`](../../api_docs/python/state_ops.md#min_max_variable_partitioner)
   * [`model_variables`](../../api_docs/python/state_ops.md#model_variables)
@@ -107,6 +110,11 @@
   * [`scatter_add`](../../api_docs/python/state_ops.md#scatter_add)
   * [`scatter_div`](../../api_docs/python/state_ops.md#scatter_div)
   * [`scatter_mul`](../../api_docs/python/state_ops.md#scatter_mul)
+  * [`scatter_nd_add`](../../api_docs/python/state_ops.md#scatter_nd_add)
+  * [`scatter_nd_div`](../../api_docs/python/state_ops.md#scatter_nd_div)
+  * [`scatter_nd_mul`](../../api_docs/python/state_ops.md#scatter_nd_mul)
+  * [`scatter_nd_sub`](../../api_docs/python/state_ops.md#scatter_nd_sub)
+  * [`scatter_nd_update`](../../api_docs/python/state_ops.md#scatter_nd_update)
   * [`scatter_sub`](../../api_docs/python/state_ops.md#scatter_sub)
   * [`scatter_update`](../../api_docs/python/state_ops.md#scatter_update)
   * [`sparse_mask`](../../api_docs/python/state_ops.md#sparse_mask)
@@ -118,6 +126,7 @@
   * [`variable_axis_size_partitioner`](../../api_docs/python/state_ops.md#variable_axis_size_partitioner)
   * [`variable_op_scope`](../../api_docs/python/state_ops.md#variable_op_scope)
   * [`variable_scope`](../../api_docs/python/state_ops.md#variable_scope)
+  * [`variables_initializer`](../../api_docs/python/state_ops.md#variables_initializer)
   * [`VariableScope`](../../api_docs/python/state_ops.md#VariableScope)
   * [`zeros_initializer`](../../api_docs/python/state_ops.md#zeros_initializer)
 
@@ -134,6 +143,12 @@
   * [`dynamic_stitch`](../../api_docs/python/array_ops.md#dynamic_stitch)
   * [`expand_dims`](../../api_docs/python/array_ops.md#expand_dims)
   * [`extract_image_patches`](../../api_docs/python/array_ops.md#extract_image_patches)
+  * [`fake_quant_with_min_max_args`](../../api_docs/python/array_ops.md#fake_quant_with_min_max_args)
+  * [`fake_quant_with_min_max_args_gradient`](../../api_docs/python/array_ops.md#fake_quant_with_min_max_args_gradient)
+  * [`fake_quant_with_min_max_vars`](../../api_docs/python/array_ops.md#fake_quant_with_min_max_vars)
+  * [`fake_quant_with_min_max_vars_gradient`](../../api_docs/python/array_ops.md#fake_quant_with_min_max_vars_gradient)
+  * [`fake_quant_with_min_max_vars_per_channel`](../../api_docs/python/array_ops.md#fake_quant_with_min_max_vars_per_channel)
+  * [`fake_quant_with_min_max_vars_per_channel_gradient`](../../api_docs/python/array_ops.md#fake_quant_with_min_max_vars_per_channel_gradient)
   * [`gather`](../../api_docs/python/array_ops.md#gather)
   * [`gather_nd`](../../api_docs/python/array_ops.md#gather_nd)
   * [`meshgrid`](../../api_docs/python/array_ops.md#meshgrid)
@@ -148,6 +163,7 @@
   * [`reverse`](../../api_docs/python/array_ops.md#reverse)
   * [`reverse_sequence`](../../api_docs/python/array_ops.md#reverse_sequence)
   * [`saturate_cast`](../../api_docs/python/array_ops.md#saturate_cast)
+  * [`scatter_nd`](../../api_docs/python/array_ops.md#scatter_nd)
   * [`sequence_mask`](../../api_docs/python/array_ops.md#sequence_mask)
   * [`setdiff1d`](../../api_docs/python/array_ops.md#setdiff1d)
   * [`shape`](../../api_docs/python/array_ops.md#shape)
@@ -680,50 +696,13 @@
 
 * **[BayesFlow Stochastic Tensors (contrib)](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md)**:
   * [`BaseStochasticTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#BaseStochasticTensor)
-  * [`BernoulliTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#BernoulliTensor)
-  * [`BernoulliWithSigmoidPTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#BernoulliWithSigmoidPTensor)
-  * [`beta_aaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#beta_aaTensor)
-  * [`beta_bbTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#beta_bbTensor)
-  * [`BetaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#BetaTensor)
-  * [`BetaWithSoftplusABTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#BetaWithSoftplusABTensor)
-  * [`BinomialTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#BinomialTensor)
-  * [`CategoricalTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#CategoricalTensor)
-  * [`Chi2Tensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#Chi2Tensor)
-  * [`Chi2WithAbsDfTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#Chi2WithAbsDfTensor)
-  * [`DirichletMultinomialTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#DirichletMultinomialTensor)
-  * [`DirichletTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#DirichletTensor)
-  * [`ExponentialTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#ExponentialTensor)
-  * [`ExponentialWithSoftplusLamTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#ExponentialWithSoftplusLamTensor)
-  * [`GammaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#GammaTensor)
-  * [`GammaWithSoftplusAlphaBetaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#GammaWithSoftplusAlphaBetaTensor)
   * [`get_current_value_type`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#get_current_value_type)
-  * [`InverseGammaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#InverseGammaTensor)
-  * [`InverseGammaWithSoftplusAlphaBetaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#InverseGammaWithSoftplusAlphaBetaTensor)
-  * [`LaplaceTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#LaplaceTensor)
-  * [`LaplaceWithSoftplusScaleTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#LaplaceWithSoftplusScaleTensor)
   * [`MeanValue`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MeanValue)
-  * [`MixtureTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MixtureTensor)
-  * [`MultinomialTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MultinomialTensor)
-  * [`MultivariateNormalCholeskyTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MultivariateNormalCholeskyTensor)
-  * [`MultivariateNormalDiagPlusVDVTTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MultivariateNormalDiagPlusVDVTTensor)
-  * [`MultivariateNormalDiagTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MultivariateNormalDiagTensor)
-  * [`MultivariateNormalDiagWithSoftplusStDevTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MultivariateNormalDiagWithSoftplusStDevTensor)
-  * [`MultivariateNormalFullTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#MultivariateNormalFullTensor)
-  * [`NormalTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#NormalTensor)
-  * [`NormalWithSoftplusSigmaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#NormalWithSoftplusSigmaTensor)
   * [`ObservedStochasticTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#ObservedStochasticTensor)
-  * [`PoissonTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#PoissonTensor)
-  * [`QuantizedDistributionTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#QuantizedDistributionTensor)
   * [`SampleAndReshapeValue`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#SampleAndReshapeValue)
   * [`SampleValue`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#SampleValue)
   * [`StochasticTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#StochasticTensor)
-  * [`StudentTTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#StudentTTensor)
-  * [`StudentTWithAbsDfSoftplusSigmaTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#StudentTWithAbsDfSoftplusSigmaTensor)
-  * [`TransformedDistributionTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#TransformedDistributionTensor)
-  * [`UniformTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#UniformTensor)
   * [`value_type`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#value_type)
-  * [`WishartCholeskyTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#WishartCholeskyTensor)
-  * [`WishartFullTensor`](../../api_docs/python/contrib.bayesflow.stochastic_tensor.md#WishartFullTensor)
 
 * **[BayesFlow Variational Inference (contrib)](../../api_docs/python/contrib.bayesflow.variational_inference.md)**:
   * [`elbo`](../../api_docs/python/contrib.bayesflow.variational_inference.md#elbo)
@@ -913,6 +892,9 @@
   * [`Transformer`](../../api_docs/python/contrib.graph_editor.md#Transformer)
   * [`ts`](../../api_docs/python/contrib.graph_editor.md#ts)
 
+* **[Integrate (contrib)](../../api_docs/python/contrib.integrate.md)**:
+  * [`odeint`](../../api_docs/python/contrib.integrate.md#odeint)
+
 * **[Layers (contrib)](../../api_docs/python/contrib.layers.md)**:
   * [`apply_regularization`](../../api_docs/python/contrib.layers.md#apply_regularization)
   * [`avg_pool2d`](../../api_docs/python/contrib.layers.md#avg_pool2d)
diff --git a/tensorflow/g3doc/api_docs/python/math_ops.md b/tensorflow/g3doc/api_docs/python/math_ops.md
index 71e6b9a26f7..bc40bfa4043 100644
--- a/tensorflow/g3doc/api_docs/python/math_ops.md
+++ b/tensorflow/g3doc/api_docs/python/math_ops.md
@@ -1832,7 +1832,7 @@ Computes the eigenvalues of one or more self-adjoint matrices.
 
 - - -
 
-### `tf.svd(tensor, compute_uv=True, full_matrices=False, name=None)` {#svd}
+### `tf.svd(tensor, full_matrices=False, compute_uv=True, name=None)` {#svd}
 
 Computes the singular value decompositions of one or more matrices.
 
@@ -1854,12 +1854,12 @@ s = svd(a, compute_uv=False)
 
 *  <b>`matrix`</b>: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
     `N`.
-*  <b>`compute_uv`</b>: If `True` then left and right singular vectors will be
-    computed and returned in `u` and `v`, respectively. Otherwise, only the
-    singular values will be computed, which can be significantly faster.
 *  <b>`full_matrices`</b>: If true, compute full-sized `u` and `v`. If false
     (the default), compute only the leading `P` singular vectors.
     Ignored if `compute_uv` is `False`.
+*  <b>`compute_uv`</b>: If `True` then left and right singular vectors will be
+    computed and returned in `u` and `v`, respectively. Otherwise, only the
+    singular values will be computed, which can be significantly faster.
 *  <b>`name`</b>: string, optional name of the operation.
 
 ##### Returns:
diff --git a/tensorflow/g3doc/api_docs/python/nn.md b/tensorflow/g3doc/api_docs/python/nn.md
index eaf42b9de5b..dde5c9c3752 100644
--- a/tensorflow/g3doc/api_docs/python/nn.md
+++ b/tensorflow/g3doc/api_docs/python/nn.md
@@ -2418,8 +2418,8 @@ while not all(finished):
       time=time + 1, cell_output=output, cell_state=cell_state,
       loop_state=loop_state)
   # Emit zeros and copy forward state for minibatch entries that are finished.
-  state = tf.select(finished, state, next_state)
-  emit = tf.select(finished, tf.zeros_like(emit), emit)
+  state = tf.where(finished, state, next_state)
+  emit = tf.where(finished, tf.zeros_like(emit), emit)
   emit_ta = emit_ta.write(time, emit)
   # If any new minibatch entries are marked as finished, mark these.
   finished = tf.logical_or(finished, next_finished)
diff --git a/tensorflow/g3doc/api_docs/python/state_ops.md b/tensorflow/g3doc/api_docs/python/state_ops.md
index e0b6ad57671..29765e7c2f6 100644
--- a/tensorflow/g3doc/api_docs/python/state_ops.md
+++ b/tensorflow/g3doc/api_docs/python/state_ops.md
@@ -66,16 +66,16 @@ with tf.Session() as sess:
 ```
 
 The most common initialization pattern is to use the convenience function
-`initialize_all_variables()` to add an Op to the graph that initializes
+`global_variables_initializer()` to add an Op to the graph that initializes
 all the variables. You then run that Op after launching the graph.
 
 ```python
-# Add an Op to initialize all variables.
-init_op = tf.initialize_all_variables()
+# Add an Op to initialize global variables.
+init_op = tf.global_variables_initializer()
 
 # Launch the graph in a session.
 with tf.Session() as sess:
-    # Run the Op that initializes all variables.
+    # Run the Op that initializes global variables.
     sess.run(init_op)
     # ...you can now run any Op that uses variable values...
 ```
@@ -86,8 +86,8 @@ variables are initialized in the right order.
 
 All variables are automatically collected in the graph where they are
 created. By default, the constructor adds the new variable to the graph
-collection `GraphKeys.VARIABLES`. The convenience function
-`all_variables()` returns the contents of that collection.
+collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
+`global_variables()` returns the contents of that collection.
 
 When building a machine learning model it is often convenient to distinguish
 between variables holding the trainable model parameters and other variables
@@ -109,7 +109,7 @@ Creating a variable.
 Creates a new variable with value `initial_value`.
 
 The new variable is added to the graph collections listed in `collections`,
-which defaults to `[GraphKeys.VARIABLES]`.
+which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
 
 If `trainable` is `True` the variable is also added to the graph collection
 `GraphKeys.TRAINABLE_VARIABLES`.
@@ -130,7 +130,7 @@ variable to its initial value.
     collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
     the default list of variables to use by the `Optimizer` classes.
 *  <b>`collections`</b>: List of graph collections keys. The new variable is added to
-    these collections. Defaults to `[GraphKeys.VARIABLES]`.
+    these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
 *  <b>`validate_shape`</b>: If `False`, allows the variable to be initialized with a
     value of unknown shape. If `True`, the default, the shape of
     `initial_value` must be known.
@@ -316,7 +316,7 @@ more information on launching a graph and on sessions.
 
 ```python
 v = tf.Variable([1, 2])
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 with tf.Session() as sess:
     sess.run(init)
@@ -1178,19 +1178,57 @@ collected in the graph.
 
 - - -
 
-### `tf.all_variables()` {#all_variables}
+### `tf.global_variables()` {#global_variables}
 
-Returns all variables that must be saved/restored.
+Returns global variables.
 
-The `Variable()` constructor automatically adds new variables to the graph
-collection `GraphKeys.VARIABLES`. This convenience function returns the
-contents of that collection.
+Global variables are variables that are shared across machines in a
+distributed environment. The `Variable()` constructor or `get_variable()`
+automatically adds new variables to the graph collection
+`GraphKeys.GLOBAL_VARIABLES`.
+This convenience function returns the contents of that collection.
+
+An alternative to global variables is local variables. See
+[`tf.local_variables()`](../../api_docs/python/state_ops.md#local_variables)
 
 ##### Returns:
 
   A list of `Variable` objects.
 
 
+- - -
+
+### `tf.local_variables()` {#local_variables}
+
+Returns local variables.
+
+Local variables are per-process variables, usually not saved/restored to
+checkpoint and used for temporary or intermediate values.
+For example, they can be used as counters for metrics computation or the
+number of epochs this machine has read data.
+The `local_variable()` function automatically adds a new variable to
+`GraphKeys.LOCAL_VARIABLES`.
+This convenience function returns the contents of that collection.
+
+An alternative to local variables is global variables. See
+[`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables)
+
+##### Returns:
+
+  A list of local `Variable` objects.
+
+
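+As a rough sketch, a variable can be made local (rather than global) by
+overriding the default collections; the collection key is what matters here:
+
+```python
+import tensorflow as tf
+
+# Placed in LOCAL_VARIABLES instead of the default GLOBAL_VARIABLES.
+epochs = tf.Variable(0, name="epochs",
+                     collections=[tf.GraphKeys.LOCAL_VARIABLES])
+
+assert epochs in tf.local_variables()
+```
+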
+- - -
+
+### `tf.model_variables()` {#model_variables}
+
+Returns all variables in the MODEL_VARIABLES collection.
+
+##### Returns:
+
+  A list of `Variable` objects in the `MODEL_VARIABLES` collection.
+
+
 - - -
 
 ### `tf.trainable_variables()` {#trainable_variables}
@@ -1207,28 +1245,6 @@ contents of that collection.
   A list of Variable objects.
 
 
-- - -
-
-### `tf.local_variables()` {#local_variables}
-
-Returns all variables created with collection=[LOCAL_VARIABLES].
-
-##### Returns:
-
-  A list of local Variable objects.
-
-
-- - -
-
-### `tf.model_variables()` {#model_variables}
-
-Returns all variables in the MODEL_VARIABLES collection.
-
-##### Returns:
-
-  A list of local Variable objects.
-
-
 - - -
 
 ### `tf.moving_average_variables()` {#moving_average_variables}
@@ -1248,20 +1264,33 @@ This convenience function returns the contents of that collection.
 
 - - -
 
-### `tf.initialize_all_variables()` {#initialize_all_variables}
+### `tf.global_variables_initializer()` {#global_variables_initializer}
 
-Returns an Op that initializes all variables.
+Returns an Op that initializes global variables.
 
-This is just a shortcut for `initialize_variables(all_variables())`
+This is just a shortcut for `variables_initializer(global_variables())`
 
 ##### Returns:
 
-  An Op that initializes all variables in the graph.
+  An Op that initializes global variables in the graph.
 
 
 - - -
 
-### `tf.initialize_variables(var_list, name='init')` {#initialize_variables}
+### `tf.local_variables_initializer()` {#local_variables_initializer}
+
+Returns an Op that initializes all local variables.
+
+This is just a shortcut for `variables_initializer(local_variables())`
+
+##### Returns:
+
+  An Op that initializes all local variables in the graph.
+
+
+- - -
+
+### `tf.variables_initializer(var_list, name='init')` {#variables_initializer}
 
 Returns an Op that initializes a list of variables.
 
@@ -1286,19 +1315,6 @@ be run. That Op just has no effect.
   An Op that run the initializers of all the specified variables.
 
 
-- - -
-
-### `tf.initialize_local_variables()` {#initialize_local_variables}
-
-Returns an Op that initializes all local variables.
-
-This is just a shortcut for `initialize_variables(local_variables())`
-
-##### Returns:
-
-  An Op that initializes all local variables in the graph.
-
-
 - - -
 
 ### `tf.is_variable_initialized(variable)` {#is_variable_initialized}
@@ -1329,7 +1345,7 @@ variables if there are any, or an empty array if there are none.
 
 
 *  <b>`var_list`</b>: List of `Variable` objects to check. Defaults to the
-    value of `all_variables() + local_variables()`
+    value of `global_variables() + local_variables()`
 *  <b>`name`</b>: Optional name of the `Operation`.
 
 ##### Returns:
@@ -1358,7 +1374,7 @@ logged by the C++ runtime. This is expected.
 
 
 *  <b>`var_list`</b>: List of `Variable` objects to check. Defaults to the
-    value of `all_variables().`
+    value of `global_variables().`
 
 ##### Returns:
 
@@ -1532,7 +1548,7 @@ protocol buffer file in the call to `save()`.
 
 - - -
 
-#### `tf.train.Saver.__init__(var_list=None, reshape=False, sharded=False, max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None, restore_sequentially=False, saver_def=None, builder=None, defer_build=False, allow_empty=False, write_version=1, pad_step_number=False)` {#Saver.__init__}
+#### `tf.train.Saver.__init__(var_list=None, reshape=False, sharded=False, max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None, restore_sequentially=False, saver_def=None, builder=None, defer_build=False, allow_empty=False, write_version=2, pad_step_number=False)` {#Saver.__init__}
 
 Creates a `Saver`.
 
@@ -2942,6 +2958,280 @@ Requires `updates.shape = indices.shape + ref.shape[1:]`.
   to use the updated values after the update is done.
 
 
+- - -
+
+### `tf.scatter_nd_update(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_update}
+
+Applies sparse `updates` to individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to update 4 scattered elements of a rank-1 tensor with 8 elements. In Python, that update would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    update = tf.scatter_nd_update(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(update)
+
+The resulting update to ref would look like this:
+
+    [1, 11, 3, 10, 9, 6, 7, 12]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. A mutable Tensor. Should be from a Variable node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `True`.
+    An optional bool. Defaults to True. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
+
+- - -
+
+### `tf.scatter_nd_add(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_add}
+
+Applies sparse addition between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    add = tf.scatter_nd_add(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(add)
+
+The resulting update to ref would look like this:
+
+    [1, 13, 3, 14, 14, 6, 7, 20]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    A mutable Tensor. Should be from a Variable node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A Tensor. Must have the same type as ref. A tensor of updated values to add to ref.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
+
+- - -
+
+### `tf.scatter_nd_sub(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_sub}
+
+Applies sparse subtraction between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that subtraction would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    sub = tf.scatter_nd_sub(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(sub)
+
+The resulting update to ref would look like this:
+
+    [1, -9, 3, -6, -4, 6, 7, -4]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    A mutable Tensor. Should be from a Variable node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A Tensor. Must have the same type as ref. A tensor of updated values to subtract from ref.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
+
+- - -
+
+### `tf.scatter_nd_mul(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_mul}
+
+Applies sparse multiplication between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to multiply 4 scattered elements of a rank-1 tensor with 8 elements. In Python, that multiplication would look like this:
+
+    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([9, 10, 11, 12])
+    mul = tf.scatter_nd_mul(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(mul)
+
+The resulting update to ref would look like this:
+
+    [1, 22, 3, 40, 45, 6, 7, 96]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    A mutable Tensor. Should be from a Variable node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A Tensor. Must have the same type as ref. A tensor of updated values to multiply with ref.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
+
+- - -
+
+### `tf.scatter_nd_div(ref, indices, updates, use_locking=None, name=None)` {#scatter_nd_div}
+
+Applies sparse division between `updates` and individual values or slices within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to divide a rank-1 tensor with 8 elements by 4 scattered elements. In Python, that division would look like this:
+
+    ref = tf.Variable([10, 20, 30, 40, 50, 60, 70, 80])
+    indices = tf.constant([[4], [3], [1], [7]])
+    updates = tf.constant([2, 3, 4, 5])
+    div = tf.scatter_nd_div(ref, indices, updates)
+    with tf.Session() as sess:
+      print sess.run(div)
+
+The resulting update to ref would look like this:
+
+    [10, 5, 30, 13, 25, 60, 70, 16]
+
+See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to slices.
+
+##### Args:
+
+
+*  <b>`ref`</b>: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+    A mutable Tensor. Should be from a Variable node.
+*  <b>`indices`</b>: A `Tensor`. Must be one of the following types: `int32`, `int64`.
+    A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.
+*  <b>`updates`</b>: A `Tensor`. Must have the same type as `ref`.
+    A Tensor. Must have the same type as ref. A tensor of updated values to divide ref by.
+*  <b>`use_locking`</b>: An optional `bool`. Defaults to `False`.
+    An optional bool. Defaults to False. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention.
+*  <b>`name`</b>: A name for the operation (optional).
+
+##### Returns:
+
+  A mutable `Tensor`. Has the same type as `ref`.
+  Same as ref. Returned as a convenience for operations that want to use the updated values after the update is done.
+
+
 - - -
 
 ### `tf.sparse_mask(a, mask_indices, name=None)` {#sparse_mask}
@@ -3233,3 +3523,50 @@ device assignments have not changed.
   (i.e., there are no variables to restore).
 
 
+
+# Deprecated functions (removed after 2017-03-02). Please don't use them.
+
+- - -
+
+### `tf.all_variables(*args, **kwargs)` {#all_variables}
+
+See `tf.global_variables`. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Use `tf.global_variables` instead.
+
+
+- - -
+
+### `tf.initialize_all_variables(*args, **kwargs)` {#initialize_all_variables}
+
+See `tf.global_variables_initializer`. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Use `tf.global_variables_initializer` instead.
+
+
+- - -
+
+### `tf.initialize_local_variables(*args, **kwargs)` {#initialize_local_variables}
+
+See `tf.local_variables_initializer`. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Use `tf.local_variables_initializer` instead.
+
+
+- - -
+
+### `tf.initialize_variables(*args, **kwargs)` {#initialize_variables}
+
+See `tf.variables_initializer`. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
+Instructions for updating:
+Use `tf.variables_initializer` instead.
+
+
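+- - -
+
+As a rough migration sketch, the renames above map old calls to new ones as
+follows:
+
+```python
+tf.all_variables()               # -> tf.global_variables()
+tf.initialize_all_variables()    # -> tf.global_variables_initializer()
+tf.initialize_local_variables()  # -> tf.local_variables_initializer()
+tf.initialize_variables(v_list)  # -> tf.variables_initializer(v_list)
+```
+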
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index b077af52055..1535f094c5d 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -610,8 +610,8 @@ to reflect the cuDNN version you downloaded):
 
 ``` bash
 tar xvzf cudnn-8.0-linux-x64-v5.1-ga.tgz
-sudo cp cuda/include/cudnn.h /usr/local/cuda/include
-sudo cp cuda/lib64/libcudnn* /usr/local/cuda/lib64
+sudo cp -P cuda/include/cudnn.h /usr/local/cuda/include
+sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda/lib64
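+# -P (--no-dereference) copies the libcudnn.so* symlinks as symlinks instead
+# of duplicating the shared library once per link name.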
 sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
 ```
 
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index a32c76273d6..4dc1bcec172 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -41,6 +41,7 @@ py_library(
         ":summary",
         ":training",
         ":ops",
+        ":test_ops",
         "//tensorflow/python/debug:debug_py",
     ] + if_not_windows([
         "//tensorflow/contrib:contrib_py",
@@ -282,6 +283,7 @@ py_library(
         "framework/meta_graph.py",
         "framework/random_seed.py",
         "framework/sparse_tensor.py",
+        "framework/subscribe.py",
         "framework/tensor_util.py",
     ],
     srcs_version = "PY2AND3",
@@ -351,6 +353,19 @@ py_test(
     ],
 )
 
+py_test(
+    name = "framework_subscribe_test",
+    size = "small",
+    srcs = ["framework/subscribe_test.py"],
+    main = "framework/subscribe_test.py",
+    srcs_version = "PY2AND3",
+    deps = [
+        ":framework_test_lib",
+        ":platform_test",
+        "//tensorflow:tensorflow_py",
+    ],
+)
+
 py_test(
     name = "contrib_test",
     size = "small",
@@ -855,7 +870,10 @@ py_library(
 
 py_library(
     name = "gradients",
-    srcs = ["ops/gradients.py"],
+    srcs = [
+        "ops/gradients.py",
+        "ops/gradients_impl.py",
+    ],
     srcs_version = "PY2AND3",
     deps = [
         ":array_grad",
diff --git a/tensorflow/python/debug/BUILD b/tensorflow/python/debug/BUILD
index 580bd0e79bf..b948e43501d 100644
--- a/tensorflow/python/debug/BUILD
+++ b/tensorflow/python/debug/BUILD
@@ -41,6 +41,15 @@ py_library(
     ],
 )
 
+py_library(
+    name = "stepper",
+    srcs = ["stepper.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":debug_data",
+    ],
+)
+
 py_library(
     name = "framework",
     srcs = ["wrappers/framework.py"],
@@ -155,6 +164,20 @@ py_test(
     ],
 )
 
+cuda_py_test(
+    name = "stepper_test",
+    size = "small",
+    srcs = [
+        "stepper_test.py",
+    ],
+    additional_deps = [
+        ":stepper",
+        "//tensorflow:tensorflow_py",
+        "//tensorflow/python:framework",
+        "//tensorflow/python:framework_test_lib",
+    ],
+)
+
 py_test(
     name = "framework_test",
     size = "small",
diff --git a/tensorflow/python/debug/stepper.py b/tensorflow/python/debug/stepper.py
new file mode 100644
index 00000000000..d785f47e1e8
--- /dev/null
+++ b/tensorflow/python/debug/stepper.py
@@ -0,0 +1,617 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""TensorFlow Debugger (tfdbg) Stepper Module."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.debug import debug_data
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import session_ops
+
+
+class NodeStepper(object):
+  """TensorFlow Debugger (tfdbg) stepper.
+
+  The stepper provides the ability to perform "continue to" actions on a
+  graph, given a fetch and feeds. The stepper calculates the transitive
+  closure of the fetch. cont() (continue-to) calls can only be performed on
+  members of the transitive closure.
+
+  On a cont() call, the stepper performs depth-first tracing of the input
+  tree of the target. When it reaches an input where one of the following is
+  available, it will supply the available value to the feed_dict of the cont()
+  call:
+    (1) TensorHandles from previous cont() calls.
+    (2) Overriding (injected) values from the client.
+    (3) Feeds supplied during the construction of the stepper instance.
+
+  Once the tracing is complete, it will issue a run() call on the
+  underlying session, using the aforementioned feed_dict prepared by the input
+  tracing, to achieve the "continue-to" action. The above process takes into
+  account whether the transitive closure of an input contains Variables that
+  are updated during previous cont() calls on this stepper instance. If such
+  updates exist, we say the transitive closure is "dirty" and the stepper
+  can restore the "clean" state of the Variable and avoid using the
+  TensorHandle.
+
+  Example of basic usage:
+    a = tf.Variable(1.0, name="a")
+    b = tf.Variable(2.0, name="b")
+    c = tf.add(a, b, name="c")
+    d = tf.mul(a, c, name="d")
+
+    sess = tf.Session()
+    sess.run(tf.initialize_all_variables())
+    stepper = NodeStepper(sess, d)
+
+    stepper.cont(c)  # Caches the handle to Tensor c:0.
+    stepper.cont(d)  # Uses handle to Tensor c:0, avoiding recomputing c.
+  """
+
+  # Possible types of feed used during cont() calls.
+  FEED_TYPE_CLIENT = "client"
+  FEED_TYPE_HANDLE = "handle"
+  FEED_TYPE_OVERRIDE = "override"
+
+  # TODO(cais): The following member constant is currently unused. Use it when
+  # the stepper is capable of using dumped intermediate tensors.
+  FEED_TYPE_INTERMEDIATE = "intermediate"
+
+  def __init__(self, sess, fetch, feed_dict=None):
+    """Constructor for Debugger.
+
+    Args:
+      sess: (Session) the TensorFlow Session to step in.
+      fetch: (str or TensorFlow graph element) A single fetched Tensor or Op,
+        or a name (str) representing the Tensor or Op. In the case of a name
+        str, the graph will be searched to find the corresponding Tensor or Op.
+      feed_dict: (dict or None) feed dict to be used in this stepper instance.
+
+    TODO(cais): Currently the stepper supports a single fetch. Support list,
+      tuple or dict of fetches, as in the Session.run() interface.
+    """
+
+    self._sess = sess
+
+    if isinstance(fetch, str):
+      # The fetch target is a string. Assume it is the name of a Tensor or Op
+      # and attempt to find it in the Session's graph.
+      self._fetch_name = fetch
+    elif isinstance(fetch, list) or isinstance(fetch, tuple) or isinstance(
+        fetch, dict):
+      raise NotImplementedError(
+          "list, tuple or dict fetches are not supported yet.")
+    else:
+      self._fetch_name = fetch.name
+    self._fetch = self._sess.graph.as_graph_element(self._fetch_name)
+
+    # A map from Variable name to initializer op.
+    self._variable_initializers = {}
+
+    # A map from Variable name to initial value, used when overriding or
+    # restoring Variable values.
+    self._variable_initial_values = {}
+
+    # Initialize the map for output recipients (targets).
+    self._non_control_output_targets = {}
+
+    # Sorted transitive closure of the fetched node.
+    self._sorted_transitive_closure = self._dfs_visit(self._sess.graph,
+                                                      self._fetch)
+    self._transitive_closure_set = set(self._sorted_transitive_closure)
+
+    # A map from Variable name to the old values (before any cont() calls).
+    self._cached_variable_values = {}
+
+    # A cache map from tensor name to the variables that may invalidate the
+    # tensor.
+    self._cached_invalidation_path = {}
+
+    # Keep track of which variables are in a dirty state.
+    self._dirty_variables = set()
+
+    # Cached tensor handles: a dict with keys as tensor names and values as
+    # tensor handles.
+    self._tensor_handles = {}
+
+    # Feed dict from the client.
+    self._client_feed_dict = feed_dict
+    if not self._client_feed_dict:
+      self._client_feed_dict = {}
+
+    # Overriding tensor values.
+    self._override_tensors = {}
+
+    # The feed types used by the last cont() call.
+    self._last_feed_types = {}
+
+  def _dfs_visit(self, graph, elem):
+    """Trace back the input of a graph element, using depth-first search.
+
+    Uses non-recursive implementation to prevent stack overflow for deep
+    graphs.
+
+    Also performs the following action(s):
+      1) When encountering a Variable, obtains its initializer op, to
+         facilitate possible subsequent restoration / overriding of variable
+         value.
+
+    Args:
+      graph: A TF graph instance.
+      elem: A graph element: a Tensor or an Operation.
+
+    Returns:
+      (list of str) A topologically-sorted list of all graph element names
+        in the transitive closure of elem. Obviously, the topological sort is
+        not unique in general. The return value here is just an arbitrary one
+        of potentially many possible topological sorts.
+    """
+
+    # This set should hold only strings, i.e., names of the nodes.
+    done = set()  # Keep track of visited nodes.
+
+    # A list of str: Names of the topologically-sorted graph elements.
+    sorted_node_list = [elem.name]
+
+    elem_stack = [elem]
+
+    while elem_stack:
+      curr_elem = elem_stack.pop()
+      curr_node = self._get_node(curr_elem)
+
+      done.add(curr_node.name)
+
+      non_control_inputs = [inp for inp in curr_node.inputs]
+      control_inputs = [inp for inp in curr_node.control_inputs]
+      all_inputs = set(non_control_inputs + control_inputs)
+
+      # Iterate through all inputs (control and non-control).
+      for inp in all_inputs:
+        is_non_control_input = inp in non_control_inputs
+
+        # Set up the non-control output map.
+        if is_non_control_input:
+          if inp.name not in self._non_control_output_targets:
+            self._non_control_output_targets[inp.name] = set([curr_elem.name])
+          else:
+            self._non_control_output_targets[inp.name].add(curr_elem.name)
+
+          if (inp.op.type == "Variable" and
+              inp.name not in self._variable_initializers):
+            # Obtain the initializer op of the variable, in case the Variable's
+            # value needs to be restored later.
+            initializer = graph.as_graph_element(inp.op.name + "/Assign")
+            self._variable_initializers[inp.name] = initializer
+            self._variable_initial_values[inp.name] = initializer.inputs[1]
+
+        inp_node = self._get_node(inp)
+        if inp_node.name in done:
+          # Already visited.
+          continue
+
+        elem_stack.append(inp)
+        sorted_node_list.append(inp.name)
+
+    sorted_node_list.reverse()
+    return sorted_node_list
+
+  def sorted_transitive_closure(self):
+    """Get a sorted list of transitive inputs to the fetch of the stepper.
+
+    Returns:
+      (list of str): Sorted transitive inputs to the fetch of the stepper
+        instance. The fetch itself is included in the list.
+    """
+
+    return self._sorted_transitive_closure
+
+  def is_feedable(self, name):
+    """Determine if a graph element if feedable.
+
+    Args:
+      name: (str) name of the graph element (Tensor or Operation)
+
+    Returns:
+      (bool) whether the graph element is feedable.
+    """
+
+    if not isinstance(name, str):
+      raise TypeError("Expected type str; got type %s" % type(name))
+
+    elem = self._sess.graph.as_graph_element(name)
+    return self._sess.graph.is_feedable(elem)
+
+  def override_tensor(self, tensor_name, overriding_val):
+    """Override the value of a tensor.
+
+    Args:
+      tensor_name: (str) Name of the tensor to override.
+      overriding_val: (numpy.ndarray) Overriding tensor value.
+
+    Raises:
+      ValueError: If tensor_name does not correspond to a tensor in the input
+        tree to the fetched graph element of this stepper instance.
+    """
+
+    if not isinstance(tensor_name, str):
+      raise TypeError("Expected type str; got type %s" % type(tensor_name))
+
+    if tensor_name not in self._transitive_closure_set:
+      raise ValueError(
+          "Cannot override tensor \"%s\" because it does not exist in the "
+          "input tree to the fetch \"%s\"" % (tensor_name, self._fetch_name))
+
+    self._override_tensors[tensor_name] = overriding_val
+
+    # Invalidate cache by tracing outputs.
+    self._invalidate_transitively_outgoing_cache(tensor_name)
+
+  def remove_override(self, tensor_name):
+    """Remove the overriding value on a tensor.
+
+    Args:
+      tensor_name: (str) name of the tensor to remove the overriding value
+        from.
+
+    Raises:
+      ValueError: If no overriding value exists for tensor_name.
+    """
+
+    if tensor_name not in self._override_tensors:
+      raise ValueError("No overriding value exists for tensor \"%s\"." %
+                       tensor_name)
+
+    del self._override_tensors[tensor_name]
+
+    # Invalidate cache by tracing outputs.
+    self._invalidate_transitively_outgoing_cache(tensor_name)
+
+  def last_feed_types(self):
+    """Obtain information about the feed in the last cont() call.
+
+    Returns:
+      (dict) A dict mapping tensor names to feed types.
+    """
+
+    return self._last_feed_types
+
+  def cont(self,
+           target,
+           use_tensor_handles=True,
+           use_overrides=True,
+           restore_variable_values=False):
+    """Continue till the completion of the specified target tensor.
+
+    Args:
+      target: A single fetched Tensor or Op, or a name (str) representing the
+        Tensor or Op. In the case of a name str, the graph will be searched
+        to find the corresponding Tensor or Op.
+        # TODO(cais): Support multiple fetches as in Session.run() interface.
+      use_tensor_handles: (bool) Whether this cont() run will use cached tensor
+        handles to avoid recomputation. Default: True.
+      use_overrides: (bool) Whether the overriding tensor values supplied by
+        the client are to be used in this cont() call. Default: True.
+      restore_variable_values: (bool) Whether the old values of the variables
+        (before any cont() calls in this object) are to be restored.
+
+    Returns:
+      Value from Session.run() of the target.
+
+    Raises:
+      ValueError: If the target is specified as a string and the string does
+        not correspond to any tensors in the Session graph.
+        Or if the target of this cont() is not in the input list of the Stepper
+        object's target.
+        Or if target is a Placeholder.
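+
+    Example (an illustrative sketch, reusing the graph from the class
+    docstring):
+      stepper = NodeStepper(sess, d)
+      stepper.cont("c:0")  # Computes c:0 and caches its TensorHandle.
+      stepper.cont("d:0")  # Reuses the cached handle to c:0.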
+    """
+
+    self._last_feed_types = {}
+
+    if isinstance(target, str):
+      # The fetch target is a string. Assume it is the name of a Tensor or Op
+      # and attempt to find it in the Session's graph.
+      target_name = target
+    else:
+      target_name = target.name
+
+    graph_element = self._sess.graph.as_graph_element(target_name)
+    if (isinstance(graph_element, ops.Tensor) and
+        graph_element.op.type == "Placeholder"):
+      raise ValueError("Should not call cont() on a Placeholder")
+
+    # Verify that the target is in the transitive closure of the stepper's
+    # fetch.
+    if target_name not in self._transitive_closure_set:
+      raise ValueError(
+          "Target \"%s\" is not in the transitive closure for the fetch of the "
+          "stepper: \"%s\"." % (target_name, self._fetch_name))
+
+    # Check if a cached tensor handle can be used on the fetch directly.
+    if use_tensor_handles and target_name in self._tensor_handles:
+      self._last_feed_types[target_name] = self.FEED_TYPE_HANDLE
+      return self._tensor_handles[target_name].eval()
+
+    # Check if an overriding tensor value can be used directly.
+    if use_overrides and target_name in self._override_tensors:
+      # Override is available. Return the value right away.
+      self._last_feed_types[target_name] = self.FEED_TYPE_OVERRIDE
+      return self._override_tensors[target_name]
+
+    # The feeds to be used in the Session.run() call.
+    feeds = {}
+
+    # Keep track of which variables are restored in this cont() call.
+    restored_variables = set()
+
+    # Keep track of which variables are "touched" (i.e., possibly updated) in
+    # this cont() call.
+    touched_variables = set()
+
+    # =========================================================================
+    # Use a non-recursive method to trace the inputs from the node and set up
+    # the feeds.
+    fetched = self._sess.graph.as_graph_element(target_name)
+    elem_stack = [fetched]
+    done = set()
+
+    while elem_stack:
+      curr_elem = elem_stack.pop()
+      curr_node = self._get_node(curr_elem)
+
+      done.add(curr_node.name)
+
+      non_control_inputs = [inp for inp in curr_node.inputs]
+      control_inputs = [inp for inp in curr_node.control_inputs]
+      all_inputs = set(non_control_inputs + control_inputs)
+
+      # Iterate through all inputs (control and non-control).
+      for inp in all_inputs:
+        # Determine whether the input is feedable. Reference-type tensors,
+        # e.g., Variables, should not be fed, because they can change.
+        if isinstance(inp, ops.Tensor):
+          is_inp_ref = inp.dtype.is_ref_dtype
+          can_feed = self._sess.graph.is_feedable(inp) and not is_inp_ref
+        else:
+          is_inp_ref = False
+          can_feed = False
+
+        if (restore_variable_values and inp.name in self._dirty_variables and
+            inp.name not in restored_variables and
+            inp.name not in touched_variables):
+          # Do not restore Variables touched or restored previously in this
+          # cont() call.
+          initializer_op = self._variable_initializers[inp.name]
+          initial_value_tensor = self._variable_initial_values[inp.name]
+          self._sess.run(initializer_op,
+                         feed_dict={
+                             initial_value_tensor:
+                                 self._cached_variable_values[inp.name]
+                         })
+
+          # Mark the variable as restored.
+          restored_variables.add(inp.name)
+
+        # Determine if this is a reference-type input from a Variable whose
+        # recipient node is not an Identity op. In that case, the Variable
+        # needs to be marked as dirty and its current value recorded, because
+        # the receiving op may mutate the value of the Variable.
+        if (is_inp_ref and inp.op.type == "Variable" and
+            curr_node.type != "Identity"):
+          # Mark the variable as dirty.
+          touched_variables.add(inp.name)
+
+          # Obtain the old value of the variable and cache it.
+          if inp.name not in self._cached_variable_values:
+            old_value = self._sess.run(inp)
+            self._cached_variable_values[inp.name] = old_value
+
+        # N.B.: The order of the logical branches matters. For example,
+        # _client_feed_dict comes after _tensor_handles, so that tensor
+        # handles stored by previous cont() calls can override the original
+        # client feeds. Also, _override_tensors comes first, so that a manual
+        # override, if one exists, always takes effect.
+        if use_overrides and can_feed and inp.name in self._override_tensors:
+          # Use client-supplied overriding tensor value.
+          feeds[inp] = self._override_tensors[inp.name]
+          self._last_feed_types[inp.name] = self.FEED_TYPE_OVERRIDE
+        elif (use_tensor_handles and can_feed and
+              inp.name in self._tensor_handles and inp not in feeds):
+          # Tensor handle found in cache.
+          feeds[inp] = self._tensor_handles[inp.name].eval()
+          self._last_feed_types[inp.name] = self.FEED_TYPE_HANDLE
+        elif inp in self._client_feed_dict:
+          # This input is available in the client feed_dict.
+          feeds[inp] = self._client_feed_dict[inp]
+          self._last_feed_types[inp.name] = self.FEED_TYPE_CLIENT
+        else:
+          # There is no feed available for this input. So keep tracing its
+          # input(s).
+          inp_node = self._get_node(inp)
+          if inp_node.name in done:
+            # Already visited.
+            continue
+
+          elem_stack.append(inp)
+          done.add(inp_node.name)
+
+    # =========================================================================
+
+    if touched_variables:
+      self._dirty_variables.update(touched_variables)
+
+    for variable in restored_variables:
+      self._dirty_variables.remove(variable)
+
+    # Prepare RunOptions for DebugTensorWatches
+    run_options = config_pb2.RunOptions()
+    # TODO(cais): Add fields for watching intermediate tensors.
+
+    if isinstance(fetched, ops.Operation):
+      # The fetch is an Operation: no tensor handle will be obtained.
+      self._sess.run(fetched, feed_dict=feeds, options=run_options)
+      # There is no return value for the run of an Operation.
+    else:
+      # This is a Tensor: Will get tensor handle and cache it.
+      target_handle = self._sess.run(session_ops.get_session_handle(fetched),
+                                     feed_dict=feeds,
+                                     options=run_options)
+      self._tensor_handles[target_name] = target_handle
+
+      return target_handle.eval()
+
+    # Invalidate caches at the end.
+    for touched_variable in touched_variables:
+      self._invalidate_transitively_outgoing_cache(touched_variable)
+
+  def _invalidate_transitively_outgoing_cache(self, source_element):
+    """Invalidate the cached tensor handles by tracing output.
+
+    This method is used to invalidate caches such as cached TensorHandles
+    and intermediate tensor values when Variable mutation happens or when
+    client overrides tensor values.
+
+    Uses non-recursive implementation to avoid stack overflow on deep networks.
+
+    TODO(cais): Currently, only TensorHandle caches are invalidated. Invalidate
+      cached intermediate tensor values from dumps when dumps are added.
+
+    Args:
+      source_element: The source graph element (e.g., a Variable output slot)
+        to trace the output from.
+    """
+
+    if not self._tensor_handles:
+      return
+
+    # First, use cached invalidation paths to eliminate some cached tensor
+    # handles.
+    for handle_name in self._tensor_handles:
+      if (handle_name in self._cached_invalidation_path and
+          source_element in self._cached_invalidation_path[handle_name]):
+        del self._tensor_handles[handle_name]
+
+    if not self._tensor_handles:
+      return
+
+    stack = [source_element]
+    done = set()
+
+    while stack:
+      curr_element = stack.pop()
+
+      done.add(curr_element)
+
+      if curr_element in self._tensor_handles:
+        # Cache the invalidation path for potential future use.
+        if curr_element not in self._cached_invalidation_path:
+          self._cached_invalidation_path[curr_element] = set([source_element])
+        else:
+          self._cached_invalidation_path[curr_element].add(source_element)
+
+        del self._tensor_handles[curr_element]
+
+      targets = self._non_control_output_targets.get(curr_element, [])
+      for target in targets:
+        if target in done:
+          continue
+        else:
+          stack.append(target)
+
+  def finalize(self):
+    """Run the final fetch(es).
+
+    Restore the dirty variables; ignore the client-supplied overriding tensor
+    values.
+
+    Returns:
+      The same return value as self.cont() as called on the final fetch.
+    """
+
+    return self.cont(
+        self._fetch,
+        use_tensor_handles=False,
+        use_overrides=False,
+        restore_variable_values=True)
+
+  def handle_names(self):
+    """Return names of the TensorHandles that the debugger is holding.
+
+    Returns:
+      (list of str) Names of the tensors for which TensorHandles are
+        available.
+    """
+    return list(self._tensor_handles.keys())
+
+  def dirty_variables(self):
+    """Get the set of variables that are currently "dirty".
+
+    "dirty" means:
+      previous cont() calls have updated the value of the Variable,
+      and the Variable's old value (the value before any cont() calls
+      happened) was not restored.
+
+    Returns:
+      (set) A set of dirty variables.
+    """
+
+    return self._dirty_variables
+
+  def get_tensor_value(self, tensor_name):
+    """Get the value of a tensor that the stepper has access to.
+
+    Args:
+      tensor_name: (str) Name of the tensor.
+
+    Returns:
+      Value of the tensor, from overriding values or cached tensor handles.
+
+    Raises:
+      ValueError: If the value is not available as an overriding value
+        or through a TensorHandle.
+    """
+
+    if tensor_name in self._override_tensors:
+      return self._override_tensors[tensor_name]
+    elif tensor_name in self._tensor_handles:
+      return self._tensor_handles[tensor_name].eval()
+    else:
+      raise ValueError(
+          "This stepper instance does not have access to the value of "
+          "tensor \"%s\"" % tensor_name)
+
+  def get_fetch_result(self):
+    return self.get_tensor_value(self._fetch_name)
+
+  def override_names(self):
+    """Return names of the TensorHandles that the debugger is holding.
+
+    Returns:
+      (list of str) Names of the tensors for which overriding tensor values
+        are available.
+    """
+    return list(self._override_tensors.keys())
+
+  def _get_node(self, element):
+    """Get the node of a graph element.
+
+    Args:
+      element: A graph element (Op, Tensor or Node)
+
+    Returns:
+      The node associated with element in the graph.
+    """
+
+    node_name, _ = debug_data.parse_node_or_tensor_name(element.name)
+    return self._sess.graph.as_graph_element(node_name)
diff --git a/tensorflow/python/debug/stepper_test.py b/tensorflow/python/debug/stepper_test.py
new file mode 100644
index 00000000000..670ae5b3a13
--- /dev/null
+++ b/tensorflow/python/debug/stepper_test.py
@@ -0,0 +1,609 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Unit tests of the tfdbg Stepper."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+from tensorflow.python.debug.stepper import NodeStepper
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import googletest
+
+
+class StepperTest(test_util.TensorFlowTestCase):
+
+  def setUp(self):
+    self.a = tf.Variable(2.0, name="a")
+    self.b = tf.Variable(3.0, name="b")
+
+    self.c = tf.mul(self.a, self.b, name="c")  # Should be 6.0.
+    self.d = tf.mul(self.a, self.a, name="d")  # Should be 4.0.
+
+    self.e = tf.mul(self.d, self.c, name="e")  # Should be 24.0.
+
+    self.f = tf.div(self.b, 0.30, name="f")  # Should be 20.0.
+
+    self.sess = tf.Session()
+    self.sess.run(tf.initialize_all_variables())
+
+  def tearDown(self):
+    tf.reset_default_graph()
+
+  def testAttemptToContToFetchNotInTransitiveClosure(self):
+    stepper = NodeStepper(self.sess, "e:0")
+
+    self.assertEqual(
+        ["a:0", "b:0", "b/read:0", "a/read:0", "c:0", "d:0", "e:0"],
+        stepper.sorted_transitive_closure())
+
+    with self.assertRaisesRegexp(
+        ValueError,
+        "Target \"f:0\" is not in the transitive closure for the fetch of the "
+        "stepper: \"e:0\""):
+      stepper.cont("f:0")
+
+  def testUsingNamesNotUsingIntermediateTensors(self):
+    stepper = NodeStepper(self.sess, "e:0")
+
+    # The first cont() call should have used no feeds.
+    result = stepper.cont("c:0")
+    self.assertAllClose(6.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+
+    # The second cont() call should have used the tensor handle from the
+    # previous cont() call.
+    result = stepper.cont("e:0")
+    self.assertAllClose(24.0, result)
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+  def testUsingNodesNotUsingIntermediateTensors(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    # There should be no handles before any cont() calls.
+    self.assertEqual([], stepper.handle_names())
+
+    # Before the cont() call, the stepper should not have access to the value
+    # of c:0.
+    with self.assertRaisesRegexp(
+        ValueError,
+        "This stepper instance does not have access to the value of tensor "
+        "\"c:0\""):
+      stepper.get_tensor_value("c:0")
+
+    # Using the node/tensor itself, instead of the name str, should work on
+    # cont().
+    result = stepper.cont(self.c)
+    self.assertAllClose(6.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+
+    self.assertEqual(["c:0"], stepper.handle_names())
+
+    # After the cont() call, the stepper should have access to the value of c:0
+    # via a tensor handle.
+    self.assertAllClose(6.0, stepper.get_tensor_value("c:0"))
+
+    result = stepper.cont(self.e)
+    self.assertAllClose(24.0, result)
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+  def testIsFeedable(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    self.assertTrue(stepper.is_feedable("a/read:0"))
+    self.assertTrue(stepper.is_feedable("b/read:0"))
+    self.assertTrue(stepper.is_feedable("c:0"))
+    self.assertTrue(stepper.is_feedable("d:0"))
+
+  def testOverrideValue(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    result = stepper.cont(self.c)
+    self.assertAllClose(6.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+
+    # There should be no overrides before any cont() calls.
+    self.assertEqual([], stepper.override_names())
+
+    # Calling cont() on c again should lead to use of the handle.
+    result = stepper.cont(self.c)
+    self.assertAllClose(6.0, result)
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+    # Override c:0.
+    stepper.override_tensor("c:0", 7.0)
+
+    # After the overriding, calling get_tensor_value() on c:0 should yield the
+    # overriding value.
+    self.assertEqual(7.0, stepper.get_tensor_value("c:0"))
+
+    # Now c:0 should have only an override value, but no cached handle, because
+    # the handle should have been invalidated.
+    self.assertEqual([], stepper.handle_names())
+    self.assertEqual(["c:0"], stepper.override_names())
+
+    # Run a downstream tensor after the value override.
+    result = stepper.cont(self.e)
+    self.assertAllClose(28.0, result)  # Should reflect the overriding value.
+
+    # Should use override, instead of the handle.
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_OVERRIDE
+    }, stepper.last_feed_types())
+
+  def testOverrideValueTwice(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    # Override once.
+    stepper.override_tensor("c:0", 7.0)
+    self.assertAllClose(28.0, stepper.cont(self.e))
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_OVERRIDE
+    }, stepper.last_feed_types())
+
+    self.assertEqual(["e:0"], stepper.handle_names())
+    self.assertEqual(["c:0"], stepper.override_names())
+
+    # Calling cont(self.e) again. This time the cached tensor handle of e
+    # should be used.
+    self.assertEqual(28.0, stepper.cont(self.e))
+    self.assertEqual({
+        "e:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+    # Override c again. This should have invalidated the cache for e.
+    stepper.override_tensor("c:0", 8.0)
+
+    self.assertEqual([], stepper.handle_names())
+    self.assertEqual(["c:0"], stepper.override_names())
+
+    self.assertAllClose(32.0, stepper.cont(self.e))
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_OVERRIDE
+    }, stepper.last_feed_types())
+
+  def testRemoveOverrideValue(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    result = stepper.cont(self.c)
+    self.assertAllClose(6.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+
+    # The previous cont() step should have generated a cached tensor handle.
+    self.assertEqual(["c:0"], stepper.handle_names())
+
+    # Override c:0.
+    stepper.override_tensor("c:0", 7.0)
+
+    # The overriding should have invalidated the tensor handle.
+    self.assertEqual([], stepper.handle_names())
+    self.assertEqual(["c:0"], stepper.override_names())
+
+    result = stepper.cont(self.e)
+    self.assertAllClose(28.0, result)  # Should reflect the overriding value.
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_OVERRIDE
+    }, stepper.last_feed_types())
+
+    # The handle to tensor e:0 should have been cached, even though its
+    # transitive closure contains an override.
+    self.assertIn("e:0", stepper.handle_names())
+
+    # Remove the override.
+    stepper.remove_override("c:0")
+    # c:0 should not be in the overrides anymore.
+    self.assertEqual([], stepper.override_names())
+
+    # Removing the override should have invalidated the tensor handle for e.
+    self.assertNotIn("e:0", stepper.handle_names())
+
+    # Should reflect the non-overriding value.
+    self.assertAllClose(24.0, stepper.cont(self.e))
+
+    # This time, the handle to tensor e:0 should have been cached again, even
+    # though its transitive closure previously contained an override.
+    self.assertIn("e:0", stepper.handle_names())
+
+    # Calling cont(self.e) again should have used the tensor handle to e:0.
+    self.assertAllClose(24.0, stepper.cont(self.e))
+    self.assertEqual({
+        "e:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+  def testOverrideAndContToSameTensor(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    result = stepper.cont(self.c)
+    self.assertAllClose(6.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+    self.assertEqual(["c:0"], stepper.handle_names())
+
+    self.assertAllClose(6.0, stepper.cont(self.c))
+
+    # The last cont() call should use the tensor handle directly.
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+    # Override c:0.
+    stepper.override_tensor("c:0", 7.0)
+
+    # As a result of the override, the tensor handle should have been
+    # invalidated.
+    self.assertEqual([], stepper.handle_names())
+
+    result = stepper.cont(self.c)
+    self.assertAllClose(7.0, result)
+
+    self.assertEqual({
+        "c:0": NodeStepper.FEED_TYPE_OVERRIDE
+    }, stepper.last_feed_types())
+
+  def testFinalizeWithPreviousOverrides(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    stepper.override_tensor("a/read:0", 20.0)
+    self.assertEqual(["a/read:0"], stepper.override_names())
+
+    # Should reflect the overriding value.
+    self.assertAllClose(24000.0, stepper.cont("e:0"))
+    self.assertEqual({
+        "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE
+    }, stepper.last_feed_types())
+
+    # Finalize call should have ignored the overriding value.
+    self.assertAllClose(24.0, stepper.finalize())
+
+  def testRemoveNonexistentOverrideValue(self):
+    stepper = NodeStepper(self.sess, self.e)
+    self.assertEqual([], stepper.override_names())
+
+    with self.assertRaisesRegexp(
+        ValueError, "No overriding value exists for tensor \"c:0\""):
+      stepper.remove_override("c:0")
+
+  def testAttemptToOverrideInvalidTensor(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    with self.assertRaisesRegexp(ValueError, "Cannot override tensor \"f:0\""):
+      stepper.override_tensor("f:0", 42.0)
+
+  def testInvalidOverrideArgumentType(self):
+    stepper = NodeStepper(self.sess, self.e)
+
+    with self.assertRaisesRegexp(TypeError, "Expected type str; got type"):
+      stepper.override_tensor(self.a, 42.0)
+
+
+class StepperTestWithPlaceHolders(test_util.TensorFlowTestCase):
+
+  def setUp(self):
+    self.ph0 = tf.placeholder(tf.float32, shape=(2, 2), name="ph0")
+    self.ph1 = tf.placeholder(tf.float32, shape=(2, 1), name="ph1")
+
+    self.x = tf.matmul(self.ph0, self.ph1, name="x")
+    self.y = tf.add(self.x, self.ph1, name="y")
+
+    self.sess = tf.Session()
+
+  def tearDown(self):
+    tf.reset_default_graph()
+
+  def testContWithPlaceholders(self):
+    stepper = NodeStepper(
+        self.sess,
+        self.y,
+        feed_dict={
+            self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
+            self.ph1: [[-1.0], [0.5]]
+        })
+
+    self.assertEqual(["ph0:0", "ph1:0", "x:0", "y:0"],
+                     stepper.sorted_transitive_closure())
+
+    result = stepper.cont(self.x)
+    self.assertAllClose([[0.0], [5.5]], result)
+    self.assertEqual({
+        "ph0:0": NodeStepper.FEED_TYPE_CLIENT,
+        "ph1:0": NodeStepper.FEED_TYPE_CLIENT,
+    }, stepper.last_feed_types())
+
+    self.assertEqual(["x:0"], stepper.handle_names())
+
+    result = stepper.cont(self.y)
+    self.assertAllClose([[-1.0], [6.0]], result)
+    self.assertEqual({
+        "x:0": NodeStepper.FEED_TYPE_HANDLE,
+        "ph1:0": NodeStepper.FEED_TYPE_CLIENT,
+    }, stepper.last_feed_types())
+
+  def testAttemptToContToPlaceholder(self):
+    stepper = NodeStepper(
+        self.sess,
+        self.y,
+        feed_dict={
+            self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
+            self.ph1: [[-1.0], [0.5]]
+        })
+
+    with self.assertRaisesRegexp(ValueError,
+                                 r"Should not call cont\(\) on a Placeholder"):
+      stepper.cont(self.ph0)
+
+
+class StepperBackwardRunTest(test_util.TensorFlowTestCase):
+
+  def setUp(self):
+    """Test setup.
+
+    Structure of the forward graph:
+              f
+             | |
+        -----   -----
+        |           |
+        d           e
+       | |         | |
+    ---   ---------  ---
+    |         |        |
+    a         b        c
+
+    Construct a backward graph using the GradientDescentOptimizer.
+    """
+
+    self.a = tf.Variable(1.0, name="a")
+    self.b = tf.Variable(2.0, name="b")
+    self.c = tf.Variable(4.0, name="c")
+    self.d = tf.mul(self.a, self.b, name="d")
+    self.e = tf.mul(self.b, self.c, name="e")
+    self.f = tf.mul(self.d, self.e, name="f")
+
+    # Gradient descent optimizer that minimizes f.
+    tf.train.GradientDescentOptimizer(0.01).minimize(self.f, name="optim")
+
+    self.sess = tf.Session()
+    self.sess.run(tf.initialize_all_variables())
+
+  def tearDown(self):
+    tf.reset_default_graph()
+
+  def testContToUpdateA(self):
+    stepper = NodeStepper(self.sess, "optim")
+
+    result = stepper.cont("a:0")
+    self.assertAllClose(1.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+
+    result = stepper.cont("optim/learning_rate:0")
+    self.assertAllClose(0.01, result)
+    self.assertEqual({}, stepper.last_feed_types())
+
+    # Before any cont calls on ApplyGradientDescent, there should be no "dirty"
+    # variables.
+    self.assertEqual(set(), stepper.dirty_variables())
+
+    # First, call cont() on one of the two control inputs to optim.
+    result = stepper.cont("optim/update_a/ApplyGradientDescent")
+
+    # Now variable a should have been marked as dirty due to the update
+    # by optim/update_a/ApplyGradientDescent.
+    self.assertEqual({"a:0"}, stepper.dirty_variables())
+    self.assertIsNone(result)
+    self.assertEqual({
+        "optim/learning_rate:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+    # Check that Variable "a" has been updated properly, but "b", "c" and "d"
+    # remain the same.
+    # For backprop on Variable a:
+    #   Because f = a * b * b * c, df / da = b * b * c.
+    #   1.0 - learning_rate * b * b * c
+    #     = 1.0 -  0.01 * 2.0 * 2.0 * 4.0 = 0.84.
+    self.assertAllClose(0.84, self.sess.run(self.a))
+    self.assertAllClose(2.0, self.sess.run(self.b))
+    self.assertAllClose(4.0, self.sess.run(self.c))
+
+  def testContToUpdateB(self):
+    stepper = NodeStepper(self.sess, "optim")
+
+    result = stepper.cont("optim/update_b/ApplyGradientDescent")
+    self.assertIsNone(result)
+    self.assertEqual(set(["b:0"]), stepper.dirty_variables())
+
+    # For backprop on Variable b:
+    #   Because f = a * b * b * c, df / db = 2 * a * b * c.
+    #   2.0 - learning_rate * 2 * a * b * c
+    #     = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
+    self.assertAllClose(1.0, self.sess.run(self.a))
+    self.assertAllClose(1.84, self.sess.run(self.b))
+    self.assertAllClose(4.0, self.sess.run(self.c))
+
+  def testContAfterUpdateWithoutRestoringVariableValue(self):
+    stepper = NodeStepper(self.sess, "optim")
+
+    # First, update Variable a from 1.0 to 0.84.
+    result = stepper.cont("optim/update_a/ApplyGradientDescent",
+                          restore_variable_values=True)
+    self.assertIsNone(result)
+    self.assertEqual(set(["a:0"]), stepper.dirty_variables())
+    self.assertAllClose(0.84, self.sess.run(self.a))
+    self.assertAllClose(2.0, self.sess.run(self.b))
+    self.assertAllClose(4.0, self.sess.run(self.c))
+
+    # Second, update Variable b without restoring variable values.
+    result = stepper.cont(
+        "optim/update_b/ApplyGradientDescent", restore_variable_values=False)
+    self.assertIsNone(result)
+    # For the backprop on Variable b under the updated value of a:
+    #   2.0 - learning_rate * 2 * a' * b * c
+    #     = 2.0 - 0.01 * 2 * 0.84 * 2.0 * 4.0 = 1.8656
+    self.assertAllClose(0.84, self.sess.run(self.a))
+    self.assertAllClose(1.8656, self.sess.run(self.b))
+    self.assertAllClose(4.0, self.sess.run(self.c))
+
+  def testUpdateTwiceRestoreVariable(self):
+    stepper = NodeStepper(self.sess, "optim")
+
+    result = stepper.cont("optim/update_a/ApplyGradientDescent",
+                          restore_variable_values=True)
+    self.assertIsNone(result)
+    self.assertEqual({"a:0"}, stepper.dirty_variables())
+
+    result = stepper.cont("optim/update_b/ApplyGradientDescent",
+                          restore_variable_values=True)
+    self.assertIsNone(result)
+    # Variable a should have been restored and hence is no longer dirty.
+    # Variable b should have been marked as dirty.
+    self.assertEqual({"b:0"}, stepper.dirty_variables())
+
+    # The result of the update should be identical to the case in which only
+    # update_b is run.
+    self.assertAllClose(1.0, self.sess.run(self.a))
+    self.assertAllClose(1.84, self.sess.run(self.b))
+    self.assertAllClose(4.0, self.sess.run(self.c))
+
+  def testSelectiveHandleUsageDependingOnTransitiveCleanliness(self):
+    """Test tensor handlers are using only during clean transitive closure.
+
+    "clean" means no Variables have been updated by preceding cont() calls.
+    """
+
+    stepper = NodeStepper(self.sess, "optim")
+
+    # First, call cont() on the two intermediate-level tensors: d and e.
+    result = stepper.cont("d:0")
+    self.assertAllClose(2.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+    self.assertEqual(set(), stepper.dirty_variables())
+
+    # Next, cont() to e:0; no Variables have been made dirty so far.
+    result = stepper.cont("e:0")
+    self.assertAllClose(8.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+    self.assertEqual(set(), stepper.dirty_variables())
+
+    # Now run update_a, so as to make Variable a dirty.
+    result = stepper.cont("optim/update_a/ApplyGradientDescent",
+                          restore_variable_values=True)
+    self.assertIsNone(result)
+    self.assertEqual({"a:0"}, stepper.dirty_variables())
+
+    # Now, run update_b.
+    result = stepper.cont("optim/update_b/ApplyGradientDescent",
+                          restore_variable_values=True)
+    self.assertIsNone(result)
+
+    # The last cont() run should have used the handle of tensor e, but not the
+    # handle of tensor d, because the transitive closure of e is clean, whereas
+    # that of d is dirty due to the update to a in the previous cont() call.
+    self.assertEqual({
+        "e:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+    # The result of update_b should be identical to the case in which no
+    # other update_* cont() calls have occurred before.
+    self.assertAllClose(1.0, self.sess.run(self.a))
+    self.assertAllClose(1.84, self.sess.run(self.b))
+    self.assertAllClose(4.0, self.sess.run(self.c))
+
+  def testFinalize(self):
+    """Test finalize() to restore variables and run the original fetch."""
+
+    stepper = NodeStepper(self.sess, "optim")
+
+    # Invoke update_b before calling finalize.
+    stepper.cont("optim/update_b/ApplyGradientDescent",
+                 restore_variable_values=True)
+
+    result = stepper.finalize()
+    self.assertIsNone(result)
+
+    # The results of the Variable updates should be the same as if no cont()
+    # call has occurred on update_b.
+    self.assertAllClose(0.84, self.sess.run(self.a))
+    self.assertAllClose(1.84, self.sess.run(self.b))
+    self.assertAllClose(3.96, self.sess.run(self.c))
+
+  def testOverrideThenContToUpdate(self):
+    """Test cont() to update nodes after overriding tensor values."""
+
+    stepper = NodeStepper(self.sess, "optim")
+
+    result = stepper.cont("d:0")
+    self.assertAllClose(2.0, result)
+    self.assertEqual({}, stepper.last_feed_types())
+    self.assertEqual(set(), stepper.dirty_variables())
+    self.assertEqual(["d:0"], stepper.handle_names())
+
+    # Override the value of a/read:0 from 1.0 to 10.0.
+    stepper.override_tensor("a/read:0", 10.0)
+
+    self.assertEqual(["a/read:0"], stepper.override_names())
+
+    result = stepper.cont("optim/update_c/ApplyGradientDescent",
+                          restore_variable_values=True)
+    self.assertIsNone(result)
+
+    # The last cont() call should have not used the tensor handle to d:0,
+    # because the transitive closure of d:0 contains an override tensor.
+    self.assertEqual({
+        "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE
+    }, stepper.last_feed_types())
+
+    # The tensor handle to d:0 should have been removed due to the dirty
+    # transitive closure.
+    self.assertEqual([], stepper.handle_names())
+
+    # For this backprop on c, the overriding value of a/read:0 should have been
+    # used:
+    #   4.0 - learning_rate * a * b * b
+    #     = 4.0 - 0.01 * 10.0 * 2.0 * 2.0 = 3.6.
+    self.assertAllClose(3.6, self.sess.run(self.c))
+
+    # Now remove the overriding value of a/read:0.
+    stepper.remove_override("a/read:0")
+    self.assertEqual([], stepper.override_names())
+
+    # Obtain the tensor handle to d:0 again.
+    result = stepper.cont("d:0")
+    self.assertAllClose(2.0, result)
+    self.assertEqual(["d:0"], stepper.handle_names())
+
+    # Then call update_c again, without restoring c.
+    result = stepper.cont(
+        "optim/update_c/ApplyGradientDescent", restore_variable_values=False)
+    self.assertIsNone(result)
+
+    # This time, the d:0 tensor handle should have been used, because its
+    # transitive closure is clean.
+    self.assertEqual({
+        "d:0": NodeStepper.FEED_TYPE_HANDLE
+    }, stepper.last_feed_types())
+
+    # For this backprop on c, the non-overriding value of a/read:0 (1.0)
+    # should have been used, since the override has been removed:
+    #   3.6 - learning_rate * a * b * b
+    #     = 3.6 - 0.01 * 1.0 * 2.0 * 2.0 = 3.56.
+    self.assertAllClose(3.56, self.sess.run(self.c))
+
+
+if __name__ == "__main__":
+  googletest.main()
diff --git a/tensorflow/python/framework/docs.py b/tensorflow/python/framework/docs.py
index 6cc8ab34143..442b8033b42 100644
--- a/tensorflow/python/framework/docs.py
+++ b/tensorflow/python/framework/docs.py
@@ -85,7 +85,7 @@ class Index(Document):
     print("# TensorFlow Python reference documentation", file=f)
     print("", file=f)
     fullname_f = lambda name: self._members[name][0]
-    anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name))
+    anchor_f = lambda name: get_anchor(self._module_to_name, fullname_f(name))
 
     for filename, library in self._filename_to_library_map:
       sorted_names = sorted(library.mentioned, key=lambda x: (str.lower(x), x))
@@ -142,7 +142,7 @@ def collect_members(module_to_name, exclude=()):
   return members
 
 
-def _get_anchor(module_to_name, fullname):
+def get_anchor(module_to_name, fullname):
   """Turn a full member name into an anchor.
 
   Args:
@@ -416,7 +416,7 @@ class Library(Document):
     heading = prefix + " `" + fullname
     if not isinstance(func, property):
       heading += self._generate_signature_for_function(func)
-    heading += "` {#%s}" % _get_anchor(self._module_to_name, fullname)
+    heading += "` {#%s}" % get_anchor(self._module_to_name, fullname)
     print(heading, file=f)
     print("", file=f)
     self._print_formatted_docstring(inspect.getdoc(func), f)
@@ -444,7 +444,7 @@ class Library(Document):
       print("- - -", file=f)
       print("", file=f)
       print("%s `class %s` {#%s}" % (prefix, name,
-                                     _get_anchor(self._module_to_name, name)),
+                                     get_anchor(self._module_to_name, name)),
             file=f)
       print("", file=f)
       self._write_class_markdown_to_file(f, name, member)
diff --git a/tensorflow/python/framework/framework_lib.py b/tensorflow/python/framework/framework_lib.py
index 16b9f347e49..4f44041df73 100644
--- a/tensorflow/python/framework/framework_lib.py
+++ b/tensorflow/python/framework/framework_lib.py
@@ -95,6 +95,7 @@ from tensorflow.python.framework.ops import convert_to_tensor
 from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices
 from tensorflow.python.framework.random_seed import get_seed
 from tensorflow.python.framework.random_seed import set_random_seed
+from tensorflow.python.framework.subscribe import subscribe
 from tensorflow.python.framework.importer import import_graph_def
 
 # Needed when you defined a new Op in C++.
diff --git a/tensorflow/python/framework/gen_docs_combined.py b/tensorflow/python/framework/gen_docs_combined.py
index 83d2751f214..ceaf81517ae 100644
--- a/tensorflow/python/framework/gen_docs_combined.py
+++ b/tensorflow/python/framework/gen_docs_combined.py
@@ -67,6 +67,7 @@ def module_names():
       "tf.contrib.ffmpeg",
       "tf.contrib.framework",
       "tf.contrib.graph_editor",
+      "tf.contrib.integrate",
       "tf.contrib.layers",
       "tf.contrib.learn",
       "tf.contrib.learn.monitors",
@@ -220,6 +221,7 @@ def all_libraries(module_to_name, members, documented):
       library("contrib.framework", "Framework (contrib)", tf.contrib.framework),
       library("contrib.graph_editor", "Graph Editor (contrib)",
               tf.contrib.graph_editor),
+      library("contrib.integrate", "Integrate (contrib)", tf.contrib.integrate),
       library("contrib.layers", "Layers (contrib)", tf.contrib.layers),
       library("contrib.learn", "Learn (contrib)", tf.contrib.learn),
       library("contrib.learn.monitors", "Monitors (contrib)",
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index 1f8aeefba27..56018f735a4 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -43,6 +43,7 @@ from tensorflow.python.framework import tensor_shape
 from tensorflow.python.framework import versions
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import compat
+from tensorflow.python.util import decorator_utils
 
 
 def _override_helper(clazz_object, operator, func):
@@ -3823,10 +3824,18 @@ class GraphKeys(object):
 
   The following standard keys are defined:
 
-  * `VARIABLES`: the `Variable` objects that comprise a model, and
-    must be saved and restored together. See
-    [`tf.all_variables()`](../../api_docs/python/state_ops.md#all_variables)
+  * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
+    across a distributed environment (model variables are a subset of these). See
+    [`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables)
     for more details.
+    Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
+    and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
+  * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
+    machine. Usually used for temporary variables, like counters.
+    Note: use `tf.contrib.framework.local_variable` to add to this collection.
+  * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
+    model for inference (feed forward). Note: use
+    `tf.contrib.framework.model_variable` to add to this collection.
   * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
     be trained by an optimizer. See
     [`tf.trainable_variables()`](../../api_docs/python/state_ops.md#trainable_variables)
@@ -3850,16 +3859,17 @@ class GraphKeys(object):
   * `ACTIVATIONS`: activations of neural network layers
   """
 
-  # Key to collect Variable objects that must be saved and restored
-  # by the model.
-  VARIABLES = "variables"
-  # Key to collect Variable objects that will be trained by the
-  # optimizers.
-  TRAINABLE_VARIABLES = "trainable_variables"
-  # Key to collect local variables that are not saved/restored.
+  # Key to collect Variable objects that are global (shared across machines).
+  # Default collection for all variables, except local ones.
+  GLOBAL_VARIABLES = "variables"
+  # Key to collect local variables that are local to the machine and are not
+  # saved/restored.
   LOCAL_VARIABLES = "local_variables"
   # Key to collect model variables defined by layers.
   MODEL_VARIABLES = "model_variables"
+  # Key to collect Variable objects that will be trained by the
+  # optimizers.
+  TRAINABLE_VARIABLES = "trainable_variables"
   # Key to collect summaries.
   SUMMARIES = "summaries"
   # Key to collect QueueRunners.
@@ -3909,6 +3919,13 @@ class GraphKeys(object):
   COND_CONTEXT = "cond_context"
   WHILE_CONTEXT = "while_context"
 
+  @decorator_utils.classproperty
+  def VARIABLES(cls):  # pylint: disable=no-self-argument
+    logging.warning("VARIABLES collection name is deprecated, "
+                    "please use GLOBAL_VARIABLES instead.\n"
+                    "VARIABLES will be removed after 2017-03-02.")
+    return cls.GLOBAL_VARIABLES
+
 
 def add_to_collection(name, value):
   """Wrapper for `Graph.add_to_collection()` using the default graph.
diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py
new file mode 100644
index 00000000000..53d299a976c
--- /dev/null
+++ b/tensorflow/python/framework/subscribe.py
@@ -0,0 +1,144 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Subscribe function."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+
+
+def _recursive_apply(tensors, apply_fn):
+  """Helper method to recursively apply a function to structure of tensors.
+
+  The structure of the tensors should take the form similar to fetches in
+  `tf.Session` and includes single `Tensor`, `list`, nested `list`, `tuple`,
+  `namedtuple`, or `dict`.
+
+  Args:
+    tensors: Single `Tensor`, `list`, nested `list`, `tuple`,
+      `namedtuple`, or `dict`.
+    apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
+  Returns:
+    Returns the modified tensors with the same structure.
+  Raises:
+    TypeError: If an unsupported type is found in the tensors structure.
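+
+  For example, `_recursive_apply({'a': [t1, (t2,)]}, fn)` returns
+  `{'a': [fn(t1), (fn(t2),)]}` (the names here are purely illustrative).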
+  """
+  tensors_type = type(tensors)
+  if tensors_type is ops.Tensor:
+    return apply_fn(tensors)
+  elif isinstance(tensors, (list, tuple)):
+    tensors = [_recursive_apply(t, apply_fn) for t in tensors]
+    if tensors_type is list:
+      return list(tensors)
+    elif tensors_type is tuple:
+      return tuple(tensors)
+    return tensors_type(*tensors)  # collections.namedtuple
+  elif tensors_type is dict:
+    # Use items() rather than iteritems() so that this works on both Python 2
+    # and Python 3.
+    return dict([(k, _recursive_apply(v, apply_fn))
+                 for k, v in tensors.items()])
+  else:
+    raise TypeError('_recursive_apply argument %r has invalid type %r' %
+                    (tensors, tensors_type))
+
+
+def _control_outputs(op):
+  """Returns the control_input consumers for the supplied `Operation`.
+
+  Args:
+    op: The `Operation` to find consumers of.
+  Yields:
+    Ops that have op as a control dependency.
+  """
+  for o in op.graph.get_operations():
+    if op in o.control_inputs:
+      yield o
+
+
+def _subscribe(tensor, side_effects):
+  """Helper method that subscribes a single tensor to a list of side_effects.
+
+  Args:
+    tensor: The `tf.Tensor` to be subscribed.
+    side_effects: List of side-effect functions; see subscribe() for details.
+  Returns:
+    The modified replacement for the passed-in tensor, which triggers the
+    side effects.
+  """
+  update_input = []
+  for consumer_op in list(tensor.consumers()):  # explicit copy
+    update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))
+
+  update_control_input = list(_control_outputs(tensor.op))
+
+  # Trailing slash on name scope to replace the scope.
+  name_scope = tensor.op.name + '/subscription/'
+  with ops.name_scope(name_scope):
+    outs = []
+    for s in side_effects:
+      outs += s(tensor)
+
+    with ops.control_dependencies(outs):
+      out = array_ops.identity(tensor)
+
+  for consumer_op, index in update_input:
+    consumer_op._update_input(index, out)  # pylint: disable=protected-access
+
+  for consumer_op in update_control_input:
+    consumer_op._control_inputs.remove(tensor.op)  # pylint: disable=protected-access
+    consumer_op._control_inputs.append(out.op)  # pylint: disable=protected-access
+    consumer_op._recompute_node_def()  # pylint: disable=protected-access
+
+  return out
+
+
+def subscribe(tensors, side_effects):
+  """Subscribe to a tensor.
+
+  This method will attach side effect graphs to a given set
+  of tensors. Set of tensors follows from session.run and supports
+  single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
+  returns the tensors in the same passed in structure, but as clones with
+  side effects applied. The supplied side effect graphs are specified
+  as a constructor function which takes the target tensor and
+  constructs a side effect graph and returns a list of ops that should
+  be control dependencies on fetching the tensor. It will append
+  'subscription' to the name scope of the tensor for every node in
+  the side effect graph. These control dependencies are what trigger
+  the side effects. Subscribe will construct the additions to your
+  graph and return the created identity tensor downstream of the control
+  dependencies. Use these tensors as you would normally in the rest of
+  your tensorflow code.
+
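+  For example, to run a Python callback over a tensor's value each time it
+  is fetched (a minimal sketch mirroring subscribe_test.py; `log_value` is a
+  hypothetical callback):
+
+  ```python
+  def log_value(v):
+    print(v)
+    return v
+
+  c = subscribe(c, lambda t: tf.py_func(log_value, [t], [t.dtype]))
+  sess.run(c)  # log_value sees the value of c on every fetch.
+  ```
+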
+  Args:
+    tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
+      follows from `Session.run` and supports single `Tensor`, `list`, nested
+      `list`, `tuple`, `namedtuple`, or `dict`.
+    side_effects: Function(s) that take a `Tensor`, construct a subgraph, and
+      return a nonempty list of control dependencies. This can be a single
+      function or a list of functions.
+  Returns:
+    Subscribed tensors, which are identity copies of the passed-in tensors
+      in the same structure, with the graph modified so that they are
+      downstream of the control dependencies for the side-effect graphs.
+      Use these functionally equivalent tensors instead of the passed-in
+      tensors for further construction or running.
+  """
+  if not hasattr(side_effects, '__iter__'):
+    side_effects = [side_effects]
+  return _recursive_apply(tensors, lambda t: _subscribe(t, side_effects))
diff --git a/tensorflow/python/framework/subscribe_test.py b/tensorflow/python/framework/subscribe_test.py
new file mode 100644
index 00000000000..8371c2cfc43
--- /dev/null
+++ b/tensorflow/python/framework/subscribe_test.py
@@ -0,0 +1,59 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Tests for tf.subscribe."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+from tensorflow.python.framework import subscribe
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import googletest
+
+
+class SubscribeTest(test_util.TensorFlowTestCase):
+
+  def testSideEffect(self):
+    a = tf.constant(1)
+    b = tf.constant(1)
+    c = tf.add(a, b)
+    with tf.control_dependencies([c]):
+      d = tf.constant(42)
+    n = tf.neg(c)
+
+    shared = []
+
+    def sub(t):
+      shared.append(t)
+      return t
+
+    c = subscribe.subscribe(c, lambda t: tf.py_func(sub, [t], [t.dtype]))
+
+    with self.test_session() as sess:
+      c_out = sess.run([c])
+      n_out = sess.run([n])
+      d_out = sess.run([d])
+
+    self.assertEqual(n_out, [-2])
+    self.assertEqual(c_out, [2])
+    self.assertEqual(d_out, [42])
+    self.assertEqual(shared, [2, 2, 2])
+
+
+if __name__ == '__main__':
+  googletest.main()
diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD
index fe74b3426c5..0100e6b3268 100644
--- a/tensorflow/python/kernel_tests/BUILD
+++ b/tensorflow/python/kernel_tests/BUILD
@@ -220,7 +220,7 @@ cuda_py_test(
     additional_deps = ["//tensorflow:tensorflow_py"],
 )
 
-tf_py_test(
+cuda_py_test(
     name = "parameterized_truncated_normal_op_test",
     size = "small",
     srcs = ["parameterized_truncated_normal_op_test.py"],
@@ -272,6 +272,13 @@ tf_py_test(
     additional_deps = ["//tensorflow:tensorflow_py"],
 )
 
+tf_py_test(
+    name = "scatter_nd_ops_test",
+    size = "medium",
+    srcs = ["scatter_nd_ops_test.py"],
+    additional_deps = ["//tensorflow:tensorflow_py"],
+)
+
 tf_py_test(
     name = "segment_reduction_ops_test",
     size = "small",
diff --git a/tensorflow/python/kernel_tests/gather_nd_op_test.py b/tensorflow/python/kernel_tests/gather_nd_op_test.py
index f3fd47381a9..13b3bec3c0f 100644
--- a/tensorflow/python/kernel_tests/gather_nd_op_test.py
+++ b/tensorflow/python/kernel_tests/gather_nd_op_test.py
@@ -53,20 +53,20 @@ class GatherNdTest(tf.test.TestCase):
       gather_nd_ok_t = tf.gather_nd(params, indices_empty)
       gather_nd_ok_val = gather_nd_ok_t.eval()
       self.assertEqual([0], gather_nd_ok_t.get_shape())
-      self.assertAllEqual(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
+      self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
 
       indices_empty = np.empty((0, 1), dtype=np.int32)
       gather_nd_ok_t = tf.gather_nd(params, indices_empty)
       gather_nd_ok_val = gather_nd_ok_t.eval()
       self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
-      self.assertAllEqual(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
+      self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
 
       params_empty = np.empty((0, 3), dtype=np.float32)
       indices_empty = np.empty((0, 2), dtype=np.int32)
       gather_nd_ok_t = tf.gather_nd(params_empty, indices_empty)
       gather_nd_ok_val = gather_nd_ok_t.eval()
       self.assertEqual([0], gather_nd_ok_t.get_shape())
-      self.assertAllEqual(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
+      self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
 
       params_empty = np.empty((0, 3), dtype=np.float32)
       indices_nonempty = np.zeros((1, 2), dtype=np.int32)
@@ -74,7 +74,7 @@ class GatherNdTest(tf.test.TestCase):
       with self.assertRaisesOpError(
           r"Requested more than 0 entries, but params is empty."):
         gather_nd_break_t.eval()
-      self.assertAllEqual(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
+      self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
 
   def testIndexScalar(self):
     with self.test_session(use_gpu=self.use_gpu):
@@ -184,11 +184,11 @@ class GatherNdTest(tf.test.TestCase):
     indices = tf.placeholder(tf.int32)
     gather_nd_t = tf.gather_nd(params, indices)
     shape = gather_nd_t.get_shape()
-    self.assertEqual(shape.ndims, None)
-    self.assertEqual(shape[0].value, None)
+    self.assertEqual(None, shape.ndims)
+    self.assertEqual(None, shape[0].value)
 
   def testBadIndices(self):
-    with self.test_session(use_gpu=False):
+    with self.test_session():
       params = [0, 1, 2]
       indices = [[[0], [7]]]  # Make this one higher rank
       gather_nd = tf.gather_nd(params, indices)
@@ -198,7 +198,7 @@ class GatherNdTest(tf.test.TestCase):
         gather_nd.eval()
 
   def testBadIndicesWithSlices(self):
-    with self.test_session(use_gpu=False):
+    with self.test_session():
       params = [[0, 1, 2]]
       indices = [[[0], [0], [1]]]  # Make this one higher rank
       gather_nd = tf.gather_nd(params, indices)
@@ -207,6 +207,62 @@ class GatherNdTest(tf.test.TestCase):
           r"\(shape: \[1,3\]\)"):
         gather_nd.eval()
 
+  def testGradientsRank2Elements(self):
+    indices = tf.constant([[0, 0], [1, 1]], dtype=tf.int32)
+    inputs = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
+    outputs = tf.gather_nd(inputs, indices)
+
+    grad_vals = tf.constant([1, 2], dtype=tf.float64)
+    grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+    expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
+    with self.test_session():
+      self.assertAllEqual(expected_grads, grads.eval())
+
+  def testGradientsRank2Slices(self):
+    indices = tf.constant([[1], [0]], dtype=tf.int32)
+    inputs = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
+    outputs = tf.gather_nd(inputs, indices)
+
+    grad_vals = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
+    grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+    expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
+    with self.test_session():
+      self.assertAllEqual(expected_grads, grads.eval())
+
+  def testGradientsRank3Elements(self):
+    indices = tf.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=tf.int32)
+    inputs = tf.constant([[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=tf.float64)
+    outputs = tf.gather_nd(inputs, indices)
+
+    grad_vals = tf.constant(
+        [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=tf.float64)
+    grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+    expected_grads = np.array(
+        [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
+    with self.test_session():
+      self.assertAllEqual(expected_grads, grads.eval())
+
+  def testGradientsRank2SlicesWithEmptySpace(self):
+    indices = tf.constant([[2], [0], [5]], dtype=tf.int32)
+    inputs = tf.constant(
+        [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
+         [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
+         [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]],
+        dtype=tf.float64)
+    outputs = tf.gather_nd(inputs, indices)
+    grad_vals = tf.constant(
+        [[1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2],
+         [3, 3, 3, 3, 3, 3, 3, 3, 3]],
+        dtype=tf.float64)
+    grads = tf.gradients([outputs], [inputs], [grad_vals])[0]
+    expected_grads = np.array(
+        [[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0],
+         [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
+         [0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
+        dtype=np.float64)
+    with self.test_session():
+      self.assertAllEqual(expected_grads, grads.eval())
+
 
 class GatherNdGpuTest(GatherNdTest):
   use_gpu = True
diff --git a/tensorflow/python/kernel_tests/linalg_grad_test.py b/tensorflow/python/kernel_tests/linalg_grad_test.py
index 062b714a103..a61e8ed8c2f 100644
--- a/tensorflow/python/kernel_tests/linalg_grad_test.py
+++ b/tensorflow/python/kernel_tests/linalg_grad_test.py
@@ -163,8 +163,19 @@ if __name__ == '__main__':
                 _GetMatrixUnaryFunctorGradientTest(tf.matrix_inverse,
                                                    dtype, shape))
         setattr(MatrixUnaryFunctorGradientTest,
-                'testMatrixUnaryFunctorGradient_' + name,
-                _GetMatrixUnaryFunctorGradientTest(tf.matrix_determinant,
-                                                   dtype, shape))
+                'testMatrixDeterminantGradient_' + name,
+                _GetMatrixUnaryFunctorGradientTest(tf.matrix_determinant, dtype,
+                                                   shape))
+
+  # Tests for gradients of matrix_solve_ls
+  for dtype in np.float32, np.float64:
+    for rows in 2, 5, 10:
+      for cols in 2, 5, 10:
+        for l2_regularization in 0.0, 0.001, 1.0:
+          shape = (rows, cols)
+          # Build a unique test name; reusing `name` from the loop above would
+          # make every combination overwrite a single test method. Bind the
+          # regularizer via a default argument so each generated test uses its
+          # own value.
+          name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
+                               l2_regularization)
+          setattr(MatrixBinaryFunctorGradientTest,
+                  'testMatrixSolveLsGradient_' + name,
+                  _GetMatrixBinaryFunctorGradientTest(
+                      lambda a, b, l=l2_regularization: tf.matrix_solve_ls(
+                          a, b, l2_regularizer=l),
+                      dtype, shape))
 
   tf.test.main()
diff --git a/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py b/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py
index 8d41029c0b5..1c09949598a 100644
--- a/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py
+++ b/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py
@@ -97,10 +97,10 @@ def z_test(real, expected, i, num_samples):
 
 
 class ParameterizedTruncatedNormalTest(tf.test.TestCase):
-  use_gpu = False
+  _use_gpu = False
   z_limit = 6.0
 
-  # Stop at moment 20 to avoid numerical errors in the theoretical moments.
+  # Stop at moment 10 to avoid numerical errors in the theoretical moments.
   max_moment = 10
 
   def validateMoments(self, shape, mean, stddev, minval, maxval, seed=1618):
@@ -109,9 +109,11 @@ class ParameterizedTruncatedNormalTest(tf.test.TestCase):
       # Give up early if we are unable to import it.
       import scipy.stats  # pylint: disable=g-import-not-at-top,unused-variable
       tf.set_random_seed(seed)
-      with self.test_session(use_gpu=self.use_gpu):
-        samples = random_ops.parameterized_truncated_normal(
-            shape, mean, stddev, minval, maxval).eval()
+      with self.test_session(use_gpu=self._use_gpu):
+        samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
+                                                            minval,
+                                                            maxval).eval()
+        self.assertFalse(np.isnan(samples).any())
       moments = calculate_moments(samples, self.max_moment)
       expected_moments = TruncatedNormalMoments(mean, stddev, minval, maxval)
       num_samples = functools.reduce(lambda x, y: x * y, shape, 1)
@@ -131,9 +133,11 @@ class ParameterizedTruncatedNormalTest(tf.test.TestCase):
     try:
       import scipy.stats  # pylint: disable=g-import-not-at-top
       tf.set_random_seed(seed)
-      with self.test_session(use_gpu=self.use_gpu):
-        samples = random_ops.parameterized_truncated_normal(
-            shape, mean, stddev, minval, maxval).eval()
+      with self.test_session(use_gpu=self._use_gpu):
+        samples = random_ops.parameterized_truncated_normal(shape, mean, stddev,
+                                                            minval,
+                                                            maxval).eval()
+      self.assertFalse(np.isnan(samples).any())
       minval = max(mean - stddev * 10, minval)
       maxval = min(mean + stddev * 10, maxval)
       dist = scipy.stats.norm(loc=mean, scale=stddev)
@@ -173,8 +177,12 @@ class ParameterizedTruncatedNormalTest(tf.test.TestCase):
     self.validateKolmogorovSmirnov([10**5], 0.0, 0.1, 0.05, 0.10)
 
 
+class ParameterizedTruncatedNormalGpuTest(ParameterizedTruncatedNormalTest):
+  _use_gpu = True
+
+
 # Benchmarking code
-def parameterized_vs_naive(shape, num_iters):
+def parameterized_vs_naive(shape, num_iters, use_gpu=False):
   np.random.seed(1618)  # Make it reproducible.
 
   # No CSE/CF.
@@ -183,17 +191,29 @@ def parameterized_vs_naive(shape, num_iters):
       graph_options=tf.GraphOptions(optimizer_options=optimizer_options))
 
   with tf.Session(config=config) as sess:
-    param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
-    naive_op = tf.group(random_ops.truncated_normal(shape))
+    with tf.device("/cpu:0" if not use_gpu else None):
+      param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
+      naive_op = tf.group(random_ops.truncated_normal(shape))
 
+    # Burn-in to avoid session setup costs in the timing.
+    sess.run(param_op)
+    sess.run(param_op)
     param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
+    sess.run(naive_op)
+    sess.run(naive_op)
     naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
     return param_dt, naive_dt
 
 
 class TruncatedNormalBenchmark(tf.test.Benchmark):
 
-  def benchmarkParameterizedOpVsNaiveOp(self):
+  def benchmarkParameterizedOpVsNaiveOpCpu(self):
+    self._benchmarkParameterizedOpVsNaiveOp(False)
+
+  def benchmarkParameterizedOpVsNaiveOpGpu(self):
+    self._benchmarkParameterizedOpVsNaiveOp(True)
+
+  def _benchmarkParameterizedOpVsNaiveOp(self, use_gpu):
     num_iters = 50
     print(("Composition of new ParameterizedTruncatedNormalOp vs. "
            "naive TruncatedNormalOp [%d iters]") % num_iters)
@@ -201,16 +221,16 @@ class TruncatedNormalBenchmark(tf.test.Benchmark):
 
     for shape in [[10000, 100], [1000, 1000], [1000000], [100, 100, 100],
                   [20, 20, 20, 20]]:
-      p_dt, n_dt = parameterized_vs_naive(shape, num_iters)
+      p_dt, n_dt = parameterized_vs_naive(shape, num_iters, use_gpu)
       print("%s\t%.3f\t%.3f\t%.2f" % (shape, p_dt, n_dt, p_dt / n_dt))
 
       shape_str = "-".join(map(str, shape))
-      self.report_benchmark(name="parameterized_shape" + shape_str,
-                            iters=num_iters,
-                            wall_time=p_dt)
-      self.report_benchmark(name="naive_shape" + shape_str,
-                            iters=num_iters,
-                            wall_time=n_dt)
+      self.report_benchmark(
+          name="parameterized_shape" + shape_str,
+          iters=num_iters,
+          wall_time=p_dt)
+      self.report_benchmark(
+          name="naive_shape" + shape_str, iters=num_iters, wall_time=n_dt)
 
 
 if __name__ == "__main__":
diff --git a/tensorflow/python/kernel_tests/scatter_nd_ops_test.py b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
new file mode 100644
index 00000000000..7ff5a286c69
--- /dev/null
+++ b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
@@ -0,0 +1,380 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.ops.tf.scatter_nd."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+
+import numpy as np
+import tensorflow as tf
+
+
+def _AsType(v, vtype):
+  return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
+
+
+def _FlatInnerDims(tensor, ndims=2):
+  shape = list(tensor.shape)
+  return tensor.reshape([functools.reduce(lambda x, y: x * y,
+                                          shape[:-ndims + 1], 1)] +
+                        shape[-ndims + 1:])
+
+
+def _FlatOuterDims(tensor, ndims=2):
+  shape = list(tensor.shape)
+  return tensor.reshape(shape[:ndims - 1] +
+                        [functools.reduce(lambda x, y: x * y,
+                                          shape[ndims - 1:], 1)])
+
+
+def _NumpyScatterNd(ref, indices, updates, op):
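+  # Reference implementation used to check the tf.scatter_nd_* ops: flatten
+  # `indices` and `updates` to 2-D, apply `op` one update at a time, then
+  # restore the original shape of `ref`.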
+  ixdim = indices.shape[-1]
+  # Use integer division: with `from __future__ import division`, `/` returns
+  # a float, which would break the reshape below.
+  num_updates = indices.size // ixdim
+  total_nd = len(ref.shape)
+  slice_size = 1
+  for i in range(ixdim, total_nd):
+    slice_size *= ref.shape[i]
+  flat_indices = _FlatInnerDims(indices)
+  flat_updates = updates.reshape((num_updates, slice_size))
+  output_flat = _FlatOuterDims(ref, ixdim + 1)
+  for ix_updates, ix_output in enumerate(flat_indices):
+    ix_output = tuple(ix_output)
+    output_flat[ix_output] = op(output_flat[ix_output],
+                                flat_updates[ix_updates])
+  return output_flat.reshape(ref.shape)
+
+
+def _NumpyUpdate(ref, indices, updates):
+  return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
+
+
+def _NumpyAdd(ref, indices, updates):
+  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
+
+
+def _NumpySub(ref, indices, updates):
+  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
+
+
+def _NumpyMul(ref, indices, updates):
+  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
+
+
+def _NumpyDiv(ref, indices, updates):
+  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
+
+
+class ScatterTest(tf.test.TestCase):
+
+  def _VariableRankTest(self,
+                        np_scatter,
+                        tf_scatter,
+                        vtype,
+                        itype,
+                        use_gpu,
+                        repeat_indices=False):
+    np.random.seed(8)
+    ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
+    indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
+    with self.test_session(use_gpu=use_gpu):
+      for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
+        num_updates = indices_shape[0]
+        ixdim = indices_shape[-1]
+
+        indexable_area_shape = ()
+        for i in range(ixdim):
+          indexable_area_shape += (ref_shape[i],)
+        all_indices = [
+            list(coord)
+            for coord, _ in np.ndenumerate(
+                np.empty(indexable_area_shape, vtype))
+        ]
+        np.random.shuffle(all_indices)
+        indices = np.array(all_indices[:num_updates])
+
+        if num_updates > 1 and repeat_indices:
+          indices = indices[:num_updates // 2]
+          for _ in range(num_updates - num_updates // 2):
+            indices = np.append(
+                indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
+          np.random.shuffle(indices)
+        indices = _AsType(indices[:num_updates], itype)
+
+        updates_shape = (num_updates,)
+        for i in range(ixdim, len(ref_shape)):
+          updates_shape += (ref_shape[i],)
+        updates = _AsType(np.random.randn(*(updates_shape)), vtype)
+        ref = _AsType(np.random.randn(*(ref_shape)), vtype)
+
+        # Scatter via numpy
+        new = ref.copy()
+        np_scatter(new, indices, updates)
+        # Scatter via tensorflow
+        ref_var = tf.Variable(ref)
+        ref_var.initializer.run()
+        tf_scatter(ref_var, indices, updates).eval()
+        # Compare
+        self.assertAllClose(new, ref_var.eval())
+
+  def _VariableRankTests(self, np_scatter, tf_scatter):
+    for vtype in (np.float32, np.float64):
+      for itype in (np.int32, np.int64):
+        for use_gpu in (False, True):
+          self._VariableRankTest(np_scatter, tf_scatter, vtype, itype, use_gpu)
+
+  def testVariableRankUpdate(self):
+    self._VariableRankTests(_NumpyUpdate, tf.scatter_nd_update)
+
+  def testVariableRankAdd(self):
+    self._VariableRankTests(_NumpyAdd, tf.scatter_nd_add)
+
+  def testVariableRankSub(self):
+    self._VariableRankTests(_NumpySub, tf.scatter_nd_sub)
+
+  def testVariableRankMul(self):
+    self._VariableRankTests(_NumpyMul, tf.scatter_nd_mul)
+
+  def testVariableRankDiv(self):
+    self._VariableRankTests(_NumpyDiv, tf.scatter_nd_div)
+
+  def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
+    for vtype in (np.float32, np.float64):
+      for itype in (np.int32, np.int64):
+        for use_gpu in (False, True):
+          self._VariableRankTest(
+              np_scatter,
+              tf_scatter,
+              vtype,
+              itype,
+              use_gpu,
+              repeat_indices=True)
+
+  def testScatterRepeatIndices(self):
+    """This tests scatter_add using indices that repeat."""
+    self._ScatterRepeatIndicesTest(_NumpyAdd, tf.scatter_nd_add)
+    self._ScatterRepeatIndicesTest(_NumpySub, tf.scatter_nd_sub)
+    self._ScatterRepeatIndicesTest(_NumpyMul, tf.scatter_nd_mul)
+    self._ScatterRepeatIndicesTest(_NumpyDiv, tf.scatter_nd_div)
+
+  def testBooleanScatterUpdate(self):
+    with self.test_session(use_gpu=False) as session:
+      var = tf.Variable([True, False])
+      update0 = tf.scatter_nd_update(var, [[1]], [True])
+      update1 = tf.scatter_nd_update(
+          var, tf.constant(
+              [[0]], dtype=tf.int64), [False])
+      var.initializer.run()
+
+      session.run([update0, update1])
+
+      self.assertAllEqual([False, True], var.eval())
+
+  def testScatterOutOfRangeCpu(self):
+    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_mul,
+               tf.scatter_nd_div, tf.scatter_nd_update):
+      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
+      updates = np.array([-3, -4, -5]).astype(np.float32)
+      with self.test_session(use_gpu=False):
+        ref = tf.Variable(params)
+        ref.initializer.run()
+
+        # Indices all in range, no problem.
+        indices = np.array([[2], [0], [5]])
+        op(ref, indices, updates).eval()
+
+        # Test some out of range errors.
+        indices = np.array([[-1], [0], [5]])
+        with self.assertRaisesOpError(
+            r"Invalid indices: \[0,0\] = \[-1\] is not in \[0, 6\)"):
+          op(ref, indices, updates).eval()
+
+        indices = np.array([[2], [0], [6]])
+        with self.assertRaisesOpError(
+            r"Invalid indices: \[2,0\] = \[6\] is not in \[0, 6\)"):
+          op(ref, indices, updates).eval()
+
+  def testRank3ValidShape(self):
+    indices = tf.zeros([2, 2, 2], tf.int32)
+    updates = tf.zeros([2, 2, 2], tf.int32)
+    shape = np.array([2, 2, 2])
+    self.assertAllEqual(
+        tf.scatter_nd(indices, updates, shape).get_shape().as_list(), shape)
+
+    ref = tf.Variable(tf.zeros(shape, tf.int32))
+    self.assertAllEqual(
+        tf.scatter_nd_update(ref, indices, updates).get_shape().as_list(),
+        shape)
+
+  def testUndefinedIndicesShape(self):
+    indices = tf.placeholder(tf.int32, shape=None)
+    updates = tf.placeholder(tf.int32, shape=[2, 2, 2])
+    shape = tf.constant([2, 2, 2], tf.int32)
+    tf.scatter_nd(indices, updates, shape)
+
+  def testUndefinedUpdatesShape(self):
+    indices = tf.placeholder(tf.int32, shape=[2, 2, 2])
+    updates = tf.placeholder(tf.int32, shape=None)
+    shape = tf.constant([2, 2, 2], tf.int32)
+    tf.scatter_nd(indices, updates, shape)
+
+  def testUndefinedOutputShape(self):
+    indices = tf.placeholder(tf.int32, shape=[2, 2, 2])
+    updates = tf.placeholder(tf.int32, shape=[2, 2, 2])
+    shape = tf.placeholder(tf.int32, shape=[None])
+    tf.scatter_nd(indices, updates, shape)
+
+  def testEmptyOutputShape1(self):
+    indices = tf.zeros([2, 2, 2], tf.int32)
+    updates = tf.zeros([2, 2, 2], tf.int32)
+    shape = tf.constant([0, 3, 2], tf.int32)
+
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, "Indices and updates specified for empty output shape"):
+      tf.scatter_nd(indices, updates, shape)
+
+  def testEmptyOutputShape2(self):
+    indices = tf.placeholder(tf.int32, shape=None)
+    updates = tf.placeholder(tf.int32, shape=None)
+    shape = tf.constant([0, 3, 2], tf.int32)
+
+    with self.test_session():
+      tf.scatter_nd(indices, updates, shape).eval(feed_dict={
+          indices: np.zeros(
+              [2, 2, 2], dtype=np.int32),
+          updates: np.zeros(
+              [2, 2, 2], dtype=np.int32)
+      })
+
+  def testEmptyOutputShape3(self):
+    indices = tf.zeros([0], tf.int32)
+    updates = tf.zeros([0], tf.int32)
+    shape = tf.constant([0], tf.int32)
+    scatter = tf.scatter_nd(indices, updates, shape)
+
+    with self.test_session():
+      self.assertEqual(scatter.eval().size, 0)
+
+  def testRank3InvalidShape1(self):
+    indices = tf.zeros([3, 2, 2], tf.int32)
+    updates = tf.zeros([2, 2, 2], tf.int32)
+    shape = np.array([2, 2, 2])
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, "The outer \\d+ dimensions of indices\\.shape="):
+      tf.scatter_nd(indices, updates, shape)
+
+    ref = tf.Variable(tf.zeros(shape, tf.int32))
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, "The outer \\d+ dimensions of indices\\.shape="):
+      tf.scatter_nd_update(ref, indices, updates)
+
+  def testRank3InvalidShape2(self):
+    indices = tf.zeros([2, 2, 1], tf.int32)
+    updates = tf.zeros([2, 2], tf.int32)
+    shape = np.array([2, 2, 2])
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, "The inner \\d+ dimensions of output\\.shape="):
+      tf.scatter_nd(indices, updates, shape)
+
+    ref = tf.Variable(tf.zeros(shape, tf.int32))
+    with self.assertRaisesWithPredicateMatch(
+        ValueError, "The inner \\d+ dimensions of ref\\.shape="):
+      tf.scatter_nd_update(ref, indices, updates)
+
+  def testGradientsRank2ElementUpdate(self):
+    indices = tf.constant([[0, 0], [1, 1]], dtype=tf.int32)
+    updates = tf.constant([1, 4], dtype=tf.float64)
+    shape = tf.constant([2, 2], dtype=tf.int32)
+    outputs = tf.scatter_nd(indices, updates, shape)
+
+    grad_vals = tf.constant([[1, 2], [3, 4]], dtype=tf.float64)
+    grads = tf.gradients([outputs], [updates], [grad_vals])[0]
+    expected_grads = np.array([1, 4], dtype=np.float64)
+    with self.test_session():
+      self.assertAllEqual(expected_grads, grads.eval())
+
+  def testGradientsRank2SliceUpdate(self):
+    indices = tf.constant([[1], [0]], dtype=tf.int32)
+    updates = tf.constant([[3, 4], [1, 2]], dtype=tf.float64)
+    shape = tf.constant([2, 2], dtype=tf.int32)
+    outputs = tf.scatter_nd(indices, updates, shape)
+
+    grad_vals = tf.constant([[3, 4], [1, 2]], dtype=tf.float64)
+    grads = tf.gradients([outputs], [updates], [grad_vals])[0]
+    expected_grads = np.array([[1, 2], [3, 4]], dtype=np.float64)
+    with self.test_session():
+      self.assertAllEqual(expected_grads, grads.eval())
+
+  def testGradientsRank3SliceUpdate(self):
+    indices = tf.constant([[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=tf.int32)
+    updates = tf.constant(
+        [[[5, 7], [2, 4]], [[1, 3], [6, 8]]], dtype=tf.float64)
+    shape = tf.constant([2, 2, 2], dtype=tf.int32)
+    outputs = tf.scatter_nd(indices, updates, shape)
+
+    grad_vals = tf.constant(
+        [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=tf.float64)
+    grads = tf.gradients([outputs], [updates], [grad_vals])[0]
+    expected_grads = np.array(
+        [[[3, 4], [5, 6]], [[1, 2], [7, 8]]], dtype=np.float64)
+    with self.test_session():
+      self.assertAllEqual(expected_grads, grads.eval())
+
+  def testConcurrentUpdates(self):
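+    # Every update targets the same [0, 1] entry, so scatter_nd_add must
+    # accumulate all of them; the expected value is the sum of the updates.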
+    num_updates = 10000
+    update_values = np.random.rand(num_updates)
+    ref = tf.Variable(np.zeros([2, 2]), dtype=tf.float64)
+    indices = tf.constant([[0, 1]] * num_updates, dtype=tf.int32)
+    updates = tf.constant(update_values, dtype=tf.float64)
+
+    expected_result = np.zeros([2, 2], dtype=np.float64)
+    expected_result[0, 1] = np.sum(update_values)
+
+    scatter = tf.scatter_nd_add(ref, indices, updates)
+    init = tf.global_variables_initializer()
+
+    with tf.Session() as sess:
+      sess.run(init)
+      result = sess.run(scatter)
+      self.assertAllClose(expected_result, result)
+
+  # TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
+  def _disabledTestScatterOutOfRangeGpu(self):
+    if not tf.test.IsBuiltWithCuda():
+      return
+    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_mul,
+               tf.scatter_nd_div, tf.scatter_nd_update):
+      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
+      updates = np.array([-3, -4, -5]).astype(np.float32)
+      # With GPU, the code ignores indices that are out of range.
+      # We don't test the implementation; we just test that there are no
+      # failures.
+      with self.test_session(force_gpu=True):
+        ref = tf.Variable(params)
+        ref.initializer.run()
+
+        # Indices all in range, no problem.
+        indices = np.array([2, 0, 5])
+        op(ref, indices, updates).eval()
+
+        # Indices out of range should not fail.
+        indices = np.array([-1, 0, 5])
+        op(ref, indices, updates).eval()
+        indices = np.array([2, 0, 6])
+        op(ref, indices, updates).eval()
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
index a3cd3240f25..c715e5630d0 100644
--- a/tensorflow/python/kernel_tests/variables_test.py
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -49,7 +49,7 @@ class VariablesTestCase(tf.test.TestCase):
       with self.assertRaisesOpError("Attempting to use uninitialized value"):
         var1.eval()
 
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
 
       self.assertAllClose(0.0, var0.eval())
       self.assertAllClose(1.1, var1.eval())
@@ -75,7 +75,7 @@ class VariablesTestCase(tf.test.TestCase):
       self.assertEqual([3, 6], depdep.get_shape())
       self.assertEqual([3, 6], depdep.get_shape())
 
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
 
       self.assertAllClose(rnd.eval(), dep.eval())
       self.assertAllClose(rnd.eval() + dep.eval() + 2.0,
@@ -95,7 +95,7 @@ class VariablesTestCase(tf.test.TestCase):
       plus_one = var.assign_add(1.0)
       minus_one = var.assign_sub(2.0)
       four = var.assign(4.0)
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       self.assertAllClose(0.0, var.eval())
 
       self.assertAllClose(1.0, plus_one.eval())
@@ -113,7 +113,7 @@ class VariablesTestCase(tf.test.TestCase):
       var = tf.Variable(zero)
       count_up_to = var.count_up_to(3)
 
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       self.assertEqual(0, var.eval())
 
       self.assertEqual(0, count_up_to.eval())
@@ -193,7 +193,7 @@ class VariablesTestCase(tf.test.TestCase):
     with self.test_session():
       var_x = tf.Variable(2.0)
       var_y = tf.Variable(3.0)
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       self.assertAllClose(2.0, var_x.eval())
       self.assertAllClose(3.0, var_y.eval())
       self.assertAllClose(5.0, tf.add(var_x, var_y).eval())
@@ -204,7 +204,7 @@ class VariablesTestCase(tf.test.TestCase):
       zero_size_const = tf.ones([2, 0])
       variable_mul = tf.matmul(zero_size_const, zero_size_var)
       const_mul = tf.matmul(zero_size_const, zero_size_const, transpose_b=True)
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       variable_output = variable_mul.eval()
       self.assertAllClose(const_mul.eval(), variable_output)
       self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@@ -230,7 +230,7 @@ class VariablesTestCase(tf.test.TestCase):
           2.0, trainable=True,
           collections=[tf.GraphKeys.TRAINABLE_VARIABLES,
                        tf.GraphKeys.VARIABLES])
-      self.assertEqual([var_x, var_y, var_z, var_t], tf.all_variables())
+      self.assertEqual([var_x, var_y, var_z, var_t], tf.global_variables())
       self.assertEqual([var_x, var_z, var_t], tf.trainable_variables())
 
   def testOperators(self):
@@ -269,7 +269,7 @@ class VariablesTestCase(tf.test.TestCase):
       var_t = tf.Variable(rnd)
       slice_v = var_t[2, 0:0]
 
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       self.assertAllClose([2.0], add.eval())
       self.assertAllClose([3.0], radd.eval())
       self.assertAllClose([1.0], sub.eval())
@@ -302,7 +302,7 @@ class VariablesTestCase(tf.test.TestCase):
   def testSession(self):
     with self.test_session() as sess:
       var = tf.Variable([1, 12])
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       self.assertAllClose([1, 12], sess.run(var))
 
   def testDevicePlacement(self):
@@ -310,7 +310,7 @@ class VariablesTestCase(tf.test.TestCase):
       with tf.device("/cpu:0"):
         var = tf.Variable([1, 12])
       init_value = var.initialized_value()
-      init_op = tf.initialize_all_variables()
+      init_op = tf.global_variables_initializer()
       self.assertEqual(var.op.device, init_value.device)
       self.assertEqual(var.op.device, init_op.device)
       sess.run(init_op)
@@ -348,7 +348,7 @@ class VariablesTestCase(tf.test.TestCase):
 
       with self.assertRaises(tf.errors.FailedPreconditionError):
         v2.eval()
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       self.assertAllClose(np.negative(value), v2.eval())
 
   def testInitializerFunctionDevicePlacement(self):
@@ -385,7 +385,7 @@ class IsInitializedTest(tf.test.TestCase):
       _ = v, w
       uninited = tf.report_uninitialized_variables()
       self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       self.assertEqual(0, sess.run(uninited).size)
 
   def testVariableList(self):
@@ -411,7 +411,7 @@ class IsInitializedTest(tf.test.TestCase):
       a = tf.Variable(tf.zeros([0, 2]))
       b = tf.Variable(tf.ones([2, 2]))
       objective = tf.reduce_sum(b + tf.matmul(a, a, transpose_a=True))
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       do_opt = tf.train.GradientDescentOptimizer(0.1).minimize(objective)
       sess.run([do_opt])
       self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], b.eval())
@@ -431,7 +431,7 @@ class ObsoleteIsInitializedTest(tf.test.TestCase):
       inited = tf.assert_variables_initialized()
       with self.assertRaisesOpError("Attempting to use uninitialized value"):
         sess.run(inited)
-      tf.initialize_all_variables().run()
+      tf.global_variables_initializer().run()
       sess.run(inited)
 
   def testVariableList(self):
diff --git a/tensorflow/python/ops/array_grad.py b/tensorflow/python/ops/array_grad.py
index d6a093cf7b8..d96002169ab 100644
--- a/tensorflow/python/ops/array_grad.py
+++ b/tensorflow/python/ops/array_grad.py
@@ -302,8 +302,12 @@ def _GatherGrad(op, grad):
 
 
 @ops.RegisterGradient("GatherNd")
-def _GatherNdGrad(unused_op, unused_grad):
-  raise NotImplementedError("Gradient for gather_nd is not implemented.")
+def _GatherNdGrad(op, grad):
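+  # The gradient of gather_nd scatters the incoming gradient back to the
+  # gathered positions; all other positions receive zero. The integer
+  # `indices` input gets no gradient.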
+  ref = op.inputs[0]
+  ref_shape = array_ops.shape(ref)
+  indices = op.inputs[1]
+  ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
+  return [ref_grad, None]
 
 
 @ops.RegisterGradient("CheckNumerics")
@@ -567,3 +571,10 @@ def _ExtractImagePatchesGrad(op, grad):
   grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
 
   return [grad_out]
+
+
+@ops.RegisterGradient("ScatterNd")
+def _ScatterNdGrad(op, grad):
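+  # scatter_nd is linear in `updates`, so the gradient w.r.t. `updates` is
+  # the incoming gradient gathered at `indices`. The `indices` and `shape`
+  # inputs get no gradient.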
+  indices = op.inputs[0]
+  updates_grad = array_ops.gather_nd(grad, indices)
+  return [None, updates_grad, None]
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index 5e92eb9bc7d..9206002f5ef 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -71,6 +71,7 @@ or join multiple tensors together.
 @@gather
 @@gather_nd
 @@unique_with_counts
+@@scatter_nd
 @@dynamic_partition
 @@dynamic_stitch
 @@boolean_mask
@@ -81,6 +82,15 @@ or join multiple tensors together.
 @@quantized_concat
 @@setdiff1d
 
+## Fake quantization
+Operations used to help train for better quantization accuracy.
+
+@@fake_quant_with_min_max_args
+@@fake_quant_with_min_max_args_gradient
+@@fake_quant_with_min_max_vars
+@@fake_quant_with_min_max_vars_gradient
+@@fake_quant_with_min_max_vars_per_channel
+@@fake_quant_with_min_max_vars_per_channel_gradient
 """
 from __future__ import absolute_import
 from __future__ import division
@@ -2043,9 +2053,15 @@ def _FakeQuantWithMinMaxArgsGradient(op, grad):
 
 
 ops.RegisterShape("FakeQuantWithMinMaxArgs")(common_shapes.call_cpp_shape_fn)
+ops.RegisterShape("FakeQuantWithMinMaxArgsGradient")(
+    common_shapes.call_cpp_shape_fn)
 ops.RegisterShape("FakeQuantWithMinMaxVars")(common_shapes.call_cpp_shape_fn)
+ops.RegisterShape("FakeQuantWithMinMaxVarsGradient")(
+    common_shapes.call_cpp_shape_fn)
 ops.RegisterShape("FakeQuantWithMinMaxVarsPerChannel")(
     common_shapes.call_cpp_shape_fn)
+ops.RegisterShape("FakeQuantWithMinMaxVarsPerChannelGradient")(
+    common_shapes.call_cpp_shape_fn)
 
 
 @ops.RegisterGradient("FakeQuantWithMinMaxVars")
@@ -2537,3 +2553,49 @@ def _QuantizedReshapeShape(op):
 ops.RegisterShape("QuantizeV2")(None)
 ops.RegisterShape("QuantizedBatchNormWithGlobalNormalization")(None)
 ops.RegisterShape("QuantizedConcat")(None)
+
+
+@ops.RegisterShape("ScatterNd")
+def _ScatterNdShape(op):
+  """Shape function for the ScatterNd op.
+
+  The shape of the output is defined as a parameter on the Operation.
+
+  Args:
+    op: A ScatterNd Operation.
+
+  Returns:
+    A single-element list containing the shape of the output.
+
+  Raises:
+    ValueError: if the shapes or ranks of the arguments are incompatible.
+  """
+  indices_shape = op.inputs[0].get_shape()
+  updates_shape = op.inputs[1].get_shape()
+  output_shape = tensor_util.constant_value_as_shape(op.inputs[2])
+
+  if output_shape.num_elements() == 0 and not (
+      indices_shape.num_elements() in (None, 0) and
+      updates_shape.num_elements() in (None, 0)):
+    raise ValueError("Indices and updates specified for empty output shape")
+
+  if indices_shape.ndims is not None and output_shape is not None:
+    outer_dims = len(indices_shape) - 1
+    ixdim = indices_shape[-1].value or 0
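+    # `indices` has shape outer_dims + [ixdim]: its outer dims must match the
+    # outer dims of `updates`, and the output dims from `ixdim` onward must
+    # match the remaining (inner) dims of `updates`.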
+
+    if not indices_shape[:outer_dims].is_compatible_with(
+        updates_shape[:outer_dims]):
+      raise ValueError("The outer %d dimensions of indices.shape=%s must "
+                       "match the outer %d dimensions of updates.shape=%s" % (
+                           outer_dims, indices_shape, outer_dims,
+                           updates_shape))
+    if output_shape.ndims is not None:
+      if not output_shape[ixdim:].is_compatible_with(
+          updates_shape[outer_dims:]):
+        raise ValueError("The inner %d dimensions of output.shape=%s must "
+                         "match the inner %d dimensions of updates.shape=%s" %
+                         (len(output_shape) - ixdim, output_shape,
+                          len(updates_shape) - outer_dims, updates_shape))
+
+    return [output_shape]
+  return [None]
diff --git a/tensorflow/python/ops/candidate_sampling_ops.py b/tensorflow/python/ops/candidate_sampling_ops.py
index a1cd8d803ef..b27167df542 100644
--- a/tensorflow/python/ops/candidate_sampling_ops.py
+++ b/tensorflow/python/ops/candidate_sampling_ops.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 # ==============================================================================
 
-"""Wrappers for primitive Neural Net (NN) Operations."""
+"""Wrappers for candidate sampling operations."""
 
 from __future__ import absolute_import
 from __future__ import division
diff --git a/tensorflow/python/ops/gradients.py b/tensorflow/python/ops/gradients.py
index e2803a2f093..921fd50aa9f 100644
--- a/tensorflow/python/ops/gradients.py
+++ b/tensorflow/python/ops/gradients.py
@@ -18,878 +18,17 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import collections
-import contextlib
-import warnings
-
-import numpy as np
-import six
-from six.moves import xrange  # pylint: disable=redefined-builtin
-
-from tensorflow.core.framework import attr_value_pb2
-from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import ops
-from tensorflow.python.framework import tensor_shape
-from tensorflow.python.framework import tensor_util
-from tensorflow.python.ops import array_grad  # pylint: disable=unused-import
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import control_flow_grad  # pylint: disable=unused-import
-from tensorflow.python.ops import control_flow_ops
-from tensorflow.python.ops import image_grad  # pylint: disable=unused-import
-from tensorflow.python.ops import logging_ops  # pylint: disable=unused-import
-from tensorflow.python.ops import linalg_grad  # pylint: disable=unused-import
-from tensorflow.python.ops import math_grad  # pylint: disable=unused-import
-from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import linalg_ops
-from tensorflow.python.ops import functional_ops
-
-from tensorflow.python.platform import tf_logging as logging
-
-# Warn the user if we convert a sparse representation to dense with at
-# least this number of elements.
-_LARGE_SPARSE_NUM_ELEMENTS = 100000000
-
-
-def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
-  """Converts an IndexedSlices object `value` to a Tensor.
-
-  NOTE(mrry): This function is potentially expensive.
-
-  Args:
-    value: An ops.IndexedSlices object.
-    dtype: The dtype of the Tensor to be returned.
-    name: Optional name to use for the returned Tensor.
-    as_ref: True if a ref is requested.
-
-  Returns:
-    A dense Tensor representing the values in the given IndexedSlices.
-
-  Raises:
-    ValueError: If the IndexedSlices does not have the same dtype.
-  """
-  _ = as_ref
-  if dtype and not dtype.is_compatible_with(value.dtype):
-    raise ValueError(
-        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
-        (dtype.name, value.dtype.name))
-  if value.dense_shape is None:
-    raise ValueError(
-        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
-        % str(value))
-  # TODO(mrry): Consider adding static shape information to
-  # IndexedSlices, to avoid using numpy here.
-  dense_shape_value = tensor_util.constant_value(value.dense_shape)
-  if dense_shape_value is not None:
-    num_elements = np.prod(dense_shape_value)
-    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
-      warnings.warn(
-          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
-          "This may consume a large amount of memory." % num_elements)
-  else:
-    warnings.warn(
-        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
-        "This may consume a large amount of memory.")
-  return math_ops.unsorted_segment_sum(
-      value.values, value.indices, value.dense_shape[0], name=name)
-
-
-ops.register_tensor_conversion_function(ops.IndexedSlices,
-                                        _IndexedSlicesToTensor)
-
-
-def _MarkReachedOps(from_ops, reached_ops):
-  """Mark all ops reached from "from_ops".
-
-  Args:
-    from_ops: list of Operations.
-    reached_ops: list of booleans, indexed by operation id.
-  """
-  queue = collections.deque()
-  queue.extend(from_ops)
-  while queue:
-    op = queue.popleft()
-    if not reached_ops[op._id]:
-      reached_ops[op._id] = True
-      for output in op.outputs:
-        queue.extend(output.consumers())
-
-
-def _GatherInputs(to_ops, reached_ops):
-  """List all inputs of to_ops that are in reached_ops.
-
-  Args:
-    to_ops: list of Operations.
-    reached_ops: list of booleans, indexed by operation id.
-
-  Returns:
-    The list of all inputs of to_ops that are in reached_ops.
-    That list includes all elements of to_ops.
-  """
-  inputs = []
-  queue = collections.deque()
-  queue.extend(to_ops)
-  while queue:
-    op = queue.popleft()
-    # We are interested in this op.
-    if reached_ops[op._id]:
-      inputs.append(op)
-      # Clear the boolean so we won't add the inputs again.
-      reached_ops[op._id] = False
-      for inp in op.inputs:
-        queue.append(inp.op)
-  return inputs
-
-
-def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
-  """Initialize the pending count for ops between two lists of Operations.
-
-  'pending_count[op._id]' indicates the number of backprop inputs
-  to this operation.
-
-  Args:
-    graph: a Graph.
-    to_ops: list of Operations.
-    from_ops: list of Operations.
-    colocate_gradients_with_ops: Python bool.  See docstring of gradients().
-
-  Returns:
-    A tuple containing: (1) a list of integers indexed by operation id,
-    indicating the number of backprop inputs to this operation, and (2)
-    a ControlFlowState object which is not None if the ops between from_ops
-    and to_ops contain control flow loops.
-  """
-  # Mark reachable ops from from_ops.
-  reached_ops = [False] * (graph._last_id + 1)
-  for op in to_ops:
-    reached_ops[op._id] = True
-  _MarkReachedOps(from_ops, reached_ops)
-
-  # Mark between ops.
-  between_ops = [False] * (graph._last_id + 1)
-  between_op_list = []
-  queue = collections.deque()
-  queue.extend(to_ops)
-  while queue:
-    op = queue.popleft()
-    # We are interested in this op.
-    if reached_ops[op._id]:
-      between_ops[op._id] = True
-      between_op_list.append(op)
-      # Clear the boolean so we won't add the inputs again.
-      reached_ops[op._id] = False
-      for inp in op.inputs:
-        queue.append(inp.op)
-
-  # 'loop_state' is None if there are no while loops.
-  loop_state = control_flow_ops.MaybeCreateControlFlowState(
-      between_op_list, between_ops, colocate_gradients_with_ops)
-
-  # Initialize pending count for between ops.
-  pending_count = [0] * (graph._last_id + 1)
-  for op in between_op_list:
-    for x in op.inputs:
-      if between_ops[x.op._id]:
-        pending_count[x.op._id] += 1
-
-  return pending_count, loop_state
-
-
-def _AsList(x):
-  return x if isinstance(x, (list, tuple)) else [x]
-
-
-def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
-  """Fill in default values for grad_ys.
-
-  Args:
-    grad_ys: List of gradients, can contain None.
-    ys: List of tensors.
-    colocate_gradients_with_ops: If True, try colocating gradients with
-      the corresponding op.
-
-  Returns:
-    A list of gradients to use, without None.
-
-  Raises:
-    ValueError: If one of the grad_ys is invalid.
-  """
-  if len(grad_ys) != len(ys):
-    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
-  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
-  for i in xrange(len(grad_ys)):
-    grad_y = grad_ys[i]
-    y = ys[i]
-    if grad_y is None:
-      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
-        grad_ys[i] = array_ops.fill(
-            array_ops.shape(y), constant_op.constant(
-                1, dtype=y.dtype))
-    else:
-      if grad_y.dtype != y.dtype:
-        raise ValueError("Y and ys_grad must be of the same type, "
-                         "not y: %s, ys_grad: %s " %
-                         (dtypes.as_dtype(y.dtype).name,
-                          dtypes.as_dtype(grad_y.dtype).name))
-  return grad_ys
-
-
-def _IsTrainable(tensor):
-  dtype = dtypes.as_dtype(tensor.dtype)
-  return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
-                              dtypes.complex64, dtypes.complex128)
-
-
-def _VerifyGeneratedGradients(grads, op):
-  """Verify that gradients are valid in number and type.
-
-  Args:
-    grads: List of generated gradients.
-    op: Operation for which the gradients where generated.
-
-  Raises:
-    ValueError: if the gradients are invalid.
-  """
-  if len(grads) != len(op.inputs):
-    raise ValueError("Num gradients %d generated for op %s do not match num "
-                     "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
-  for i in xrange(len(grads)):
-    grad = grads[i]
-    inp = op.inputs[i]
-    if grad is not None:
-      if not grad.dtype.is_compatible_with(inp.dtype):
-        raise ValueError("Gradient type %s generated for op %s does "
-                         "not match input type %s" %
-                         (dtypes.as_dtype(grad.dtype).name, op.node_def,
-                          dtypes.as_dtype(inp.dtype).name))
-
-
-def _StopOps(from_ops, pending_count):
-  """The set of ops that terminate the gradient computation.
-
-  This computes the frontier of the forward graph *before* which backprop
-  should stop. Operations in the returned set will not be differentiated.
-  This set is defined as the subset of `from_ops` containing ops that have
-  no predecessor in `from_ops`. `pending_count` is the result of
-  `_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
-  iff pending_count[op._id] > 0.
-
-  Args:
-    from_ops: list of Operations.
-    pending_count: List of integers, indexed by operation id.
-
-  Returns:
-    The set of operations.
-  """
-  stop_ops = set()
-  for op in from_ops:
-    is_stop_op = True
-    for inp in op.inputs:
-      if pending_count[inp.op._id] > 0:
-        is_stop_op = False
-        break
-    if is_stop_op:
-      stop_ops.add(op._id)
-  return stop_ops
-
-
-@contextlib.contextmanager
-def _maybe_colocate_with(op, colocate_gradients_with_ops):
-  """Context to colocate with `op` if `colocate_gradients_with_ops`."""
-  if colocate_gradients_with_ops:
-    with ops.colocate_with(op):
-      yield
-  else:
-    yield
-
-
-def _SymGrad(op, out_grads):
-  """Backprop through a function call node op given its outputs' gradients."""
-  f_in = [x for x in op.inputs] + out_grads
-  f_types = [x.dtype for x in op.inputs]
-  f = attr_value_pb2.NameAttrList()
-  f.name = op.type
-  for k in op.node_def.attr:
-    f.attr[k].CopyFrom(op.node_def.attr[k])
-  # pylint: disable=protected-access
-  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
-  # pylint: enable=protected-access
-  return in_grads
-
-
-def gradients(ys,
-              xs,
-              grad_ys=None,
-              name="gradients",
-              colocate_gradients_with_ops=False,
-              gate_gradients=False,
-              aggregation_method=None):
-  """Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
-
-  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
-  is a list of `Tensor`, holding the gradients received by the
-  `ys`. The list must be the same length as `ys`.
-
-  `gradients()` adds ops to the graph to output the partial
-  derivatives of `ys` with respect to `xs`.  It returns a list of
-  `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
-  for y in `ys`.
-
-  `grad_ys` is a list of tensors of the same length as `ys` that holds
-  the initial gradients for each y in `ys`.  When `grad_ys` is None,
-  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
-  user can provide their own initial `grad_ys` to compute the
-  derivatives using a different initial gradient for each y (e.g., if
-  one wanted to weight the gradient differently for each value in
-  each y).
-
-  Args:
-    ys: A `Tensor` or list of tensors to be differentiated.
-    xs: A `Tensor` or list of tensors to be used for differentiation.
-    grad_ys: Optional. A `Tensor` or list of tensors the same size as
-      `ys` and holding the gradients computed for each y in `ys`.
-    name: Optional name to use for grouping all the gradient ops together.
-      defaults to 'gradients'.
-    colocate_gradients_with_ops: If True, try colocating gradients with
-      the corresponding op.
-    gate_gradients: If True, add a tuple around the gradients returned
-      for an operation.  This avoids some race conditions.
-    aggregation_method: Specifies the method used to combine gradient terms.
-      Accepted values are constants defined in the class `AggregationMethod`.
-
-  Returns:
-    A list of `sum(dy/dx)` for each x in `xs`.
-
-  Raises:
-    LookupError: if one of the operations between `x` and `y` does not
-      have a registered gradient function.
-    ValueError: if the arguments are invalid.
-
-  """
-  ys = _AsList(ys)
-  xs = _AsList(xs)
-  if grad_ys is None:
-    grad_ys = [None] * len(ys)
-  else:
-    grad_ys = _AsList(grad_ys)
-
-  with ops.name_scope(name, "gradients", ys + xs + grad_ys):
-    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
-    xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
-    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
-
-    # The approach we take here is as follows: Create a list of all ops in the
-    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
-    # to ensure that when we visit an op the gradients w.r.t its outputs have
-    # been collected.  Then aggregate these gradients if needed, call the op's
-    # gradient function, and add the generated gradients to the gradients for
-    # its input.
-
-    # Initialize the pending count for ops in the connected subgraph from ys
-    # to the xs.
-    to_ops = [t.op for t in ys]
-    from_ops = [t.op for t in xs]
-    pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
-                                              from_ops,
-                                              colocate_gradients_with_ops)
-
-    # Iterate over the collected ops.
-    #
-    # grads: op => list of gradients received on each output endpoint of the
-    # op.  The gradients for each endpoint are initially collected as a list.
-    # When it is time to call the op's gradient function, for each endpoint we
-    # aggregate the list of received gradients into a Add() Operation if there
-    # is more than one.
-    grads = {}
-
-    # Add the initial gradients for the ys.
-    for y, grad_y in zip(ys, grad_ys):
-      _SetGrad(grads, y, grad_y)
-
-    # Initialize queue with to_ops.
-    queue = collections.deque()
-    # Add the ops in 'to_ops' into the queue.
-    to_ops_set = set()
-    for op in to_ops:
-      # 'ready' handles the case where one output gradient relies on
-      # another output's gradient.
-      # pylint: disable=protected-access
-      ready = (pending_count[op._id] == 0)
-      if ready and op._id not in to_ops_set:
-        to_ops_set.add(op._id)
-        queue.append(op)
-      # pylint: enable=protected-access
-
-    if loop_state:
-      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
-      for y in loop_exits:
-        if _IsTrainable(y):
-          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
-          queue.append(y.op)
-
-    # The set of 'from_ops'.
-    stop_ops = _StopOps(from_ops, pending_count)
-    while queue:
-      # generate gradient subgraph for op.
-      op = queue.popleft()
-      with _maybe_colocate_with(op, colocate_gradients_with_ops):
-        if loop_state:
-          loop_state.EnterGradWhileContext(op, before=True)
-        out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
-        if loop_state:
-          loop_state.ExitGradWhileContext(op, before=True)
-
-        grad_fn = None
-        # pylint: disable=protected-access
-        is_func_call = ops.get_default_graph()._is_function(op.type)
-        has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
-        if has_out_grads and (op._id not in stop_ops):
-          if is_func_call:
-            grad_fn = ops.get_default_graph()._get_function(
-                op.type).python_grad_func
-            # pylint: enable=protected-access
-          else:
-            # A grad_fn must be defined, either as a function or as None
-            # for ops that do not have gradients.
-            try:
-              grad_fn = ops.get_gradient_function(op)
-            except LookupError:
-              raise LookupError(
-                  "No gradient defined for operation '%s' (op type: %s)" %
-                  (op.name, op.type))
-        if loop_state:
-          loop_state.EnterGradWhileContext(op, before=False)
-        if (grad_fn or is_func_call) and has_out_grads:
-          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
-          # output, it means that the cost does not depend on output[i],
-          # therefore dC/doutput[i] is 0.
-          for i, out_grad in enumerate(out_grads):
-            if (not isinstance(out_grad, ops.Tensor) and
-                not out_grad) and _IsTrainable(op.outputs[i]):
-              # Only floating-point outputs get a zero gradient. Gradient
-              # functions should ignore the gradient for other outputs.
-              if loop_state:
-                out_grads[i] = loop_state.ZerosLike(op, i)
-              else:
-                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
-          with ops.name_scope(op.name + "_grad"):
-            # pylint: disable=protected-access
-            with ops.get_default_graph()._original_op(op):
-              # pylint: enable=protected-access
-              if grad_fn:
-                # If grad_fn was found, do not use SymbolicGradient even for
-                # functions.
-                in_grads = grad_fn(op, *out_grads)
-              else:
-                # For function call ops, we add a 'SymbolicGradient'
-                # node to the graph to compute gradients.
-                in_grads = _SymGrad(op, out_grads)
-              in_grads = _AsList(in_grads)
-              _VerifyGeneratedGradients(in_grads, op)
-              if gate_gradients and len(
-                  [x for x in in_grads if x is not None]) > 1:
-                in_grads = control_flow_ops.tuple(in_grads)
-          _LogOpGradients(op, out_grads, in_grads)
-        else:
-          # If no grad_fn is defined or none of out_grads is available,
-          # just propagates a list of None backwards.
-          in_grads = [None] * len(op.inputs)
-        for t_in, in_grad in zip(op.inputs, in_grads):
-          if in_grad is not None:
-            if isinstance(in_grad, ops.Tensor):
-              in_grad.set_shape(t_in.get_shape())
-            _SetGrad(grads, t_in, in_grad)
-        if loop_state:
-          loop_state.ExitGradWhileContext(op, before=False)
-
-      # Update pending count for the inputs of op and enqueue ready ops.
-      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
-
-  if loop_state:
-    loop_state.PostProcessing()
-  return [_GetGrad(grads, x) for x in xs]
-
-
-def _HasAnyNotNoneGrads(grads, op):
-  """Return true iff op has real gradient."""
-  out_grads = _GetGrads(grads, op)
-  for out_grad in out_grads:
-    if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
-      return True
-    if out_grad and isinstance(out_grad, collections.Sequence):
-      if any([g is not None for g in out_grad]):
-        return True
-  return False
-
-
-def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
-  """Update pending count for the inputs of op and enqueue ready ops."""
-  for x in op.inputs:
-    # pylint: disable=protected-access
-    pending_count[x.op._id] -= 1
-    ready = (pending_count[x.op._id] == 0)
-    if loop_state and not ready:
-      ready = (pending_count[x.op._id] > 0 and
-               control_flow_ops.IsLoopSwitch(x.op))
-    # pylint: enable=protected-access
-    if ready:
-      if control_flow_ops.IsLoopExit(x.op):
-        # if x is an exit without real gradient, defer processing them.
-        grad_state = loop_state.GetGradState(x.op, before=False)
-        grad_state.deferred_exits.append(x)
-        grad_state.pending_exits_count -= 1
-        if grad_state.pending_exits_count == 0:
-          # We now have all the exits so process them.
-          has_real_grad = False
-          for y in grad_state.deferred_exits:
-            if _HasAnyNotNoneGrads(grads, y.op):
-              has_real_grad = True
-              queue.append(y.op)
-            else:
-              grad_state.unused_exits.append(y)
-          if has_real_grad:
-            # For an unused exit, if it has floating-point outputs, backprop
-            # a zero gradient. Otherwise, just ignore it.
-            for y in grad_state.unused_exits:
-              if _IsTrainable(y):
-                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
-              queue.append(y.op)
-          else:
-            # All exits are "unused" so use None as gradient.
-            for y in grad_state.unused_exits:
-              queue.append(y.op)
-      else:
-        queue.append(x.op)
-
-
-def _SetGrad(grads, t, grad):
-  """Sets gradient "grad" in "grads" for tensor "t"."""
-  op = t.op
-  op_grads = grads.get(op)
-  if not op_grads:
-    op_grads = [[] for _ in xrange(len(op.outputs))]
-    grads[op] = op_grads
-  t_grads = op_grads[t.value_index]
-  if isinstance(t_grads, list):
-    t_grads.append(grad)
-  else:
-    assert control_flow_ops.IsLoopSwitch(op)
-    op_grads[t.value_index] = grad
-
-
-def _GetGrad(grads, t):
-  """Gets gradient for tensor "t"."""
-  op = t.op
-  op_grads = grads.get(op)
-  if not op_grads:
-    return None
-  t_grad = op_grads[t.value_index]
-  assert not isinstance(t_grad, list), (
-      "gradients list should have been aggregated by now.")
-  return t_grad
-
-
-def _GetGrads(grads, op):
-  """Gets all gradients for op."""
-  if op in grads:
-    return grads[op]
-  else:
-    return [[] for _ in xrange(len(op.outputs))]
-
-
-def _HandleNestedIndexedSlices(grad):
-  assert isinstance(grad, ops.IndexedSlices)
-  if isinstance(grad.values, ops.Tensor):
-    return grad
-  else:
-    assert isinstance(grad.values, ops.IndexedSlices)
-    g = _HandleNestedIndexedSlices(grad.values)
-    return ops.IndexedSlices(g.values,
-                             array_ops.gather(grad.indices, g.indices),
-                             g.dense_shape)
-
-
-def _AccumulatorShape(inputs):
-  shape = tensor_shape.unknown_shape()
-  for i in inputs:
-    if isinstance(i, ops.Tensor):
-      shape = shape.merge_with(i.get_shape())
-  return shape
-
-
-def _LogOpGradients(op, out_grads, in_grads):
-  """Log the in and out grads of an op."""
-  logging.vlog(1, "Gradient for '" + op.name + "'")
-
-  def _FilterGrad(x):
-    if x is None:
-      return False
-    if isinstance(x, (list, tuple)):
-      return bool(x)
-    else:
-      return True
-
-  logging.vlog(1, "  in  --> %s",
-               ", ".join([x.name for x in out_grads if _FilterGrad(x)]))
-  logging.vlog(1, "  out --> %s",
-               ", ".join([x.name for x in in_grads if _FilterGrad(x)]))
-
-
-def _MultiDeviceAddN(tensor_list):
-  """Adds tensors from potentially multiple devices."""
-  # Basic function structure comes from control_flow_ops.group().
-  # Sort tensors according to their devices.
-  tensors_on_device = collections.defaultdict(lambda: [])
-  for tensor in tensor_list:
-    tensors_on_device[tensor.device].append(tensor)
-
-  # For each device, add the tensors on that device first.
-  # Then gather the partial sums from multiple devices.
-  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
-  # E.g., aggregate per GPU, then per task, and so on.
-  summands = []
-
-  def DeviceKey(dev):
-    return "" if dev is None else dev
-
-  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
-    tensors = tensors_on_device[dev]
-    with ops.colocate_with(tensors[0].op, ignore_existing=True):
-      summands.append(math_ops.add_n(tensors))
-
-  return math_ops.add_n(summands)
-
-
-class AggregationMethod(object):
-  """A class listing aggregation methods used to combine gradients.
-
-  Computing partial derivatives can require aggregating gradient
-  contributions. This class lists the various methods that can
-  be used to combine gradients in the graph:
-
-  *  `ADD_N`: All of the gradient terms are summed as part of one
-     operation using the "AddN" op. It has the property that all
-     gradients must be ready before any aggregation is performed.
-  *  `DEFAULT`: The system-chosen default aggregation method.
-  """
-  ADD_N = 0
-  DEFAULT = ADD_N
-  # The following are experimental and may not be supported in future releases.
-  EXPERIMENTAL_TREE = 1
-  EXPERIMENTAL_ACCUMULATE_N = 2
-
-
-def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
-  """Get the aggregated gradients for op.
-
-  Args:
-    grads: The map of memoized gradients.
-    op: The op to get gradients for.
-    loop_state: An object for maintaining the state of the while loops in the
-                graph. It is of type ControlFlowState. None if the graph
-                contains no while loops.
-    aggregation_method: Specifies the method used to combine gradient terms.
-      Accepted values are constants defined in the class `AggregationMethod`.
-
-  Returns:
-    A list of gradients, one per each output of `op`. If the gradients
-      for a particular output is a list, this function aggregates it
-      before returning.
-
-  Raises:
-    TypeError: if the incoming grads are not Tensors or IndexedSlices.
-    ValueError: if the arguments are invalid.
-
-  """
-  if aggregation_method is None:
-    aggregation_method = AggregationMethod.DEFAULT
-  if aggregation_method not in [
-      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
-      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
-  ]:
-    raise ValueError("Invalid aggregation_method specified %s." %
-                     aggregation_method)
-  out_grads = _GetGrads(grads, op)
-  for i, out_grad in enumerate(out_grads):
-    if loop_state:
-      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
-        assert control_flow_ops.IsLoopSwitch(op)
-        continue
-    # Grads have to be Tensors or IndexedSlices
-    if (isinstance(out_grad, collections.Sequence) and not all([
-        isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
-        if g is not None
-    ])):
-      raise TypeError("gradients have to be either all Tensors "
-                      "or all IndexedSlices")
-    # Aggregate multiple gradients, and convert [] to None.
-    if out_grad:
-      if len(out_grad) < 2:
-        used = "nop"
-        out_grads[i] = out_grad[0]
-      elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
-        tensor_shape = _AccumulatorShape(out_grad)
-        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
-            and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
-          # The benefit of using AccumulateN is that its inputs can be combined
-          # in any order and this can allow the expression to be evaluated with
-          # a smaller memory footprint.  When used with gpu_allocator_retry,
-          # it is possible to compute a sum of terms which are much larger than
-          # total GPU memory.
-          # AccumulateN can currently only be used if we know the shape for
-          # an accumulator variable.  If this is not known, or if we only have
-          # 2 grads then we fall through to the "tree" case below.
-          used = "accumulate_n"
-          out_grads[i] = math_ops.accumulate_n(out_grad)
-        elif aggregation_method in [
-            AggregationMethod.EXPERIMENTAL_TREE,
-            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
-        ]:
-          # Aggregate all gradients by doing pairwise sums: this may
-          # reduce performance, but it can improve memory because the
-          # gradients can be released earlier.
-          #
-          # TODO(vrv): Consider replacing this with a version of
-          # tf.AddN() that eagerly frees its inputs as soon as they are
-          # ready, so the order of this tree does not become a problem.
-          used = "tree"
-          with ops.name_scope(op.name + "_gradient_sum"):
-            running_sum = out_grad[0]
-            for grad in out_grad[1:]:
-              running_sum = math_ops.add_n([running_sum, grad])
-            out_grads[i] = running_sum
-        else:
-          used = "add_n"
-          out_grads[i] = _MultiDeviceAddN(out_grad)
-        logging.vlog(2, "  _AggregatedGrads %d x %s using %s",
-                     len(out_grad), tensor_shape, used)
-      else:
-        out_grad = math_ops._as_indexed_slices_list(
-            [g for g in out_grad if g is not None])
-        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
-        # Form IndexedSlices out of the concatenated values and
-        # indices.
-        out_grads[i] = ops.IndexedSlices(
-            array_ops.concat(0, [x.values for x in out_grad]),
-            array_ops.concat(0, [x.indices for x in out_grad]),
-            out_grad[0].dense_shape)
-    else:
-      out_grads[i] = []
-  return out_grads
-
-
-# TODO(vrv): Make this available when we want to make it public.
-def _hessian_vector_product(ys, xs, v):
-  """Multiply the Hessian of `ys` wrt `xs` by `v`.
-
-  This is an efficient construction that uses a backprop-like approach
-  to compute the product between the Hessian and another vector. The
-  Hessian is usually too large to be explicitly computed or even
-  represented, but this method allows us to at least multiply by it
-  for the same big-O cost as backprop.
-
-  Implicit Hessian-vector products are the main practical, scalable way
-  of using second derivatives with neural networks. They allow us to
-  do things like construct Krylov subspaces and approximate conjugate
-  gradient descent.
-
-  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
-  x, v)` will return an expression that evaluates to the same values
-  as (A + A.T) `v`.
-
-  Args:
-    ys: A scalar value, or a tensor or list of tensors to be summed to
-        yield a scalar.
-    xs: A list of tensors that we should construct the Hessian over.
-    v: A list of tensors, with the same shapes as xs, that we want to
-       multiply by the Hessian.
-
-  Returns:
-    A list of tensors (or if the list would be length 1, a single tensor)
-    containing the product between the Hessian and `v`.
-
-  Raises:
-    ValueError: `xs` and `v` have different length.
-
-  """
-
-  # Validate the input
-  length = len(xs)
-  if len(v) != length:
-    raise ValueError("xs and v must have the same length.")
-
-  # First backprop
-  grads = gradients(ys, xs)
-
-  assert len(grads) == length
-  elemwise_products = [
-      math_ops.mul(grad_elem, array_ops.stop_gradient(v_elem))
-      for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
-  ]
-
-  # Second backprop
-  return gradients(elemwise_products, xs)
-
-
-def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False, 
-            gate_gradients=False, aggregation_method=None):
-  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
-
-  `hessians()` adds ops to the graph to output the Hessian matrix of `ys` 
-  with respect to `xs`.  It returns a list of `Tensor` of length `len(xs)` 
-  where each tensor is the Hessian of `sum(ys)`. This function currently
-  only supports evaluating the Hessian with respect to (a list of) one-
-  dimensional tensors.
-
-  The Hessian is a matrix of second-order partial derivatives of a scalar
-  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
-
-  Args:
-    ys: A `Tensor` or list of tensors to be differentiated.
-    xs: A `Tensor` or list of tensors to be used for differentiation.
-    name: Optional name to use for grouping all the gradient ops together.
-      defaults to 'hessians'.
-    colocate_gradients_with_ops: See `gradients()` documentation for details.
-    gate_gradients: See `gradients()` documentation for details.
-    aggregation_method: See `gradients()` documentation for details.
-
-  Returns:
-    A list of Hessian matrices of `sum(y)` for each `x` in `xs`.
-
-  Raises:
-    LookupError: if one of the operations between `xs` and `ys` does not
-      have a registered gradient function.
-    ValueError: if the arguments are invalid or not supported. Currently,
-      this function only supports one-dimensional `x` in `xs`.
-  """
-  xs = _AsList(xs)
-  kwargs = {
-      'colocate_gradients_with_ops': colocate_gradients_with_ops,
-      'gate_gradients': gate_gradients,
-      'aggregation_method': aggregation_method
-    }
-  # Compute a hessian matrix for each x in xs
-  hessians = []
-  for i, x in enumerate(xs):
-    # Check dimensions
-    ndims = x.get_shape().ndims
-    if ndims is None:
-      raise ValueError('Cannot compute Hessian because the dimensionality of '
-                       'element number %d of `xs` cannot be determined' % i)
-    elif ndims != 1:
-      raise ValueError('Computing hessians is currently only supported for '
-                       'one-dimensional tensors. Element number %d of `xs` has '
-                       '%d dimensions.' % (i, ndims))
-    with ops.name_scope(name + '_first_derivative'):
-      # Compute the partial derivatives of the input with respect to all 
-      # elements of `x`
-      _gradients = gradients(ys, x, **kwargs)[0]
-      # Unpack the gradients into a list so we can take derivatives with 
-      # respect to each element
-      _gradients = array_ops.unpack(_gradients)
-    with ops.name_scope(name + '_second_derivative'):
-      # Compute the partial derivatives with respect to each element of the list
-      _hess = [gradients(_gradient, x, **kwargs)[0] for _gradient in _gradients]
-      # Pack the list into a matrix and add to the list of hessians
-      hessians.append(array_ops.pack(_hess, name=name))
-  return hessians
+# pylint: disable=unused-import
+from tensorflow.python.ops.gradients_impl import AggregationMethod
+from tensorflow.python.ops.gradients_impl import gradients
+from tensorflow.python.ops.gradients_impl import hessians
+# pylint: enable=unused-import
+from tensorflow.python.util.all_util import remove_undocumented
+
+_allowed_symbols = [
+    # TODO(drpng): find a good place to reference this.
+    "AggregationMethod",
+    "gradients",  # tf.gradients.gradients.
+    "hessians",  # tf.gradients.hessians
+]
+remove_undocumented(__name__, _allowed_symbols)
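
With this refactor, `gradients.py` becomes a thin facade over the new `gradients_impl.py`. A quick sketch of what the layout implies for callers, assuming both modules import cleanly in a built TensorFlow:

    from tensorflow.python.ops import gradients
    from tensorflow.python.ops import gradients_impl

    # The public entry points are the same objects, merely re-exported.
    assert gradients.gradients is gradients_impl.gradients
    assert gradients.hessians is gradients_impl.hessians
    assert gradients.AggregationMethod is gradients_impl.AggregationMethod

Private helpers such as `_MarkReachedOps` now live only in `gradients_impl`, which is why the test changes further down switch their imports.
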
diff --git a/tensorflow/python/ops/gradients_impl.py b/tensorflow/python/ops/gradients_impl.py
new file mode 100644
index 00000000000..5db3dd77228
--- /dev/null
+++ b/tensorflow/python/ops/gradients_impl.py
@@ -0,0 +1,895 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Implements the graph generation for computation of gradients."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import contextlib
+import warnings
+
+import numpy as np
+import six
+from six.moves import xrange  # pylint: disable=redefined-builtin
+
+from tensorflow.core.framework import attr_value_pb2
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_grad  # pylint: disable=unused-import
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_grad  # pylint: disable=unused-import
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import functional_ops
+from tensorflow.python.ops import image_grad  # pylint: disable=unused-import
+from tensorflow.python.ops import linalg_grad  # pylint: disable=unused-import
+from tensorflow.python.ops import linalg_ops
+from tensorflow.python.ops import logging_ops  # pylint: disable=unused-import
+from tensorflow.python.ops import math_grad  # pylint: disable=unused-import
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import tf_logging as logging
+
+
+# Warn the user if we convert a sparse representation to dense with at
+# least this number of elements.
+_LARGE_SPARSE_NUM_ELEMENTS = 100000000
+
+
+def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
+  """Converts an IndexedSlices object `value` to a Tensor.
+
+  NOTE(mrry): This function is potentially expensive.
+
+  Args:
+    value: An ops.IndexedSlices object.
+    dtype: The dtype of the Tensor to be returned.
+    name: Optional name to use for the returned Tensor.
+    as_ref: True if a ref is requested.
+
+  Returns:
+    A dense Tensor representing the values in the given IndexedSlices.
+
+  Raises:
+    ValueError: If the IndexedSlices does not have the same dtype.
+  """
+  _ = as_ref
+  if dtype and not dtype.is_compatible_with(value.dtype):
+    raise ValueError(
+        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
+        (dtype.name, value.dtype.name))
+  if value.dense_shape is None:
+    raise ValueError(
+        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
+        % str(value))
+  # TODO(mrry): Consider adding static shape information to
+  # IndexedSlices, to avoid using numpy here.
+  dense_shape_value = tensor_util.constant_value(value.dense_shape)
+  if dense_shape_value is not None:
+    num_elements = np.prod(dense_shape_value)
+    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
+      warnings.warn(
+          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
+          "This may consume a large amount of memory." % num_elements)
+  else:
+    warnings.warn(
+        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
+        "This may consume a large amount of memory.")
+  return math_ops.unsorted_segment_sum(
+      value.values, value.indices, value.dense_shape[0], name=name)
+
+
+ops.register_tensor_conversion_function(ops.IndexedSlices,
+                                        _IndexedSlicesToTensor)
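
The conversion above is essentially a scatter-add: each row of `values` is added into the row of a zero tensor named by the corresponding index, which is what `unsorted_segment_sum` computes here. A NumPy sketch of the same semantics (the helper name is illustrative, not TensorFlow API):

    import numpy as np

    def indexed_slices_to_dense(values, indices, dense_shape):
        # Mirrors unsorted_segment_sum(values, indices, dense_shape[0]):
        # duplicate indices accumulate; untouched rows stay zero.
        out = np.zeros(dense_shape, dtype=values.dtype)
        for row, idx in zip(values, indices):
            out[idx] += row
        return out

    values = np.array([[1., 1.], [2., 2.], [3., 3.]])
    indices = np.array([0, 2, 0])  # row 0 appears twice
    print(indexed_slices_to_dense(values, indices, (4, 2)))
    # [[4. 4.], [0. 0.], [2. 2.], [0. 0.]]
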
+
+
+def _MarkReachedOps(from_ops, reached_ops):
+  """Mark all ops reached from "from_ops".
+
+  Args:
+    from_ops: list of Operations.
+    reached_ops: list of booleans, indexed by operation id.
+  """
+  queue = collections.deque()
+  queue.extend(from_ops)
+  while queue:
+    op = queue.popleft()
+    if not reached_ops[op._id]:
+      reached_ops[op._id] = True
+      for output in op.outputs:
+        queue.extend(output.consumers())
+
+
+def _GatherInputs(to_ops, reached_ops):
+  """List all inputs of to_ops that are in reached_ops.
+
+  Args:
+    to_ops: list of Operations.
+    reached_ops: list of booleans, indexed by operation id.
+
+  Returns:
+    The list of all inputs of to_ops that are in reached_ops.
+    That list includes all elements of to_ops.
+  """
+  inputs = []
+  queue = collections.deque()
+  queue.extend(to_ops)
+  while queue:
+    op = queue.popleft()
+    # We are interested in this op.
+    if reached_ops[op._id]:
+      inputs.append(op)
+      # Clear the boolean so we won't add the inputs again.
+      reached_ops[op._id] = False
+      for inp in op.inputs:
+        queue.append(inp.op)
+  return inputs
+
+
+def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
+  """Initialize the pending count for ops between two lists of Operations.
+
+  'pending_count[op._id]' indicates the number of backprop inputs
+  to this operation.
+
+  Args:
+    graph: a Graph.
+    to_ops: list of Operations.
+    from_ops: list of Operations.
+    colocate_gradients_with_ops: Python bool.  See docstring of gradients().
+
+  Returns:
+    A tuple containing: (1) a list of integers indexed by operation id,
+    indicating the number of backprop inputs to this operation, and (2)
+    a ControlFlowState object which is not None if the ops between from_ops
+    and to_ops contain control flow loops.
+  """
+  # Mark reachable ops from from_ops.
+  reached_ops = [False] * (graph._last_id + 1)
+  for op in to_ops:
+    reached_ops[op._id] = True
+  _MarkReachedOps(from_ops, reached_ops)
+
+  # Mark between ops.
+  between_ops = [False] * (graph._last_id + 1)
+  between_op_list = []
+  queue = collections.deque()
+  queue.extend(to_ops)
+  while queue:
+    op = queue.popleft()
+    # We are interested in this op.
+    if reached_ops[op._id]:
+      between_ops[op._id] = True
+      between_op_list.append(op)
+      # Clear the boolean so we won't add the inputs again.
+      reached_ops[op._id] = False
+      for inp in op.inputs:
+        queue.append(inp.op)
+
+  # 'loop_state' is None if there are no while loops.
+  loop_state = control_flow_ops.MaybeCreateControlFlowState(
+      between_op_list, between_ops, colocate_gradients_with_ops)
+
+  # Initialize pending count for between ops.
+  pending_count = [0] * (graph._last_id + 1)
+  for op in between_op_list:
+    for x in op.inputs:
+      if between_ops[x.op._id]:
+        pending_count[x.op._id] += 1
+
+  return pending_count, loop_state
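
To make the bookkeeping concrete, here is the same reachability-and-count idea on a toy graph, using plain Python dicts in place of `Operation` ids (a sketch, not the TensorFlow data structures):

    import collections

    # Toy forward graph for y = add(mul(x, w), b).
    inputs = {"add": ["mul", "b"], "mul": ["x", "w"], "x": [], "w": [], "b": []}
    consumers = collections.defaultdict(list)
    for op, ins in inputs.items():
        for inp in ins:
            consumers[inp].append(op)

    # 1) Mark everything forward-reachable from the xs (here just "x").
    reached = set()
    queue = collections.deque(["x"])
    while queue:
        op = queue.popleft()
        if op not in reached:
            reached.add(op)
            queue.extend(consumers[op])

    # 2) Walk back from the ys; ops seen in both sweeps are "between" ops.
    between = set()
    queue = collections.deque(["add"])
    while queue:
        op = queue.popleft()
        if op in reached and op not in between:
            between.add(op)
            queue.extend(inputs[op])

    # 3) pending_count[op]: between-consumers still owing op a gradient.
    pending = collections.Counter()
    for op in between:
        for inp in inputs[op]:
            if inp in between:
                pending[inp] += 1
    print(dict(pending))  # {'mul': 1, 'x': 1} (key order may vary)

Backprop starts at "add" (count 0); once its gradient is emitted, "mul" becomes ready, and so on down to "x".
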
+
+
+def _AsList(x):
+  return x if isinstance(x, (list, tuple)) else [x]
+
+
+def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
+  """Fill in default values for grad_ys.
+
+  Args:
+    grad_ys: List of gradients, can contain None.
+    ys: List of tensors.
+    colocate_gradients_with_ops: If True, try colocating gradients with
+      the corresponding op.
+
+  Returns:
+    A list of gradients to use, without None.
+
+  Raises:
+    ValueError: If one of the grad_ys is invalid.
+  """
+  if len(grad_ys) != len(ys):
+    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
+  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
+  for i in xrange(len(grad_ys)):
+    grad_y = grad_ys[i]
+    y = ys[i]
+    if grad_y is None:
+      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
+        grad_ys[i] = array_ops.fill(
+            array_ops.shape(y), constant_op.constant(
+                1, dtype=y.dtype))
+    else:
+      if grad_y.dtype != y.dtype:
+        raise ValueError("Y and ys_grad must be of the same type, "
+                         "not y: %s, ys_grad: %s " %
+                         (dtypes.as_dtype(y.dtype).name,
+                          dtypes.as_dtype(grad_y.dtype).name))
+  return grad_ys
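
In other words, the default initial gradient is a tensor of ones, and a custom `grad_ys` re-weights each element of `y`. A sketch against the graph-mode API of this era (shapes and values illustrative):

    import tensorflow as tf

    x = tf.constant([1.0, 2.0, 3.0])
    y = 2.0 * x

    # Default: grad_ys is filled with ones, so dy/dx is 2 everywhere.
    g_default = tf.gradients(y, x)[0]
    # Custom grad_ys weights each element of y differently.
    g_weighted = tf.gradients(y, x, grad_ys=[tf.constant([1.0, 0.5, 0.0])])[0]

    with tf.Session() as sess:
      print(sess.run(g_default))   # [2. 2. 2.]
      print(sess.run(g_weighted))  # [2. 1. 0.]
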
+
+
+def _IsTrainable(tensor):
+  dtype = dtypes.as_dtype(tensor.dtype)
+  return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
+                              dtypes.complex64, dtypes.complex128)
+
+
+def _VerifyGeneratedGradients(grads, op):
+  """Verify that gradients are valid in number and type.
+
+  Args:
+    grads: List of generated gradients.
+    op: Operation for which the gradients where generated.
+
+  Raises:
+    ValueError: if the gradients are invalid.
+  """
+  if len(grads) != len(op.inputs):
+    raise ValueError("Num gradients %d generated for op %s do not match num "
+                     "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
+  for i in xrange(len(grads)):
+    grad = grads[i]
+    inp = op.inputs[i]
+    if grad is not None:
+      if not grad.dtype.is_compatible_with(inp.dtype):
+        raise ValueError("Gradient type %s generated for op %s does "
+                         "not match input type %s" %
+                         (dtypes.as_dtype(grad.dtype).name, op.node_def,
+                          dtypes.as_dtype(inp.dtype).name))
+
+
+def _StopOps(from_ops, pending_count):
+  """The set of ops that terminate the gradient computation.
+
+  This computes the frontier of the forward graph *before* which backprop
+  should stop. Operations in the returned set will not be differentiated.
+  This set is defined as the subset of `from_ops` containing ops that have
+  no predecessor in `from_ops`. `pending_count` is the first result of
+  `_PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops)`. An
+  op has predecessors in `from_ops` iff `pending_count[op._id] > 0`.
+
+  Args:
+    from_ops: list of Operations.
+    pending_count: List of integers, indexed by operation id.
+
+  Returns:
+    The set of operations.
+  """
+  stop_ops = set()
+  for op in from_ops:
+    is_stop_op = True
+    for inp in op.inputs:
+      if pending_count[inp.op._id] > 0:
+        is_stop_op = False
+        break
+    if is_stop_op:
+      stop_ops.add(op._id)
+  return stop_ops
+
+
+@contextlib.contextmanager
+def _maybe_colocate_with(op, colocate_gradients_with_ops):
+  """Context to colocate with `op` if `colocate_gradients_with_ops`."""
+  if colocate_gradients_with_ops:
+    with ops.colocate_with(op):
+      yield
+  else:
+    yield
+
+
+def _SymGrad(op, out_grads):
+  """Backprop through a function call node op given its outputs' gradients."""
+  f_in = [x for x in op.inputs] + out_grads
+  f_types = [x.dtype for x in op.inputs]
+  f = attr_value_pb2.NameAttrList()
+  f.name = op.type
+  for k in op.node_def.attr:
+    f.attr[k].CopyFrom(op.node_def.attr[k])
+  # pylint: disable=protected-access
+  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
+  # pylint: enable=protected-access
+  return in_grads
+
+
+def gradients(ys,
+              xs,
+              grad_ys=None,
+              name="gradients",
+              colocate_gradients_with_ops=False,
+              gate_gradients=False,
+              aggregation_method=None):
+  """Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
+
+  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
+  is a list of `Tensor`, holding the gradients received by the
+  `ys`. The list must be the same length as `ys`.
+
+  `gradients()` adds ops to the graph to output the partial
+  derivatives of `ys` with respect to `xs`.  It returns a list of
+  `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
+  for y in `ys`.
+
+  `grad_ys` is a list of tensors of the same length as `ys` that holds
+  the initial gradients for each y in `ys`.  When `grad_ys` is None,
+  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
+  user can provide their own initial `grad_ys` to compute the
+  derivatives using a different initial gradient for each y (e.g., if
+  one wanted to weight the gradient differently for each value in
+  each y).
+
+  Args:
+    ys: A `Tensor` or list of tensors to be differentiated.
+    xs: A `Tensor` or list of tensors to be used for differentiation.
+    grad_ys: Optional. A `Tensor` or list of tensors the same size as
+      `ys` and holding the gradients computed for each y in `ys`.
+    name: Optional name to use for grouping all the gradient ops together.
+      defaults to 'gradients'.
+    colocate_gradients_with_ops: If True, try colocating gradients with
+      the corresponding op.
+    gate_gradients: If True, add a tuple around the gradients returned
+      for an operation.  This avoids some race conditions.
+    aggregation_method: Specifies the method used to combine gradient terms.
+      Accepted values are constants defined in the class `AggregationMethod`.
+
+  Returns:
+    A list of `sum(dy/dx)` for each x in `xs`.
+
+  Raises:
+    LookupError: if one of the operations between `x` and `y` does not
+      have a registered gradient function.
+    ValueError: if the arguments are invalid.
+
+  """
+  ys = _AsList(ys)
+  xs = _AsList(xs)
+  if grad_ys is None:
+    grad_ys = [None] * len(ys)
+  else:
+    grad_ys = _AsList(grad_ys)
+
+  with ops.name_scope(name, "gradients", ys + xs + grad_ys):
+    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
+    xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
+    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
+
+    # The approach we take here is as follows: Create a list of all ops in the
+    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
+    # to ensure that when we visit an op the gradients w.r.t its outputs have
+    # been collected.  Then aggregate these gradients if needed, call the op's
+    # gradient function, and add the generated gradients to the gradients for
+    # its input.
+
+    # Initialize the pending count for ops in the connected subgraph from ys
+    # to the xs.
+    to_ops = [t.op for t in ys]
+    from_ops = [t.op for t in xs]
+    pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
+                                              from_ops,
+                                              colocate_gradients_with_ops)
+
+    # Iterate over the collected ops.
+    #
+    # grads: op => list of gradients received on each output endpoint of the
+    # op.  The gradients for each endpoint are initially collected as a list.
+    # When it is time to call the op's gradient function, for each endpoint we
+    # aggregate the list of received gradients into a Add() Operation if there
+    # is more than one.
+    grads = {}
+
+    # Add the initial gradients for the ys.
+    for y, grad_y in zip(ys, grad_ys):
+      _SetGrad(grads, y, grad_y)
+
+    # Initialize queue with to_ops.
+    queue = collections.deque()
+    # Add the ops in 'to_ops' into the queue.
+    to_ops_set = set()
+    for op in to_ops:
+      # 'ready' handles the case where one output gradient relies on
+      # another output's gradient.
+      # pylint: disable=protected-access
+      ready = (pending_count[op._id] == 0)
+      if ready and op._id not in to_ops_set:
+        to_ops_set.add(op._id)
+        queue.append(op)
+      # pylint: enable=protected-access
+
+    if loop_state:
+      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
+      for y in loop_exits:
+        if _IsTrainable(y):
+          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
+          queue.append(y.op)
+
+    # The set of 'from_ops'.
+    stop_ops = _StopOps(from_ops, pending_count)
+    while queue:
+      # generate gradient subgraph for op.
+      op = queue.popleft()
+      with _maybe_colocate_with(op, colocate_gradients_with_ops):
+        if loop_state:
+          loop_state.EnterGradWhileContext(op, before=True)
+        out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
+        if loop_state:
+          loop_state.ExitGradWhileContext(op, before=True)
+
+        grad_fn = None
+        # pylint: disable=protected-access
+        is_func_call = ops.get_default_graph()._is_function(op.type)
+        has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
+        if has_out_grads and (op._id not in stop_ops):
+          if is_func_call:
+            grad_fn = ops.get_default_graph()._get_function(
+                op.type).python_grad_func
+            # pylint: enable=protected-access
+          else:
+            # A grad_fn must be defined, either as a function or as None
+            # for ops that do not have gradients.
+            try:
+              grad_fn = ops.get_gradient_function(op)
+            except LookupError:
+              raise LookupError(
+                  "No gradient defined for operation '%s' (op type: %s)" %
+                  (op.name, op.type))
+        if loop_state:
+          loop_state.EnterGradWhileContext(op, before=False)
+        if (grad_fn or is_func_call) and has_out_grads:
+          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
+          # output, it means that the cost does not depend on output[i],
+          # therefore dC/doutput[i] is 0.
+          for i, out_grad in enumerate(out_grads):
+            if (not isinstance(out_grad, ops.Tensor) and
+                not out_grad) and _IsTrainable(op.outputs[i]):
+              # Only floating-point outputs get a zero gradient. Gradient
+              # functions should ignore the gradient for other outputs.
+              if loop_state:
+                out_grads[i] = loop_state.ZerosLike(op, i)
+              else:
+                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
+          with ops.name_scope(op.name + "_grad"):
+            # pylint: disable=protected-access
+            with ops.get_default_graph()._original_op(op):
+              # pylint: enable=protected-access
+              if grad_fn:
+                # If grad_fn was found, do not use SymbolicGradient even for
+                # functions.
+                in_grads = grad_fn(op, *out_grads)
+              else:
+                # For function call ops, we add a 'SymbolicGradient'
+                # node to the graph to compute gradients.
+                in_grads = _SymGrad(op, out_grads)
+              in_grads = _AsList(in_grads)
+              _VerifyGeneratedGradients(in_grads, op)
+              if gate_gradients and len(
+                  [x for x in in_grads if x is not None]) > 1:
+                in_grads = control_flow_ops.tuple(in_grads)
+          _LogOpGradients(op, out_grads, in_grads)
+        else:
+          # If no grad_fn is defined or none of out_grads is available,
+          # just propagate a list of None backwards.
+          in_grads = [None] * len(op.inputs)
+        for t_in, in_grad in zip(op.inputs, in_grads):
+          if in_grad is not None:
+            if isinstance(in_grad, ops.Tensor):
+              in_grad.set_shape(t_in.get_shape())
+            _SetGrad(grads, t_in, in_grad)
+        if loop_state:
+          loop_state.ExitGradWhileContext(op, before=False)
+
+      # Update pending count for the inputs of op and enqueue ready ops.
+      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
+
+  if loop_state:
+    loop_state.PostProcessing()
+  return [_GetGrad(grads, x) for x in xs]
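
End to end, the loop above is what makes gradients from multiple uses of a tensor sum up. A small sketch (era-appropriate graph-mode API):

    import tensorflow as tf

    x = tf.constant(4.0)
    y1 = 2.0 * x
    y2 = x * x

    # x is consumed twice; its received gradients (2 and 2*x) are aggregated.
    g = tf.gradients([y1, y2], x)[0]

    with tf.Session() as sess:
      print(sess.run(g))  # 10.0 == 2 + 2*4
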
+
+
+def _HasAnyNotNoneGrads(grads, op):
+  """Return true iff op has real gradient."""
+  out_grads = _GetGrads(grads, op)
+  for out_grad in out_grads:
+    if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
+      return True
+    if out_grad and isinstance(out_grad, collections.Sequence):
+      if any([g is not None for g in out_grad]):
+        return True
+  return False
+
+
+def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
+  """Update pending count for the inputs of op and enqueue ready ops."""
+  for x in op.inputs:
+    # pylint: disable=protected-access
+    pending_count[x.op._id] -= 1
+    ready = (pending_count[x.op._id] == 0)
+    if loop_state and not ready:
+      ready = (pending_count[x.op._id] > 0 and
+               control_flow_ops.IsLoopSwitch(x.op))
+    # pylint: enable=protected-access
+    if ready:
+      if control_flow_ops.IsLoopExit(x.op):
+        # If x is an exit without a real gradient, defer processing it.
+        grad_state = loop_state.GetGradState(x.op, before=False)
+        grad_state.deferred_exits.append(x)
+        grad_state.pending_exits_count -= 1
+        if grad_state.pending_exits_count == 0:
+          # We now have all the exits so process them.
+          has_real_grad = False
+          for y in grad_state.deferred_exits:
+            if _HasAnyNotNoneGrads(grads, y.op):
+              has_real_grad = True
+              queue.append(y.op)
+            else:
+              grad_state.unused_exits.append(y)
+          if has_real_grad:
+            # For an unused exit, if it has floating-point outputs, backprop
+            # a zero gradient. Otherwise, just ignore it.
+            for y in grad_state.unused_exits:
+              if _IsTrainable(y):
+                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
+              queue.append(y.op)
+          else:
+            # All exits are "unused" so use None as gradient.
+            for y in grad_state.unused_exits:
+              queue.append(y.op)
+      else:
+        queue.append(x.op)
+
+
+def _SetGrad(grads, t, grad):
+  """Sets gradient "grad" in "grads" for tensor "t"."""
+  op = t.op
+  op_grads = grads.get(op)
+  if not op_grads:
+    op_grads = [[] for _ in xrange(len(op.outputs))]
+    grads[op] = op_grads
+  t_grads = op_grads[t.value_index]
+  if isinstance(t_grads, list):
+    t_grads.append(grad)
+  else:
+    assert control_flow_ops.IsLoopSwitch(op)
+    op_grads[t.value_index] = grad
+
+
+def _GetGrad(grads, t):
+  """Gets gradient for tensor "t"."""
+  op = t.op
+  op_grads = grads.get(op)
+  if not op_grads:
+    return None
+  t_grad = op_grads[t.value_index]
+  assert not isinstance(t_grad, list), (
+      "gradients list should have been aggregated by now.")
+  return t_grad
+
+
+def _GetGrads(grads, op):
+  """Gets all gradients for op."""
+  if op in grads:
+    return grads[op]
+  else:
+    return [[] for _ in xrange(len(op.outputs))]
+
+
+def _HandleNestedIndexedSlices(grad):
+  assert isinstance(grad, ops.IndexedSlices)
+  if isinstance(grad.values, ops.Tensor):
+    return grad
+  else:
+    assert isinstance(grad.values, ops.IndexedSlices)
+    g = _HandleNestedIndexedSlices(grad.values)
+    return ops.IndexedSlices(g.values,
+                             array_ops.gather(grad.indices, g.indices),
+                             g.dense_shape)
+
+
+def _AccumulatorShape(inputs):
+  shape = tensor_shape.unknown_shape()
+  for i in inputs:
+    if isinstance(i, ops.Tensor):
+      shape = shape.merge_with(i.get_shape())
+  return shape
+
+
+def _LogOpGradients(op, out_grads, in_grads):
+  """Log the in and out grads of an op."""
+  logging.vlog(1, "Gradient for '" + op.name + "'")
+
+  def _FilterGrad(x):
+    if x is None:
+      return False
+    if isinstance(x, (list, tuple)):
+      return bool(x)
+    else:
+      return True
+
+  logging.vlog(1, "  in  --> %s",
+               ", ".join([x.name for x in out_grads if _FilterGrad(x)]))
+  logging.vlog(1, "  out --> %s",
+               ", ".join([x.name for x in in_grads if _FilterGrad(x)]))
+
+
+def _MultiDeviceAddN(tensor_list):
+  """Adds tensors from potentially multiple devices."""
+  # Basic function structure comes from control_flow_ops.group().
+  # Sort tensors according to their devices.
+  tensors_on_device = collections.defaultdict(lambda: [])
+  for tensor in tensor_list:
+    tensors_on_device[tensor.device].append(tensor)
+
+  # For each device, add the tensors on that device first.
+  # Then gather the partial sums from multiple devices.
+  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
+  # E.g., aggregate per GPU, then per task, and so on.
+  summands = []
+
+  def DeviceKey(dev):
+    return "" if dev is None else dev
+
+  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
+    tensors = tensors_on_device[dev]
+    with ops.colocate_with(tensors[0].op, ignore_existing=True):
+      summands.append(math_ops.add_n(tensors))
+
+  return math_ops.add_n(summands)
+
+
+class AggregationMethod(object):
+  """A class listing aggregation methods used to combine gradients.
+
+  Computing partial derivatives can require aggregating gradient
+  contributions. This class lists the various methods that can
+  be used to combine gradients in the graph:
+
+  *  `ADD_N`: All of the gradient terms are summed as part of one
+     operation using the "AddN" op. It has the property that all
+     gradients must be ready before any aggregation is performed.
+  *  `DEFAULT`: The system-chosen default aggregation method.
+  """
+  ADD_N = 0
+  DEFAULT = ADD_N
+  # The following are experimental and may not be supported in future releases.
+  EXPERIMENTAL_TREE = 1
+  EXPERIMENTAL_ACCUMULATE_N = 2
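
These constants are passed straight through `gradients()`. For example (a sketch; the result is identical for this tiny graph, only the aggregation subgraph differs):

    import tensorflow as tf

    x = tf.constant([1.0, 2.0])
    ys = [x * float(i) for i in range(1, 5)]  # four uses of x

    g_tree = tf.gradients(
        ys, x, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)[0]
    g_addn = tf.gradients(
        ys, x, aggregation_method=tf.AggregationMethod.ADD_N)[0]

    with tf.Session() as sess:
      print(sess.run([g_tree, g_addn]))  # both [10. 10.] == (1+2+3+4) * [1, 1]
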
+
+
+def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
+  """Get the aggregated gradients for op.
+
+  Args:
+    grads: The map of memoized gradients.
+    op: The op to get gradients for.
+    loop_state: An object for maintaining the state of the while loops in the
+                graph. It is of type ControlFlowState. None if the graph
+                contains no while loops.
+    aggregation_method: Specifies the method used to combine gradient terms.
+      Accepted values are constants defined in the class `AggregationMethod`.
+
+  Returns:
+    A list of gradients, one for each output of `op`. If the gradients
+      for a particular output are still a list, this function aggregates them
+      before returning.
+
+  Raises:
+    TypeError: if the incoming grads are not Tensors or IndexedSlices.
+    ValueError: if the arguments are invalid.
+
+  """
+  if aggregation_method is None:
+    aggregation_method = AggregationMethod.DEFAULT
+  if aggregation_method not in [
+      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
+      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
+  ]:
+    raise ValueError("Invalid aggregation_method specified %s." %
+                     aggregation_method)
+  out_grads = _GetGrads(grads, op)
+  for i, out_grad in enumerate(out_grads):
+    if loop_state:
+      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
+        assert control_flow_ops.IsLoopSwitch(op)
+        continue
+    # Grads have to be Tensors or IndexedSlices
+    if (isinstance(out_grad, collections.Sequence) and not all([
+        isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
+        if g is not None
+    ])):
+      raise TypeError("gradients have to be either all Tensors "
+                      "or all IndexedSlices")
+    # Aggregate multiple gradients, and convert [] to None.
+    if out_grad:
+      if len(out_grad) < 2:
+        used = "nop"
+        out_grads[i] = out_grad[0]
+      elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
+        tensor_shape = _AccumulatorShape(out_grad)
+        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
+            and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
+          # The benefit of using AccumulateN is that its inputs can be combined
+          # in any order and this can allow the expression to be evaluated with
+          # a smaller memory footprint.  When used with gpu_allocator_retry,
+          # it is possible to compute a sum of terms which are much larger than
+          # total GPU memory.
+          # AccumulateN can currently only be used if we know the shape for
+          # an accumulator variable.  If this is not known, or if we only have
+          # 2 grads then we fall through to the "tree" case below.
+          used = "accumulate_n"
+          out_grads[i] = math_ops.accumulate_n(out_grad)
+        elif aggregation_method in [
+            AggregationMethod.EXPERIMENTAL_TREE,
+            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
+        ]:
+          # Aggregate all gradients by doing pairwise sums: this may
+          # reduce performance, but it can improve memory because the
+          # gradients can be released earlier.
+          #
+          # TODO(vrv): Consider replacing this with a version of
+          # tf.AddN() that eagerly frees its inputs as soon as they are
+          # ready, so the order of this tree does not become a problem.
+          used = "tree"
+          with ops.name_scope(op.name + "_gradient_sum"):
+            running_sum = out_grad[0]
+            for grad in out_grad[1:]:
+              running_sum = math_ops.add_n([running_sum, grad])
+            out_grads[i] = running_sum
+        else:
+          used = "add_n"
+          out_grads[i] = _MultiDeviceAddN(out_grad)
+        logging.vlog(2, "  _AggregatedGrads %d x %s using %s",
+                     len(out_grad), tensor_shape, used)
+      else:
+        out_grad = math_ops._as_indexed_slices_list(
+            [g for g in out_grad if g is not None])
+        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
+        # Form IndexedSlices out of the concatenated values and
+        # indices.
+        out_grads[i] = ops.IndexedSlices(
+            array_ops.concat(0, [x.values for x in out_grad]),
+            array_ops.concat(0, [x.indices for x in out_grad]),
+            out_grad[0].dense_shape)
+    else:
+      out_grads[i] = []
+  return out_grads
+
+
+# TODO(vrv): Make this available when we want to make it public.
+def _hessian_vector_product(ys, xs, v):
+  """Multiply the Hessian of `ys` wrt `xs` by `v`.
+
+  This is an efficient construction that uses a backprop-like approach
+  to compute the product between the Hessian and another vector. The
+  Hessian is usually too large to be explicitly computed or even
+  represented, but this method allows us to at least multiply by it
+  for the same big-O cost as backprop.
+
+  Implicit Hessian-vector products are the main practical, scalable way
+  of using second derivatives with neural networks. They allow us to
+  do things like construct Krylov subspaces and approximate conjugate
+  gradient descent.
+
+  Example: if `y` = `x`^T A `x`, then `hessian_vector_product(y,
+  x, v)` will return an expression that evaluates to the same values
+  as (A + A.T) `v`.
+
+  Args:
+    ys: A scalar value, or a tensor or list of tensors to be summed to
+        yield a scalar.
+    xs: A list of tensors that we should construct the Hessian over.
+    v: A list of tensors, with the same shapes as xs, that we want to
+       multiply by the Hessian.
+
+  Returns:
+    A list of tensors (or if the list would be length 1, a single tensor)
+    containing the product between the Hessian and `v`.
+
+  Raises:
+    ValueError: `xs` and `v` have different length.
+
+  """
+
+  # Validate the input
+  length = len(xs)
+  if len(v) != length:
+    raise ValueError("xs and v must have the same length.")
+
+  # First backprop
+  grads = gradients(ys, xs)
+
+  assert len(grads) == length
+  elemwise_products = [
+      math_ops.mul(grad_elem, array_ops.stop_gradient(v_elem))
+      for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
+  ]
+
+  # Second backprop
+  return gradients(elemwise_products, xs)
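
Since `_hessian_vector_product` is private, here is a sketch reproducing its double-backprop trick by hand (era-appropriate API; `tf.mul` was the elementwise multiply at the time):

    import tensorflow as tf

    a = tf.constant([[2.0, 1.0], [0.0, 3.0]])
    x = tf.constant([[1.0], [1.0]])
    v = tf.constant([[1.0], [0.0]])

    y = tf.matmul(tf.transpose(x), tf.matmul(a, x))  # y = x^T A x

    # First backprop, then differentiate grad . stop_gradient(v) again.
    grad = tf.gradients(y, [x])[0]
    hvp = tf.gradients(tf.mul(grad, tf.stop_gradient(v)), [x])[0]

    with tf.Session() as sess:
      print(sess.run(hvp))  # (A + A^T) v = [[4.], [1.]]
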
+
+
+def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
+            gate_gradients=False, aggregation_method=None):
+  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
+
+  `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
+  with respect to `xs`.  It returns a list of `Tensor` of length `len(xs)`
+  where each tensor is the Hessian of `sum(ys)`. This function currently
+  only supports evaluating the Hessian with respect to (a list of) one-
+  dimensional tensors.
+
+  The Hessian is a matrix of second-order partial derivatives of a scalar
+  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
+
+  Args:
+    ys: A `Tensor` or list of tensors to be differentiated.
+    xs: A `Tensor` or list of tensors to be used for differentiation.
+    name: Optional name to use for grouping all the gradient ops together.
+      defaults to 'hessians'.
+    colocate_gradients_with_ops: See `gradients()` documentation for details.
+    gate_gradients: See `gradients()` documentation for details.
+    aggregation_method: See `gradients()` documentation for details.
+
+  Returns:
+    A list of Hessian matrices of `sum(y)` for each `x` in `xs`.
+
+  Raises:
+    LookupError: if one of the operations between `xs` and `ys` does not
+      have a registered gradient function.
+    ValueError: if the arguments are invalid or not supported. Currently,
+      this function only supports one-dimensional `x` in `xs`.
+  """
+  xs = _AsList(xs)
+  kwargs = {
+      'colocate_gradients_with_ops': colocate_gradients_with_ops,
+      'gate_gradients': gate_gradients,
+      'aggregation_method': aggregation_method
+  }
+  # Compute a hessian matrix for each x in xs
+  hessians = []
+  for i, x in enumerate(xs):
+    # Check dimensions
+    ndims = x.get_shape().ndims
+    if ndims is None:
+      raise ValueError('Cannot compute Hessian because the dimensionality of '
+                       'element number %d of `xs` cannot be determined' % i)
+    elif ndims != 1:
+      raise ValueError('Computing hessians is currently only supported for '
+                       'one-dimensional tensors. Element number %d of `xs` has '
+                       '%d dimensions.' % (i, ndims))
+    with ops.name_scope(name + '_first_derivative'):
+      # Compute the partial derivatives of the input with respect to all
+      # elements of `x`
+      _gradients = gradients(ys, x, **kwargs)[0]
+      # Unpack the gradients into a list so we can take derivatives with
+      # respect to each element
+      _gradients = array_ops.unpack(_gradients)
+    with ops.name_scope(name + '_second_derivative'):
+      # Compute the partial derivatives with respect to each element of the list
+      _hess = [gradients(_gradient, x, **kwargs)[0] for _gradient in _gradients]
+      # Pack the list into a matrix and add to the list of hessians
+      hessians.append(array_ops.pack(_hess, name=name))
+  return hessians
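
Usage sketch for the new entry point (graph-mode API; assuming the symbol is wired up at the top level as `tf.hessians`, as the re-export in `gradients.py` above suggests):

    import tensorflow as tf

    x = tf.constant([1.0, 2.0, 3.0])
    y = tf.reduce_sum(x * x * x)  # d2y/dx_i^2 = 6*x_i; off-diagonals are 0

    hess = tf.hessians(y, x)[0]

    with tf.Session() as sess:
      print(sess.run(hess))
      # [[ 6.  0.  0.]
      #  [ 0. 12.  0.]
      #  [ 0.  0. 18.]]
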
diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py
index 7097f10cbc6..1743cd8caaa 100644
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
@@ -34,6 +34,7 @@ from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import data_flow_grad  # pylint: disable=unused-import
 from tensorflow.python.ops import data_flow_ops  # pylint: disable=unused-import
 from tensorflow.python.ops import gradients
+from tensorflow.python.ops import gradients_impl
 from tensorflow.python.ops import math_grad  # pylint: disable=unused-import
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import nn_grad  # pylint: disable=unused-import
@@ -66,8 +67,8 @@ def _OpsBetween(graph, to_ops, from_ops):
   # output ops as reached to avoid recursing past them.
   for op in to_ops:
     reached_ops[op._id] = True
-  gradients._MarkReachedOps(from_ops, reached_ops)
-  between_ops = gradients._GatherInputs(to_ops, reached_ops)
+  gradients_impl._MarkReachedOps(from_ops, reached_ops)
+  between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
   between_ops.sort(key=lambda x: -x._id)
   return between_ops
 
@@ -414,7 +415,7 @@ class HessianVectorProductTest(test_util.TensorFlowTestCase):
         x = constant_op.constant(x_value)
         mat_x = math_ops.matmul(mat, x, name="Ax")
         x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
-        hess_v = gradients._hessian_vector_product(x_mat_x, [x], [v])[0]
+        hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0]
         hess_v_actual = hess_v.eval()
       self.assertAllClose(hess_v_value, hess_v_actual)
 
diff --git a/tensorflow/python/ops/linalg_grad.py b/tensorflow/python/ops/linalg_grad.py
index 604fd4b3aa1..4b680c69464 100644
--- a/tensorflow/python/ops/linalg_grad.py
+++ b/tensorflow/python/ops/linalg_grad.py
@@ -74,6 +74,92 @@ def _MatrixSolveGrad(op, grad):
   return (grad_a, grad_b)
 
 
+@ops.RegisterGradient("MatrixSolveLs")
+def _MatrixSolveLsGrad(op, grad):
+  """Gradients for MatrixSolveLs."""
+
+  # TODO(rmlarsen): The implementation could be more efficient:
+  #   a) Output the Cholesky factorization from forward op instead of
+  #      recomputing it here.
+  #   b) Implement a symmetric rank-k update op instead of computing
+  #      x*z + transpose(x*z). This pattern occurs other places in TensorFlow.
+
+  def _overdetermined(op, grad):
+    """Gradients for the overdetermined case of MatrixSolveLs.
+
+    This is the backprop for the solution to the normal equations of the first
+    kind:
+       X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
+       which solves the least squares problem
+       min ||A * X - B||_F^2 + lambda ||X||_F^2.
+    """
+    a = op.inputs[0]
+    b = op.inputs[1]
+    l2_regularizer = op.inputs[2]
+    x = op.outputs[0]
+    a_shape = array_ops.shape(a)
+    batch_shape = a_shape[:-2]
+    n = a_shape[-1]
+
+    identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype)
+    gramian = math_ops.batch_matmul(
+        a, a, adj_x=True) + l2_regularizer * identity
+    chol = linalg_ops.cholesky(gramian)
+    # Temporary z = (A^T * A + lambda * I)^{-1} * grad.
+    z = linalg_ops.cholesky_solve(chol, grad)
+    xzt = math_ops.batch_matmul(x, z, adj_y=True)
+    zx_sym = xzt + array_ops.matrix_transpose(xzt)
+    grad_a = -math_ops.batch_matmul(a, zx_sym) + math_ops.batch_matmul(
+        b, z, adj_y=True)
+    grad_b = math_ops.batch_matmul(a, z)
+    return (grad_a, grad_b, None)
+
+  def _underdetermined(op, grad):
+    """Gradients for the underdetermined case of MatrixSolveLs.
+
+    This is the backprop for the solution to the normal equations of the second
+    kind:
+      X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
+      that (for lambda=0) solves the least squares problem
+      min ||X||_F subject to A*X = B.
+    """
+    a = op.inputs[0]
+    b = op.inputs[1]
+    l2_regularizer = op.inputs[2]
+    a_shape = array_ops.shape(a)
+    batch_shape = a_shape[:-2]
+    m = a_shape[-2]
+
+    identity = linalg_ops.eye(m, batch_shape=batch_shape, dtype=a.dtype)
+    gramian = math_ops.batch_matmul(
+        a, a, adj_y=True) + l2_regularizer * identity
+    chol = linalg_ops.cholesky(gramian)
+    grad_b = linalg_ops.cholesky_solve(chol, math_ops.batch_matmul(a, grad))
+    # Temporary z = (A * A^T + lambda * I)^{-1} * B.
+    z = linalg_ops.cholesky_solve(chol, b)
+    bz = -math_ops.batch_matmul(grad_b, z, adj_y=True)
+    bz_sym = bz + array_ops.matrix_transpose(bz)
+    grad_a = math_ops.batch_matmul(bz_sym, a) + math_ops.batch_matmul(z, grad)
+    return (grad_a, grad_b, None)
+
+  fast = op.get_attr("fast")
+  if fast is False:
+    raise ValueError("Gradient not defined for fast=False")
+  matrix_shape = op.inputs[0].get_shape()[-2:]
+  if matrix_shape.is_fully_defined():
+    if matrix_shape[-2] >= matrix_shape[-1]:
+      return _overdetermined(op, grad)
+    else:
+      return _underdetermined(op, grad)
+  else:
+    # We have to defer determining the shape to runtime and use
+    # conditional execution of the appropriate graph.
+    matrix_shape = array_ops.shape(op.inputs[0])[-2:]
+    return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],
+                                 lambda: _overdetermined(op, grad),
+                                 lambda: _underdetermined(op, grad))
+
+
 @ops.RegisterGradient("MatrixTriangularSolve")
 def _MatrixTriangularSolveGrad(op, grad):
   """Gradient for MatrixTriangularSolve."""
@@ -129,6 +215,6 @@ def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
     # symmetrize and take the lower triangle
     grad_a = array_ops.matrix_band_part(
         grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
-    grad_a = array_ops.matrix_set_diag(grad_a, 0.5 *
-                                       array_ops.matrix_diag_part(grad_a))
+    grad_a = array_ops.matrix_set_diag(grad_a,
+                                       0.5 * array_ops.matrix_diag_part(grad_a))
     return grad_a
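A quick way to exercise the newly registered gradient (a sketch, not part of
the change; the 5x3 `a` makes the static shape check above take the
overdetermined branch):

```python
import numpy as np
import tensorflow as tf

a = tf.constant(np.random.randn(5, 3))  # M >= N, so _overdetermined is used.
b = tf.constant(np.random.randn(5, 1))
x = tf.matrix_solve_ls(a, b, l2_regularizer=0.1, fast=True)
loss = tf.reduce_sum(tf.square(x))
grad_a, grad_b = tf.gradients(loss, [a, b])
with tf.Session() as sess:
  ga, gb = sess.run([grad_a, grad_b])
  print(ga.shape, gb.shape)  # => (5, 3) (5, 1)
```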
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index da411044384..36cdcc7dc4c 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -239,7 +239,7 @@ def self_adjoint_eigvals(tensor, name=None):
   return e
 
 
-def svd(tensor, compute_uv=True, full_matrices=False, name=None):
+def svd(tensor, full_matrices=False, compute_uv=True, name=None):
   """Computes the singular value decompositions of one or more matrices.
 
   Computes the SVD of each inner matrix in `tensor` such that
@@ -258,12 +258,12 @@ def svd(tensor, compute_uv=True, full_matrices=False, name=None):
   Args:
     matrix: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
       `N`.
-    compute_uv: If `True` then left and right singular vectors will be
-      computed and returned in `u` and `v`, respectively. Otherwise, only the
-      singular values will be computed, which can be significantly faster.
     full_matrices: If true, compute full-sized `u` and `v`. If false
       (the default), compute only the leading `P` singular vectors.
       Ignored if `compute_uv` is `False`.
+    compute_uv: If `True` then left and right singular vectors will be
+      computed and returned in `u` and `v`, respectively. Otherwise, only the
+      singular values will be computed, which can be significantly faster.
     name: string, optional name of the operation.
 
   Returns:
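Because this swap changes the positional meaning of the second and third
arguments, existing call sites are safest passing both flags by keyword (a
minimal sketch):

```python
import tensorflow as tf

a = tf.random_normal([4, 3])
s = tf.svd(a, compute_uv=False)  # Singular values only; notably faster.
s, u, v = tf.svd(a, full_matrices=False, compute_uv=True)
```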
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index cc5464d5720..f67f4f35e88 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -1057,8 +1057,8 @@ def raw_rnn(cell, loop_fn,
         time=time + 1, cell_output=output, cell_state=cell_state,
         loop_state=loop_state)
     # Emit zeros and copy forward state for minibatch entries that are finished.
-    state = tf.select(finished, state, next_state)
-    emit = tf.select(finished, tf.zeros_like(emit), emit)
+    state = tf.where(finished, state, next_state)
+    emit = tf.where(finished, tf.zeros_like(emit), emit)
     emit_ta = emit_ta.write(time, emit)
     # If any new minibatch entries are marked as finished, mark these.
     finished = tf.logical_or(finished, next_finished)
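The replacement has the same semantics: `tf.where(cond, x, y)` takes entries
of `x` where `cond` is true and entries of `y` elsewhere (a sketch, assuming
the three-argument form of `tf.where` used above):

```python
import tensorflow as tf

finished = tf.constant([True, False])
state = tf.constant([1.0, 2.0])
next_state = tf.constant([10.0, 20.0])
# Finished entries keep the old state; the rest take the new state.
out = tf.where(finished, state, next_state)
with tf.Session() as sess:
  print(sess.run(out))  # => [  1.  20.]
```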
diff --git a/tensorflow/python/ops/standard_ops.py b/tensorflow/python/ops/standard_ops.py
index 9267b8ef2ee..847d1b99c83 100644
--- a/tensorflow/python/ops/standard_ops.py
+++ b/tensorflow/python/ops/standard_ops.py
@@ -67,6 +67,11 @@ from tensorflow.python.ops.state_ops import scatter_div
 from tensorflow.python.ops.state_ops import scatter_mul
 from tensorflow.python.ops.state_ops import scatter_sub
 from tensorflow.python.ops.state_ops import scatter_update
+from tensorflow.python.ops.state_ops import scatter_nd_add
+from tensorflow.python.ops.state_ops import scatter_nd_sub
+from tensorflow.python.ops.state_ops import scatter_nd_mul
+from tensorflow.python.ops.state_ops import scatter_nd_div
+from tensorflow.python.ops.state_ops import scatter_nd_update
 from tensorflow.python.ops.string_ops import *
 from tensorflow.python.ops.template import *
 from tensorflow.python.ops.tensor_array_ops import *
diff --git a/tensorflow/python/ops/state_grad.py b/tensorflow/python/ops/state_grad.py
index 871ce780c5c..314f9f0c1af 100644
--- a/tensorflow/python/ops/state_grad.py
+++ b/tensorflow/python/ops/state_grad.py
@@ -20,7 +20,7 @@ from __future__ import division
 from __future__ import print_function
 
 from tensorflow.python.framework import ops
-from tensorflow.python.ops import state_ops
+
 
 # TODO(b/31222613): These ops may be differentiable, and there may be
 # latent bugs here.
@@ -43,3 +43,14 @@ ops.NotDifferentiable("ScatterMul")
 
 
 ops.NotDifferentiable("ScatterDiv")
+
+
+ops.NotDifferentiable("ScatterNdUpdate")
+
+ops.NotDifferentiable("ScatterNdAdd")
+
+ops.NotDifferentiable("ScatterNdSub")
+
+ops.NotDifferentiable("ScatterNdMul")
+
+ops.NotDifferentiable("ScatterNdDiv")
diff --git a/tensorflow/python/ops/state_ops.py b/tensorflow/python/ops/state_ops.py
index 636acc3e2ad..2c12865df06 100644
--- a/tensorflow/python/ops/state_ops.py
+++ b/tensorflow/python/ops/state_ops.py
@@ -22,15 +22,15 @@
 TensorFlow provides a set of functions to help manage the set of variables
 collected in the graph.
 
-@@all_variables
-@@trainable_variables
+@@global_variables
 @@local_variables
 @@model_variables
+@@trainable_variables
 @@moving_average_variables
 
-@@initialize_all_variables
-@@initialize_variables
-@@initialize_local_variables
+@@global_variables_initializer
+@@local_variables_initializer
+@@variables_initializer
 @@is_variable_initialized
 @@report_uninitialized_variables
 @@assert_variables_initialized
@@ -95,6 +95,11 @@ automatically by the optimizers in most cases.
 @@scatter_sub
 @@scatter_mul
 @@scatter_div
+@@scatter_nd_update
+@@scatter_nd_add
+@@scatter_nd_sub
+@@scatter_nd_mul
+@@scatter_nd_div
 @@sparse_mask
 @@IndexedSlices
 
@@ -108,6 +113,13 @@ automatically by the optimizers in most cases.
 @@export_meta_graph
 @@import_meta_graph
 
+# Deprecated functions (removed after 2017-03-02). Please don't use them.
+
+@@all_variables
+@@initialize_all_variables
+@@initialize_local_variables
+@@initialize_variables
+
 """
 
 from __future__ import absolute_import
@@ -209,3 +221,34 @@ ops.RegisterShape("ScatterDiv")(common_shapes.call_cpp_shape_fn)
 ops.RegisterShape("ScatterMul")(common_shapes.call_cpp_shape_fn)
 ops.RegisterShape("ScatterSub")(common_shapes.call_cpp_shape_fn)
 ops.RegisterShape("ScatterUpdate")(common_shapes.call_cpp_shape_fn)
+
+
+@ops.RegisterShape("ScatterNdAdd")
+@ops.RegisterShape("ScatterNdSub")
+@ops.RegisterShape("ScatterNdMul")
+@ops.RegisterShape("ScatterNdDiv")
+@ops.RegisterShape("ScatterNdUpdate")
+def scatter_nd_update_shape(op):
+  """Shape function for the ScatterNd update ops."""
+  ref_shape = op.inputs[0].get_shape()
+  indices_shape = op.inputs[1].get_shape()
+  updates_shape = op.inputs[2].get_shape()
+
+  if indices_shape.ndims is not None and ref_shape.ndims is not None:
+    outer_dims = len(indices_shape) - 1
+    ixdim = indices_shape[-1].value or 0
+
+    if not indices_shape[:outer_dims].is_compatible_with(
+        updates_shape[:outer_dims]):
+      raise ValueError("The outer %d dimensions of indices.shape=%s must "
+                       "match the outer %d dimensions of updates.shape=%s" % (
+                           outer_dims, indices_shape, outer_dims,
+                           updates_shape))
+
+    if not ref_shape[ixdim:].is_compatible_with(updates_shape[outer_dims:]):
+      raise ValueError("The inner %d dimensions of ref.shape=%s must match "
+                       "the inner %d dimensions of updates.shape=%s" % (
+                           len(ref_shape) - ixdim, ref_shape,
+                           len(updates_shape) - outer_dims, updates_shape))
+
+  return [ref_shape]
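A sketch of shapes that satisfy this function, using the newly exported
`tf.scatter_nd_update` (here `outer_dims` is 1 and `ixdim` is 1, so
`indices[:, 0]` indexes rows of `ref` and `updates` supplies one value per
index):

```python
import tensorflow as tf

ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1]])  # Outer dims [3] match updates' [3].
updates = tf.constant([9, 10, 11])      # ref_shape[1:] == updates_shape[1:].
update_op = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(update_op))  # => [ 1 11  3 10  9  6  7  8]
```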
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index 9eec0d215dc..45261ecaac9 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -26,6 +26,7 @@ from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import gen_state_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import state_ops
+from tensorflow.python.util.deprecation import deprecated
 
 
 class Variable(object):
@@ -82,16 +83,16 @@ class Variable(object):
   ```
 
   The most common initialization pattern is to use the convenience function
-  `initialize_all_variables()` to add an Op to the graph that initializes
+  `global_variables_initializer()` to add an Op to the graph that initializes
   all the variables. You then run that Op after launching the graph.
 
   ```python
-  # Add an Op to initialize all variables.
-  init_op = tf.initialize_all_variables()
+  # Add an Op to initialize global variables.
+  init_op = tf.global_variables_initializer()
 
   # Launch the graph in a session.
   with tf.Session() as sess:
-      # Run the Op that initializes all variables.
+      # Run the Op that initializes global variables.
       sess.run(init_op)
       # ...you can now run any Op that uses variable values...
   ```
@@ -102,8 +103,8 @@ class Variable(object):
 
   All variables are automatically collected in the graph where they are
   created. By default, the constructor adds the new variable to the graph
-  collection `GraphKeys.VARIABLES`. The convenience function
-  `all_variables()` returns the contents of that collection.
+  collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
+  `global_variables()` returns the contents of that collection.
 
   When building a machine learning model it is often convenient to distinguish
   between variables holding the trainable model parameters and other variables
@@ -159,7 +160,7 @@ class Variable(object):
     """Creates a new variable with value `initial_value`.
 
     The new variable is added to the graph collections listed in `collections`,
-    which defaults to `[GraphKeys.VARIABLES]`.
+    which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
 
     If `trainable` is `True` the variable is also added to the graph collection
     `GraphKeys.TRAINABLE_VARIABLES`.
@@ -178,7 +179,7 @@ class Variable(object):
         collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
         the default list of variables to use by the `Optimizer` classes.
       collections: List of graph collections keys. The new variable is added to
-        these collections. Defaults to `[GraphKeys.VARIABLES]`.
+        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
       validate_shape: If `False`, allows the variable to be initialized with a
         value of unknown shape. If `True`, the default, the shape of
         `initial_value` must be known.
@@ -245,7 +246,7 @@ class Variable(object):
         collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
         the default list of variables to use by the `Optimizer` classes.
       collections: List of graph collections keys. The new variable is added to
-        these collections. Defaults to `[GraphKeys.VARIABLES]`.
+        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
       validate_shape: If `False`, allows the variable to be initialized with a
         value of unknown shape. If `True`, the default, the shape of
         `initial_value` must be known.
@@ -275,7 +276,7 @@ class Variable(object):
           "dtype must also be specified when initial_value is callable.")
 
     if collections is None:
-      collections = [ops.GraphKeys.VARIABLES]
+      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
     if not isinstance(collections, (list, tuple, set)):
       raise ValueError(
           "collections argument to Variable constructor must be a list, tuple, "
@@ -479,7 +480,7 @@ class Variable(object):
 
     ```python
     v = tf.Variable([1, 2])
-    init = tf.initialize_all_variables()
+    init = tf.global_variables_initializer()
 
     with tf.Session() as sess:
         sess.run(init)
@@ -1037,17 +1038,28 @@ class PartitionedVariable(object):
         "assign() has not been implemented for PartitionedVariable.")
 
 
-def all_variables():
-  """Returns all variables that must be saved/restored.
+def global_variables():
+  """Returns global variables.
 
-  The `Variable()` constructor automatically adds new variables to the graph
-  collection `GraphKeys.VARIABLES`. This convenience function returns the
-  contents of that collection.
+  Global variables are variables that are shared across machines in a
+  distributed environment. The `Variable()` constructor or `get_variable()`
+  automatically adds new variables to the graph collection
+  `GraphKeys.GLOBAL_VARIABLES`.
+  This convenience function returns the contents of that collection.
+
+  Local variables are an alternative to global variables. See
+  [`tf.local_variables()`](../../api_docs/python/state_ops.md#local_variables).
 
   Returns:
     A list of `Variable` objects.
   """
-  return ops.get_collection(ops.GraphKeys.VARIABLES)
+  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+
+
+@deprecated("2016-03-02", "Please use tf.global_variables instead.")
+def all_variables():
+  """See `tf.global_variables`."""
+  return global_variables()
 
 
 def _all_saveable_objects():
@@ -1057,8 +1069,37 @@ def _all_saveable_objects():
     A list of `Variable` and `SaveableObject` to be checkpointed
   """
   # TODO(andreasst): make this function public once things are settled.
-  return ops.get_collection(ops.GraphKeys.VARIABLES) + ops.get_collection(
-      ops.GraphKeys.SAVEABLE_OBJECTS)
+  return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
+          ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
+
+
+def local_variables():
+  """Returns local variables.
+
+  Local variables are per-process variables, usually not saved/restored to
+  checkpoint and used for temporary or intermediate values.
+  For example, they can be used as counters for metrics computation or the
+  number of epochs this machine has read data.
+  The `local_variable()` function automatically adds a new variable to
+  `GraphKeys.LOCAL_VARIABLES`.
+  This convenience function returns the contents of that collection.
+
+  Global variables are an alternative to local variables. See
+  [`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables).
+
+  Returns:
+    A list of local `Variable` objects.
+  """
+  return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
+
+
+def model_variables():
+  """Returns all variables in the MODEL_VARIABLES collection.
+
+  Returns:
+    A list of Variable objects in the MODEL_VARIABLES collection.
+  """
+  return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)
 
 
 def trainable_variables():
@@ -1075,24 +1116,6 @@ def trainable_variables():
   return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
 
 
-def local_variables():
-  """Returns all variables created with collection=[LOCAL_VARIABLES].
-
-  Returns:
-    A list of local Variable objects.
-  """
-  return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
-
-
-def model_variables():
-  """Returns all variables in the MODEL_VARIABLES collection.
-
-  Returns:
-    A list of local Variable objects.
-  """
-  return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)
-
-
 def moving_average_variables():
   """Returns all variables that maintain their moving averages.
 
@@ -1107,7 +1130,7 @@ def moving_average_variables():
   return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES)
 
 
-def initialize_variables(var_list, name="init"):
+def variables_initializer(var_list, name="init"):
   """Returns an Op that initializes a list of variables.
 
   After you launch the graph in a session, you can run the returned Op to
@@ -1132,26 +1155,44 @@ def initialize_variables(var_list, name="init"):
   return control_flow_ops.no_op(name=name)
 
 
-def initialize_all_variables():
-  """Returns an Op that initializes all variables.
+@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
+def initialize_variables(var_list, name="init"):
+  """See `tf.variables_initializer`."""
+  return variables_initializer(var_list, name=name)
 
-  This is just a shortcut for `initialize_variables(all_variables())`
+
+def global_variables_initializer():
+  """Returns an Op that initializes global variables.
+
+  This is just a shortcut for `variables_initializer(global_variables())`
 
   Returns:
-    An Op that initializes all variables in the graph.
+    An Op that initializes global variables in the graph.
   """
-  return initialize_variables(all_variables())
+  return variables_initializer(global_variables())
 
 
-def initialize_local_variables():
+@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
+def initialize_all_variables():
+  """See `tf.global_variables_initializer`."""
+  return global_variables_initializer()
+
+
+def local_variables_initializer():
   """Returns an Op that initializes all local variables.
 
-  This is just a shortcut for `initialize_variables(local_variables())`
+  This is just a shortcut for `variables_initializer(local_variables())`
 
   Returns:
     An Op that initializes all local variables in the graph.
   """
-  return initialize_variables(local_variables())
+  return variables_initializer(local_variables())
+
+
+@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
+def initialize_local_variables():
+  """See `tf.local_variables_initializer`."""
+  return local_variables_initializer()
 
 
 def is_variable_initialized(variable):
@@ -1182,13 +1223,13 @@ def assert_variables_initialized(var_list=None):
 
   Args:
     var_list: List of `Variable` objects to check. Defaults to the
-      value of `all_variables().`
+      value of `global_variables().`
 
   Returns:
     An Op, or None if there are no variables.
   """
   if var_list is None:
-    var_list = all_variables() + local_variables()
+    var_list = global_variables() + local_variables()
   # Backwards compatibility for old-style variables. TODO(touts): remove.
   if not var_list:
     var_list = []
@@ -1217,7 +1258,7 @@ def report_uninitialized_variables(var_list=None,
 
   Args:
     var_list: List of `Variable` objects to check. Defaults to the
-      value of `all_variables() + local_variables()`
+      value of `global_variables() + local_variables()`
     name: Optional name of the `Operation`.
 
   Returns:
@@ -1225,7 +1266,7 @@ def report_uninitialized_variables(var_list=None,
     1-D tensor if there are no variables or no uninitialized variables.
   """
   if var_list is None:
-    var_list = all_variables() + local_variables()
+    var_list = global_variables() + local_variables()
     # Backwards compatibility for old-style variables. TODO(touts): remove.
     if not var_list:
       var_list = []
@@ -1257,7 +1298,7 @@ ops.register_tensor_conversion_function(
 
 ops.register_dense_tensor_like_type(Variable)
 ops.register_proto_function(
-    ops.GraphKeys.VARIABLES,
+    ops.GraphKeys.GLOBAL_VARIABLES,
     proto_type=variable_pb2.VariableDef,
     to_proto=Variable.to_proto,
     from_proto=Variable.from_proto)
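The migration for existing user code is mechanical; the deprecated aliases
keep working until the removal date given above (a sketch):

```python
import tensorflow as tf

v = tf.Variable(tf.zeros([10]))

# Deprecated spelling (removed after 2017-03-02):
#   init_op = tf.initialize_all_variables()
# Replacement:
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
  sess.run(init_op)
```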
diff --git a/tensorflow/python/saved_model/README.md b/tensorflow/python/saved_model/README.md
new file mode 100644
index 00000000000..1323c91f86a
--- /dev/null
+++ b/tensorflow/python/saved_model/README.md
@@ -0,0 +1,155 @@
+# TensorFlow SavedModel
+
+[TOC]
+
+## Overview
+This document describes SavedModel, the universal serialization format for
+[TensorFlow](https://www.tensorflow.org/) models.
+
+SavedModel provides a language-neutral format to save machine-learned models
+that is recoverable and hermetic. It enables higher-level systems and tools to
+produce, consume and transform TensorFlow models.
+
+## Features
+
+The following is a summary of the features in SavedModel:
+
+* Multiple graphs sharing a single set of variables and assets can be added to a
+  single SavedModel. Each graph is associated with a specific set of tags to
+  allow identification during a load or restore operation.
+* Support for `SignatureDefs`
+    * Graphs that are used for inference tasks typically have a set of inputs
+      and outputs. This is called a `Signature`.
+    * SavedModel uses [SignatureDefs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/meta_graph.proto)
+      to allow generic support for signatures that may need to be saved with the graphs.
+* Support for `Assets`.
+    * For cases where ops depend on external files for initialization, such as
+      vocabularies, SavedModel supports this via `assets`.
+    * Assets are copied to the SavedModel location and can be read when loading
+      a specific meta graph def.
+* Support for clearing devices before generating the SavedModel.
+
+The following is a summary of features that are NOT supported in SavedModel.
+Higher-level frameworks and tools that use SavedModel may provide these.
+
+* Implicit versioning.
+* Garbage collection.
+* Atomic writes to the SavedModel location.
+
+## Background
+SavedModel manages and builds upon existing TensorFlow primitives such as
+`TensorFlow Saver` and `MetaGraphDef`. Specifically, SavedModel wraps a [TensorFlow Saver](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/training/saver.py).
+The Saver is primarily used to generate the variable checkpoints. SavedModel
+will replace the existing [TensorFlow Inference Model Format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/session_bundle/README.md)
+as the canonical way to export TensorFlow graphs for serving.
+
+## Components
+A SavedModel directory has the following structure:
+
+```
+assets/
+assets.extra/
+variables/
+    variables.data-?????-of-?????
+    variables.index
+saved_model.pb
+```
+
+* SavedModel protocol buffer
+    * `saved_model.pb` or `saved_model.pbtxt`
+    * Includes the graph definitions as `MetaGraphDef` protocol buffers.
+* Assets
+    * Subfolder called `assets`.
+    * Contains auxiliary files such as vocabularies, etc.
+* Extra assets
+    * Subfolder where higher-level libraries and users can add their own assets
+      that co-exist with the model, but are not loaded by the graph.
+    * This subfolder is not managed by the SavedModel libraries.
+* Variables
+    * Subfolder called `variables`.
+    * Includes output from the [TensorFlow Saver](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/training/saver.py).
+        * `variables.data-?????-of-?????`
+        * `variables.index`
+
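+For reference, `saved_model.pb` can be inspected directly (a minimal sketch,
+assuming the generated Python protobuf bindings under
+`tensorflow.core.protobuf`):
+
+~~~python
+from tensorflow.core.protobuf import saved_model_pb2
+
+saved_model = saved_model_pb2.SavedModel()
+with open("/tmp/saved_model/saved_model.pb", "rb") as f:
+  saved_model.ParseFromString(f.read())
+for meta_graph in saved_model.meta_graphs:
+  print(meta_graph.meta_info_def.tags)
+~~~
+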
+## APIs
+The APIs for building and loading a SavedModel are described in this section.
+
+### Builder
+The SavedModel [builder](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/builder.py)
+is implemented in Python.
+
+The `SavedModelBuilder` class provides functionality to save multiple meta graph
+defs, associated variables and assets.
+
+To build a SavedModel, the first meta graph must be saved with variables.
+Subsequent meta graphs will simply be saved with their graph definitions. If
+assets need to be saved and written or copied to disk, they can be provided
+when the meta graph def is added. If multiple meta graph defs are associated
+with an asset of the same name, only the first version is retained.
+
+#### Tags
+Each meta graph added to the SavedModel must be annotated with user-specified
+tags. The tags provide a means to identify the specific meta graph to load and
+restore, along with the shared set of variables and assets. These tags
+typically annotate a MetaGraph with its functionality (e.g. serving or
+training), and possibly hardware-specific aspects such as GPU.
+
+A subset of commonly used tags is specified in [Python](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/tag_constants.py)
+and [C++](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/cc/saved_model/tag_constants.h)
+for easy and consistent use.
+
+#### Usage
+The typical usage of `builder` is as follows:
+
+~~~python
+export_dir = ...
+...
+builder = saved_model_builder.SavedModelBuilder(export_dir)
+with tf.Session(graph=tf.Graph()) as sess:
+  ...
+  builder.add_meta_graph_and_variables(sess,
+                                       [tag_constants.TRAINING],
+                                       signature_def_map=foo_signatures,
+                                       assets_collection=foo_assets)
+...
+with tf.Session(graph=tf.Graph()) as sess:
+  ...
+  builder.add_meta_graph(["bar-tag", "baz-tag"])
+...
+builder.save()
+~~~
+
+### Loader
+The SavedModel loader is implemented in C++ and Python.
+
+#### Python
+The Python version of the SavedModel [loader](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/loader.py)
+provides load and restore capability for a SavedModel. The `load` operation
+requires the session in which to restore the graph definition and variables,
+the tags used to identify the meta graph def to load, and the location of the
+SavedModel. Upon a load, the subset of variables and assets supplied as part
+of the specific meta graph def will be restored into the supplied session.
+
+~~~python
+export_dir = ...
+...
+with tf.Session(graph=tf.Graph()) as sess:
+  loader.load(sess, [tag_constants.TRAINING], export_dir)
+  ...
+~~~
+
+#### C++
+The C++ version of the SavedModel [loader](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/cc/saved_model/loader.h)
+provides an API to load a SavedModel from a path, while allowing
+`SessionOptions` and `RunOptions` to be supplied. As in the Python version,
+the tags associated with the graph to be loaded must be specified. The loaded
+SavedModel is referred to as a `SavedModelBundle` and contains the meta graph
+def and the session within which it is loaded.
+
+~~~c++
+const string export_dir = ...
+SavedModelBundle bundle;
+...
+LoadSavedModel(session_options, run_options, export_dir, {kSavedModelTagTrain},
+               &bundle);
+~~~
diff --git a/tensorflow/python/saved_model/builder.py b/tensorflow/python/saved_model/builder.py
index 24126c01935..523dd501ba6 100644
--- a/tensorflow/python/saved_model/builder.py
+++ b/tensorflow/python/saved_model/builder.py
@@ -49,10 +49,9 @@ class SavedModelBuilder(object):
 
   To build a SavedModel, the first meta graph must be saved with variables.
   Subsequent meta graphs will simply be saved with their graph definitions. If
-  assets need to be saved and written or copied to disk, they must be provided
-  as part of the first meta graph to be saved. Subsequent meta graphs can
-  provide a subset of the initial assets to be added to the SavedModel
-  definition.
+  assets need to be saved and written or copied to disk, they can be provided
+  when the meta graph def is added. If multiple meta graph defs are associated
+  with an asset of the same name, only the first version is retained.
 
   Each meta graph added to the SavedModel must be annotated with tags. The tags
   provide a means to identify the specific meta graph to load and restore, along
@@ -249,12 +248,6 @@ class SavedModelBuilder(object):
     proto_meta_graph_def = self._saved_model.meta_graphs.add()
     proto_meta_graph_def.CopyFrom(meta_graph_def)
 
-  def _maybe_clear_devices(self, clear_devices):
-    if not clear_devices:
-      return
-    for node in ops.get_default_graph().as_graph_def().node:
-      node.device = ""
-
   def add_meta_graph(self,
                      tags,
                      signature_def_map=None,
@@ -288,8 +281,6 @@ class SavedModelBuilder(object):
           "Variables and assets have not been saved yet. "
           "Please invoke `add_meta_graph_and_variables()` first.")
 
-    self._maybe_clear_devices(clear_devices)
-
     # Save asset files and write them to disk, if any.
     self._save_and_write_assets(assets_collection)
 
@@ -301,7 +292,7 @@ class SavedModelBuilder(object):
         sharded=True,
         write_version=saver_pb2.SaverDef.V2)
 
-    meta_graph_def = saver.export_meta_graph()
+    meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices)
 
     # Tag the meta graph def and add it to the SavedModel.
     self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
@@ -337,8 +328,6 @@ class SavedModelBuilder(object):
       raise AssertionError("Variables and assets have already been saved. "
                            "Please invoke `add_meta_graph()` instead.")
 
-    self._maybe_clear_devices(clear_devices)
-
     # Save asset files and write them to disk, if any.
     self._save_and_write_assets(assets_collection)
 
@@ -362,7 +351,7 @@ class SavedModelBuilder(object):
         sharded=True,
         write_version=saver_pb2.SaverDef.V2)
     saver.save(sess, variables_path, write_meta_graph=False)
-    meta_graph_def = saver.export_meta_graph()
+    meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices)
 
     # Tag the meta graph def and add it to the SavedModel.
     self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
diff --git a/tensorflow/python/summary/summary.py b/tensorflow/python/summary/summary.py
index a6b348cc991..b5db6b802fa 100644
--- a/tensorflow/python/summary/summary.py
+++ b/tensorflow/python/summary/summary.py
@@ -35,8 +35,6 @@ from __future__ import print_function
 
 import re as _re
 
-import six
-
 from google.protobuf import json_format as _json_format
 from tensorflow.core.framework import summary_pb2 as _summary_pb2
 from tensorflow.python.framework import dtypes as _dtypes
@@ -47,8 +45,8 @@ from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
 from tensorflow.python.ops.summary_ops import tensor_summary
 # pylint: enable=unused-import
 from tensorflow.python.platform import tf_logging as _logging
-from tensorflow.python.util.all_util import remove_undocumented
 from tensorflow.python.util import compat as _compat
+from tensorflow.python.util.all_util import remove_undocumented
 
 
 def _collect(val, collections, default_collections):
@@ -60,6 +58,7 @@ def _collect(val, collections, default_collections):
 
 _INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]')
 
+
 def _clean_tag(name):
   # In the past, the first argument to summary ops was a tag, which allowed
   # arbitrary characters. Now we are changing the first argument to be the node
diff --git a/tensorflow/python/training/saver.py b/tensorflow/python/training/saver.py
index 34893dbb9ff..3a5c2ebb114 100644
--- a/tensorflow/python/training/saver.py
+++ b/tensorflow/python/training/saver.py
@@ -899,7 +899,7 @@ class Saver(object):
                builder=None,
                defer_build=False,
                allow_empty=False,
-               write_version=saver_pb2.SaverDef.V1,
+               write_version=saver_pb2.SaverDef.V2,
                pad_step_number=False):
     """Creates a `Saver`.
 
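With this default flipped, newly constructed savers write V2 checkpoints (a
`.index` file plus `.data-?????-of-?????` shards). Code that must keep
producing V1 checkpoints can pin the version explicitly (a sketch;
`tf.train.SaverDef` is the exported proto enum):

```python
import tensorflow as tf

v = tf.Variable(tf.zeros([3]), name="v")

saver_v2 = tf.train.Saver()  # Now writes SaverDef.V2 checkpoints by default.
saver_v1 = tf.train.Saver(write_version=tf.train.SaverDef.V1)  # Opt back in.
```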
diff --git a/tensorflow/python/util/decorator_utils.py b/tensorflow/python/util/decorator_utils.py
index 155003498ce..c4b033d59ae 100644
--- a/tensorflow/python/util/decorator_utils.py
+++ b/tensorflow/python/util/decorator_utils.py
@@ -60,3 +60,25 @@ def validate_callable(func, decorator_name):
         ' @property appears before @%s in your source code:'
         '\n\n@property\n@%s\ndef method(...)' % (
             func, decorator_name, decorator_name))
+
+
+class classproperty(object):  # pylint: disable=invalid-name
+  """Class property decorator.
+
+  Example usage:
+
+  class MyClass(object):
+
+    @classproperty
+    def value(cls):
+      return '123'
+
+  > print(MyClass.value)
+  123
+  """
+
+  def __init__(self, func):
+    self._func = func
+
+  def __get__(self, owner_self, owner_cls):
+    return self._func(owner_cls)
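A short usage sketch (hypothetical `Config` class; note that, unlike
`property`, assignment is not intercepted, so writing to the attribute
replaces the descriptor):

```python
class Config(object):

  @classproperty
  def default_name(cls):
    return cls.__name__.lower()

print(Config.default_name)    # => config
print(Config().default_name)  # Instance lookup also goes through __get__.
```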
diff --git a/tensorflow/tensorboard/TAG b/tensorflow/tensorboard/TAG
index a7873645902..8f92bfdd497 100644
--- a/tensorflow/tensorboard/TAG
+++ b/tensorflow/tensorboard/TAG
@@ -1 +1 @@
-34
+35
diff --git a/tensorflow/tensorboard/backend/server_test.py b/tensorflow/tensorboard/backend/server_test.py
index dba0bbfd0eb..dc7c28e9e1c 100644
--- a/tensorflow/tensorboard/backend/server_test.py
+++ b/tensorflow/tensorboard/backend/server_test.py
@@ -27,6 +27,7 @@ import json
 import numbers
 import os
 import shutil
+import tempfile
 import threading
 import zlib
 
@@ -51,10 +52,10 @@ class TensorboardServerTest(tf.test.TestCase):
   _SCALAR_COUNT = 99
 
   def setUp(self):
-    self._GenerateTestData()
+    temp_dir = self._GenerateTestData()
     self._multiplexer = event_multiplexer.EventMultiplexer(
         size_guidance=server.TENSORBOARD_SIZE_GUIDANCE)
-    server.ReloadMultiplexer(self._multiplexer, {self.get_temp_dir(): None})
+    server.ReloadMultiplexer(self._multiplexer, {temp_dir: None})
     # 0 to pick an unused port.
     self._server = server.BuildServer(
         self._multiplexer, 'localhost', 0, '/foo/logdir/argument')
@@ -322,8 +323,11 @@ class TensorboardServerTest(tf.test.TestCase):
      - scalar events containing the value i at step 10 * i and wall time
          100 * i, for i in [1, _SCALAR_COUNT).
      - a graph definition
+
+    Returns:
+      temp_dir: The directory the test data is generated under.
     """
-    temp_dir = self.get_temp_dir()
+    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
     self.addCleanup(shutil.rmtree, temp_dir)
     run1_path = os.path.join(temp_dir, 'run1')
     os.makedirs(run1_path)
@@ -396,6 +400,8 @@ class TensorboardServerTest(tf.test.TestCase):
     if 'projector' in REGISTERED_PLUGINS:
       self._GenerateProjectorTestData(run1_path)
 
+    return temp_dir
+
   def _GenerateProjectorTestData(self, run_path):
     # Write a projector config file in run1.
     config_path = os.path.join(run_path, 'projector_config.pbtxt')
diff --git a/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.html b/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.html
index dadad81a343..8dfcb3153db 100644
--- a/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.html
+++ b/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.html
@@ -58,7 +58,7 @@ handle these situations gracefully.
             <paper-checkbox
               class="checkbox vertical-align-center"
               name="[[item]]"
-              checked$="[[_isChecked(item, runToIsCheckedMapping.*)]]"
+              checked$="[[_isChecked(item, runsDisabled.*)]]"
               on-change="_checkboxChange"
             ></paper-checkbox>
 
@@ -161,7 +161,10 @@ handle these situations gracefully.
   Polymer({
     is: "tf-multi-checkbox",
     properties: {
-      names: Array, // All the runs in consideration
+      names: {
+        type: Array,
+        value: function() {return [];},
+      }, // All the runs in consideration
       regexInput: {
         type: String,
         value: TF.URIStorage.getStringInitializer("regexInput", ""),
@@ -175,15 +178,15 @@ handle these situations gracefully.
         type: Array,
         computed: "computeNamesMatchingRegex(names.*, regex)"
       }, // Runs that match the regex
-      runToIsCheckedMapping: {
+      runsDisabled: {
         type: Object,
-        value: TF.URIStorage.getObjectInitializer('runToIsCheckedMapping', {}),
-      }, // run name -> Boolean (if its enabled)
+        value: TF.URIStorage.getObjectInitializer('runsDisabled', {}),
+      }, // Every run that is disabled is stored in the map (with value true)
       // (Allows state to persist across regex filtering)
       outSelected: {
         type: Array,
         notify: true,
-        computed: 'computeOutSelected(namesMatchingRegex.*, runToIsCheckedMapping.*)'
+        computed: 'computeOutSelected(namesMatchingRegex.*, runsDisabled.*)'
       },
       colorScale: {
         type: Object,
@@ -219,11 +222,10 @@ handle these situations gracefully.
       'dom-change': 'synchronizeColors',
     },
     observers: [
-      "_initializeRunToIsCheckedMapping(names.*)",
-      "_setIsolatorIcon(runToIsCheckedMapping)",
-      "_storeRunToIsCheckedMapping(runToIsCheckedMapping)",
+      "_setIsolatorIcon(runsDisabled, names)",
+      "_storeRunToIsCheckedMapping(runsDisabled)",
     ],
-    _storeRunToIsCheckedMapping: TF.URIStorage.getObjectObserver('runToIsCheckedMapping', {}),
+    _storeRunToIsCheckedMapping: TF.URIStorage.getObjectObserver('runsDisabled', {}),
     _makeRegex: function(regex) {
       try {
         return new RegExp(regex)
@@ -232,29 +234,18 @@ handle these situations gracefully.
       }
     },
     _setIsolatorIcon: function() {
-      var runMap = this.runToIsCheckedMapping;
-      var numChecked = _.filter(_.values(runMap)).length;
+      var runMap = this.runsDisabled;
+      var numChecked = this.names.length - _.filter(_.values(runMap)).length;
       var buttons = Array.prototype.slice.call(this.querySelectorAll(".isolator"));
 
       buttons.forEach(function(b) {
-        if (numChecked === 1 && runMap[b.name]) {
+        if (numChecked === 1 && !runMap[b.name]) {
           b.icon = "radio-button-checked";
         } else {
           b.icon = "radio-button-unchecked";
         }
       });
     },
-    _initializeRunToIsCheckedMapping: function(change) {
-      var runToIsCheckedMapping = _.clone(this.runToIsCheckedMapping);
-
-      this.names.forEach(function(n) {
-        if (runToIsCheckedMapping[n] == null) {
-          // runs default to on
-          runToIsCheckedMapping[n] = true;
-        }
-      });
-      this.runToIsCheckedMapping = runToIsCheckedMapping;
-    },
     computeNamesMatchingRegex: function(__, ___) {
       var regex = this.regex;
       return this.names.filter(function(n) {
@@ -262,9 +253,9 @@ handle these situations gracefully.
       });
     },
     computeOutSelected: function(__, ___) {
-      var runToIsCheckedMapping = this.runToIsCheckedMapping;
+      var runsDisabled = this.runsDisabled;
       return this.namesMatchingRegex.filter(function(n) {
-        return runToIsCheckedMapping[n];
+        return !runsDisabled[n];
       });
     },
     synchronizeColors: function(e) {
@@ -296,24 +287,25 @@ handle these situations gracefully.
       // If user clicks on the label for one run, enable it and disable all other runs.
 
       var name = Polymer.dom(e).localTarget.name;
-      var _this = this;
-      _.keys(this.runToIsCheckedMapping).forEach(function(k) {
-        _this.runToIsCheckedMapping[k] = false;
-      });
-      this.runToIsCheckedMapping[name] = true;
-      // we can't use notifyPath because the run names may have periods
-      this.runToIsCheckedMapping = _.clone(this.runToIsCheckedMapping);
+      var newDisabled = {};
+      this.names.forEach(function(n) {
+        newDisabled[n] = true;
+      });
+      delete newDisabled[name];
+      this.runsDisabled = newDisabled;
     },
     _checkboxChange: function(e) {
       var target = Polymer.dom(e).localTarget;
-      var name = target.name;
-      var checked = target.checked;
-      this.runToIsCheckedMapping[name] = checked;
+      if (target.checked) {
+        delete this.runsDisabled[target.name];
+      } else {
+        this.runsDisabled[target.name] = true;
+      }
       // n.b. notifyPath won't work because run names may have periods.
-      this.runToIsCheckedMapping = _.clone(this.runToIsCheckedMapping);
+      this.runsDisabled = _.clone(this.runsDisabled);
     },
     _isChecked: function(item, outSelectedChange) {
-      return this.runToIsCheckedMapping[item];
+      return this.runsDisabled[item] === undefined;
     },
     _initializeRuns: function(change) {
       this.outSelected = change.base.slice();
@@ -322,10 +314,15 @@ handle these situations gracefully.
     toggleAll: function() {
       var _this = this;
       var allOn = this.namesMatchingRegex
-                    .filter(function(n) {return !_this.runToIsCheckedMapping[n]})
+                    .filter(function(n) {return _this.runsDisabled[n]})
                     .length === 0;
-      this.namesMatchingRegex.forEach(function(n) {_this.runToIsCheckedMapping[n] = !allOn});
-      this.runToIsCheckedMapping = _.clone(this.runToIsCheckedMapping);
+      var newRunsDisabled = {};
+      if (allOn) {
+        this.names.forEach(function(n) {
+          newRunsDisabled[n] = true;
+        });
+      }
+      this.runsDisabled = newRunsDisabled;
     },
   });
   </script>
diff --git a/tensorflow/tensorboard/components/tf_image_dashboard/tf-image-loader.html b/tensorflow/tensorboard/components/tf_image_dashboard/tf-image-loader.html
index b2e86bc6727..fc3e383f3c7 100644
--- a/tensorflow/tensorboard/components/tf_image_dashboard/tf-image-loader.html
+++ b/tensorflow/tensorboard/components/tf_image_dashboard/tf-image-loader.html
@@ -40,6 +40,7 @@ future for loading older images.
       }
 
       img {
+        image-rendering: -moz-crisp-edges;
         image-rendering: pixelated;
         display: block;
         width: 100%;
diff --git a/tensorflow/tensorboard/components/vz_projector/data-provider-demo.ts b/tensorflow/tensorboard/components/vz_projector/data-provider-demo.ts
index a839ad4a0b3..643862c797a 100644
--- a/tensorflow/tensorboard/components/vz_projector/data-provider-demo.ts
+++ b/tensorflow/tensorboard/components/vz_projector/data-provider-demo.ts
@@ -20,69 +20,15 @@ import * as logging from './logging';
 
 /** Data provider that loads data from a demo folder. */
 export class DemoDataProvider implements DataProvider {
-  /** List of demo datasets for showing the capabilities of the tool. */
-  private DEMO_CONFIG: ProjectorConfig = {
-    embeddings: [
-      {
-        tensorName: 'Word2Vec 5K',
-        tensorShape: [5000, 200],
-        tensorPath: 'word2vec_5000_200d_tensors.tsv',
-        metadataPath: 'word2vec_5000_200d_labels.tsv'
-      },
-      {
-        tensorName: 'Word2Vec 10K',
-        tensorShape: [10000, 200],
-        tensorPath: 'word2vec_10000_200d_tensors.tsv',
-        metadataPath: 'word2vec_10000_200d_labels.tsv'
-      },
-      {
-        tensorName: 'Word2Vec All',
-        tensorShape: [71291, 200],
-        tensorPath: 'word2vec_full_200d_tensors.tsv',
-        metadataPath: 'word2vec_full_200d_labels.tsv'
-      },
-      {
-        tensorName: 'SmartReply 5K',
-        tensorShape: [5000, 256],
-        tensorPath: 'smartreply_5000_256d_tensors.tsv',
-        metadataPath: 'smartreply_5000_256d_labels.tsv'
-      },
-      {
-        tensorName: 'SmartReply All',
-        tensorShape: [35860, 256],
-        tensorPath: 'smartreply_full_256d_tensors.tsv',
-        metadataPath: 'smartreply_full_256d_labels.tsv'
-      },
-      {
-        tensorName: 'Mnist with images 10K',
-        tensorShape: [10000, 784],
-        tensorPath: 'mnist_10k_784d_tensors.tsv',
-        metadataPath: 'mnist_10k_784d_labels.tsv',
-        sprite: {
-          imagePath: 'mnist_10k_sprite.png',
-          singleImageDim: [28, 28]
-        }
-      },
-      {
-        tensorName: 'Iris',
-        tensorShape: [150, 4],
-        tensorPath: 'iris_tensors.tsv',
-        metadataPath: 'iris_labels.tsv'
-      },
-      {
-        tensorName: 'Unit Cube',
-        tensorShape: [8, 3],
-        tensorPath: 'cube_tensors.tsv',
-        metadataPath: 'cube_metadata.tsv'
-      }
-    ],
-    modelCheckpointPath: 'Demo datasets'
-  };
-  /** Name of the folder where the demo datasets are stored. */
-  private DEMO_FOLDER = 'data';
+  private projectorConfigPath: string;
+  private projectorConfig: ProjectorConfig;
+
+  constructor(projectorConfigPath: string) {
+    this.projectorConfigPath = projectorConfigPath;
+  }
 
   private getEmbeddingInfo(tensorName: string): EmbeddingInfo {
-    let embeddings = this.DEMO_CONFIG.embeddings;
+    let embeddings = this.projectorConfig.embeddings;
     for (let i = 0; i < embeddings.length; i++) {
       let embedding = embeddings[i];
       if (embedding.tensorName === tensorName) {
@@ -98,18 +44,28 @@ export class DemoDataProvider implements DataProvider {
 
   retrieveProjectorConfig(run: string, callback: (d: ProjectorConfig) => void)
       : void {
-    callback(this.DEMO_CONFIG);
+    let msgId = logging.setModalMessage('Fetching projector config...');
+    d3.json(this.projectorConfigPath, (err, projectorConfig) => {
+      if (err) {
+        logging.setModalMessage('Error: ' + err.responseText);
+        return;
+      }
+      logging.setModalMessage(null, msgId);
+      this.projectorConfig = projectorConfig;
+      callback(projectorConfig);
+    });
   }
 
   getDefaultTensor(run: string, callback: (tensorName: string) => void) {
-    callback('SmartReply 5K');
+    // Return the first tensor as the default tensor.
+    callback(this.projectorConfig.embeddings[0].tensorName);
   }
 
   retrieveTensor(run: string, tensorName: string,
       callback: (ds: DataSet) => void) {
     let embedding = this.getEmbeddingInfo(tensorName);
     let separator = embedding.tensorPath.substr(-3) === 'tsv' ? '\t' : ' ';
-    let url = `${this.DEMO_FOLDER}/${embedding.tensorPath}`;
+    let url = embedding.tensorPath;
     logging.setModalMessage('Fetching tensors...', TENSORS_MSG_ID);
     d3.text(url, (error: any, dataString: string) => {
       if (error) {
@@ -125,16 +81,12 @@ export class DemoDataProvider implements DataProvider {
   retrieveSpriteAndMetadata(run: string, tensorName: string,
       callback: (r: SpriteAndMetadataInfo) => void) {
     let embedding = this.getEmbeddingInfo(tensorName);
-    let metadataPath = null;
-    if (embedding.metadataPath) {
-      metadataPath = `${this.DEMO_FOLDER}/${embedding.metadataPath}`;
-    }
     let spriteImagePath = null;
     if (embedding.sprite && embedding.sprite.imagePath) {
-      spriteImagePath = `${this.DEMO_FOLDER}/${embedding.sprite.imagePath}`;
+      spriteImagePath = embedding.sprite.imagePath;
     }
-    dataProvider.retrieveSpriteAndMetadataInfo(metadataPath, spriteImagePath,
-        embedding.sprite, callback);
+    dataProvider.retrieveSpriteAndMetadataInfo(
+        embedding.metadataPath, spriteImagePath, embedding.sprite, callback);
   }
 
   getBookmarks(
diff --git a/tensorflow/tensorboard/components/vz_projector/vz-projector-app.html b/tensorflow/tensorboard/components/vz_projector/vz-projector-app.html
index f2a9fcad137..6221485c2d1 100644
--- a/tensorflow/tensorboard/components/vz_projector/vz-projector-app.html
+++ b/tensorflow/tensorboard/components/vz_projector/vz-projector-app.html
@@ -76,7 +76,9 @@ vz-projector {
       </a>
     </div>
   </div>
-  <vz-projector route-prefix="[[routePrefix]]" serving-mode="[[servingMode]]"></vz-projector>
+  <vz-projector route-prefix="[[routePrefix]]"
+      serving-mode="[[servingMode]]"
+      projector-config-json-path="[[projectorConfigJsonPath]]"></vz-projector>
 </div>
 <!-- Google analytics -->
 <script>
diff --git a/tensorflow/tensorboard/components/vz_projector/vz-projector-projections-panel.ts b/tensorflow/tensorboard/components/vz_projector/vz-projector-projections-panel.ts
index b319cffc29f..7b28b94060a 100644
--- a/tensorflow/tensorboard/components/vz_projector/vz-projector-projections-panel.ts
+++ b/tensorflow/tensorboard/components/vz_projector/vz-projector-projections-panel.ts
@@ -133,7 +133,7 @@ export class ProjectionsPanel extends ProjectionsPanelPolymer {
     this.polymerChangesTriggerReprojection = true;
   }
 
-  private updateTSNEPerplexityFromUIChange() {
+  private updateTSNEPerplexityFromSliderChange() {
     if (this.perplexitySlider) {
       this.perplexity = +this.perplexitySlider.value;
     }
@@ -161,8 +161,8 @@ export class ProjectionsPanel extends ProjectionsPanelPolymer {
 
     this.perplexitySlider.value = this.perplexity.toString();
     this.perplexitySlider.addEventListener(
-        'change', () => this.updateTSNEPerplexityFromUIChange());
-    this.updateTSNEPerplexityFromUIChange();
+        'change', () => this.updateTSNEPerplexityFromSliderChange());
+    this.updateTSNEPerplexityFromSliderChange();
 
     this.learningRateInput.addEventListener(
         'change', () => this.updateTSNELearningRateFromUIChange());
@@ -217,7 +217,7 @@ export class ProjectionsPanel extends ProjectionsPanelPolymer {
     this.computeAllCentroids();
 
     this.setZDropdownEnabled(this.pcaIs3d);
-    this.updateTSNEPerplexityFromUIChange();
+    this.updateTSNEPerplexityFromSliderChange();
     this.updateTSNELearningRateFromUIChange();
     if (this.iterationLabel) {
       this.iterationLabel.text(bookmark.tSNEIteration.toString());
@@ -284,6 +284,10 @@ export class ProjectionsPanel extends ProjectionsPanelPolymer {
     this.dataSet = dataSet;
     this.originalDataSet = originalDataSet;
     this.dim = dim;
+    let perplexity =
+        Math.max(5, Math.ceil(Math.sqrt(dataSet.points.length) / 4));
+    this.perplexitySlider.value = perplexity.toString();
+    this.updateTSNEPerplexityFromSliderChange();
     this.clearCentroids();
 
     this.dom.select('#tsne-sampling')
diff --git a/tensorflow/tensorboard/components/vz_projector/vz-projector.ts b/tensorflow/tensorboard/components/vz_projector/vz-projector.ts
index 8bef32fa4f5..277c5c8c358 100644
--- a/tensorflow/tensorboard/components/vz_projector/vz-projector.ts
+++ b/tensorflow/tensorboard/components/vz_projector/vz-projector.ts
@@ -48,7 +48,8 @@ export let ProjectorPolymer = PolymerElement({
   properties: {
     routePrefix: String,
     dataProto: {type: String, observer: '_dataProtoChanged'},
-    servingMode: String
+    servingMode: String,
+    projectorConfigJsonPath: String
   }
 });
 
@@ -59,6 +60,8 @@ export class Projector extends ProjectorPolymer implements SelectionContext,
   // The working subset of the data source's original data set.
   dataSet: DataSet;
   servingMode: ServingMode;
+  // The path to the projector config JSON file for demo mode.
+  projectorConfigJsonPath: string;
 
   private selectionChangedListeners: SelectionChangedListener[];
   private hoverListeners: HoverListener[];
@@ -247,7 +250,7 @@ export class Projector extends ProjectorPolymer implements SelectionContext,
 
   private initializeDataProvider(dataProto?: DataProto) {
     if (this.servingMode === 'demo') {
-      this.dataProvider = new DemoDataProvider();
+      this.dataProvider = new DemoDataProvider(this.projectorConfigJsonPath);
     } else if (this.servingMode === 'server') {
       if (!this.routePrefix) {
         throw 'route-prefix is a required parameter';
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 25b9866be9c..0e80aabdd27 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -14,8 +14,8 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
   # These lines need to be changed when updating Eigen. They are parsed from
   # this file by the cmake and make builds to determine the eigen version and
   # hash.
-  eigen_version = "1d454915237a"
-  eigen_sha256 = "7e05dd4b9866ef0aa4498be34752a362596cc5db2f8439cee111e4ea54046b57"
+  eigen_version = "22b492048b2f"
+  eigen_sha256 = "8b9bd14a037c1a3fe37dc5e4a71504ebe48148cf2498fd8eb6848165a7a0538f"
 
   native.new_http_archive(
     name = "eigen_archive",