diff --git a/tensorflow/contrib/framework/__init__.py b/tensorflow/contrib/framework/__init__.py
index 28459fb0003..063717f08aa 100644
--- a/tensorflow/contrib/framework/__init__.py
+++ b/tensorflow/contrib/framework/__init__.py
@@ -124,7 +124,7 @@ from tensorflow.python.ops.init_ops import convolutional_orthogonal_2d
 from tensorflow.python.ops.init_ops import convolutional_orthogonal_3d
 from tensorflow.python.util.all_util import remove_undocumented
 
-_allowed_symbols = ['nest', 'broadcast_to', 'where']
+_allowed_symbols = ['nest']
 _nest_allowed_symbols = [
     'assert_same_structure',
     'is_nested',
diff --git a/tensorflow/core/api_def/base_api/api_def_SelectV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_SelectV2.pbtxt
deleted file mode 100644
index e567206d913..00000000000
--- a/tensorflow/core/api_def/base_api/api_def_SelectV2.pbtxt
+++ /dev/null
@@ -1,3 +0,0 @@
-op {
-  graph_op_name: "SelectV2"
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_SelectV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_SelectV2.pbtxt
deleted file mode 100644
index bf57ed7164d..00000000000
--- a/tensorflow/core/api_def/python_api/api_def_SelectV2.pbtxt
+++ /dev/null
@@ -1,4 +0,0 @@
-op {
-  graph_op_name: "SelectV2"
-  visibility: HIDDEN
-}
diff --git a/tensorflow/core/kernels/cwise_op_gpu_select.cu.cc b/tensorflow/core/kernels/cwise_op_gpu_select.cu.cc
index ba872db2172..303d8e47913 100644
--- a/tensorflow/core/kernels/cwise_op_gpu_select.cu.cc
+++ b/tensorflow/core/kernels/cwise_op_gpu_select.cu.cc
@@ -23,22 +23,6 @@ limitations under the License.
 namespace tensorflow {
 namespace functor {
 
-template <typename T, int NDIMS>
-struct BCastSelectFunctor<GPUDevice, T, NDIMS> {
-  void operator()(const GPUDevice& d,
-                  typename TTypes<T, NDIMS>::Tensor output_tensor,
-                  typename TTypes<bool, NDIMS>::ConstTensor cond_tensor,
-                  typename TTypes<T, NDIMS>::ConstTensor then_tensor,
-                  typename TTypes<T, NDIMS>::ConstTensor else_tensor,
-                  typename Eigen::array<Eigen::DenseIndex, NDIMS> cond_bcast,
-                  typename Eigen::array<Eigen::DenseIndex, NDIMS> then_bcast,
-                  typename Eigen::array<Eigen::DenseIndex, NDIMS> else_bcast) {
-    output_tensor.device(d) = cond_tensor.broadcast(cond_bcast)
-                                  .select(then_tensor.broadcast(then_bcast),
-                                          else_tensor.broadcast(else_bcast));
-  }
-};
-
 template <typename T>
 struct SelectFunctor<GPUDevice, T> {
   void operator()(const GPUDevice& d, typename TTypes<T>::Flat out,
@@ -105,15 +89,10 @@ struct BatchSelectFunctor<GPUDevice, T> {
   }
 };
 
-#define SELECT_FUNCTOR(T)                              \
-  template struct SelectFunctor<GPUDevice, T>;         \
-  template struct SelectScalarFunctor<GPUDevice, T>;   \
-  template struct BatchSelectFunctor<GPUDevice, T>;    \
-  template struct BCastSelectFunctor<GPUDevice, T, 1>; \
-  template struct BCastSelectFunctor<GPUDevice, T, 2>; \
-  template struct BCastSelectFunctor<GPUDevice, T, 3>; \
-  template struct BCastSelectFunctor<GPUDevice, T, 4>; \
-  template struct BCastSelectFunctor<GPUDevice, T, 5>;
+#define SELECT_FUNCTOR(T)                            \
+  template struct SelectFunctor<GPUDevice, T>;       \
+  template struct SelectScalarFunctor<GPUDevice, T>; \
+  template struct BatchSelectFunctor<GPUDevice, T>;
 
 SELECT_FUNCTOR(bool);
 SELECT_FUNCTOR(Eigen::half);
diff --git a/tensorflow/core/kernels/cwise_op_select.cc b/tensorflow/core/kernels/cwise_op_select.cc
index c85c9d0599f..3b51563ca28 100644
--- a/tensorflow/core/kernels/cwise_op_select.cc
+++ b/tensorflow/core/kernels/cwise_op_select.cc
@@ -143,141 +143,21 @@ class SelectOp : public OpKernel {
  private:
   TF_DISALLOW_COPY_AND_ASSIGN(SelectOp);
 };
-template <typename Device, typename T>
-class SelectV2Op : public OpKernel {
- public:
-  explicit SelectV2Op(OpKernelConstruction* context) : OpKernel(context) {}
-
-  void Compute(OpKernelContext* ctx) override {
-    const Tensor* cond;
-    const Tensor* then;
-    const Tensor* else_;
-    OP_REQUIRES_OK(ctx, ctx->input("condition", &cond));
-    OP_REQUIRES_OK(ctx, ctx->input("t", &then));
-    OP_REQUIRES_OK(ctx, ctx->input("e", &else_));
-
-    // The `cond`, `then`, and `else` are broadcastable (bcast.IsValid()),
-    // This matches the behavior of numpy.
-    // TODO (yongtang): Consolidate into n-ary broadcast, instead of multiple
-    // 2-ary broadcast.
-
-    // Combine `then` and `else`.
-    BCast then_else_bcast(BCast::FromShape(then->shape()),
-                          BCast::FromShape(else_->shape()), false);
-    OP_REQUIRES(ctx, then_else_bcast.IsValid(),
-                errors::InvalidArgument(
-                    "then ", then->shape().DebugString(), " and else ",
-                    else_->shape().DebugString(), " must be broadcastable"));
-    // Combine `cond` with `then` and `else`.
-    BCast bcast(
-        BCast::FromShape(cond->shape()),
-        BCast::FromShape(BCast::ToShape(then_else_bcast.output_shape())),
-        false);
-    OP_REQUIRES(ctx, bcast.IsValid(),
-                errors::InvalidArgument(
-                    "condition ", cond->shape().DebugString(), ", then ",
-                    then->shape().DebugString(), ", and else ",
-                    else_->shape().DebugString(), " must be broadcastable"));
-
-    // Broadcast `cond`, `then` and `else` to combined shape,
-    // in order to obtain the reshape.
-    BCast cond_bcast(BCast::FromShape(BCast::ToShape(bcast.output_shape())),
-                     BCast::FromShape(cond->shape()), false);
-    BCast then_bcast(BCast::FromShape(BCast::ToShape(bcast.output_shape())),
-                     BCast::FromShape(then->shape()), false);
-    BCast else_bcast(BCast::FromShape(BCast::ToShape(bcast.output_shape())),
-                     BCast::FromShape(else_->shape()), false);
-    OP_REQUIRES(
-        ctx,
-        cond_bcast.IsValid() && then_bcast.IsValid() && else_bcast.IsValid(),
-        errors::InvalidArgument("condition ", cond->shape().DebugString(),
-                                ", then ", then->shape().DebugString(),
-                                ", and else ", else_->shape().DebugString(),
-                                " must be broadcastable"));
-
-    // Combined shape should be the final shape.
-    OP_REQUIRES(
-        ctx,
-        cond_bcast.output_shape() == bcast.output_shape() &&
-            then_bcast.output_shape() == bcast.output_shape() &&
-            else_bcast.output_shape() == bcast.output_shape(),
-        errors::InvalidArgument("condition ", cond->shape().DebugString(),
-                                ", then ", then->shape().DebugString(),
-                                ", and else ", else_->shape().DebugString(),
-                                " must be broadcastable to the same shape"));
-
-    Tensor* output = nullptr;
-    const TensorShape output_shape = BCast::ToShape(bcast.output_shape());
-    OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output(
-                            {"t", "e"}, "output", output_shape, &output));
-
-    if (output->NumElements() == 0) {
-      return;
-    }
-
-#define HANDLE_DIM(NDIMS)                                            \
-  {                                                                  \
-    functor::BCastSelectFunctor<Device, T, NDIMS> func;              \
-    func(ctx->eigen_device<Device>(),                                \
-         output->shaped<T, NDIMS>(bcast.result_shape()),             \
-         cond->template shaped<bool, NDIMS>(cond_bcast.y_reshape()), \
-         then->template shaped<T, NDIMS>(then_bcast.y_reshape()),    \
-         else_->template shaped<T, NDIMS>(else_bcast.y_reshape()),   \
-         BCast::ToIndexArray<NDIMS>(cond_bcast.y_bcast()),           \
-         BCast::ToIndexArray<NDIMS>(then_bcast.y_bcast()),           \
-         BCast::ToIndexArray<NDIMS>(else_bcast.y_bcast()));          \
-  }
-
-    const int ndims = static_cast<int>(bcast.result_shape().size());
-    switch (ndims) {
-      case 1:
-        HANDLE_DIM(1);
-        break;
-      case 2:
-        HANDLE_DIM(2);
-        break;
-      case 3:
-        HANDLE_DIM(3);
-        break;
-      case 4:
-        HANDLE_DIM(4);
-        break;
-      case 5:
-        HANDLE_DIM(5);
-        break;
-      default:
-        ctx->SetStatus(errors::Unimplemented(
-            "Broadcast between ", ctx->input(0).shape().DebugString(), " and ",
-            ctx->input(1).shape().DebugString(), " is not supported yet."));
-        break;
-    }
-    return;
-  }
-
- private:
-  TF_DISALLOW_COPY_AND_ASSIGN(SelectV2Op);
-};
-
-#define REGISTER_SELECT(type)                                      \
-  REGISTER_KERNEL_BUILDER(                                         \
-      Name("Select").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
-      SelectOp<CPUDevice, type>);                                  \
-  REGISTER_KERNEL_BUILDER(                                         \
Name("SelectV2").Device(DEVICE_CPU).TypeConstraint("T"), \ - SelectV2Op); +#define REGISTER_SELECT(type) \ + REGISTER_KERNEL_BUILDER( \ + Name("Select").Device(DEVICE_CPU).TypeConstraint("T"), \ + SelectOp); TF_CALL_ALL_TYPES(REGISTER_SELECT); #if GOOGLE_CUDA // Registration of the GPU implementations. -#define REGISTER_SELECT_GPU(type) \ - REGISTER_KERNEL_BUILDER( \ - Name("Select").Device(DEVICE_GPU).TypeConstraint("T"), \ - SelectOp); \ - REGISTER_KERNEL_BUILDER( \ - Name("SelectV2").Device(DEVICE_GPU).TypeConstraint("T"), \ - SelectV2Op); +#define REGISTER_SELECT_GPU(type) \ + REGISTER_KERNEL_BUILDER( \ + Name("Select").Device(DEVICE_GPU).TypeConstraint("T"), \ + SelectOp); REGISTER_SELECT_GPU(bool); REGISTER_SELECT_GPU(Eigen::half); @@ -294,12 +174,9 @@ REGISTER_SELECT_GPU(complex128); #ifdef TENSORFLOW_USE_SYCL // Registration of the SYCL implementations. -#define REGISTER_SELECT_SYCL(type) \ - REGISTER_KERNEL_BUILDER( \ - Name("Select").Device(DEVICE_SYCL).TypeConstraint("T"), \ - SelectOp); \ - REGISTER_KERNEL_BUILDER( \ - Name("SelectV2").Device(DEVICE_SYCL).TypeConstraint("T"), \ +#define REGISTER_SELECT_SYCL(type) \ + REGISTER_KERNEL_BUILDER( \ + Name("Select").Device(DEVICE_SYCL).TypeConstraint("T"), \ SelectOp); REGISTER_SELECT_SYCL(float); @@ -447,35 +324,10 @@ struct BatchSelectFunctor { } }; -template -struct BCastSelectFunctorBase { - void operator()(const Device& d, - typename TTypes::Tensor output_tensor, - typename TTypes::ConstTensor cond_tensor, - typename TTypes::ConstTensor then_tensor, - typename TTypes::ConstTensor else_tensor, - typename Eigen::array cond_bcast, - typename Eigen::array then_bcast, - typename Eigen::array else_bcast) { - output_tensor.device(d) = cond_tensor.broadcast(cond_bcast) - .select(then_tensor.broadcast(then_bcast), - else_tensor.broadcast(else_bcast)); - } -}; - -template -struct BCastSelectFunctor - : BCastSelectFunctorBase {}; - #ifdef TENSORFLOW_USE_SYCL template struct BatchSelectFunctor : BatchSelectFunctorBase {}; - -template -struct BCastSelectFunctor - : BCastSelectFunctorBase {}; - #endif // TENSORFLOW_USE_SYCL } // namespace functor diff --git a/tensorflow/core/kernels/cwise_ops.h b/tensorflow/core/kernels/cwise_ops.h index cc8eba69555..898147b3661 100644 --- a/tensorflow/core/kernels/cwise_ops.h +++ b/tensorflow/core/kernels/cwise_ops.h @@ -1208,18 +1208,6 @@ struct BatchSelectFunctor { typename TTypes::ConstMatrix else_flat_outer_dims); }; -template -struct BCastSelectFunctor { - void operator()(const Device& d, - typename TTypes::Tensor output_tensor, - typename TTypes::ConstTensor cond_tensor, - typename TTypes::ConstTensor then_tensor, - typename TTypes::ConstTensor else_tensor, - typename Eigen::array cond_bcast, - typename Eigen::array then_bcast, - typename Eigen::array else_bcast); -}; - } // end namespace functor } // end namespace tensorflow diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc index 3ff9bc09853..29a93753e7d 100644 --- a/tensorflow/core/ops/math_ops.cc +++ b/tensorflow/core/ops/math_ops.cc @@ -828,57 +828,6 @@ REGISTER_OP("Select") return Status::OK(); }); -REGISTER_OP("SelectV2") - .Input("condition: bool") - .Input("t: T") - .Input("e: T") - .Output("output: T") - .Attr("T: type") - .SetShapeFn([](InferenceContext* c) { - auto* handle_data_1 = c->input_handle_shapes_and_types(1); - auto* handle_data_2 = c->input_handle_shapes_and_types(2); - // Merge handle shape and dtype if applicable. 
- if (handle_data_1 != nullptr && handle_data_2 != nullptr) { - const auto size = handle_data_1->size(); - std::vector merged_handle_data(size); - if (size != handle_data_2->size()) { - return errors::InvalidArgument( - "Trying to merge handles pointing to different numbers of " - "tensors."); - } - - for (int i = 0; i < size; ++i) { - const shape_inference::ShapeAndType& s1 = (*handle_data_1)[i]; - const shape_inference::ShapeAndType& s2 = (*handle_data_2)[i]; - if (s1.dtype != s2.dtype) { - // TODO(apassos) resolve this in the manner of b/32476923 - return errors::InvalidArgument( - "Trying to merge handles pointing to different dtypes."); - } - merged_handle_data[i].dtype = s1.dtype; - TF_RETURN_IF_ERROR( - c->Merge(s1.shape, s2.shape, &merged_handle_data[i].shape)); - } - - c->set_output_handle_shapes_and_types(0, merged_handle_data); - } - - // The inputs 'cond', 'then', and 'else' must be broadcastable. - // TODO (yongtang): Consolidate 3-ary broadcast instead of - // multiple 2-ary broadcast. - ShapeHandle cond = c->input(0); - ShapeHandle then = c->input(1); - ShapeHandle else_ = c->input(2); - ShapeHandle other; - TF_RETURN_IF_ERROR( - BroadcastBinaryOpOutputShapeFnHelper(c, then, else_, &other)); - ShapeHandle output; - TF_RETURN_IF_ERROR( - BroadcastBinaryOpOutputShapeFnHelper(c, cond, other, &output)); - c->set_output(0, output); - return Status::OK(); - }); - // -------------------------------------------------------------------------- REGISTER_OP("MatMul") diff --git a/tensorflow/python/kernel_tests/where_op_test.py b/tensorflow/python/kernel_tests/where_op_test.py index 00d916c06eb..56c13904113 100644 --- a/tensorflow/python/kernel_tests/where_op_test.py +++ b/tensorflow/python/kernel_tests/where_op_test.py @@ -37,10 +37,10 @@ from tensorflow.python.platform import test class WhereOpTest(test.TestCase): - def _testWhere(self, x, truth, expected_err_re=None, fn=array_ops.where): + def _testWhere(self, x, truth, expected_err_re=None): with self.cached_session(use_gpu=True): - ans = fn(x) - self.assertTrue(ans.get_shape().is_compatible_with([None, x.ndim])) + ans = array_ops.where(x) + self.assertEqual([None, x.ndim], ans.get_shape().as_list()) if expected_err_re is None: tf_ans = self.evaluate(ans) self.assertAllClose(tf_ans, truth, atol=1e-10) @@ -48,40 +48,44 @@ class WhereOpTest(test.TestCase): with self.assertRaisesOpError(expected_err_re): self.evaluate(ans) - def _testWrongNumbers(self, fn=array_ops.where): + def testWrongNumbers(self): with self.session(use_gpu=True): with self.assertRaises(ValueError): - fn([False, True], [1, 2], None) + array_ops.where([False, True], [1, 2], None) with self.assertRaises(ValueError): - fn([False, True], None, [1, 2]) + array_ops.where([False, True], None, [1, 2]) - def _testBasicVec(self, fn=array_ops.where): + @test_util.run_deprecated_v1 + def testBasicVec(self): x = np.asarray([True, False]) truth = np.asarray([[0]], dtype=np.int64) - self._testWhere(x, truth, None, fn) + self._testWhere(x, truth) x = np.asarray([False, True, False]) truth = np.asarray([[1]], dtype=np.int64) - self._testWhere(x, truth, None, fn) + self._testWhere(x, truth) x = np.asarray([False, False, True, False, True]) truth = np.asarray([[2], [4]], dtype=np.int64) - self._testWhere(x, truth, None, fn) + self._testWhere(x, truth) - def _testRandomVec(self, fn=array_ops.where): + @test_util.run_deprecated_v1 + def testRandomVec(self): x = np.random.rand(1000000) > 0.5 truth = np.vstack([np.where(x)[0].astype(np.int64)]).T - self._testWhere(x, truth, None, fn) + 
+    self._testWhere(x, truth)
 
-  def _testBasicMat(self, fn=array_ops.where):
+  @test_util.run_deprecated_v1
+  def testBasicMat(self):
     x = np.asarray([[True, False], [True, False]])
 
     # Ensure RowMajor mode
     truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
 
-    self._testWhere(x, truth, None, fn)
+    self._testWhere(x, truth)
 
-  def _testBasic3Tensor(self, fn=array_ops.where):
+  @test_util.run_deprecated_v1
+  def testBasic3Tensor(self):
     x = np.asarray([[[True, False], [True, False]],
                     [[False, True], [False, True]],
                     [[False, False], [False, True]]])
@@ -90,41 +94,15 @@
 
     # Ensure RowMajor mode
     truth = np.asarray(
         [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]],
         dtype=np.int64)
 
-    self._testWhere(x, truth, None, fn)
+    self._testWhere(x, truth)
 
-  def _testRandom(self, dtype, expected_err_re=None, fn=array_ops.where):
+  def _testRandom(self, dtype, expected_err_re=None):
     shape = [127, 33, 53]
     x = np.random.randn(*shape) + 1j * np.random.randn(*shape)
     x = (np.random.randn(*shape) > 0).astype(dtype)
     truth = np.where(np.abs(x) > 0)  # Tuples of indices by axis.
     truth = np.vstack(truth).T  # Convert to [num_true, indices].
-    self._testWhere(x, truth, expected_err_re, fn)
-
-  def _testThreeArgument(self, fn=array_ops.where):
-    x = np.array([[-2, 3, -1], [1, -3, -3]])
-    np_val = np.where(x > 0, x * x, -x)
-    with self.test_session(use_gpu=True):
-      tf_val = self.evaluate(fn(constant_op.constant(x) > 0, x * x, -x))
-    self.assertAllEqual(tf_val, np_val)
-
-  def testWrongNumbers(self):
-    self._testWrongNumbers()
-
-  @test_util.run_deprecated_v1
-  def testBasicVec(self):
-    self._testBasicVec()
-
-  @test_util.run_deprecated_v1
-  def testRandomVec(self):
-    self._testRandomVec()
-
-  @test_util.run_deprecated_v1
-  def testBasicMat(self):
-    self._testBasicMat()
-
-  @test_util.run_deprecated_v1
-  def testBasic3Tensor(self):
-    self._testBasic3Tensor()
+    self._testWhere(x, truth, expected_err_re)
 
   @test_util.run_deprecated_v1
   def testRandomBool(self):
@@ -168,95 +146,12 @@ class WhereOpTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testThreeArgument(self):
-    self._testThreeArgument()
-
-  def testV2WrongNumbers(self):
-    self._testWrongNumbers(array_ops.where_v2)
-
-  def testV2BasicVec(self):
-    self._testBasicVec(array_ops.where_v2)
-
-  def testV2RandomVec(self):
-    self._testRandomVec(array_ops.where_v2)
-
-  def testV2BasicMat(self):
-    self._testBasicMat(array_ops.where_v2)
-
-  def testV2Basic3Tensor(self):
-    self._testBasic3Tensor(array_ops.where_v2)
-
-  def testV2RandomBool(self):
-    self._testRandom(np.bool, None, array_ops.where_v2)
-
-  def testV2RandomInt32(self):
-    self._testRandom(np.int32, None, array_ops.where_v2)
-
-  def testV2RandomInt64(self):
-    self._testRandom(np.int64, None, array_ops.where_v2)
-
-  def testV2RandomFloat(self):
-    self._testRandom(np.float32, None, array_ops.where_v2)
-
-  def testV2RandomDouble(self):
-    self._testRandom(np.float64, None, array_ops.where_v2)
-
-  def testV2RandomComplex64(self):
-    self._testRandom(np.complex64, None, array_ops.where_v2)
-
-  def testV2RandomComplex128(self):
-    self._testRandom(np.complex128, None, array_ops.where_v2)
-
-  def testV2RandomUint8(self):
-    self._testRandom(np.uint8, None, array_ops.where_v2)
-
-  def testV2RandomInt8(self):
-    self._testRandom(np.int8, None, array_ops.where_v2)
-
-  def testV2RandomInt16(self):
-    self._testRandom(np.int16, None, array_ops.where_v2)
-
-  def testV2ThreeArgument(self):
-    self._testThreeArgument(array_ops.where_v2)
-
-  def testV2Broadcasting(self):
-    f = np.random.normal(0, 1, (3, 5, 1, 1))
-    x = np.zeros((7, 11))
-    y = np.ones((7, 11))
-    np_val = np.where(f < 0, x, y)
-    with self.test_session(use_gpu=True):
-      tf_val = self.evaluate(
-          array_ops.where_v2(constant_op.constant(f) < 0, x, y))
+    x = np.array([[-2, 3, -1], [1, -3, -3]])
+    np_val = np.where(x > 0, x * x, -x)
+    with self.session(use_gpu=True):
+      tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
     self.assertAllEqual(tf_val, np_val)
 
-  def testV2ScalarBroadcasting(self):
-    x = np.zeros((7, 11))
-    y = np.ones((7, 11))
-    np_val = np.where(True, x, y)
-    with self.test_session(use_gpu=True):
-      tf_val = self.evaluate(
-          array_ops.where_v2(
-              constant_op.constant(True, dtype=dtypes.bool), x, y))
-    self.assertAllEqual(tf_val, np_val)
-
-  def testV2VectorBroadcasting(self):
-    x = np.zeros(7)
-    y = np.ones(7)
-    np_val = np.where([True], x, y)
-    with self.test_session(use_gpu=True):
-      tf_val = self.evaluate(
-          array_ops.where_v2(
-              constant_op.constant([True], dtype=dtypes.bool), x, y))
-    self.assertAllEqual(tf_val, np_val)
-
-  def testV2PredBroadcasting(self):
-    pred = np.array([1, 0, 0]).reshape((3, 1))
-    x = np.random.randn(3, 4)
-    y = np.random.randn(3, 4)
-    np_val = np.where(pred, x, y)
-    with self.test_session(use_gpu=True):
-      tf_val = self.evaluate(array_ops.where_v2(pred, x, y))
-    self.assertAllClose(tf_val, np_val)
-
   @test_util.run_deprecated_v1
   def testBatchSelect(self):
     x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192)  # [16384, 192]
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index 28c1cb3a772..11ef64d3a4f 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -3163,11 +3163,7 @@ def squeeze_v2(input, axis=None, name=None):
   return squeeze(input, axis, name)
 
 
-@tf_export(v1=["where"])
-@deprecation.deprecated(
-    date=None,
-    instructions="Use tf.where in 2.0, "
-    "which has the same broadcast rule as np.where")
+@tf_export("where")
 @dispatch.add_dispatch_support
 def where(condition, x=None, y=None, name=None):
   """Return the elements, either from `x` or `y`, depending on the `condition`.
@@ -3221,48 +3217,6 @@ def where(condition, x=None, y=None, name=None):
     raise ValueError("x and y must both be non-None or both be None.")
 
 
-@tf_export("where", v1=["where_v2"])
-def where_v2(condition, x=None, y=None, name=None):
-  """Return the elements, either from `x` or `y`, depending on the `condition`.
-
-  If both `x` and `y` are None, then this operation returns the coordinates of
-  true elements of `condition`. The coordinates are returned in a 2-D tensor
-  where the first dimension (rows) represents the number of true elements, and
-  the second dimension (columns) represents the coordinates of the true
-  elements. Keep in mind, the shape of the output tensor can vary depending on
-  how many true values there are in input. Indices are output in row-major
-  order.
-  If both non-None, `condition`, `x` and `y` must be broadcastable to the same
-  shape.
-  The `condition` tensor acts as a mask that chooses, based on the value at each
-  element, whether the corresponding element / row in the output should be taken
-  from `x` (if true) or `y` (if false).
-  Args:
-    condition: A `Tensor` of type `bool`
-    x: A Tensor which is of the same type as `y`, and may be broadcastable with
-      `condition` and `y`.
-    y: A Tensor which is of the same type as `x`, and may be broadcastable with
-      `condition` and `x`.
-    name: A name of the operation (optional).
-
-  Returns:
-    A `Tensor` with the same type as `x` and `y`, and shape that
-    is broadcasted from `condition`, `x`, and `y`, if `x`, `y` are non-None.
-    A `Tensor` with shape `(num_true, dim_size(condition))`.
-  Raises:
-    ValueError: When exactly one of `x` or `y` is non-None.
-  """
-  if x is None and y is None:
-    with ops.name_scope(name, "Where", [condition]) as name:
-      condition = ops.convert_to_tensor(
-          condition, preferred_dtype=dtypes.bool, name="condition")
-      return gen_array_ops.where(condition=condition, name=name)
-  elif x is not None and y is not None:
-    return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
-  else:
-    raise ValueError("x and y must both be non-None or both be None.")
-
-
 # pylint: disable=redefined-builtin
 @tf_export(v1=["reverse_sequence"])
 @deprecation.deprecated_args(None,
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
index e65238eab40..a5c4fb03e26 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
@@ -2424,10 +2424,6 @@ tf_module {
     name: "where"
     argspec: "args=[\'condition\', \'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
   }
-  member_method {
-    name: "where_v2"
-    argspec: "args=[\'condition\', \'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
-  }
   member_method {
     name: "while_loop"
     argspec: "args=[\'cond\', \'body\', \'loop_vars\', \'shape_invariants\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\', \'maximum_iterations\', \'return_same_structure\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\', \'None\', \'False\'], "
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt
index 5b71c0f9295..d2e12dc7156 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt
@@ -3348,10 +3348,6 @@ tf_module {
     name: "Select"
     argspec: "args=[\'condition\', \'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
   }
-  member_method {
-    name: "SelectV2"
-    argspec: "args=[\'condition\', \'t\', \'e\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
-  }
   member_method {
     name: "SelfAdjointEig"
     argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt
index 5b71c0f9295..d2e12dc7156 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt
@@ -3348,10 +3348,6 @@ tf_module {
     name: "Select"
     argspec: "args=[\'condition\', \'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
   }
-  member_method {
-    name: "SelectV2"
-    argspec: "args=[\'condition\', \'t\', \'e\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
-  }
   member_method {
     name: "SelfAdjointEig"
     argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
diff --git a/tensorflow/tools/compatibility/renames_v2.py b/tensorflow/tools/compatibility/renames_v2.py
index 3c9129b5b6e..201925889ef 100644
--- a/tensorflow/tools/compatibility/renames_v2.py
+++ b/tensorflow/tools/compatibility/renames_v2.py
@@ -1543,10 +1543,6 @@ renames = {
         'tf.compat.v1.variables_initializer',
     'tf.verify_tensor_all_finite':
         'tf.compat.v1.verify_tensor_all_finite',
-    'tf.where':
-        'tf.compat.v1.where',
-    'tf.where_v2':
-        'tf.compat.v2.where',
     'tf.wrap_function':
        'tf.compat.v1.wrap_function',
    'tf.write_file':
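
Note (illustration only, not part of the patch): the SelectV2 kernel and where_v2 wrapper deleted above gave tf.where the same broadcast rule as np.where, while the V1 tf.where this revert keeps requires condition, x, and y to have matching shapes. A minimal sketch of that rule in plain numpy, reusing the shapes from the deleted testV2PredBroadcasting test; the x/y fill values here are hypothetical, chosen so the selected rows are visible:

import numpy as np

# np.where broadcasts `pred` against `x` and `y`, which is the behavior
# the removed where_v2/SelectV2 path implemented.
pred = np.array([1, 0, 0]).reshape((3, 1))  # condition, shape (3, 1)
x = np.zeros((3, 4))                        # "then" values, shape (3, 4)
y = np.ones((3, 4))                         # "else" values, shape (3, 4)

out = np.where(pred, x, y)  # pred is broadcast column-wise to (3, 4)
print(out.shape)            # (3, 4): row 0 comes from x, rows 1-2 from y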