Deprecate C++ kernel for matrix exponential, which is now implemented as a Python function.
PiperOrigin-RevId: 210180168
This commit is contained in: parent 98884cf5cd · commit 829b6691f9
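For users, the replacement entry point is tf.linalg.matrix_exponential, whose documented reference behavior (per the api_def below) is scipy.linalg.expm. A minimal migration sketch, assuming a TF 2.x-style eager runtime (under the 2018-era graph API the result would instead be fetched via a session); the shapes and tolerances here are illustrative only:

import numpy as np
import tensorflow as tf
from scipy.linalg import expm

# A batch of square matrices; the exponential acts on the innermost [M, M] blocks.
a = np.random.uniform(-1.0, 1.0, size=(2, 3, 3))
tf_expm = tf.linalg.matrix_exponential(a)

# Cross-check one batch element against scipy.linalg.expm, the documented equivalent.
np.testing.assert_allclose(np.array(tf_expm)[0], expm(a[0]), rtol=1e-6, atol=1e-8)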
@@ -1,32 +1,5 @@
 op {
   graph_op_name: "MatrixExponential"
-  in_arg {
-    name: "input"
-    description: <<END
-Shape is `[..., M, M]`.
-END
-  }
-  out_arg {
-    name: "output"
-    description: <<END
-Shape is `[..., M, M]`.
-
-@compatibility(scipy)
-Equivalent to scipy.linalg.expm
-@end_compatibility
-END
-  }
-  summary: "Computes the matrix exponential of one or more square matrices:"
-  description: <<END
-\\(exp(A) = \sum_{n=0}^\infty A^n/n!\\)
-
-The exponential is computed using a combination of the scaling and squaring
-method and the Pade approximation. Details can be founds in:
-Nicholas J. Higham, "The scaling and squaring method for the matrix exponential
-revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
-
-The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-form square matrices. The output is a tensor of the same shape as the input
-containing the exponential for all input submatrices `[..., :, :]`.
-END
+  visibility: SKIP
+  summary: "Deprecated, use python implementation tf.linalg.matrix_exponential."
 }
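The removed description cites Higham's scaling-and-squaring method with a Padé approximant. As a rough illustration of the idea only (not the removed kernel's code; a truncated Taylor series stands in here for the Padé step, and only a single non-batched matrix is handled), a NumPy sketch:

import numpy as np

def expm_sketch(a, taylor_terms=16):
    # Scaling and squaring: exp(A) = exp(A / 2**s) ** (2**s).
    # Scale A so its 1-norm is at most 1, making the series converge fast.
    norm = np.linalg.norm(a, ord=1)
    s = max(0, int(np.ceil(np.log2(norm)))) if norm > 1 else 0
    scaled = a / 2.0 ** s
    # exp(scaled) via the truncated Taylor series sum_{n=0}^{N} scaled^n / n!
    # (Higham's method uses a Pade approximant at this step instead).
    result = np.eye(a.shape[0])
    term = np.eye(a.shape[0])
    for n in range(1, taylor_terms + 1):
        term = term @ scaled / n
        result = result + term
    # Undo the scaling by squaring s times.
    for _ in range(s):
        result = result @ result
    return result

With the norm scaled below 1, sixteen series terms already give near double-precision truncation error (on the order of 1/17!), which is why the scaling step matters.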
@@ -49,6 +49,7 @@ class MatrixExponentialOp : public LinearAlgebraOp<Scalar> {
   TF_DISALLOW_COPY_AND_ASSIGN(MatrixExponentialOp);
 };
 
+// Deprecated kernels (2018/08/21).
 REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<float>), float);
 REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<double>), double);
 REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<complex64>),
@@ -235,6 +235,8 @@ REGISTER_OP("MatrixInverse")
     .SetShapeFn(BatchUnchangedSquareShapeFn);
 
 REGISTER_OP("MatrixExponential")
+    .Deprecated(
+        27, "Use Python implementation tf.linalg.matrix_exponential instead.")
     .Input("input: T")
     .Output("output: T")
     .Attr("T: {double, float, complex64, complex128}")
@@ -96,10 +96,12 @@ limitations under the License.
 //     GraphDef. (7dec2017)
 // 27. Deprecate TensorArray ops v2 in favor of v3 and deprecated io_ops
 //     deprecated in favor of V2 ops. (2018/01/23)
+// 28. Deprecate MatrixExponential op in favor of Python implementation.
+//     (2018/08/21).
 
 #define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
 #define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
-#define TF_GRAPH_DEF_VERSION 26
+#define TF_GRAPH_DEF_VERSION 27
 
 // Checkpoint compatibility versions (the versions field in SavedSliceMeta).
 //
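The TF_GRAPH_DEF_VERSION bump is what arms the .Deprecated(27, ...) clause above: graphs whose recorded producer version is at least 27 are refused the old op. The producer version a graph records is visible from Python (a small check, assuming an installed tensorflow package; the exact value depends on the build):

import tensorflow as tf

g = tf.Graph()
# Every graph records the GraphDef producer version it was built with; ops
# deprecated at version N are rejected once the producer version reaches N.
print(g.graph_def_versions.producer)  # 27 or later for builds containing this change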
@@ -601,7 +601,7 @@ tf_py_test(
 
 tf_py_test(
     name = "matrix_logarithm_op_test",
-    size = "small",
+    size = "medium",
     srcs = ["matrix_logarithm_op_test.py"],
     additional_deps = [
         "//third_party/py/numpy",
@@ -30,6 +30,7 @@ from tensorflow.python.ops import gen_linalg_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import random_ops
 from tensorflow.python.ops import variables
+from tensorflow.python.ops.linalg import linalg_impl
 from tensorflow.python.platform import test
 
 
@@ -39,7 +40,7 @@ class LogarithmOpTest(test.TestCase):
     inp = x.astype(np_type)
     with self.test_session(use_gpu=True):
       # Verify that expm(logm(A)) == A.
-      tf_ans = gen_linalg_ops.matrix_exponential(
+      tf_ans = linalg_impl.matrix_exponential(
           gen_linalg_ops.matrix_logarithm(inp))
       out = tf_ans.eval()
       self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
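The test's round trip, expressed against the public API rather than the internal linalg_impl module (a sketch assuming TF 2.x eager execution and the tf.linalg.expm / tf.linalg.logm aliases; tolerances mirror the test's):

import numpy as np
import tensorflow as tf

# matrix_logarithm only has complex kernels, so the test data is complex.
a = np.random.uniform(-1.0, 1.0, size=(4, 4)).astype(np.complex64)
round_trip = tf.linalg.expm(tf.linalg.logm(a))  # expm(logm(A)) should recover A
np.testing.assert_allclose(a, np.array(round_trip), rtol=1e-4, atol=1e-3)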
@@ -98,16 +99,25 @@ class LogarithmOpTest(test.TestCase):
     self._verifyLogarithmComplex(np.empty([0, 2, 2], dtype=np.complex64))
     self._verifyLogarithmComplex(np.empty([2, 0, 0], dtype=np.complex64))
 
-  def testRandomSmallAndLarge(self):
+  def testRandomSmallAndLargeComplex64(self):
     np.random.seed(42)
-    for dtype in np.complex64, np.complex128:
-      for batch_dims in [(), (1,), (3,), (2, 2)]:
-        for size in 8, 31, 32:
-          shape = batch_dims + (size, size)
-          matrix = np.random.uniform(
-              low=-1.0, high=1.0,
-              size=np.prod(shape)).reshape(shape).astype(dtype)
-          self._verifyLogarithmComplex(matrix)
+    for batch_dims in [(), (1,), (3,), (2, 2)]:
+      for size in 8, 31, 32:
+        shape = batch_dims + (size, size)
+        matrix = np.random.uniform(
+            low=-1.0, high=1.0,
+            size=np.prod(shape)).reshape(shape).astype(np.complex64)
+        self._verifyLogarithmComplex(matrix)
+
+  def testRandomSmallAndLargeComplex128(self):
+    np.random.seed(42)
+    for batch_dims in [(), (1,), (3,), (2, 2)]:
+      for size in 8, 31, 32:
+        shape = batch_dims + (size, size)
+        matrix = np.random.uniform(
+            low=-1.0, high=1.0,
+            size=np.prod(shape)).reshape(shape).astype(np.complex128)
+        self._verifyLogarithmComplex(matrix)
 
   def testConcurrentExecutesWithoutError(self):
     with self.test_session(use_gpu=True) as sess: