Move LinearOperator to tf.linalg (with backwards-compatibility support in contrib.linalg).

PiperOrigin-RevId: 171732711
Eugene Brevdo 2017-10-10 14:16:21 -07:00 committed by TensorFlower Gardener
parent 3f4c6ccadf
commit 23418e4317
67 changed files with 1631 additions and 333 deletions
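For orientation, the two renames applied throughout this diff are `LinearOperatorTriL` → `LinearOperatorLowerTriangular` and `LinearOperatorUDVHUpdate` → `LinearOperatorLowRankUpdate`, and the package moves from `tensorflow/contrib/linalg` to `tensorflow/python/ops/linalg`, exposed as `tf.linalg`. A minimal before/after sketch, assuming a TensorFlow build that includes this commit:

```python
import tensorflow as tf

tril = [[1., 0.],
        [2., 3.]]

# Before this commit:
#   op = tf.contrib.linalg.LinearOperatorTriL(tril)

# After this commit, the canonical spelling lives under tf.linalg:
op = tf.linalg.LinearOperatorLowerTriangular(tril)
```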

View File

@ -490,7 +490,9 @@ filegroup(
"//tensorflow/python/keras:all_files",
"//tensorflow/python/kernel_tests:all_files",
"//tensorflow/python/kernel_tests/distributions:all_files",
"//tensorflow/python/kernel_tests/linalg:all_files",
"//tensorflow/python/ops/distributions:all_files",
"//tensorflow/python/ops/linalg:all_files",
"//tensorflow/python/profiler:all_files",
"//tensorflow/python/profiler/internal:all_files",
"//tensorflow/python/saved_model:all_files",

View File

@ -266,12 +266,14 @@ add_python_module("tensorflow/python/keras/_impl/keras/utils")
add_python_module("tensorflow/python/keras/_impl/keras/wrappers")
add_python_module("tensorflow/python/kernel_tests")
add_python_module("tensorflow/python/kernel_tests/distributions")
add_python_module("tensorflow/python/kernel_tests/linalg")
add_python_module("tensorflow/python/layers")
add_python_module("tensorflow/python/lib")
add_python_module("tensorflow/python/lib/core")
add_python_module("tensorflow/python/lib/io")
add_python_module("tensorflow/python/ops")
add_python_module("tensorflow/python/ops/distributions")
add_python_module("tensorflow/python/ops/linalg")
add_python_module("tensorflow/python/ops/losses")
add_python_module("tensorflow/python/platform")
add_python_module("tensorflow/python/platform/default")

View File

@ -72,7 +72,7 @@ class AffineLinearOperatorTest(test.TestCase):
[3, -2, 0],
[4, 3, 2]]],
dtype=np.float32)
scale = linalg.LinearOperatorTriL(tril, is_non_singular=True)
scale = linalg.LinearOperatorLowerTriangular(tril, is_non_singular=True)
affine = AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)

View File

@ -23,11 +23,11 @@ import itertools
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linear_operator_diag
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test

View File

@ -22,9 +22,9 @@ import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops import vector_diffeomixture as vector_diffeomixture_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_diag as linop_diag_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_identity as linop_identity_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.linalg import linear_operator_diag as linop_diag_lib
from tensorflow.python.ops.linalg import linear_operator_identity as linop_identity_lib
from tensorflow.python.platform import test

View File

@ -326,7 +326,7 @@ class Affine(bijector.Bijector):
shape_hint=shape_hint)
if perturb_factor is not None:
return linalg.LinearOperatorUDVHUpdate(
return linalg.LinearOperatorLowRankUpdate(
scale,
u=perturb_factor,
diag_update=perturb_diag,

View File

@ -19,7 +19,6 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@ -27,6 +26,7 @@ from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.linalg import linear_operator
__all__ = [
@ -66,7 +66,7 @@ class AffineLinearOperator(bijector.Bijector):
Example Use:
```python
linalg = tf.contrib.linalg
linalg = tf.linalg
x = [1., 2, 3]
@ -82,7 +82,7 @@ class AffineLinearOperator(bijector.Bijector):
tril = [[1., 0, 0],
[2, 1, 0],
[3, 2, 1]]
scale = linalg.LinearOperatorTriL(tril)
scale = linalg.LinearOperatorLowerTriangular(tril)
affine = AffineLinearOperator(shift, scale)
# In this case, `forward` is equivalent to:
# np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
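The equivalence claimed in the comment above is easy to sanity-check with plain NumPy. A small illustrative sketch (the `shift` value here is hypothetical, since the docstring hunk does not show it):

```python
import numpy as np

x = np.array([1., 2., 3.])
shift = np.array([-1., 0., 1.])        # hypothetical shift vector
tril = np.array([[1., 0., 0.],
                 [2., 1., 0.],
                 [3., 2., 1.]])

# The docstring's spelled-out form of `forward`:
y = np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
# ...is just a matrix-vector product plus a shift:
assert np.allclose(y, tril.dot(x) + shift)
```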

View File

@ -160,7 +160,7 @@ def make_tril_scale(
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorTriL(
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,

View File

@ -237,7 +237,7 @@ class MultivariateNormalDiagPlusLowRank(
scale_perturb_diag,
name="scale_perturb_diag")
if has_low_rank:
scale = linalg.LinearOperatorUDVHUpdate(
scale = linalg.LinearOperatorLowRankUpdate(
scale,
u=scale_perturb_factor,
diag_update=scale_perturb_diag,

View File

@ -174,8 +174,8 @@ class MultivariateNormalFullCovariance(mvn_tril.MultivariateNormalTriL):
covariance_matrix = control_flow_ops.with_dependencies(
[assert_symmetric], covariance_matrix)
# No need to validate that covariance_matrix is non-singular.
# LinearOperatorTriL has an assert_non_singular method that is called
# by the Bijector.
# LinearOperatorLowerTriangular has an assert_non_singular method that
# is called by the Bijector.
# However, cholesky() ignores the upper triangular part, so we do need
# to separately assert symmetric.
scale_tril = linalg_ops.cholesky(covariance_matrix)
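The comment above is worth unpacking: Cholesky routines read only the lower triangle of their input, so an asymmetric `covariance_matrix` would be silently accepted rather than rejected. NumPy's `cholesky` has the same only-reads-the-lower-triangle behavior, which makes it easy to demonstrate (illustrative, not part of the diff):

```python
import numpy as np

spd = np.array([[4., 2.],
                [2., 5.]])
corrupted = spd.copy()
corrupted[0, 1] = 999.   # break symmetry in the upper triangle only

# Both inputs factor identically because only the lower triangle is read,
# hence the separate assert_symmetric check in the code above.
assert np.allclose(np.linalg.cholesky(spd), np.linalg.cholesky(corrupted))
```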

View File

@ -18,7 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors import AffineLinearOperator
from tensorflow.python.framework import ops
@ -28,6 +27,7 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
__all__ = [
@ -92,7 +92,7 @@ class MultivariateNormalLinearOperator(
```python
ds = tf.contrib.distributions
la = tf.contrib.linalg
la = tf.linalg
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
@ -106,7 +106,7 @@ class MultivariateNormalLinearOperator(
mvn = ds.MultivariateNormalLinearOperator(
loc=mu,
scale=la.LinearOperatorTriL(scale))
scale=la.LinearOperatorLowerTriangular(scale))
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance().eval()
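The "covariance agrees with cholesky(cov) parameterization" remark reduces to a one-line identity: if `scale` is the Cholesky factor of `cov`, then `scale @ scale^T` recovers `cov`. An illustrative NumPy check:

```python
import numpy as np

cov = np.array([[2., 1.],
                [1., 3.]])
scale = np.linalg.cholesky(cov)            # lower-triangular factor
assert np.allclose(scale @ scale.T, cov)   # covariance recovered exactly
```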
@ -243,8 +243,8 @@ class MultivariateNormalLinearOperator(
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate)
and self.scale.is_self_adjoint):
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
@ -254,8 +254,8 @@ class MultivariateNormalLinearOperator(
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate)
and self.scale.is_self_adjoint):
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:

View File

@ -188,9 +188,9 @@ class MultivariateNormalTriL(
assert_proper_shapes=validate_args)
else:
# No need to validate that scale_tril is non-singular.
# LinearOperatorTriL has an assert_non_singular method that is called
# by the Bijector.
scale = linalg.LinearOperatorTriL(
# LinearOperatorLowerTriangular has an assert_non_singular
# method that is called by the Bijector.
scale = linalg.LinearOperatorLowerTriangular(
scale_tril,
is_non_singular=True,
is_self_adjoint=False,

View File

@ -23,10 +23,6 @@ import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
from tensorflow.contrib.linalg.python.ops import linear_operator_addition as linop_add_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_diag as linop_diag_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_full_matrix as linop_full_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_identity as linop_identity_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_tril as linop_tril_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@ -37,6 +33,10 @@ from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.linalg import linear_operator_diag as linop_diag_lib
from tensorflow.python.ops.linalg import linear_operator_full_matrix as linop_full_lib
from tensorflow.python.ops.linalg import linear_operator_identity as linop_identity_lib
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as linop_tril_lib
static_value = distribution_util.static_value
@ -185,7 +185,7 @@ class VectorDiffeomixture(distribution_lib.Distribution):
```python
ds = tf.contrib.distributions
la = tf.contrib.linalg
la = tf.linalg
# Create two batches of VectorDiffeomixtures, one with mix_loc=[0.] and
# another with mix_loc=[1]. In both cases, `K=2` and the affine
@ -772,8 +772,8 @@ def linop_scale(w, op):
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, linop_tril_lib.LinearOperatorTriL):
return linop_tril_lib.LinearOperatorTriL(
if isinstance(op, linop_tril_lib.LinearOperatorLowerTriangular):
return linop_tril_lib.LinearOperatorLowerTriangular(
tril=w[..., array_ops.newaxis, array_ops.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,

View File

@ -90,7 +90,7 @@ class VectorExponentialDiag(
```python
ds = tf.contrib.distributions
la = tf.contrib.linalg
la = tf.linalg
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.

View File

@ -18,7 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
@ -26,6 +25,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import exponential
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
__all__ = ["VectorExponentialLinearOperator"]
@ -108,7 +108,7 @@ class VectorExponentialLinearOperator(
```python
ds = tf.contrib.distributions
la = tf.contrib.linalg
la = tf.linalg
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
@ -247,7 +247,7 @@ class VectorExponentialLinearOperator(
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate) and
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
@ -258,7 +258,7 @@ class VectorExponentialLinearOperator(
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate) and
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(
array_ops.matrix_diag_part(self.scale.matmul(self.scale.to_dense())))
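In the `LinearOperatorLowRankUpdate` branches of `_variance` and `_stddev` above, self-adjointness is what licenses computing the variance as the diagonal of `S @ S`, since `S S^H = S S` when `S = S^H`. A NumPy rendition of that branch (illustrative values):

```python
import numpy as np

s = np.array([[2., 1.],
              [1., 3.]])          # a self-adjoint scale matrix S
cov = s @ s.T                     # implied covariance S S^H
variance = np.diag(s @ s)         # what the branch computes: diag_part(S @ S)
assert np.allclose(variance, np.diag(cov))
stddev = np.sqrt(variance)        # the corresponding _stddev computation
```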

View File

@ -20,7 +20,6 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
@ -28,6 +27,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import laplace
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
__all__ = [
@ -110,7 +110,7 @@ class VectorLaplaceLinearOperator(
```python
ds = tf.contrib.distributions
la = tf.contrib.linalg
la = tf.linalg
# Initialize a single 3-variate VectorLaplace with some desired covariance.
mu = [1., 2, 3]
@ -126,7 +126,7 @@ class VectorLaplaceLinearOperator(
# Divide scale by sqrt(2) so that the final covariance will be what we want.
vla = ds.VectorLaplaceLinearOperator(
loc=mu,
scale=la.LinearOperatorTriL(scale / tf.sqrt(2)))
scale=la.LinearOperatorLowerTriangular(scale / tf.sqrt(2)))
# Covariance agrees with cholesky(cov) parameterization.
vla.covariance().eval()
@ -271,8 +271,8 @@ class VectorLaplaceLinearOperator(
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return 2. * math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate)
and self.scale.is_self_adjoint):
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
2. * self.scale.matmul(self.scale.to_dense()))
else:
@ -282,8 +282,8 @@ class VectorLaplaceLinearOperator(
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return np.sqrt(2) * math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate)
and self.scale.is_self_adjoint):
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:

View File

@ -251,8 +251,8 @@ class _WishartLinearOperator(distribution.Distribution):
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for LinearOperatorDiag, each matmul is O(k**2), so
# this complexity is O(nbk**2). For LinearOperatorTriL, each matmul is
# O(k^3) so this step has complexity O(nbk^3).
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each matmul is O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator.matmul(x)
# Undo make batch-op ready.
@ -307,8 +307,8 @@ class _WishartLinearOperator(distribution.Distribution):
# Complexity: O(nbM*k) where M is the complexity of the operator solving
# a vector system. E.g., for LinearOperatorDiag, each solve is O(k), so
# this complexity is O(nbk**2). For LinearOperatorTriL, each solve is
# O(k**2) so this step has complexity O(nbk^3).
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each solve is O(k**2) so this step has complexity O(nbk^3).
scale_sqrt_inv_x_sqrt = self.scale_operator.solve(
scale_sqrt_inv_x_sqrt)
@ -544,7 +544,7 @@ class WishartCholesky(_WishartLinearOperator):
super(WishartCholesky, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorTriL(
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=scale,
is_non_singular=True,
is_positive_definite=True,
@ -655,7 +655,7 @@ class WishartFull(_WishartLinearOperator):
] if validate_args else [], chol)
super(WishartFull, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorTriL(
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=chol,
is_non_singular=True,
is_positive_definite=True,

View File

@ -10,152 +10,7 @@ exports_files(["LICENSE"])
package(default_visibility = ["//tensorflow:__subpackages__"])
load("//tensorflow:tensorflow.bzl", "cuda_py_tests")
cuda_py_tests(
name = "linear_operator_test",
size = "small",
srcs = ["python/kernel_tests/linear_operator_test.py"],
additional_deps = [
":linalg_py",
"//third_party/py/numpy",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
)
cuda_py_tests(
name = "linear_operator_addition_test",
size = "small",
srcs = ["python/kernel_tests/linear_operator_addition_test.py"],
additional_deps = [
":linalg_py",
"//third_party/py/numpy",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
)
cuda_py_tests(
name = "linear_operator_composition_test",
size = "medium",
srcs = ["python/kernel_tests/linear_operator_composition_test.py"],
additional_deps = [
":linalg_py",
"//third_party/py/numpy",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
tags = ["noasan"], # times out b/63678675
)
cuda_py_tests(
name = "linear_operator_diag_test",
size = "medium",
srcs = ["python/kernel_tests/linear_operator_diag_test.py"],
additional_deps = [
":linalg_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:linalg_ops",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
"//tensorflow/python:random_ops",
],
)
cuda_py_tests(
name = "linear_operator_identity_test",
size = "medium",
srcs = ["python/kernel_tests/linear_operator_identity_test.py"],
additional_deps = [
":linalg_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:linalg_ops",
"//tensorflow/python:platform_test",
"//tensorflow/python:random_ops",
],
)
cuda_py_tests(
name = "linear_operator_full_matrix_test",
size = "medium",
srcs = ["python/kernel_tests/linear_operator_full_matrix_test.py"],
additional_deps = [
":linalg_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
)
cuda_py_tests(
name = "linear_operator_tril_test",
size = "medium",
srcs = ["python/kernel_tests/linear_operator_tril_test.py"],
additional_deps = [
":linalg_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
)
cuda_py_tests(
name = "linear_operator_udvh_update_test",
size = "medium",
srcs = ["python/kernel_tests/linear_operator_udvh_update_test.py"],
additional_deps = [
":linalg_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
shard_count = 5,
)
cuda_py_tests(
name = "linear_operator_util_test",
size = "medium",
srcs = ["python/kernel_tests/linear_operator_util_test.py"],
additional_deps = [
":linalg_py",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
)
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
py_library(
name = "linalg_py",
@ -176,11 +31,29 @@ py_library(
"//tensorflow/python:random_seed",
"//tensorflow/python:tensor_util",
"//tensorflow/python:util",
"//tensorflow/python/ops/linalg",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
cuda_py_test(
name = "linear_operator_addition_test",
size = "small",
srcs = ["python/kernel_tests/linear_operator_addition_test.py"],
additional_deps = [
":linalg_py",
"//third_party/py/numpy",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
)
filegroup(
name = "all_files",
srcs = glob(

View File

@ -21,8 +21,8 @@ See the @{$python/contrib.linalg} guide.
@@LinearOperatorIdentity
@@LinearOperatorScaledIdentity
@@LinearOperatorFullMatrix
@@LinearOperatorTriL
@@LinearOperatorUDVHUpdate
@@LinearOperatorLowerTriangular
@@LinearOperatorLowRankUpdate
@@LinearOperatorComposition
@@add_operators
@ -33,14 +33,14 @@ from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.contrib.linalg.python.ops.linear_operator import *
from tensorflow.contrib.linalg.python.ops.linear_operator_addition import *
from tensorflow.contrib.linalg.python.ops.linear_operator_composition import *
from tensorflow.contrib.linalg.python.ops.linear_operator_diag import *
from tensorflow.contrib.linalg.python.ops.linear_operator_full_matrix import *
from tensorflow.contrib.linalg.python.ops.linear_operator_identity import *
from tensorflow.contrib.linalg.python.ops.linear_operator_tril import *
from tensorflow.contrib.linalg.python.ops.linear_operator_udvh_update import *
from tensorflow.python.ops.linalg.linear_operator import *
from tensorflow.python.ops.linalg.linear_operator_composition import *
from tensorflow.python.ops.linalg.linear_operator_diag import *
from tensorflow.python.ops.linalg.linear_operator_full_matrix import *
from tensorflow.python.ops.linalg.linear_operator_identity import *
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import *
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import *
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
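Because the shim above and the new `tf.linalg` module wildcard-import the same underlying modules, both namespaces should expose the very same class objects, so `isinstance` checks keep working across the old and new import paths. A quick check one could run (assuming a build with this commit):

```python
import tensorflow as tf

# One class object, two names: contrib re-exports the moved class.
assert tf.contrib.linalg.LinearOperatorDiag is tf.linalg.LinearOperatorDiag

op = tf.contrib.linalg.LinearOperatorDiag([1., 2.])
assert isinstance(op, tf.linalg.LinearOperatorDiag)
```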

View File

@ -19,10 +19,10 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_addition
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.platform import test
linalg = linalg_lib
@ -114,7 +114,7 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
def test_diag_tril_diag(self):
op1 = linalg.LinearOperatorDiag(
[1., 1.], is_non_singular=True, name="diag_a")
op2 = linalg.LinearOperatorTriL(
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [0., 2.]],
is_self_adjoint=True,
is_non_singular=True,
@ -125,7 +125,7 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
op_sum = add_operators([op1, op2, op3])
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertTrue(isinstance(op, linalg_lib.LinearOperatorTriL))
self.assertTrue(isinstance(op, linalg_lib.LinearOperatorLowerTriangular))
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense().eval())
# The diag operators will be self-adjoint (because real and diagonal).
@ -140,7 +140,8 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
op0 = linalg.LinearOperatorFullMatrix(
[[-1., -1.], [-1., -1.]], name="matrix")
op1 = linalg.LinearOperatorDiag([1., 1.], name="diag_a")
op2 = linalg.LinearOperatorTriL([[2., 0.], [1.5, 2.]], name="tril")
op2 = linalg.LinearOperatorLowerTriangular(
[[2., 0.], [1.5, 2.]], name="tril")
op3 = linalg.LinearOperatorDiag([3., 3.], name="diag_b")
with self.test_session():
op_sum = add_operators([op0, op1, op2, op3], operator_name="my_operator")
@ -189,7 +190,7 @@ class LinearOperatorOrderOfAdditionTest(test.TestCase):
def test_tier_1_additions_done_by_tier_1(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorTriL([[1.]])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[linear_operator_addition._AddAndReturnTriL()],
@ -199,12 +200,12 @@ class LinearOperatorOrderOfAdditionTest(test.TestCase):
# _BadAdder) was never reached.
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertTrue(isinstance(op_sum[0], linalg.LinearOperatorTriL))
self.assertTrue(isinstance(op_sum[0], linalg.LinearOperatorLowerTriangular))
def test_tier_1_additions_done_by_tier_1_with_order_flipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorTriL([[1.]])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnTriL()],
[linear_operator_addition._AddAndReturnDiag()],
@ -216,12 +217,12 @@ class LinearOperatorOrderOfAdditionTest(test.TestCase):
# Tier 2 was never used (therefore, _BadAdder didn't raise).
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
self.assertEqual(1, len(op_sum))
self.assertTrue(isinstance(op_sum[0], linalg.LinearOperatorTriL))
self.assertTrue(isinstance(op_sum[0], linalg.LinearOperatorLowerTriangular))
def test_cannot_add_everything_so_return_more_than_one_operator(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([2.])
tril5 = linalg.LinearOperatorTriL([[5.]])
tril5 = linalg.LinearOperatorLowerTriangular([[5.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
]
@ -237,7 +238,7 @@ class LinearOperatorOrderOfAdditionTest(test.TestCase):
if isinstance(op, linalg.LinearOperatorDiag):
found_diag = True
self.assertAllClose([[3.]], op.to_dense().eval())
if isinstance(op, linalg.LinearOperatorTriL):
if isinstance(op, linalg.LinearOperatorLowerTriangular):
found_tril = True
self.assertAllClose([[5.]], op.to_dense().eval())
self.assertTrue(found_diag and found_tril)
@ -245,7 +246,7 @@ class LinearOperatorOrderOfAdditionTest(test.TestCase):
def test_intermediate_tier_is_not_skipped(self):
diag1 = linalg.LinearOperatorDiag([1.])
diag2 = linalg.LinearOperatorDiag([1.])
tril = linalg.LinearOperatorTriL([[1.]])
tril = linalg.LinearOperatorLowerTriangular([[1.]])
addition_tiers = [
[linear_operator_addition._AddAndReturnDiag()],
[_BadAdder()],
@ -369,14 +370,14 @@ class AddAndReturnTriLTest(test.TestCase):
def test_diag_plus_tril(self):
diag = linalg.LinearOperatorDiag([1., 2.])
tril = linalg.LinearOperatorTriL([[10., 0.], [30., 0.]])
tril = linalg.LinearOperatorLowerTriangular([[10., 0.], [30., 0.]])
hints = linear_operator_addition._Hints(
is_positive_definite=True, is_non_singular=True)
self.assertTrue(self._adder.can_add(diag, diag))
self.assertTrue(self._adder.can_add(diag, tril))
operator = self._adder.add(diag, tril, "my_operator", hints)
self.assertTrue(isinstance(operator, linalg.LinearOperatorTriL))
self.assertTrue(isinstance(operator, linalg.LinearOperatorLowerTriangular))
with self.test_session():
self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense().eval())

View File

@ -22,14 +22,14 @@ import abc
import six
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.contrib.linalg.python.ops import linear_operator_full_matrix
from tensorflow.contrib.linalg.python.ops import linear_operator_identity
from tensorflow.contrib.linalg.python.ops import linear_operator_tril
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_full_matrix
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_lower_triangular
__all__ = []
@ -347,7 +347,7 @@ class _AddAndReturnTriL(_Adder):
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_tril.LinearOperatorTriL(
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
@ -397,7 +397,8 @@ def _type(operator):
"""Returns the type name constant (e.g. _TRIL) for operator."""
if isinstance(operator, linear_operator_diag.LinearOperatorDiag):
return _DIAG
if isinstance(operator, linear_operator_tril.LinearOperatorTriL):
if isinstance(operator,
linear_operator_lower_triangular.LinearOperatorLowerTriangular):
return _TRIL
if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix):
return _MATRIX

View File

@ -22,8 +22,8 @@ Subclasses of `LinearOperator` provide access to common methods on a
* @{tf.contrib.linalg.LinearOperatorIdentity}
* @{tf.contrib.linalg.LinearOperatorScaledIdentity}
* @{tf.contrib.linalg.LinearOperatorFullMatrix}
* @{tf.contrib.linalg.LinearOperatorTriL}
* @{tf.contrib.linalg.LinearOperatorUDVHUpdate}
* @{tf.contrib.linalg.LinearOperatorLowerTriangular}
* @{tf.contrib.linalg.LinearOperatorLowRankUpdate}
### Transformations and Combinations of operators

View File

@ -70,7 +70,6 @@ py_library(
":io_ops",
":layers",
":lib",
":linalg_ns",
":math_ops",
":metrics",
":nn",
@ -104,6 +103,7 @@ py_library(
"//tensorflow/python/keras",
"//tensorflow/python/ops/losses",
"//tensorflow/python/ops/distributions",
"//tensorflow/python/ops/linalg",
"//tensorflow/python/profiler",
"//tensorflow/python/saved_model",
] + if_not_windows([
@ -1710,21 +1710,6 @@ py_library(
],
)
py_library(
name = "linalg_ns",
srcs = [
"ops/linalg_impl.py",
"ops/linalg_ns.py",
],
srcs_version = "PY2AND3",
deps = [
":array_ops",
":linalg_ops",
":math_ops",
":special_math_ops",
],
)
py_library(
name = "linalg_grad",
srcs = ["ops/linalg_grad.py"],
@ -2223,6 +2208,7 @@ py_library(
":variable_scope",
":variables",
"//tensorflow/python/ops/distributions",
"//tensorflow/python/ops/linalg",
],
)

View File

@ -73,7 +73,6 @@ from tensorflow.python.ops.standard_ops import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
from tensorflow.python.ops import linalg_ns as linalg
# pylint: enable=wildcard-import
@ -90,6 +89,7 @@ from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import spectral_ops as spectral
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.losses import losses
from tensorflow.python.profiler import profiler
from tensorflow.python.saved_model import saved_model

View File

@ -1485,8 +1485,8 @@ cuda_py_test(
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:linalg_ops",
"//tensorflow/python:linalg_ns",
"//tensorflow/python:math_ops",
"//tensorflow/python/ops/linalg",
],
tags = ["no_windows_gpu"],
)

View File

@ -0,0 +1,149 @@
# Tests of TensorFlow kernels written using the Python API.
package(
default_visibility = ["//tensorflow:internal"],
)
licenses(["notice"]) # Apache 2.0
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
cuda_py_test(
name = "linear_operator_test",
size = "small",
srcs = ["linear_operator_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//third_party/py/numpy",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
)
cuda_py_test(
name = "linear_operator_composition_test",
size = "medium",
srcs = ["linear_operator_composition_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//third_party/py/numpy",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
tags = ["noasan"], # times out b/63678675
)
cuda_py_test(
name = "linear_operator_diag_test",
size = "medium",
srcs = ["linear_operator_diag_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:linalg_ops",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
"//tensorflow/python:random_ops",
],
)
cuda_py_test(
name = "linear_operator_identity_test",
size = "medium",
srcs = ["linear_operator_identity_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:linalg_ops",
"//tensorflow/python:platform_test",
"//tensorflow/python:random_ops",
],
)
cuda_py_test(
name = "linear_operator_full_matrix_test",
size = "medium",
srcs = ["linear_operator_full_matrix_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
)
cuda_py_test(
name = "linear_operator_lower_triangular_test",
size = "medium",
srcs = ["linear_operator_lower_triangular_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
)
cuda_py_test(
name = "linear_operator_low_rank_update_test",
size = "medium",
srcs = ["linear_operator_low_rank_update_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:platform_test",
],
shard_count = 5,
)
cuda_py_test(
name = "linear_operator_util_test",
size = "medium",
srcs = ["linear_operator_util_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform_test",
],
)
filegroup(
name = "all_files",
srcs = glob(
["**/*"],
exclude = [
"**/METADATA",
"**/OWNERS",
],
),
visibility = ["//tensorflow:__subpackages__"],
)

View File

@ -0,0 +1,18 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kernel tests for tf.linalg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

View File

@ -19,13 +19,13 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib

View File

@ -17,13 +17,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib

View File

@ -19,13 +19,13 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib

View File

@ -19,13 +19,13 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test

View File

@ -19,12 +19,12 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
@ -32,7 +32,7 @@ random_seed.set_random_seed(23)
rng = np.random.RandomState(0)
class BaseLinearOperatorUDVHUpdatetest(object):
class BaseLinearOperatorLowRankUpdatetest(object):
"""Base test for this type of operator."""
# Subclasses should set these attributes to either True or False.
@ -51,7 +51,7 @@ class BaseLinearOperatorUDVHUpdatetest(object):
@property
def _dtypes_to_test(self):
# TODO(langmore) Test complex types once cholesky works with them.
# See comment in LinearOperatorUDVHUpdate.__init__.
# See comment in LinearOperatorLowRankUpdate.__init__.
return [dtypes.float32, dtypes.float64]
@property
@ -108,7 +108,7 @@ class BaseLinearOperatorUDVHUpdatetest(object):
base_operator = linalg.LinearOperatorDiag(
base_diag_ph, is_positive_definite=True)
operator = linalg.LinearOperatorUDVHUpdate(
operator = linalg.LinearOperatorLowRankUpdate(
base_operator,
u=u_ph,
v=v_ph if self._use_v else None,
@ -122,7 +122,7 @@ class BaseLinearOperatorUDVHUpdatetest(object):
else:
base_operator = linalg.LinearOperatorDiag(
base_diag, is_positive_definite=True)
operator = linalg.LinearOperatorUDVHUpdate(
operator = linalg.LinearOperatorLowRankUpdate(
base_operator,
u,
v=v if self._use_v else None,
@ -164,8 +164,8 @@ class BaseLinearOperatorUDVHUpdatetest(object):
return operator, mat, feed_dict
class LinearOperatorUDVHUpdatetestWithDiagUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
class LinearOperatorLowRankUpdatetestWithDiagUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
@ -182,8 +182,8 @@ class LinearOperatorUDVHUpdatetestWithDiagUseCholesky(
self._rtol[dtypes.float64] = 1e-10
class LinearOperatorUDVHUpdatetestWithDiagCannotUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
class LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D !> 0, L > 0 ==> A !> 0 and we cannot use a Cholesky."""
@ -201,8 +201,8 @@ class LinearOperatorUDVHUpdatetestWithDiagCannotUseCholesky(
self._rtol[dtypes.float64] = 1e-9
class LinearOperatorUDVHUpdatetestNoDiagUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
class LinearOperatorLowRankUpdatetestNoDiagUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UU^H, L > 0 ==> A > 0 and we can use a Cholesky."""
@ -219,8 +219,8 @@ class LinearOperatorUDVHUpdatetestNoDiagUseCholesky(
self._rtol[dtypes.float64] = 1e-10
class LinearOperatorUDVHUpdatetestNoDiagCannotUseCholesky(
BaseLinearOperatorUDVHUpdatetest,
class LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""A = L + UV^H, L > 0 ==> A is not symmetric and we cannot use a Cholesky."""
@ -238,8 +238,8 @@ class LinearOperatorUDVHUpdatetestNoDiagCannotUseCholesky(
self._rtol[dtypes.float64] = 1e-9
class LinearOperatorUDVHUpdatetestWithDiagNotSquare(
BaseLinearOperatorUDVHUpdatetest,
class LinearOperatorLowRankUpdatetestWithDiagNotSquare(
BaseLinearOperatorLowRankUpdatetest,
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
@ -248,7 +248,7 @@ class LinearOperatorUDVHUpdatetestWithDiagNotSquare(
_use_v = True
class LinearOpearatorUDVHUpdateBroadcastsShape(test.TestCase):
class LinearOpearatorLowRankUpdateBroadcastsShape(test.TestCase):
"""Test that the operator's shape is the broadcast of arguments."""
def test_static_shape_broadcasts_up_from_operator_to_other_args(self):
@ -256,8 +256,7 @@ class LinearOpearatorUDVHUpdateBroadcastsShape(test.TestCase):
u = array_ops.ones(shape=[2, 3, 2])
diag = array_ops.ones(shape=[2, 2])
operator = linalg.LinearOperatorUDVHUpdate(
base_operator, u, diag)
operator = linalg.LinearOperatorLowRankUpdate(base_operator, u, diag)
# domain_dimension is 3
self.assertAllEqual([2, 3, 3], operator.shape)
@ -272,7 +271,7 @@ class LinearOpearatorUDVHUpdateBroadcastsShape(test.TestCase):
u_shape_ph = array_ops.placeholder(dtypes.int32)
u = array_ops.ones(shape=u_shape_ph)
operator = linalg.LinearOperatorUDVHUpdate(base_operator, u)
operator = linalg.LinearOperatorLowRankUpdate(base_operator, u)
feed_dict = {
num_rows_ph: 3,
@ -290,34 +289,34 @@ class LinearOpearatorUDVHUpdateBroadcastsShape(test.TestCase):
u = rng.rand(5, 3, 2)
v = rng.rand(4, 3, 2)
with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
linalg.LinearOperatorUDVHUpdate(base_operator, u=u, v=v)
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, v=v)
def test_u_and_base_operator_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(
num_rows=3, batch_shape=[4], dtype=np.float64)
u = rng.rand(5, 3, 2)
with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
linalg.LinearOperatorUDVHUpdate(base_operator, u=u)
linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
def test_u_and_base_operator_incompatible_domain_dimension(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 4, 2)
with self.assertRaisesRegexp(ValueError, "not compatible"):
linalg.LinearOperatorUDVHUpdate(base_operator, u=u)
linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
def test_u_and_diag_incompatible_low_rank_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
diag = rng.rand(5, 4) # Last dimension should be 2
with self.assertRaisesRegexp(ValueError, "not compatible"):
linalg.LinearOperatorUDVHUpdate(base_operator, u=u, diag_update=diag)
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
def test_diag_incompatible_batch_shape_raises(self):
base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
u = rng.rand(5, 3, 2)
diag = rng.rand(4, 2) # First dimension should be 5
with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
linalg.LinearOperatorUDVHUpdate(base_operator, u=u, diag_update=diag)
linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
if __name__ == "__main__":

View File

@ -17,18 +17,18 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
linalg = linalg_lib
random_seed.set_random_seed(23)
class LinearOperatorTriLTest(
class LinearOperatorLowerTriangularTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@ -50,10 +50,10 @@ class LinearOperatorTriLTest(
# tril is random and we want the same value used for both mat and
# feed_dict.
tril = tril.eval()
operator = linalg.LinearOperatorTriL(tril_ph)
operator = linalg.LinearOperatorLowerTriangular(tril_ph)
feed_dict = {tril_ph: tril}
else:
operator = linalg.LinearOperatorTriL(tril)
operator = linalg.LinearOperatorLowerTriangular(tril)
feed_dict = None
mat = array_ops.matrix_band_part(tril, -1, 0)
@ -64,14 +64,14 @@ class LinearOperatorTriLTest(
# Singular matrix with one positive eigenvalue and one zero eigenvalue.
with self.test_session():
tril = [[1., 0.], [1., 0.]]
operator = linalg.LinearOperatorTriL(tril)
operator = linalg.LinearOperatorLowerTriangular(tril)
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
tril = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorTriL(
operator = linalg.LinearOperatorLowerTriangular(
tril,
is_positive_definite=True,
is_non_singular=True,
@ -82,7 +82,7 @@ class LinearOperatorTriLTest(
def test_tril_must_have_at_least_two_dims_or_raises(self):
with self.assertRaisesRegexp(ValueError, "at least 2 dimensions"):
linalg.LinearOperatorTriL([1.])
linalg.LinearOperatorLowerTriangular([1.])
if __name__ == "__main__":

View File

@ -17,7 +17,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@ -25,6 +25,7 @@ from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.platform import test
linalg = linalg_lib

View File

@ -19,16 +19,14 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
random_seed.set_random_seed(23)
rng = np.random.RandomState(0)

View File

@ -22,9 +22,9 @@ import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ns as linalg
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import test

View File

@ -523,8 +523,8 @@ def matrix_diag_transform(matrix, transform=None, name=None):
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorTriL ignores the upper triangle.
operator = LinearOperatorTriL(chol)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
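The point of `transform=tf.nn.softplus` in the example above is that softplus is strictly positive, and a lower-triangular matrix with a strictly positive diagonal is a valid, non-singular Cholesky factor. A small NumPy emulation of the recipe (hypothetical values):

```python
import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

matrix = np.array([[-1., 2.],
                   [ 3., 0.]])    # unconstrained values, e.g. a layer output

# Emulates matrix_diag_transform(matrix, transform=tf.nn.softplus):
chol = matrix.copy()
np.fill_diagonal(chol, softplus(np.diag(matrix)))

# Positive diagonal => non-singular lower-triangular factor; the upper
# triangle is irrelevant since LinearOperatorLowerTriangular ignores it.
assert (np.diag(chol) > 0).all()
```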

View File

@ -0,0 +1,38 @@
package(
default_visibility = ["//tensorflow:internal"],
)
licenses(["notice"]) # Apache 2.0
py_library(
name = "linalg",
srcs = glob(["*.py"]),
srcs_version = "PY2AND3",
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:check_ops",
"//tensorflow/python:control_flow_ops",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:math_ops",
"//tensorflow/python:nn",
"//tensorflow/python:nn_ops",
"//tensorflow/python:random_ops",
"//tensorflow/python:special_math_ops",
"//tensorflow/python:tensor_util",
"//tensorflow/python:util",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
filegroup(
name = "all_files",
srcs = glob(
["**/*"],
exclude = [
"**/METADATA",
"**/OWNERS",
],
),
visibility = ["//tensorflow:__subpackages__"],
)

View File

@ -12,10 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.linalg namespace.
@@logdet
"""
"""Public API for tf.linalg namespace."""
from __future__ import absolute_import
from __future__ import division
@ -29,7 +26,14 @@ from tensorflow.python.ops import special_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.linalg_impl import *
from tensorflow.python.ops.linalg.linalg_impl import *
from tensorflow.python.ops.linalg.linear_operator import *
from tensorflow.python.ops.linalg.linear_operator_composition import *
from tensorflow.python.ops.linalg.linear_operator_diag import *
from tensorflow.python.ops.linalg.linear_operator_full_matrix import *
from tensorflow.python.ops.linalg.linear_operator_identity import *
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import *
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import *
# pylint: enable=wildcard-import
# Linear algebra ops.

View File

@ -23,13 +23,13 @@ import contextlib
import numpy as np
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
__all__ = ["LinearOperator"]
@ -192,7 +192,7 @@ class LinearOperator(object):
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._graph_parents = graph_parents

View File

@ -18,13 +18,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.linalg import linear_operator
__all__ = ["LinearOperatorComposition"]

View File

@ -18,13 +18,13 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
__all__ = ["LinearOperatorDiag",]

View File

@ -18,11 +18,11 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
__all__ = ["LinearOperatorFullMatrix"]

View File

@ -20,8 +20,6 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
@ -30,6 +28,8 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
__all__ = [
"LinearOperatorIdentity",

View File

@ -18,20 +18,22 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.contrib.linalg.python.ops import linear_operator_identity
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
__all__ = ["LinearOperatorUDVHUpdate",]
__all__ = [
"LinearOperatorLowRankUpdate",
]
class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
"""Perturb a `LinearOperator` with a rank `K` update.
This operator acts like a [batch] matrix `A` with shape
@ -39,7 +41,7 @@ class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `M x N` matrix.
`LinearOperatorUDVHUpdate` represents `A = L + U D V^H`, where
`LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where
```
L, is a LinearOperator representing [batch] M x N matrices
@ -65,7 +67,7 @@ class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
is_positive_definite=True)
# Perturb with a rank 2 perturbation
operator = LinearOperatorUDVHUpdate(
operator = LinearOperatorLowRankUpdate(
operator=diag_operator,
u=[[1., 2.], [-1., 3.], [0., 0.]],
diag_update=[11., 12.],
@ -94,7 +96,7 @@ class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
### Performance
Suppose `operator` is a `LinearOperatorUDVHUpdate` of shape `[M, N]`,
Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`,
made from a rank `K` update of `base_operator` which performs `.matmul(x)` on
`x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly
for `solve` and `determinant`). Then, if `x.shape = [N, R]`,
@ -134,8 +136,8 @@ class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorUDVHUpdate"):
"""Initialize a `LinearOperatorUDVHUpdate`.
name="LinearOperatorLowRankUpdate"):
"""Initialize a `LinearOperatorLowRankUpdate`.
This creates a `LinearOperator` of the form `A = L + U D V^H`, with
`L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
@ -249,7 +251,7 @@ class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
self.u, self._diag_update, self.v]
graph_parents = [p for p in graph_parents if p is not None]
super(LinearOperatorUDVHUpdate, self).__init__(
super(LinearOperatorLowRankUpdate, self).__init__(
dtype=self._base_operator.dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
@ -262,8 +264,8 @@ class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
contrib_tensor_util.assert_same_float_dtype(
(base_operator, self.u, self.v, self._diag_update))
check_ops.assert_same_float_dtype((base_operator, self.u, self.v,
self._diag_update))
self._check_shapes()
# Pre-compute the so-called "capacitance" matrix
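
Reviewer note: after this change the renamed operator is importable as
`tf.linalg.LinearOperatorLowRankUpdate` (the golden files below confirm the
export). A minimal sketch of the `A = L + U D V^H` form described in the
docstring above, checked against an explicit dense computation; the numbers
are illustrative, and omitting `v` is assumed (per the class docstring) to
mean `V = U`:

```python
import tensorflow as tf

# Base operator L: a 3 x 3 diagonal operator.
diag_operator = tf.linalg.LinearOperatorDiag(
    [1., 2., 3.], is_non_singular=True, is_positive_definite=True)

# Rank-2 update: A = L + U D V^H, with V = U here.
operator = tf.linalg.LinearOperatorLowRankUpdate(
    base_operator=diag_operator,
    u=[[1., 2.], [-1., 3.], [0., 0.]],
    diag_update=[11., 12.])

# The dense form should match L + U D U^T computed explicitly.
u = tf.constant([[1., 2.], [-1., 3.], [0., 0.]])
dense_expected = (tf.matrix_diag([1., 2., 3.])
                  + tf.matmul(u * [11., 12.], u, transpose_b=True))

with tf.Session() as sess:
  actual, expected = sess.run([operator.to_dense(), dense_expected])
```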

View File

@ -18,18 +18,20 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
__all__ = ["LinearOperatorTriL",]
__all__ = [
"LinearOperatorLowerTriangular",
]
class LinearOperatorTriL(linear_operator.LinearOperator):
class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square lower triangular matrix.
This operator acts like a [batch] lower triangular matrix `A` with shape
@ -37,13 +39,14 @@ class LinearOperatorTriL(linear_operator.LinearOperator):
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `N x N` matrix.
`LinearOperatorTriL` is initialized with a `Tensor` having dimensions
`[B1,...,Bb, N, N]`. The upper triangle of the last two dimensions is ignored.
`LinearOperatorLowerTriangular` is initialized with a `Tensor` having
dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two
dimensions is ignored.
```python
# Create a 2 x 2 lower-triangular linear operator.
tril = [[1., 2.], [3., 4.]]
operator = LinearOperatorTriL(tril)
operator = LinearOperatorLowerTriangular(tril)
# The upper triangle is ignored.
operator.to_dense()
@ -62,7 +65,7 @@ class LinearOperatorTriL(linear_operator.LinearOperator):
# Create a [2, 3] batch of 4 x 4 linear operators.
tril = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorTriL(tril)
operator = LinearOperatorLowerTriangular(tril)
```
#### Shape compatibility
@ -77,7 +80,7 @@ class LinearOperatorTriL(linear_operator.LinearOperator):
#### Performance
Suppose `operator` is a `LinearOperatorTriL` of shape `[N, N]`,
Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` involves `N^2 * R` multiplications.
@ -108,8 +111,8 @@ class LinearOperatorTriL(linear_operator.LinearOperator):
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorTriL"):
r"""Initialize a `LinearOperatorTriL`.
name="LinearOperatorLowerTriangular"):
r"""Initialize a `LinearOperatorLowerTriangular`.
Args:
tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.
@ -147,7 +150,7 @@ class LinearOperatorTriL(linear_operator.LinearOperator):
self._tril = array_ops.matrix_band_part(tril, -1, 0)
self._diag = array_ops.matrix_diag_part(self._tril)
super(LinearOperatorTriL, self).__init__(
super(LinearOperatorLowerTriangular, self).__init__(
dtype=self._tril.dtype,
graph_parents=[self._tril],
is_non_singular=is_non_singular,
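
As with the low-rank update above, the renamed class is exported as
`tf.linalg.LinearOperatorLowerTriangular`. A short sketch of the point of the
specialization, assuming the TF 1.x session API: `solve` is a triangular
back-substitution rather than a general dense solve, and the determinant
comes straight off the diagonal:

```python
import tensorflow as tf

# Lower-triangular operator; the upper triangle of `tril` is ignored.
tril = [[1., 2.], [3., 4.]]
operator = tf.linalg.LinearOperatorLowerTriangular(tril)

rhs = tf.constant([[1.], [0.]])
x = operator.solve(rhs)                  # Triangular solve, O(N^2) per column.
logdet = operator.log_abs_determinant()  # log|1 * 4|, from the diagonal.

with tf.Session() as sess:
  print(sess.run([x, logdet]))
```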

View File

@ -22,16 +22,16 @@ import abc
import numpy as np
import six
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
@ -428,7 +428,7 @@ def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
if not contrib_tensor_util.is_tensor(shape):
if not tensor_util.is_tensor(shape):
shape = tensor_shape.TensorShape(shape)
# Matrix must be square.
shape[-1].assert_is_compatible_with(shape[-2])
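
For context on the `is_tensor` change above: this helper builds a random
positive definite matrix for the operator tests. A sketch of the standard
construction (a Gram matrix plus a small diagonal shift), which mirrors the
idea but is not claimed to be the exact implementation in
`linear_operator_test_util`:

```python
import tensorflow as tf

def random_pd_matrix_sketch(shape, dtype=tf.float32, eps=1e-3):
  """Random positive definite matrix via A A^H + eps * I (illustrative)."""
  a = tf.random_normal(shape, dtype=dtype)
  gram = tf.matmul(a, a, adjoint_b=True)  # A A^H is positive semi-definite.
  return gram + eps * tf.eye(shape[-1], batch_shape=shape[:-2], dtype=dtype)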

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperatorComposition.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,134 @@
path: "tensorflow.linalg.LinearOperatorComposition"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_composition.LinearOperatorComposition\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "operators"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'operators\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}
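
The golden above pins the public surface of `LinearOperatorComposition`
under its new path. A quick sketch of what composition means operationally,
assuming the documented constructor (`operators` is an ordered list and the
result acts like their matrix product):

```python
import tensorflow as tf

op1 = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
op2 = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., -1.]])

# Acts like the matrix product op1 @ op2.
composed = tf.linalg.LinearOperatorComposition([op1, op2])

x = tf.constant([[1.], [1.]])
with tf.Session() as sess:
  print(sess.run(composed.matmul(x)))  # Same as op1.matmul(op2.matmul(x)).
```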

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperatorDiag.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,134 @@
path: "tensorflow.linalg.LinearOperatorDiag"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_diag.LinearOperatorDiag\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "diag"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'diag\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'LinearOperatorDiag\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}
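
As a sanity check against the argspecs above (e.g. `matvec` and `solvevec`
defaulting `adjoint=False`), a small `tf.linalg.LinearOperatorDiag` sketch
with illustrative values:

```python
import tensorflow as tf

operator = tf.linalg.LinearOperatorDiag(
    [1., -2., 3.], is_non_singular=True, is_self_adjoint=True)

v = tf.constant([1., 1., 1.])
y = operator.matvec(v)        # Elementwise product with the diagonal.
x = operator.solvevec(y)      # Recovers v: elementwise division.
det = operator.determinant()  # Product of diagonal entries: -6.

with tf.Session() as sess:
  print(sess.run([y, x, det]))
```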

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperatorFullMatrix.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,130 @@
path: "tensorflow.linalg.LinearOperatorFullMatrix"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_full_matrix.LinearOperatorFullMatrix\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'matrix\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'LinearOperatorFullMatrix\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperatorIdentity.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,131 @@
path: "tensorflow.linalg.LinearOperatorIdentity"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.LinearOperatorIdentity\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.BaseLinearOperatorIdentity\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'num_rows\', \'batch_shape\', \'dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'assert_proper_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\', \'True\', \'True\', \'True\', \'False\', \'LinearOperatorIdentity\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'mat\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}
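
The `LinearOperatorIdentity` golden documents `num_rows`, `batch_shape`, and
`assert_proper_shapes` in `__init__`, and an `add_to_tensor` that takes `mat`
rather than `x`. A sketch of the batched identity, assuming the export works
as the golden describes:

```python
import tensorflow as tf

# A [2]-batch of 3 x 3 identity operators, materialized only on demand.
operator = tf.linalg.LinearOperatorIdentity(
    num_rows=3, batch_shape=[2], dtype=tf.float32)

x = tf.random_normal(shape=[2, 3, 3])
y = operator.matmul(x)              # Identity matmul: returns x.
shifted = operator.add_to_tensor(x) # Adds 1 to each matrix diagonal.

with tf.Session() as sess:
  sess.run([y, shifted])
```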

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperatorLowRankUpdate.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,154 @@
path: "tensorflow.linalg.LinearOperatorLowRankUpdate"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_low_rank_update.LinearOperatorLowRankUpdate\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "base_operator"
mtype: "<type \'property\'>"
}
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "diag_operator"
mtype: "<type \'property\'>"
}
member {
name: "diag_update"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_diag_update_positive"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member {
name: "u"
mtype: "<type \'property\'>"
}
member {
name: "v"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'base_operator\', \'u\', \'diag_update\', \'v\', \'is_diag_update_positive\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'LinearOperatorLowRankUpdate\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperatorLowerTriangular.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,130 @@
path: "tensorflow.linalg.LinearOperatorLowerTriangular"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_lower_triangular.LinearOperatorLowerTriangular\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'tril\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'LinearOperatorLowerTriangular\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperatorScaledIdentity.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,135 @@
path: "tensorflow.linalg.LinearOperatorScaledIdentity"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.LinearOperatorScaledIdentity\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.BaseLinearOperatorIdentity\'>"
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "multiplier"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'num_rows\', \'multiplier\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'assert_proper_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'False\', \'LinearOperatorScaledIdentity\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'mat\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}
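
`LinearOperatorScaledIdentity` differs from the plain identity mainly in the
`multiplier` argument (note in the argspec above that `num_rows` and
`multiplier` have no defaults). A sketch with illustrative values:

```python
import tensorflow as tf

# Acts like 2 * I for a 2 x 2 system.
operator = tf.linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=2.)

rhs = tf.constant([[4.], [6.]])
x = operator.solve(rhs)                  # rhs / 2.
logdet = operator.log_abs_determinant()  # N * log|multiplier| = 2 * log 2.

with tf.Session() as sess:
  print(sess.run([x, logdet]))
```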

View File

@ -0,0 +1,14 @@
path: "tensorflow.linalg.LinearOperator.__metaclass__"
tf_class {
is_instance: "<class \'abc.ABCMeta\'>"
member_method {
name: "__init__"
}
member_method {
name: "mro"
}
member_method {
name: "register"
argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@ -0,0 +1,129 @@
path: "tensorflow.linalg.LinearOperator"
tf_class {
is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
is_instance: "<type \'object\'>"
member {
name: "batch_shape"
mtype: "<type \'property\'>"
}
member {
name: "domain_dimension"
mtype: "<type \'property\'>"
}
member {
name: "dtype"
mtype: "<type \'property\'>"
}
member {
name: "graph_parents"
mtype: "<type \'property\'>"
}
member {
name: "is_non_singular"
mtype: "<type \'property\'>"
}
member {
name: "is_positive_definite"
mtype: "<type \'property\'>"
}
member {
name: "is_self_adjoint"
mtype: "<type \'property\'>"
}
member {
name: "is_square"
mtype: "<type \'property\'>"
}
member {
name: "name"
mtype: "<type \'property\'>"
}
member {
name: "range_dimension"
mtype: "<type \'property\'>"
}
member {
name: "shape"
mtype: "<type \'property\'>"
}
member {
name: "tensor_rank"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'dtype\', \'graph_parents\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_to_tensor"
argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
}
member_method {
name: "assert_non_singular"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
}
member_method {
name: "assert_positive_definite"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
}
member_method {
name: "assert_self_adjoint"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
}
member_method {
name: "batch_shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
}
member_method {
name: "determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
}
member_method {
name: "diag_part"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
}
member_method {
name: "domain_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
}
member_method {
name: "log_abs_determinant"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
}
member_method {
name: "matmul"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
}
member_method {
name: "matvec"
argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
}
member_method {
name: "range_dimension_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
}
member_method {
name: "shape_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
}
member_method {
name: "solve"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
}
member_method {
name: "solvevec"
argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
}
member_method {
name: "tensor_rank_tensor"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
}
member_method {
name: "to_dense"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
}
member_method {
name: "trace"
argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
}
}
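
The base-class golden above is the contract every concrete operator in this
commit satisfies. A sketch of writing shape-agnostic code against that
contract, assuming any of the concrete operators exported under `tf.linalg`
(here `LinearOperatorFullMatrix`, with illustrative values):

```python
import tensorflow as tf

def describe(operator, rhs):
  """Uses only the base LinearOperator API documented above."""
  return {
      "shape": operator.shape_tensor(),
      "log_abs_det": operator.log_abs_determinant(),
      "solution": operator.solve(rhs),
  }

operator = tf.linalg.LinearOperatorFullMatrix(
    [[2., 0.], [1., 1.]], is_non_singular=True)

with tf.Session() as sess:
  print(sess.run(describe(operator, tf.constant([[2.], [3.]]))))
```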

View File

@ -1,5 +1,37 @@
path: "tensorflow.linalg"
tf_module {
member {
name: "LinearOperator"
mtype: "<class \'abc.ABCMeta\'>"
}
member {
name: "LinearOperatorComposition"
mtype: "<class \'abc.ABCMeta\'>"
}
member {
name: "LinearOperatorDiag"
mtype: "<class \'abc.ABCMeta\'>"
}
member {
name: "LinearOperatorFullMatrix"
mtype: "<class \'abc.ABCMeta\'>"
}
member {
name: "LinearOperatorIdentity"
mtype: "<class \'abc.ABCMeta\'>"
}
member {
name: "LinearOperatorLowRankUpdate"
mtype: "<class \'abc.ABCMeta\'>"
}
member {
name: "LinearOperatorLowerTriangular"
mtype: "<class \'abc.ABCMeta\'>"
}
member {
name: "LinearOperatorScaledIdentity"
mtype: "<class \'abc.ABCMeta\'>"
}
member_method {
name: "band_part"
argspec: "args=[\'input\', \'num_lower\', \'num_upper\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "