[TF:XLA] Disable or fix assertions for various tests on XLA.
PiperOrigin-RevId: 269520699
parent 137de6aad1
commit db3b4c518e
@@ -87,6 +87,8 @@ cuda_py_test(
         "multi_and_single_gpu",
         "no_oss",  # TODO(b/133330625)
     ],
+    # TODO(b/141096229): Find problem with AssignAddVariableOps
+    xla_enable_strict_auto_jit = False,
 )
 
 py_library(
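
Note: setting xla_enable_strict_auto_jit = False drops the auto-generated
"strict auto-jit" variant of this test, which reruns the same test with XLA
auto-clustering forced on, until b/141096229 is resolved. As a hedged sketch
(the exact flags the harness passes are an assumption here), the strict
variant behaves roughly as if the test process were started like this:

    # Hedged sketch, not the actual test harness: force XLA auto-clustering
    # for the whole process. Must be set before TensorFlow initializes.
    import os
    os.environ["TF_XLA_FLAGS"] = ("--tf_xla_auto_jit=2 "
                                  "--tf_xla_min_cluster_size=1")
    import tensorflow as tf  # imported after setting the flag
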
@@ -67,8 +67,7 @@ class ProductDistributionTest(test.TestCase):
     self.assertEqual([4, 5], log_prob_x.shape)
 
     expected_log_prob_x = stats.norm(loc, scale).logpdf(x_).sum(-1)
-    self.assertAllClose(expected_log_prob_x, actual_log_prob_x,
-                        rtol=1e-5, atol=0.)
+    self.assertAllCloseAccordingToType(expected_log_prob_x, actual_log_prob_x)
 
   def testSampleAndLogProbMultivariate(self):
     loc = np.float32([[-1., 1], [1, -1]])
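
Note: assertAllCloseAccordingToType, defined on TensorFlowTestCase in
tensorflow/python/framework/test_util.py, replaces assertAllClose with
hand-picked tolerances throughout this change. It chooses rtol/atol from the
dtype of its arguments, loosening them for float16/bfloat16, which is closer
to the precision XLA-compiled kernels may actually compute in. A minimal,
self-contained sketch (the test class, method name, and values are
illustrative only, not from this change):

    import numpy as np
    from tensorflow.python.platform import test


    class ToleranceSketchTest(test.TestCase):

      def testDtypeAwareTolerance(self):
        expected = np.array([1., 2., 3.], dtype=np.float16)
        # Off by ~1e-3 relative: outside the float64 defaults (rtol=1e-6)
        # but inside the float16 defaults (half_rtol=1e-3, half_atol=1e-3).
        actual = expected * np.float16(1.001)
        self.assertAllCloseAccordingToType(expected, actual)


    if __name__ == "__main__":
      test.main()
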
@@ -91,8 +90,7 @@ class ProductDistributionTest(test.TestCase):
 
     expected_log_prob_x = stats.norm(loc, scale[:, None]).logpdf(
         x_).sum(-1).sum(-1)
-    self.assertAllClose(expected_log_prob_x, actual_log_prob_x,
-                        rtol=1e-6, atol=0.)
+    self.assertAllCloseAccordingToType(expected_log_prob_x, actual_log_prob_x)
 
   def testSampleConsistentStats(self):
     loc = np.float32([[-1., 1], [1, -1]])
@@ -121,11 +119,12 @@ class ProductDistributionTest(test.TestCase):
         ind.mean(), ind.variance(), ind.stddev(), ind.entropy(), ind.mode(),
     ])
 
-    self.assertAllClose(sample_mean_, actual_mean_, rtol=0.02, atol=0.)
-    self.assertAllClose(sample_var_, actual_var_, rtol=0.04, atol=0.)
-    self.assertAllClose(sample_std_, actual_std_, rtol=0.02, atol=0.)
-    self.assertAllClose(sample_entropy_, actual_entropy_, rtol=0.01, atol=0.)
-    self.assertAllClose(loc, actual_mode_, rtol=1e-6, atol=0.)
+    self.assertAllCloseAccordingToType(sample_mean_, actual_mean_, rtol=0.02)
+    self.assertAllCloseAccordingToType(sample_var_, actual_var_, rtol=0.04)
+    self.assertAllCloseAccordingToType(sample_std_, actual_std_, rtol=0.02)
+    self.assertAllCloseAccordingToType(
+        sample_entropy_, actual_entropy_, rtol=0.01)
+    self.assertAllCloseAccordingToType(loc, actual_mode_, rtol=1e-6)
 
   def testKLRaises(self):
     ind1 = independent_lib.Independent(
@@ -173,7 +172,7 @@ class ProductDistributionTest(test.TestCase):
 
     normal_kl = kullback_leibler.kl_divergence(normal1, normal2)
     ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
-    self.assertAllClose(
+    self.assertAllCloseAccordingToType(
         self.evaluate(math_ops.reduce_sum(normal_kl, axis=-1)),
         self.evaluate(ind_kl))
 
@@ -196,7 +195,7 @@ class ProductDistributionTest(test.TestCase):
 
     normal_kl = kullback_leibler.kl_divergence(normal1, normal2)
     ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
-    self.assertAllClose(
+    self.assertAllCloseAccordingToType(
         self.evaluate(normal_kl), self.evaluate(ind_kl))
 
   def testKLMultivariateToMultivariate(self):
@@ -217,7 +216,7 @@ class ProductDistributionTest(test.TestCase):
 
     mvn_kl = kullback_leibler.kl_divergence(mvn1, mvn2)
     ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
-    self.assertAllClose(
+    self.assertAllCloseAccordingToType(
         self.evaluate(math_ops.reduce_sum(mvn_kl, axis=[-1, -2])),
         self.evaluate(ind_kl))
 
@@ -264,9 +263,8 @@ class ProductDistributionTest(test.TestCase):
     self.assertAllEqual(image_shape, ind_event_shape)
     self.assertAllEqual(sample_shape + batch_shape + image_shape, x_shape)
     self.assertAllEqual(sample_shape + batch_shape, log_prob_x_shape)
-    self.assertAllClose(expected_log_prob(x_, logits),
-                        actual_log_prob_x,
-                        rtol=1e-6, atol=0.)
+    self.assertAllCloseAccordingToType(
+        expected_log_prob(x_, logits), actual_log_prob_x)
 
   def testMnistLikeStaticShape(self):
     self._testMnistLike(static_shape=True)
@@ -24,6 +24,7 @@ from tensorflow.contrib.distributions.python.ops import relaxed_bernoulli
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import errors_impl
+from tensorflow.python.framework import test_util
 from tensorflow.python.platform import test
 
 
@@ -136,6 +137,8 @@ class RelaxedBernoulliTest(test.TestCase):
     self.assertAllClose(np.nan, dist.log_prob(0.0).eval())
     self.assertAllClose([np.nan], [dist.log_prob(1.0).eval()])
 
+  @test_util.disable_xla(
+      "TODO(b/141092326): prevent negative values in Sigmoid on XLA:CPU")
   def testSampleN(self):
     """mean of quantized samples still approximates the Bernoulli mean."""
     with self.cached_session():
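
Note: test_util.disable_xla(reason) is a decorator factory: in the XLA variant
of a test target the wrapped method is skipped with the given reason, while on
the regular TF runtime it runs unchanged. A hedged, self-contained sketch of
the same pattern (the test class and the choice of op are illustrative, not
part of this change):

    from tensorflow.python.framework import errors_impl
    from tensorflow.python.framework import test_util
    from tensorflow.python.ops import check_ops
    from tensorflow.python.platform import test


    class RuntimeAssertSketchTest(test.TestCase):

      @test_util.disable_xla("XLA compiles away in-op assertions.")
      def testAssertPositiveFires(self):
        # assert_positive emits a runtime Assert op; XLA has no equivalent,
        # so the expected error only appears on the regular runtime.
        with self.assertRaises(errors_impl.InvalidArgumentError):
          self.evaluate(check_ops.assert_positive([-1.]))


    if __name__ == "__main__":
      test.main()
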
@@ -24,6 +24,7 @@ from tensorflow.contrib import distributions as distributions_lib
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import errors_impl
 from tensorflow.python.framework import random_seed
+from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.platform import test
@@ -98,14 +99,18 @@ class WishartCholeskyTest(test.TestCase):
       scale = make_pd(1., 2)
       df = 4
       w = distributions.WishartCholesky(df, chol(scale))
-      self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval())
+      self.assertAllCloseAccordingToType(
+          chol(wishart_var(df, scale)),
+          w.stddev().eval())
 
   def testVariance(self):
     with self.cached_session():
       scale = make_pd(1., 2)
       df = 4
       w = distributions.WishartCholesky(df, chol(scale))
-      self.assertAllEqual(wishart_var(df, scale), w.variance().eval())
+      self.assertAllCloseAccordingToType(
+          wishart_var(df, scale),
+          w.variance().eval())
 
   def testSample(self):
     with self.cached_session():
@@ -318,6 +323,7 @@ class WishartCholeskyTest(test.TestCase):
           sess.run(w.event_shape_tensor(),
                    feed_dict={scale_deferred: [chol_scale, chol_scale]}))
 
+  @test_util.disable_xla("XLA cannot assert inside of an op.")
   def testValidateArgs(self):
     with self.cached_session() as sess:
       df_deferred = array_ops.placeholder(dtypes.float32)
@@ -24,6 +24,7 @@ from tensorflow.python import tf2
 from tensorflow.python.client import session
 from tensorflow.python.compat import compat
 from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import gradient_checker_v2
 from tensorflow.python.ops import math_ops
@@ -278,14 +279,19 @@ if __name__ == "__main__":
       setattr(
           BatchMatmulOpTest,
           "testBatchMatmulOp_" + name + "_{}".format(use_static_shape_),
-          _GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_,
-                                use_static_shape_))
+          test_util.xla_allow_fallback(
+              "TODO(b/134526360): XLA:CPU hasn't implemented int32 dot.")(
+                  _GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_,
+                                        use_static_shape_)))
       # Broadcasting is supported only in v2.
       setattr(
           BatchMatmulOpTest, "testBatchMatmulBroadcasting_" + name +
          ("_%s" % use_static_shape_),
-          _GetBatchMatmulOpBroadcastingTest(dtype_, adjoint_a_, adjoint_b_,
-                                            use_static_shape_))
+          test_util.xla_allow_fallback(
+              "TODO(b/134526360): XLA:CPU hasn't implemented int32 dot.")(
+                  _GetBatchMatmulOpBroadcastingTest(dtype_, adjoint_a_,
+                                                    adjoint_b_,
+                                                    use_static_shape_)))
       if dtype_ == np.int32:
         continue
       setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name,
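
Note: test_util.xla_allow_fallback(description) also returns a decorator,
which is why it can wrap the dynamically generated test methods above: under
XLA, compilation is allowed to fall back to the classic TF runtime when the
compiler rejects an op (here, int32 dot on XLA:CPU) rather than failing the
test. A hedged sketch of the same generation pattern (the class and helper
names are illustrative, not from this change):

    import numpy as np
    from tensorflow.python.framework import test_util
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class GeneratedMatmulTest(test.TestCase):
      pass


    def _make_matmul_test(dtype):
      def test_fn(self):
        a = np.arange(6, dtype=dtype).reshape(2, 3)
        b = np.arange(6, dtype=dtype).reshape(3, 2)
        self.assertAllEqual(np.matmul(a, b),
                            self.evaluate(math_ops.matmul(a, b)))
      return test_fn


    setattr(
        GeneratedMatmulTest, "testMatmul_int32",
        test_util.xla_allow_fallback(
            "int32 dot may be unimplemented on some XLA backends.")(
                _make_matmul_test(np.int32)))

    if __name__ == "__main__":
      test.main()
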
@@ -858,7 +858,7 @@ class Conv2DTest(test.TestCase):
       self.assertShapeEqual(value, conv)
       tf_logging.debug("expected = %s", expected)
       tf_logging.debug("actual = %s", value)
-      self.assertArrayNear(expected, value.flatten(), err)
+      self.assertAllCloseAccordingToType(expected, value.flatten())
 
   def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                             conv_strides, padding):
@@ -205,6 +205,10 @@ class GatherTest(test.TestCase, parameterized.TestCase):
         gather_t = array_ops.gather(params, indices, axis=axis)
         self.assertEqual(None, gather_t.shape)
 
+  @test_util.disable_xla(
+      "Assertion inside an op is not supported in XLA. Instead XLA clamps the "
+      "index to be in bounds and returns the indexed value there (Don't rely "
+      "on this behavior).")
   def testBadIndicesCPU(self):
     with test_util.force_cpu():
       params = [[0, 1, 2], [3, 4, 5]]
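
Note: the reason string above records a real behavioral divergence rather than
a simple skip: the TF CPU kernel validates gather indices and raises
InvalidArgumentError, while XLA clamps out-of-bounds indices and returns the
value at the clamped position. A hedged sketch of the non-XLA behavior this
test relies on (illustrative, assumes eager execution; not part of the
change):

    from tensorflow.python.framework import errors_impl
    from tensorflow.python.ops import array_ops

    params = [[0, 1, 2], [3, 4, 5]]
    try:
      # Index 7 is out of range for axis 0 of params (valid range [0, 2)).
      array_ops.gather(params, [0, 7])
    except errors_impl.InvalidArgumentError:
      print("CPU kernel rejected the bad index")  # XLA would clamp instead
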
@@ -778,12 +778,14 @@ class SparseMathOpsTest(test_util.TensorFlowTestCase):
   def _check(self, result_tensor, result_np, input_sp_t):
     self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
     self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
-    self.assertAllEqual(input_sp_t.indices, result_tensor.indices)
-    self.assertAllEqual(input_sp_t.dense_shape, result_tensor.dense_shape)
+    self.assertAllCloseAccordingToType(input_sp_t.indices,
+                                       result_tensor.indices)
+    self.assertAllCloseAccordingToType(input_sp_t.dense_shape,
+                                       result_tensor.dense_shape)
 
     res_densified = sparse_ops.sparse_to_dense(
         result_tensor.indices, result_tensor.dense_shape, result_tensor.values)
-    self.assertAllEqual(result_np, res_densified)
+    self.assertAllCloseAccordingToType(result_np, res_densified)
 
   @test_util.run_deprecated_v1
   def testCwiseShapeValidation(self):