Remove workarounds for XLA's previous inf/nan behavior after it's been fixed.

PiperOrigin-RevId: 313559788
Change-Id: I3d5fe3d7b7267d073ef45fe042503932d99b03cb
This commit is contained in:
Tres Popp 2020-05-28 03:52:35 -07:00 committed by TensorFlower Gardener
parent 2217251dfa
commit 35312cceb1
5 changed files with 11 additions and 24 deletions

View File

@@ -19,7 +19,6 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import itertools import itertools
import os
import numpy as np import numpy as np
@@ -1609,8 +1608,4 @@ class BinaryOpsTest(xla_test.XLATestCase):
if __name__ == "__main__": if __name__ == "__main__":
# TODO(b/130689556): XLA CPU does not honor inf/nan which causes problems
os.environ[
"XLA_FLAGS"] = "--xla_cpu_enable_fast_math=false " + os.environ.get(
"XLA_FLAGS", "")
googletest.main() googletest.main()

View File

@@ -347,17 +347,15 @@ class UnaryOpsTest(xla_test.XLATestCase):
expected=np.array( expected=np.array(
[1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype)) [1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype))
# TODO(b/130689556): Turn this on for CPU when we start honoring NaNs. self._assertOpOutputMatchesExpected(
if self.device != "XLA_CPU": math_ops.tanh,
self._assertOpOutputMatchesExpected( np.array([[1, 2, 3, 4], [np.inf, -np.inf, np.nan, 20],
math_ops.tanh, [19, -19, 22, -22]],
np.array([[1, 2, 3, 4], [np.inf, -np.inf, np.nan, 20], dtype=dtype),
[19, -19, 22, -22]], expected=np.array(
dtype=dtype), [[0.76159418, 0.96402758, 0.99505478, 0.99932933],
expected=np.array( [1.0, -1.0, np.nan, 1.0], [1.0, -1.0, 1.0, -1.0]],
[[0.76159418, 0.96402758, 0.99505478, 0.99932933], dtype=dtype))
[1.0, -1.0, np.nan, 1.0], [1.0, -1.0, 1.0, -1.0]],
dtype=dtype))
self._assertOpOutputMatchesExpected( self._assertOpOutputMatchesExpected(
nn_ops.log_softmax, nn_ops.log_softmax,

View File

@@ -5122,8 +5122,6 @@ cuda_py_test(
srcs = ["ops/nn_test.py"], srcs = ["ops/nn_test.py"],
python_version = "PY3", python_version = "PY3",
tags = ["no_windows"], tags = ["no_windows"],
# TODO(b/130689556): Numerical differences due to fast math on CPU.
xla_enable_strict_auto_jit = False,
deps = [ deps = [
":array_ops", ":array_ops",
":client_testlib", ":client_testlib",

View File

@@ -18,8 +18,6 @@ from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import os
import numpy as np import numpy as np
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
@@ -133,8 +131,4 @@ class NumericsTest(test.TestCase):
if __name__ == "__main__": if __name__ == "__main__":
# TODO(b/130689556): XLA CPU does not honor inf/nan which causes problems
os.environ[
"XLA_FLAGS"] = "--xla_cpu_enable_fast_math=false " + os.environ.get(
"XLA_FLAGS", "")
test.main() test.main()

View File

@@ -1207,6 +1207,7 @@ class DataFormatVectorPermuteTest(test_lib.TestCase):
y_val = self.evaluate(y) y_val = self.evaluate(y)
self.assertAllEqual(y_val, [4, 9]) self.assertAllEqual(y_val, [4, 9])
@test_util.disable_xla("unsupported data format")
def testNHWCToWHCN(self): def testNHWCToWHCN(self):
x_val = [7, 4, 9, 3] x_val = [7, 4, 9, 3]
x = constant_op.constant(x_val) x = constant_op.constant(x_val)
@@ -1215,6 +1216,7 @@ class DataFormatVectorPermuteTest(test_lib.TestCase):
y_val = self.evaluate(y) y_val = self.evaluate(y)
self.assertAllEqual(y_val, [9, 4, 3, 7]) self.assertAllEqual(y_val, [9, 4, 3, 7])
@test_util.disable_xla("unsupported data format")
def testNHWCToWHCN_Size2(self): def testNHWCToWHCN_Size2(self):
x_val = [4, 9] x_val = [4, 9]
x = constant_op.constant(x_val) x = constant_op.constant(x_val)