[TF:XLA] Enable more TF python tests for XLA:GPU

Additionally, increase tolerances in the flaky multi_worker_callback_test.py assertions.
Fix a typo ("Matrix size-compatible" → "Matrix size-incompatible") in the matmul error messages.

PiperOrigin-RevId: 249787704
This commit is contained in:
A. Unique TensorFlower 2019-05-24 00:34:26 -07:00 committed by TensorFlower Gardener
parent eafe861c2b
commit ba19774664
15 changed files with 37 additions and 9 deletions

View File

@@ -67,9 +67,9 @@ class MatMulOp : public XlaOpKernel {
OP_REQUIRES(ctx,
a_shape.dim_size(first_index) == b_shape.dim_size(second_index),
errors::InvalidArgument("Matrix size-compatible: In[0]: ",
a_shape.DebugString(), ", In[1]: ",
b_shape.DebugString()));
errors::InvalidArgument(
"Matrix size-incompatible: In[0]: ", a_shape.DebugString(),
", In[1]: ", b_shape.DebugString()));
xla::XlaOp a = ctx->Input(0);
xla::XlaOp b = ctx->Input(1);

View File

@@ -104,9 +104,9 @@ class QuantizedMatMulOp : public OpKernel {
OP_REQUIRES(context,
a.dim_size(dim_pair[0].first) == b.dim_size(dim_pair[0].second),
errors::InvalidArgument(
"Matrix size-compatible: In[0]: ", a.shape().DebugString(),
", In[1]: ", b.shape().DebugString()));
errors::InvalidArgument("Matrix size-incompatible: In[0]: ",
a.shape().DebugString(),
", In[1]: ", b.shape().DebugString()));
OP_REQUIRES(context, ((shift_c >= 0) && (shift_c <= 31)),
errors::InvalidArgument("shift_c must be between 0 and 31, "

View File

@@ -50,6 +50,7 @@ cuda_py_test(
"nomsan", # forge input size exceeded
"notsan", # forge input size exceeded
],
xla_enable_strict_auto_jit = True,
)
# b/132234211: Target added to support internal test target that runs the test

View File

@@ -1501,6 +1501,7 @@ cuda_py_test(
"noasan",
"optonly",
],
xla_enable_strict_auto_jit = True,
)
tf_py_test(
@@ -2972,6 +2973,7 @@ cuda_py_test(
"@absl_py//absl/testing:parameterized",
"//tensorflow/python:client_testlib",
],
xla_enable_strict_auto_jit = True,
)
py_library(
@@ -6448,6 +6450,8 @@ cuda_py_test(
tags = [
"grappler",
],
# This test analyzes the graph, but XLA changes the names of nodes.
xla_enable_strict_auto_jit = False,
)
tf_gen_op_wrapper_private_py(

View File

@@ -40,6 +40,7 @@ cuda_py_test(
"//tensorflow/python:variable_scope",
"//tensorflow/python:variables",
],
xla_enable_strict_auto_jit = True,
xla_enabled = True,
)
@@ -86,5 +87,6 @@ cuda_py_test(
"no_mac",
"no_windows",
],
xla_enable_strict_auto_jit = True,
xla_enabled = True,
)

View File

@@ -583,6 +583,7 @@ cuda_py_test(
"//tensorflow/python:sparse_tensor",
"//tensorflow/python/data/ops:dataset_ops",
],
xla_enable_strict_auto_jit = True,
)
tf_py_test(

View File

@@ -635,6 +635,7 @@ cuda_py_test(
tags = [
"multi_and_single_gpu",
],
xla_enable_strict_auto_jit = True,
)
tf_xla_py_test(
@@ -694,6 +695,7 @@ cuda_py_test(
"//tensorflow/python/eager:context",
"//tensorflow/python/eager:test",
],
xla_enable_strict_auto_jit = True,
)
cuda_py_test(
@@ -718,6 +720,7 @@ cuda_py_test(
tags = [
"multi_and_single_gpu",
],
xla_enable_strict_auto_jit = True,
)
cuda_py_test(
@@ -730,6 +733,7 @@ cuda_py_test(
"//tensorflow/python/eager:test",
],
grpc_enabled = True,
xla_enable_strict_auto_jit = True,
)
py_library(
@@ -912,6 +916,7 @@ cuda_py_test(
tags = [
"multi_and_single_gpu",
],
xla_enable_strict_auto_jit = True,
)
cuda_py_test(
@@ -943,6 +948,7 @@ cuda_py_test(
"multi_and_single_gpu",
"no_windows_gpu", # TODO(b/130551176)
],
xla_enable_strict_auto_jit = True,
)
distribute_py_test(

View File

@@ -1610,6 +1610,7 @@ class UnrollLSTMTest(test.TestCase):
class FunctionInlineControlTest(test.TestCase):
@test_util.disable_xla("XLA changes the names, breaking graph analysis")
def testFoo(self):
dtype = dtypes.float32
cfg = config_pb2.ConfigProto(

View File

@@ -267,6 +267,7 @@ cuda_py_test(
"no_oss", # http://b/119349471
"tf_integration_test",
],
xla_enable_strict_auto_jit = True,
)
cuda_py_test(
@@ -302,6 +303,7 @@ cuda_py_test(
"no_oss", # TODO(b/130369494): Investigate why it times out on OSS.
# TODO(b/123307453): Add "multi_and_single_gpu",
],
xla_enable_strict_auto_jit = True,
)
cuda_py_test(
@@ -323,6 +325,7 @@ cuda_py_test(
"multi_and_single_gpu",
"no_oss", # TODO(b/132384649): Flakily times out.
],
xla_enable_strict_auto_jit = True,
)
cuda_py_test(
@@ -340,6 +343,7 @@ cuda_py_test(
tags = [
"multi_and_single_gpu",
],
xla_enable_strict_auto_jit = True,
)
py_library(

View File

@@ -211,7 +211,8 @@ class KerasMultiWorkerCallbackTest(test_base.IndependentWorkerTestBase,
test_obj.assertAllClose(
history_after_one_more_epoch.history,
history_after_loading_weight_and_one_more_epoch.history)
history_after_loading_weight_and_one_more_epoch.history,
rtol=6e-6)
# Verify the temp files are indeed removed (no trace left behind).
for filepath in filepaths:
@@ -243,7 +244,8 @@ class KerasMultiWorkerCallbackTest(test_base.IndependentWorkerTestBase,
# restoring are closed.
test_obj.assertAllClose(
history_after_one_more_epoch.history,
history_after_model_restoring_and_one_more_epoch.history)
history_after_model_restoring_and_one_more_epoch.history,
rtol=5e-6)
history_one_more_epoch_without_model_restoring = model.fit(
x=train_ds, epochs=1, steps_per_epoch=steps)
@@ -606,7 +608,7 @@ class KerasMultiWorkerCallbackTest(test_base.IndependentWorkerTestBase,
def assert_all_elements_are_identical(list_to_check):
first_item = list_to_check[0]
for item in list_to_check[1:]:
self.assertAllClose(first_item, item, rtol=1e-5, atol=1e-5)
self.assertAllClose(first_item, item, rtol=2e-5, atol=1e-5)
# Important: the results from preemption interrupted and non-interrupted
# cases should give the same final results.

View File

@@ -117,6 +117,7 @@ cuda_py_test(
"//tensorflow/python/distribute:one_device_strategy",
"//tensorflow/python/keras",
],
xla_enable_strict_auto_jit = True,
)
py_library(
@@ -140,4 +141,5 @@ cuda_py_test(
"//tensorflow/python/distribute:one_device_strategy",
"//tensorflow/python/keras",
],
xla_enable_strict_auto_jit = True,
)

View File

@@ -240,6 +240,7 @@ cuda_py_test(
"//tensorflow/python/keras",
],
shard_count = 4,
xla_enable_strict_auto_jit = True,
)
cuda_py_test(

View File

@@ -1272,6 +1272,7 @@ cuda_py_test(
"//tensorflow/python:linalg_ops",
],
shard_count = 10,
xla_enable_strict_auto_jit = True,
)
tf_py_test(
@@ -3828,4 +3829,5 @@ cuda_py_test(
"//tensorflow/python:linalg_ops",
],
shard_count = 10,
xla_enable_strict_auto_jit = True,
)

View File

@@ -187,4 +187,5 @@ cuda_py_test(
"//tensorflow/python:platform",
"//tensorflow/python:random_ops",
],
xla_enable_strict_auto_jit = True,
)

View File

@@ -397,6 +397,7 @@ cuda_py_test(
],
shard_count = 10,
tags = ["no_mac"], # TODO(b/124822121): Re-enable this test.
xla_enable_strict_auto_jit = True,
)
tf_py_test(