Clean up a few disable_tfrt annotations.

PiperOrigin-RevId: 334224724
Change-Id: I63d1c72fb9977abb54669e5154259684d2f920c7
Xiao Yu 2020-09-28 13:20:33 -07:00 committed by TensorFlower Gardener
parent 16c7ece1fd
commit d5f13af5f5
9 changed files with 36 additions and 145 deletions
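For context: throughout the files below, test_util.disable_tfrt is applied as a decorator on individual test methods (and occasionally on whole TestCase classes) to skip them under the TFRT runtime, taking a free-form reason string or bug reference. This commit removes stale annotations and tightens the reasons on the remaining ones. The sketch below only approximates that pattern and is not TensorFlow's actual implementation; is_tfrt_enabled and the TF_USE_TFRT environment flag are assumptions made for the example.

import functools
import os
import unittest


def is_tfrt_enabled():
  # Assumed stand-in for the real runtime check; an environment flag keeps
  # this sketch self-contained.
  return os.environ.get("TF_USE_TFRT", "0") == "1"


def disable_tfrt(reason):
  """Skips the decorated test, or every test in a class, when TFRT is on."""
  def decorator(obj):
    if isinstance(obj, type):
      # Class-level use, as on TensorAndShapeTest further down in this diff.
      return unittest.skipIf(is_tfrt_enabled(), reason)(obj)

    @functools.wraps(obj)
    def wrapper(self, *args, **kwargs):
      if is_tfrt_enabled():
        self.skipTest(reason)
      return obj(self, *args, **kwargs)
    return wrapper
  return decorator


class ExampleTest(unittest.TestCase):

  @disable_tfrt("b/169375363: error code support")
  def test_error_codes(self):
    self.assertEqual(1 + 1, 2)

Carrying a bug reference such as b/169375363 in the reason string, rather than a generic "not supported" note, presumably makes these annotations easier to audit and remove once the referenced bug is fixed, which is the pattern the diff below moves toward.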

@ -108,16 +108,12 @@ class ResNet50Test(tf.test.TestCase):
def test_apply(self):
self._apply(defun=False)
@test_util.disable_tfrt(
'TFE_ContextGetExecutorForThread not implemented b/156188669')
def test_apply_async(self):
self._apply(defun=False, execution_mode=context.ASYNC)
@test_util.disable_tfrt('Graph is not supported yet. b/156187905')
def test_apply_with_defun(self):
self._apply(defun=True)
@test_util.disable_tfrt('Graph is not supported yet. b/156187905')
def test_apply_with_defun_async(self):
self._apply(defun=True, execution_mode=context.ASYNC)

@ -322,7 +322,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
func = lambda: math_ops.multiply(m, m)
self._run(func, num_iters)
@test_util.disable_tfrt("numpy() not supported")
def benchmark_np_multiply(self):
self._benchmark_np_multiply(self._m_2, 30000)
@ -331,7 +330,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m = self._m_2.cpu()
self._benchmark_tf_multiply(m, 30000)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tf_multiply_GPU(self):
if not context.num_gpus():
return
@ -344,7 +342,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m = self._m_2.cpu()
self._benchmark_tf_multiply_op(m, 30000)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tf_multiply_op_GPU(self):
if not context.num_gpus():
return
@ -358,7 +355,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m2 = self._m_1_3_3_1.cpu()
self._benchmark_tf_conv2d(m1, m2, 30000)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tf_conv2d_GPU(self):
if not context.num_gpus():
return
@ -371,7 +367,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m = self._m_2
self._run(lambda: gen_array_ops.identity(m), 30000)
@test_util.disable_tfrt("identity not supported")
def benchmark_slowpath_tf_identity(self):
self._run(lambda: gen_array_ops.identity(1), 30000)
@ -386,7 +381,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._run(f, 30000)
@test_util.disable_tfrt("identity not supported")
def benchmark_tf_gradient_function_identity(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
@ -394,14 +388,12 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
30000)
@test_util.disable_tfrt("identity not supported")
def benchmark_tf_gradient_forward_identity(self):
with backprop.GradientTape() as tape:
m = self._m_2
tape.watch(m)
self._run(lambda: gen_array_ops.identity(m), 30000)
@test_util.disable_tfrt("gradients not supported")
def benchmark_tf_gradient_tape_push_pop(self):
def f():
@ -410,7 +402,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._run(f, 30000)
@test_util.disable_tfrt("gradients not supported")
def benchmark_tf_gradient_function_no_op(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
@ -558,7 +549,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("async not supported")
def benchmark_tf_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
@ -604,13 +594,11 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_defun_matmul_relaxed_shape(
m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
def benchmark_defun_args_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_args_matmul(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("async not supported")
def benchmark_defun_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
@ -628,7 +616,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def_function.run_functions_eagerly(False)
@test_util.disable_tfrt("async not supported")
def _benchmark_matmul_forward_backward_2_by_2_CPU_async(
self, run_eager=False):
def_function.run_functions_eagerly(run_eager)
@ -643,14 +630,12 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU(False)
@test_util.disable_tfrt("async not supported")
def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU_async(False)
def benchmark_defun_eager_matmul_forward_backward_2_by_2_CPU(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU(True)
@test_util.disable_tfrt("async not supported")
def benchmark_defun_eager_matmul_forward_backward_2_by_2_CPU_async(self):
self._benchmark_matmul_forward_backward_2_by_2_CPU_async(True)
@ -662,7 +647,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("async not supported")
def benchmark_tf_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
@ -674,7 +658,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
@ -683,7 +666,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
@ -692,7 +674,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_defun_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
@ -701,7 +682,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_defun_matmul_2_by_2_with_signature_GPU(self):
if not context.num_gpus():
return
@ -710,7 +690,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_defun_matmul_with_signature(
m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_defun_matmul_2_by_2_relaxed_shape_GPU(self):
if not context.num_gpus():
return
@ -719,7 +698,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_defun_matmul_relaxed_shape(
m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
def benchmark_defun_args_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
@ -727,7 +705,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m = self._m_2_by_2.gpu()
self._benchmark_defun_args_matmul(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("async not supported")
def benchmark_defun_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
@ -757,7 +734,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("async not supported")
def benchmark_tf_matmul_100_by_784_CPU_async(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
@ -779,7 +755,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
@ -792,7 +767,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tf_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
@ -801,7 +775,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("async not supported")
def benchmark_tf_matmul_100_by_784_GPU_async(self):
if not context.num_gpus():
return
@ -813,7 +786,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
@ -822,7 +794,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
@ -831,7 +802,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_defun_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
@ -840,7 +810,8 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("copy to GPU not supported")
@test_util.disable_tfrt(
"b/169371527: Support inserting transfer op in lowering.")
def benchmark_nested_defun_matmul_100_by_784_GPU(self):
m = self._m_100_by_784.gpu()
self._benchmark_nested_defun_matmul(
@ -952,47 +923,45 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
func = lambda: math_ops.reduce_logsumexp(x)
self._run(func, 3000, execution_mode=execution_mode)
@test_util.disable_tfrt("reduce logsumexp not supported")
@test_util.disable_tfrt("b/169371018: Support ScalarHost in RTFB.")
def benchmark_tf_reduce_logsumexp_CPU(self):
self._benchmark_tf_reduce_logsumexp()
@test_util.disable_tfrt("reduce logsumexp not supported")
@test_util.disable_tfrt("b/169371018: Support ScalarHost in RTFB.")
def benchmark_tf_reduce_logsumexp_CPU_async(self):
self._benchmark_tf_reduce_logsumexp(execution_mode=context.ASYNC)
@test_util.disable_tfrt("reduce logsumexp not supported")
@test_util.disable_tfrt("b/169371018: Support ScalarHost in RTFB.")
def benchmark_tf_reduce_logsumexp_GPU(self):
self._benchmark_tf_reduce_logsumexp(device=GPU)
@test_util.disable_tfrt("reduce logsumexp not supported")
@test_util.disable_tfrt("b/169371018: Support ScalarHost in RTFB.")
def benchmark_tf_reduce_logsumexp_GPU_async(self):
self._benchmark_tf_reduce_logsumexp(device=GPU,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("reduce logsumexp not supported")
@test_util.disable_tfrt(
"b/169371527: Support inserting transfer op in lowering.")
def benchmark_tf_reduce_logsumexp_CPU_defunc(self):
self._benchmark_tf_reduce_logsumexp(defunc=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
@test_util.disable_tfrt(
"b/169371527: Support inserting transfer op in lowering.")
def benchmark_tf_reduce_logsumexp_CPU_async_defun(self):
self._benchmark_tf_reduce_logsumexp(
execution_mode=context.ASYNC, defunc=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU_defun(self):
self._benchmark_tf_reduce_logsumexp(device=GPU, defunc=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU_async_defun(self):
self._benchmark_tf_reduce_logsumexp(
device=GPU, execution_mode=context.ASYNC, defunc=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU_defun_compile(self):
self._benchmark_tf_reduce_logsumexp(
device=GPU, defunc=True, xla_compile=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU_async_defun_compile(self):
self._benchmark_tf_reduce_logsumexp(
device=GPU, execution_mode=context.ASYNC, defunc=True, xla_compile=True)
@ -1004,19 +973,15 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
func = lambda: math_ops.tensordot(a, b, [[1], [0]])
self._run(func, 30000, execution_mode=execution_mode)
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_CPU(self):
self._benchmark_tf_tensordot()
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_CPU_async(self):
self._benchmark_tf_tensordot(execution_mode=context.ASYNC)
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_GPU(self):
self._benchmark_tf_tensordot(device=GPU)
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_GPU_async(self):
self._benchmark_tf_tensordot(device=GPU, execution_mode=context.ASYNC)
@ -1025,63 +990,48 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
func = lambda: array_ops.zeros(shape, dtype)
self._run(func, 3000)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_float32_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.float32)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_bool_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.bool)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_string_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.string)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_float32_GPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.float32, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_bool_GPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.bool, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_float32_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.float32)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_bool_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.bool)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_string_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.string)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_float32_GPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.float32, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_bool_GPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.bool, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_float32_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.float32)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_bool_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.bool)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_string_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.string)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_float32_GPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.float32, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_bool_GPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.bool, device=GPU)
@ -1180,7 +1130,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m = self._m_2_by_2.cpu()
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_tf_transpose_2_by_2_GPU(self):
with context.device(GPU):
m = self._m_2_by_2.gpu()
@ -1191,7 +1140,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("Cannot convert array to EagerTensor of dtype int32")
def benchmark_tf_transpose_variable_2_by_2_GPU(self):
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
@ -1261,7 +1209,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_read_variable_op_2_by_2_GPU(self):
if not context.num_gpus():
return
@ -1275,7 +1222,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("copy to GPU not supported")
def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
if not context.num_gpus():
return
@ -1293,8 +1239,7 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
self._run(scan, 100)
@test_util.disable_tfrt(
'tf.While not supported in TF to CoreRT lowering. b/162685874')
@test_util.disable_tfrt("tf.While not supported RTFB tensor. b/169374895")
def benchmarkScanDefun(self):
elems = math_ops.range(1600)
@ -1361,12 +1306,10 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
def benchmark_convert_numpy_float_uncached(self):
self._benchmark_convert_constant(np.array(42.0), cached=False)
@test_util.disable_tfrt("convert to tensor not supported")
def benchmark_convert_3x_list_to_tensor(self):
xs = [1, 2, 3]
self._run(lambda: ops.convert_to_tensor(xs), 1000)
@test_util.disable_tfrt("convert to tensor not supported")
def benchmark_convert_3x_array_to_tensor(self):
xs = np.array([1, 2, 3], dtype=np.int32)
self._run(lambda: ops.convert_to_tensor(xs), 1000)
@ -1375,7 +1318,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
xs = [[0] * 2] * 40
self._run(lambda: constant_op.constant(xs), 1000)
@test_util.disable_tfrt("convert to tensor not supported")
def benchmark_constant_40x2_array_to_tensor(self):
xs = np.array([[0] * 2] * 40, dtype=np.int32)
self._run(lambda: constant_op.constant(xs), 1000)
@ -1454,15 +1396,12 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
with context.device(CPU):
self._run(benchmark_fn, 10)
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
def benchmarkTenThousandResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(10000)
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
def benchmarkHundredResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(100)
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
def benchmarkTenResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(10)

@ -324,7 +324,7 @@ class TFETest(test_util.TensorFlowTestCase):
else:
ops.disable_tensor_equality()
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
@test_util.disable_tfrt('Get execution mode not supported in TFRT.')
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
@ -384,7 +384,6 @@ class TFETest(test_util.TensorFlowTestCase):
with ctx.device(device_spec):
self.assertEqual(device_name, ctx.device_name)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
@ -498,8 +497,6 @@ class TFETest(test_util.TensorFlowTestCase):
cpu.__exit__()
@test_util.run_gpu_only
@test_util.disable_tfrt('Device name incorrect (known issue for runtime '
'fallback).')
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
@ -546,7 +543,6 @@ class TFETest(test_util.TensorFlowTestCase):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
@ -563,7 +559,6 @@ class TFETest(test_util.TensorFlowTestCase):
context.context().executor.clear_error()
@test_util.run_gpu_only
@test_util.disable_tfrt('Device placement policy not configurable yet.')
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
@ -586,7 +581,7 @@ class TFETest(test_util.TensorFlowTestCase):
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
@test_util.disable_tfrt('PyFunc is not supported in TFRT.')
def testPyFunctionAsync(self):
def simple_fn(v):
@ -632,7 +627,6 @@ class TFETest(test_util.TensorFlowTestCase):
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
@ -1053,8 +1047,6 @@ class TFETest(test_util.TensorFlowTestCase):
for t in threads:
t.join()
@test_util.disable_tfrt('Does not support converting DT_RESOURCE '
'to op attr type yet.')
def testEmptyResourceReturned(self):
with ops.device('CPU:0'):
v = variables.Variable(1.)

@ -70,7 +70,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
@ -80,7 +79,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
with self.assertRaises(ValueError):
fn(1.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@ -100,7 +98,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(f(range(5)), 1.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testCorrectVariableCreation(self):
state = []
@ -114,7 +111,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionInitializer(self):
state = []
@ -127,7 +123,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionMultipleVariableInitializer(self):
state = []
@ -141,7 +136,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(fn(constant_op.constant(1.0)), [2.0, 5.0])
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionInitializationFunction(self):
state = []
@ -159,7 +153,7 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.disable_tfrt('Error in native condition op.')
def testVariableInitializerNotConstant(self):
state = []
@ -189,7 +183,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(self.evaluate(result), 6.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@ -209,7 +202,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(self.evaluate(result), 18.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@ -224,7 +216,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
r'transitively.* mul .* x'):
fn(constant_op.constant(3.0))
@test_util.disable_tfrt('Variable argument is not supported')
def testMethod(self):
class MyModel(object):
@ -253,7 +244,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
def_function.function(functools.partial(lambda x, y: x + y, 1.))(
constant_op.constant(2.)))
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_new_default(self):
def f(x=3, y=7):
return x + y
@ -262,7 +252,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertEqual(func().numpy(), 9)
self.assertEqual(func(y=8).numpy(), 11)
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_keywords(self):
def f(x, y):
return x + y
@ -271,7 +260,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))
self.assertAllEqual(func(), [0.0])
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_single_positional(self):
def f(x, y):
return x + y
@ -280,7 +268,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
functools.partial(f, constant_op.constant(1)))
self.assertAllEqual(func(5), 6)
@test_util.disable_tfrt('Partial is not supported')
def test_complicated_partial_with_defaults(self):
def identity(*args):
@ -328,7 +315,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
(tensor_spec.TensorSpec(
None, dtypes.float32, name='x'),))
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_in_graph_and_eager_modes
def test_variable_naming(self):
class HasVars(module.Module):
@ -399,7 +385,7 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
'defined in another function or code block'):
f(array_ops.zeros(shape=(8, 42, 3)))
@test_util.disable_tfrt('Control flow is not supported')
@test_util.disable_tfrt('b/169375363: error code support')
def testRuntimeErrorNotSticky(self):
@def_function.function
@ -504,7 +490,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
constant_op.constant(3.),
constant_op.constant(4.)))
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableCreatorScope(self):
created_variables = []
captured_variables = []
@ -524,7 +509,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
f()
self.assertEqual(created_variables, captured_variables)
@test_util.disable_tfrt('Variable argument is not supported')
def testVarAlreadyInitializedNoClobbering(self):
v_holder = []
@ -542,7 +526,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
add_var.get_concrete_function(constant_op.constant(2.))
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
@test_util.disable_tfrt('Variable argument is not supported')
def testSameVariableTwice(self):
v = variables.Variable(1.0)
@ -552,7 +535,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertAllEqual(add(v, v), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableUpdate(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(2.0)
@ -604,7 +586,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertIs(func_a, func_c)
@test_util.disable_tfrt('Nested function is not supported')
def testInitializationInNestedCall(self):
v_holder = []
@ -627,7 +608,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
v_holder[1].assign(11.)
self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_gpu_only
def testDeviceAnnotationRespected(self):
a = []
@ -647,7 +627,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
create_variable()
self.assertRegex(a[0].device, 'CPU')
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_gpu_only
def testDeviceAnnotationForInitializerRespected(self):
a = []
@ -685,8 +664,7 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
(True, False), # compile
(True, False), # override_function
))
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
'for function.')
def testClone(self, input_signature, autograph, autograph_options, implements,
relax_shapes, compile_, override_function):
original_py_function = lambda x: x
@ -724,7 +702,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertEqual(self.evaluate(cloned(x)),
self.evaluate(cloned_py_function(x)))
@test_util.disable_tfrt('Variable argument is not supported')
def testLiftPlaceholderInitializedVariable(self):
with ops.Graph().as_default():
var_list = []
@ -769,8 +746,7 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
(None, 'foo.bar'), # implements
(None, True, False), # relax_shapes
))
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
'for function.')
def test_pickle(self, input_signature, autograph, autograph_options,
implements, relax_shapes):
"""@function objects can be pickled and unpickled."""
@ -880,7 +856,6 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
@test_util.disable_tfrt('Nested function is not supported')
def test_frequent_retracing_warning_nested(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')

@ -26,7 +26,6 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
@ -39,8 +38,6 @@ from tensorflow.python.platform import test
class ArgumentNamingTests(test.TestCase, parameterized.TestCase):
"""Tests for recognizable export signatures from concrete functions."""
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
'for function.')
def testBasic(self, function_decorator):
@function_decorator
def fn(a, b):
@ -65,8 +62,6 @@ class ArgumentNamingTests(test.TestCase, parameterized.TestCase):
[3., 2.],
fn_op(a=constant_op.constant(1.), b=constant_op.constant(2.)))
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
'for function.')
def testVariable(self, function_decorator):
@function_decorator
def fn(a, b):
@ -85,8 +80,6 @@ class ArgumentNamingTests(test.TestCase, parameterized.TestCase):
[inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])
self.assertEqual(2, len(fn_op.graph.structured_outputs))
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
'for function.')
def testDictReturned(self, function_decorator):
@function_decorator
def fn(x, z=(1., 2.), y=3.):
@ -204,8 +197,6 @@ class ArgumentNamingTests(test.TestCase, parameterized.TestCase):
[b'y'],
[inp.op.get_attr('_user_specified_name') for inp in method_op2.inputs])
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
'for function.')
def testVariadic(self, function_decorator):
@function_decorator
def variadic_fn(x, *args, **kwargs):
@ -228,8 +219,6 @@ class ArgumentNamingTests(test.TestCase, parameterized.TestCase):
[b'x', b'y', b'args_1', b'second_variadic', b'z', b'cust'],
[inp.op.get_attr('_user_specified_name') for inp in variadic_op.inputs])
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors '
'for function.')
def testVariadicInputSignature(self, function_decorator):
@function_decorator(
input_signature=(

@ -91,7 +91,6 @@ class ResourceTest(test_util.TensorFlowTestCase):
resources.shared_resources()).eval()), 0)
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
class TensorAndShapeTest(test_util.TensorFlowTestCase):
def testShape(self):
@ -327,6 +326,7 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
self.assertAllEqual(z, [False, False, False, True])
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
def testBitwiseAndErrors(self):
x_int = constant_op.constant(0)
@ -368,6 +368,7 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
self.assertAllEqual(z, [False, True, True, True])
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
def testBitwiseOrErrors(self):
x_int = constant_op.constant(0)
@ -409,6 +410,7 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
self.assertAllEqual(z, [False, True, True, False])
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
def testBitwiseXorErrors(self):
x_int = constant_op.constant(0)
@ -448,6 +450,7 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
self.assertAllEqual(y, [True, False])
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
def testBitwiseNotErrors(self):
if context.executing_eagerly(): # :(
@ -459,7 +462,6 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
_ = ~constant_op.constant("a")
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(test_util.TensorFlowTestCase):
@ -504,7 +506,6 @@ class IndexedSlicesTest(test_util.TensorFlowTestCase):
self.assertAllEqual(x.indices, [0, 2])
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@ -650,7 +651,6 @@ def _apply_op(g, *args, **kwargs):
return op.outputs
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
class OperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
@ -1605,7 +1605,6 @@ class NameTest(test_util.TensorFlowTestCase):
g.create_op("FloatOutput", [], [dtypes.float32]).name)
@test_util.disable_tfrt("Device API are not supported yet. b/156188344")
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
@ -2186,7 +2185,6 @@ class CollectionTest(test_util.TensorFlowTestCase):
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
def test_defun(self):
with context.eager_mode():
@ -2293,7 +2291,6 @@ class ControlDependenciesTest(test_util.TensorFlowTestCase):
# e should be dominated by c.
self.assertEqual(e.op.control_inputs, [])
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
@ -2614,7 +2611,6 @@ class OpScopeTest(test_util.TensorFlowTestCase):
self._testGraphElements([a, variable, b])
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
class InitScopeTest(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
@ -2916,7 +2912,6 @@ class InitScopeTest(test_util.TensorFlowTestCase):
self.assertFalse(self.evaluate(f()))
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
@ -3405,7 +3400,6 @@ class ColocationGroupTest(test_util.TensorFlowTestCase):
self.assertEqual("/device:CPU:0", b.op.device)
f()
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
def testColocateWithVariableInFunction(self):
v = variables.Variable(1.)

@ -940,7 +940,6 @@ class CondV2Test(test.TestCase):
self.assertEqual(fn_output[0].op.type, "StatefulPartitionedCall")
self.assertAllEqual(self.evaluate(fn_output), [2.0, 4.0])
@test_util.disable_tfrt("GPU to host copy not implemented yet.")
def testGradientTapeOfCondWithResourceVariableInFunction(self):
with context.eager_mode():
v = variables.Variable(2.)

@ -57,8 +57,6 @@ from tensorflow.python.training import training_util
from tensorflow.python.util import compat
@test_util.disable_tfrt(
"Trying to assign variable with wrong dtype. b/156200342")
@test_util.with_control_flow_v2
class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@ -105,6 +103,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
@test_util.disable_tfrt("b/169375363: error code support")
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
@ -200,6 +199,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
@test_util.disable_tfrt("b/169375363: error code support")
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
@ -336,7 +336,6 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
g = gradients_impl.gradients(c, [b], unconnected_gradients="zero")[0]
self.assertAllEqual(g.shape.as_list(), [1, 2])
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_deprecated_v1
def testGradientCondInWhileLoop(self):
v = resource_variable_ops.ResourceVariable(initial_value=1.0)
@ -751,6 +750,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
@test_util.disable_tfrt("b/169375363: error code support")
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
@ -758,6 +758,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
@test_util.disable_tfrt("b/169375363: error code support")
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
@ -856,6 +857,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
variable_def=other_v_def)
self.assertIsNotNone(other_v_prime._cached_value)
@test_util.disable_tfrt("b/169375363: error code support")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session():
v_def = resource_variable_ops.ResourceVariable(
@ -977,6 +979,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testDestroyResource(self):
@ -1003,6 +1006,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
@test_util.disable_tfrt("b/169375363: error code support")
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
@ -1013,7 +1017,6 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.disable_xla("XLA doesn't allow changing shape at assignment, as "
"dictated by tf2xla/xla_resource.cc:SetTypeAndShape")
@test_util.run_in_graph_and_eager_modes
@ -1066,6 +1069,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
.batch_scatter_update(batch_slices2),
[[1, 3], [2, 3]])
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
def testInitValueWrongShape(self):
with self.assertRaisesWithPredicateMatch(
@ -1084,6 +1088,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_deprecated_v1
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
@ -1100,6 +1105,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
@ -1158,6 +1164,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
@ -1245,6 +1252,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
@test_util.disable_tfrt("b/169375363: error code support")
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
@ -1332,6 +1340,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
@ -1341,6 +1350,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
with self.assertRaisesRegex(Exception, r"shape.*2.*3"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.disable_tfrt("b/169375363: error code support")
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
@ -1387,7 +1397,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
# TODO(ebrevdo): Add run_in_graph_and_eager_modes once we can create
# EagerTensor constants with TensorProto inputs.
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.disable_tfrt("Does not support tf.Const in lowering.")
@test_util.run_in_graph_and_eager_modes()
def testVariantInitializer(self):
variant_shape_and_type_data = self.create_variant_shape_and_type_data()

@ -173,7 +173,6 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):
with self.assertRaisesRegex(ValueError, "shape.*and.*are incompatible"):
var.assign(np.zeros(shape=[2, 2]))
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = variables.Variable(np.zeros(shape=[1, 1]),
@ -183,8 +182,6 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.disable_tfrt("GetHostSize() is not expected to be called with "
"string type. b/156761465")
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(