From 11799bd3c0c880063766dbed5c02ac386a7ce277 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Wed, 27 Feb 2019 14:48:00 -0800
Subject: [PATCH] Makes a few more nightly v2 tests pass

PiperOrigin-RevId: 235996764
---
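(Reviewer note, not part of the commit: text placed here, between the "---"
marker and the diffstat, is ignored by `git am`, so it does not affect how
the patch applies.)

Both decorators touched by this patch live in
tensorflow/python/framework/test_util.py and, as I read that file, run the
decorated test body in graph mode rather than eager mode, which is what lets
these TF1-style tests keep passing on the nightly v2 (eager-by-default)
builds. The sketch below is illustrative only -- the class and test names are
invented -- but it shows the decorator pattern the patch applies throughout:

    from tensorflow.python.framework import test_util
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class ExampleGraphOnlyTest(test.TestCase):

      @test_util.deprecated_graph_mode_only
      def testSessionRunStillWorks(self):
        # Under the decorator this body builds a graph even in a v2 build,
        # so self.session() and sess.run() behave as they did in TF1.
        with self.session() as sess:
          x = math_ops.add(1, 2)
          self.assertEqual(sess.run(x), 3)


    if __name__ == '__main__':
      test.main()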
 tensorflow/python/client/timeline_test.py    |  3 ++-
 tensorflow/python/client/virtual_gpu_test.py |  2 ++
 .../python/grappler/layout_optimizer_test.py | 22 +++++++++++++++++++
 .../profiler/internal/run_metadata_test.py   |  2 ++
 4 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/tensorflow/python/client/timeline_test.py b/tensorflow/python/client/timeline_test.py
index 61c0da01b83..e7d60de6905 100644
--- a/tensorflow/python/client/timeline_test.py
+++ b/tensorflow/python/client/timeline_test.py
@@ -57,7 +57,7 @@ class TimelineTest(test.TestCase):
     ctf = tl.generate_chrome_trace_format()
     self._validateTrace(ctf)
 
-  @test_util.run_deprecated_v1
+  @test_util.deprecated_graph_mode_only
   def testTimelineCpu(self):
     run_options = config_pb2.RunOptions(
         trace_level=config_pb2.RunOptions.FULL_TRACE)
@@ -86,6 +86,7 @@ class TimelineTest(test.TestCase):
           show_memory=False, show_dataflow=False)
       self._validateTrace(ctf)
 
+  @test_util.deprecated_graph_mode_only
   def testTimelineGpu(self):
     if not test.is_gpu_available(cuda_only=True):
       return
diff --git a/tensorflow/python/client/virtual_gpu_test.py b/tensorflow/python/client/virtual_gpu_test.py
index e82ee0666c3..f6dee3bfd8e 100644
--- a/tensorflow/python/client/virtual_gpu_test.py
+++ b/tensorflow/python/client/virtual_gpu_test.py
@@ -198,6 +198,7 @@ class VirtualGpuTest(test_util.TensorFlowTestCase):
     super(VirtualGpuTest, self).__init__(method_name)
     self._util = VirtualGpuTestUtil()
 
+  @test_util.deprecated_graph_mode_only
   def testStatsContainAllDeviceNames(self):
     with self.session(config=self._util.config) as sess:
       # TODO(laigd): b/70811538. The is_gpu_available() call will invoke
@@ -231,6 +232,7 @@ class VirtualGpuTest(test_util.TensorFlowTestCase):
       self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:1' in devices)
       self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:2' in devices)
 
+  @test_util.deprecated_graph_mode_only
   def testLargeRandomGraph(self):
     with self.session(config=self._util.config) as sess:
       if not test.is_gpu_available(cuda_only=True):
diff --git a/tensorflow/python/grappler/layout_optimizer_test.py b/tensorflow/python/grappler/layout_optimizer_test.py
index 620689008c9..3b6d2ce26af 100644
--- a/tensorflow/python/grappler/layout_optimizer_test.py
+++ b/tensorflow/python/grappler/layout_optimizer_test.py
@@ -254,6 +254,7 @@ class LayoutOptimizerTest(test.TestCase):
     else:
       saver.save(sess, checkpoint_path)
 
+  @test_util.deprecated_graph_mode_only
   def testTwoConvLayers(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -354,6 +355,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testPadWithConstPaddings(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -388,6 +390,7 @@ class LayoutOptimizerTest(test.TestCase):
       self.assertIn('Pad-1-LayoutOptimizer', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReduceSum(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -417,6 +420,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testCast(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -447,6 +451,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testSqueeze(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -477,6 +482,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testSqueezeAlongHW(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -507,6 +513,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testSqueezeAlongNHW(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -537,6 +544,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReduceSumAlongHWC(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -566,6 +574,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReduceSumAlongNHW(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -595,6 +604,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReduceSumAlongC(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -624,6 +634,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReduceSumAlongCKeepDims(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -654,6 +665,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReduceSumAlongHKeepDims(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -683,6 +695,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReduceSumAlongWCKeepDims(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -712,6 +725,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testConcatWithControlDependency(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -828,6 +842,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testReverseWithConstDims(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -897,6 +912,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testSelectOp(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -960,6 +976,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testSelectOpScalarCondition(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -1178,6 +1195,7 @@ class LayoutOptimizerTest(test.TestCase):
       self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testStridedSliceWithMask1011(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -1213,6 +1231,7 @@ class LayoutOptimizerTest(test.TestCase):
       self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testStridedSliceWithMask0111(self):
     if test.is_gpu_available(cuda_only=True):
       random_seed.set_random_seed(0)
@@ -1355,6 +1374,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testLoop(self):
     if test.is_gpu_available(cuda_only=True):
       output = _loop()
@@ -1382,6 +1402,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testLoopWithBranch(self):
     if test.is_gpu_available(cuda_only=True):
       output = _loop_with_branch()
@@ -1406,6 +1427,7 @@ class LayoutOptimizerTest(test.TestCase):
       self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
       self.assertAllClose(output_val_ref, output_val, atol=1e-3)
 
+  @test_util.deprecated_graph_mode_only
   def testLoopWithVecAnd4D(self):
     if test.is_gpu_available(cuda_only=True):
       output = _loop_with_vec_and_4d()
diff --git a/tensorflow/python/profiler/internal/run_metadata_test.py b/tensorflow/python/profiler/internal/run_metadata_test.py
index 9e92a8f5f36..88392ff3f08 100644
--- a/tensorflow/python/profiler/internal/run_metadata_test.py
+++ b/tensorflow/python/profiler/internal/run_metadata_test.py
@@ -111,6 +111,7 @@ def _run_loop_model():
 
 class RunMetadataTest(test.TestCase):
 
+  @test_util.run_deprecated_v1
   def testGPU(self):
     if not test.is_gpu_available(cuda_only=True):
       return
@@ -126,6 +127,7 @@ class RunMetadataTest(test.TestCase):
     self.assertEqual(len(ret['gpu:0']), 1)
     self.assertEqual(len(ret['gpu:0/stream:all']), 1, '%s' % run_meta)
 
+  @test_util.run_deprecated_v1
   def testAllocationHistory(self):
     if not test.is_gpu_available(cuda_only=True):
       return
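(Reviewer note, after the final hunk and ignored when the patch is applied;
illustrative only.) run_metadata_test.py gains @test_util.run_deprecated_v1,
while timeline_test.py is switched from that decorator to
@test_util.deprecated_graph_mode_only in the first hunk; the commit message
does not say why one is preferred over the other, beyond making the nightly
v2 suite pass. A minimal, made-up example of the run_deprecated_v1 pattern,
including the CUDA guard these tests all use:

    from tensorflow.python.framework import test_util
    from tensorflow.python.platform import test


    class ExampleDeprecatedV1Test(test.TestCase):

      @test_util.run_deprecated_v1
      def testGpuOnlyCheck(self):
        # Return early on CPU-only machines, exactly as the tests above do,
        # so the test is a silent no-op rather than a failure.
        if not test.is_gpu_available(cuda_only=True):
          return
        self.assertTrue(test.is_built_with_cuda())


    if __name__ == '__main__':
      test.main()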