Move from deprecated self.test_session() to self.session() or self.cached_session().
Move to cached_session() if the session is created more than once per test; move to session() otherwise.
self.test_session() was deprecated in 9962eb5e84
because its name confuses readers of the test. Moving to session() slightly changes the semantics of the function:
* the session is no longer cached (a new session is created on each call).
* the session is closed when exiting the "with" scope.
PiperOrigin-RevId: 216868101
Parent: 1e8ee52c70
Commit: b94f5bb165
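To make the two migration targets concrete, here is a minimal sketch; the test class and values are illustrative, not taken from this change, and it assumes the usual in-repo imports (constant_op, test):

    class SessionMigrationSketch(test.TestCase):

      def testEntersSessionOnce(self):
        # session() replaces test_session() for the common case: the session
        # is not cached, and it is closed when the "with" block exits.
        with self.session() as sess:
          x = constant_op.constant(3)
          self.assertEqual(3, sess.run(x))

      def testEntersSessionRepeatedly(self):
        # cached_session() keeps test_session()'s old caching behaviour:
        # every call returns the same underlying session, which is what a
        # test wants when it opens a session more than once (e.g. in a loop).
        for _ in range(3):
          with self.cached_session() as sess:
            x = constant_op.constant(4)
            self.assertEqual(4, sess.run(x))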
@@ -36,7 +36,7 @@ class BuiltinFunctionsTest(converter_testing.TestCase):
       return len(a)

     with self.converted(test_fn, builtin_functions, {'len': len}) as result:
-      with self.test_session() as sess:
+      with self.session() as sess:
         p = array_ops.placeholder(dtype=dtypes.int32, shape=None)
         ops = result.test_fn(p)
         self.assertEqual(sess.run(ops, {p: [0, 0, 0]}), 3)
@@ -50,7 +50,7 @@ class BuiltinFunctionsTest(converter_testing.TestCase):
       return print(a)

     with self.converted(test_fn, builtin_functions, {'print': print}) as result:
-      with self.test_session() as sess:
+      with self.session() as sess:
         with self.assertPrints('a\n'):
           sess.run(result.test_fn('a'))

@@ -63,7 +63,7 @@ class BuiltinFunctionsTest(converter_testing.TestCase):
       return print(a, b, c)

     with self.converted(test_fn, builtin_functions, {'print': print}) as result:
-      with self.test_session() as sess:
+      with self.session() as sess:
         with self.assertPrints('a 1 [2, 3]\n'):
           sess.run(
               result.test_fn(
@@ -127,7 +127,7 @@ class PyBuiltinsTest(test.TestCase):
       self.assertAllEqual(sess.run(r), [2, 1])

   def test_range_tensor_empty_range(self):
-    with self.test_session() as sess:
+    with self.session() as sess:
       r = py_builtins.range_(constant_op.constant(-3))
       self.assertAllEqual(sess.run(r), [])
       r = py_builtins.range_(5, constant_op.constant(2))
@@ -62,7 +62,7 @@ class TimelineTest(test.TestCase):
         trace_level=config_pb2.RunOptions.FULL_TRACE)
     run_metadata = config_pb2.RunMetadata()

-    with self.test_session(use_gpu=False) as sess:
+    with self.session(use_gpu=False) as sess:
       const1 = constant_op.constant(1.0, name='const1')
       const2 = constant_op.constant(2.0, name='const2')
       result = math_ops.add(const1, const2) + const1 * const2
@@ -93,7 +93,7 @@ class TimelineTest(test.TestCase):
         trace_level=config_pb2.RunOptions.FULL_TRACE)
     run_metadata = config_pb2.RunMetadata()

-    with self.test_session(force_gpu=True) as sess:
+    with self.session(force_gpu=True) as sess:
       const1 = constant_op.constant(1.0, name='const1')
       const2 = constant_op.constant(2.0, name='const2')
       result = math_ops.add(const1, const2) + const1 * const2
@@ -199,7 +199,7 @@ class VirtualGpuTest(test_util.TensorFlowTestCase):
     self._util = VirtualGpuTestUtil()

   def testStatsContainAllDeviceNames(self):
-    with self.test_session(config=self._util.config) as sess:
+    with self.session(config=self._util.config) as sess:
       # TODO(laigd): b/70811538. The is_gpu_available() call will invoke
       # DeviceFactory::AddDevices() with a default SessionOption, which prevents
       # adding virtual devices in the future, thus must be called within a
@@ -232,7 +232,7 @@ class VirtualGpuTest(test_util.TensorFlowTestCase):
     self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:2' in devices)

   def testLargeRandomGraph(self):
-    with self.test_session(config=self._util.config) as sess:
+    with self.session(config=self._util.config) as sess:
       if not test.is_gpu_available(cuda_only=True):
         self.skipTest('No GPU available')
       for _ in range(5):
@@ -573,7 +573,7 @@ class IteratorTest(test.TestCase):
         f=_remote_fn,
         target=target_placeholder)

-    with self.test_session(config=worker_config) as sess:
+    with self.session(config=worker_config) as sess:
       elem = sess.run(
           remote_op,
           feed_dict={
@@ -58,7 +58,7 @@ class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
     self.debug_server.clear_data()

   def testSendingLargeGraphDefsWorks(self):
-    with self.test_session(
+    with self.session(
         use_gpu=True,
         config=session_debug_testlib.no_rewrite_session_config()) as sess:
       u = variables.VariableV1(42.0, name="original_u")
@@ -86,7 +86,7 @@ class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
     self.assertGreater(max_graph_def_size, 4 * 1024 * 1024)

   def testSendingLargeFloatTensorWorks(self):
-    with self.test_session(
+    with self.session(
         use_gpu=True,
         config=session_debug_testlib.no_rewrite_session_config()) as sess:
       u_init_val_array = list(xrange(1200 * 1024))
@@ -110,7 +110,7 @@ class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
         self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])

   def testSendingStringTensorWithAlmostTooLargeStringsWorks(self):
-    with self.test_session(
+    with self.session(
         use_gpu=True,
         config=session_debug_testlib.no_rewrite_session_config()) as sess:
       u_init_val = [
@@ -133,7 +133,7 @@ class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
         self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])

   def testSendingLargeStringTensorWorks(self):
-    with self.test_session(
+    with self.session(
         use_gpu=True,
         config=session_debug_testlib.no_rewrite_session_config()) as sess:
       strs_total_size_threshold = 5000 * 1024
@@ -162,7 +162,7 @@ class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
         self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])

   def testSendingEmptyFloatTensorWorks(self):
-    with self.test_session(
+    with self.session(
         use_gpu=True,
         config=session_debug_testlib.no_rewrite_session_config()) as sess:
       u_init = constant_op.constant(
@@ -184,7 +184,7 @@ class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
     self.assertEqual(0, len(u_init_value))

   def testSendingEmptyStringTensorWorks(self):
-    with self.test_session(
+    with self.session(
         use_gpu=True,
         config=session_debug_testlib.no_rewrite_session_config()) as sess:
       u_init = constant_op.constant(
@@ -455,7 +455,7 @@ class FunctionTest(test.TestCase):
       _ = MyFn(100.0).eval()

   def testWhileLoopCallsFunc(self):
-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:

       @function.Defun(dtypes.float32)
       def Times2(x):
@@ -1077,7 +1077,7 @@ class FunctionTest(test.TestCase):
       self.assertNotEqual("GuaranteeConst", fifth.consumers()[0].node_def.op)
       return output

-    with self.test_session(use_gpu=False) as sess:
+    with self.session(use_gpu=False) as sess:
       sess.run(var.initializer)
       _ = sess.run(CapturesGuaranteedConst(), {also_not_const: 1.0})

@@ -69,7 +69,7 @@ class TrainingGPUTest(test.TestCase):
       return simple_model

     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         losses_to_test = ['sparse_categorical_crossentropy',
                           'categorical_crossentropy', 'binary_crossentropy']

@@ -39,7 +39,7 @@ class Convolution1DTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.Conv1D,
             kwargs=test_kwargs,
@@ -74,7 +74,7 @@ class Convolution1DTest(test.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -93,7 +93,7 @@ class Convolution1DTest(test.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -111,7 +111,7 @@ class Conv2DTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.Conv2D,
             kwargs=test_kwargs,
@@ -149,7 +149,7 @@ class Conv2DTest(test.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -168,7 +168,7 @@ class Conv2DTest(test.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -186,7 +186,7 @@ class Conv2DTransposeTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.Conv2DTranspose,
             kwargs=test_kwargs,
@@ -217,7 +217,7 @@ class Conv2DTransposeTest(test.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv2DTranspose(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -236,7 +236,7 @@ class Conv2DTransposeTest(test.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv2DTranspose(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -280,7 +280,7 @@ class Conv3DTransposeTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.Conv3DTranspose,
             kwargs=test_kwargs,
@@ -311,7 +311,7 @@ class Conv3DTransposeTest(test.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv3DTranspose(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -330,7 +330,7 @@ class Conv3DTransposeTest(test.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv3DTranspose(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -347,7 +347,7 @@ class SeparableConv1DTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.SeparableConv1D,
             kwargs=test_kwargs,
@@ -383,7 +383,7 @@ class SeparableConv1DTest(test.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.SeparableConv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(len(layer.losses), 3)
@@ -404,7 +404,7 @@ class SeparableConv1DTest(test.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.SeparableConv1D(**kwargs)
       layer.build((None, 5, 2))
       self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
@@ -423,7 +423,7 @@ class SeparableConv2DTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.SeparableConv2D,
             kwargs=test_kwargs,
@@ -461,7 +461,7 @@ class SeparableConv2DTest(test.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.SeparableConv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(len(layer.losses), 3)
@@ -482,7 +482,7 @@ class SeparableConv2DTest(test.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.SeparableConv2D(**kwargs)
       layer.build((None, 5, 5, 2))
       self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
@@ -502,7 +502,7 @@ class Conv3DTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.Conv3D,
             kwargs=test_kwargs,
@@ -531,7 +531,7 @@ class Conv3DTest(test.TestCase):
         'activity_regularizer': 'l2',
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv3D(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(len(layer.losses), 2)
@@ -551,7 +551,7 @@ class Conv3DTest(test.TestCase):
         'bias_constraint': b_constraint,
         'strides': 1
     }
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       layer = keras.layers.Conv3D(**kwargs)
       layer.build((None, 5, 5, 5, 2))
       self.assertEqual(layer.kernel.constraint, k_constraint)
@@ -568,8 +568,8 @@ class ZeroPaddingTest(test.TestCase):
     shape = (num_samples, num_steps, input_dim)
     inputs = np.ones(shape)

+    with self.session(use_gpu=True):
       # basic test
-    with self.test_session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.ZeroPadding1D,
           kwargs={'padding': 2},
@@ -580,7 +580,6 @@ class ZeroPaddingTest(test.TestCase):
           input_shape=inputs.shape)

       # correctness test
-    with self.test_session(use_gpu=True):
       layer = keras.layers.ZeroPadding1D(padding=2)
       layer.build(shape)
       output = layer(keras.backend.variable(inputs))
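Note on the two ZeroPadding1D hunks above: since session() no longer caches, keeping two separate "with" blocks would have produced two independent sessions, so the change hoists a single session over both the basic and the correctness checks. A hedged sketch of the resulting shape (values simplified, relying on the test file's existing imports):

    inputs = np.ones((2, 4, 3))
    with self.session(use_gpu=True):
      # The basic test and the correctness test now share one session.
      layer = keras.layers.ZeroPadding1D(padding=2)
      layer.build(inputs.shape)
      output = layer(keras.backend.variable(inputs))
      np_output = keras.backend.eval(output)
      # Padding of 2 on both ends: the middle slice is the original input.
      self.assertAllEqual(np_output[:, 2:-2, :], inputs)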
@@ -623,7 +622,7 @@ class ZeroPaddingTest(test.TestCase):
     inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))

     # basic test
-    with self.test_session(use_gpu=True):
+    with self.cached_session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.ZeroPadding2D,
           kwargs={'padding': (2, 2),
@@ -636,7 +635,7 @@ class ZeroPaddingTest(test.TestCase):
           input_shape=inputs.shape)

     # correctness test
-    with self.test_session(use_gpu=True):
+    with self.cached_session(use_gpu=True):
       layer = keras.layers.ZeroPadding2D(
           padding=(2, 2), data_format=data_format)
       layer.build(inputs.shape)
@@ -702,15 +701,14 @@ class ZeroPaddingTest(test.TestCase):
     inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
                       input_len_dim3, stack_size))

+    with self.session(use_gpu=True):
       # basic test
-    with self.test_session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.ZeroPadding3D,
           kwargs={'padding': (2, 2, 2)},
           input_shape=inputs.shape)

       # correctness test
-    with self.test_session(use_gpu=True):
       layer = keras.layers.ZeroPadding3D(padding=(2, 2, 2))
       layer.build(inputs.shape)
       output = layer(keras.backend.variable(inputs))
@@ -735,7 +733,7 @@ class UpSamplingTest(test.TestCase):

   @tf_test_util.run_in_graph_and_eager_modes
   def test_upsampling_1d(self):
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))

@@ -755,7 +753,7 @@ class UpSamplingTest(test.TestCase):
                        stack_size)

     # basic test
-    with self.test_session(use_gpu=True):
+    with self.cached_session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.UpSampling2D,
           kwargs={'size': (2, 2),
@@ -842,7 +840,7 @@ class UpSamplingTest(test.TestCase):
                        input_len_dim3, stack_size)

     # basic test
-    with self.test_session(use_gpu=True):
+    with self.cached_session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.UpSampling3D,
           kwargs={'size': (2, 2, 2),
@@ -892,7 +890,7 @@ class CroppingTest(test.TestCase):
     input_len_dim1 = 2
     inputs = np.random.rand(num_samples, time_length, input_len_dim1)

-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.Cropping1D,
           kwargs={'cropping': (2, 2)},
@@ -919,15 +917,14 @@ class CroppingTest(test.TestCase):
     else:
       inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                               stack_size)
+    with self.cached_session(use_gpu=True):
       # basic test
-    with self.test_session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.Cropping2D,
           kwargs={'cropping': cropping,
                   'data_format': data_format},
           input_shape=inputs.shape)
       # correctness test
-    with self.test_session(use_gpu=True):
       layer = keras.layers.Cropping2D(
           cropping=cropping, data_format=data_format)
       layer.build(inputs.shape)
@@ -953,7 +950,7 @@ class CroppingTest(test.TestCase):
     inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                             stack_size)
     # another correctness test (no cropping)
-    with self.test_session(use_gpu=True):
+    with self.cached_session(use_gpu=True):
       cropping = ((0, 0), (0, 0))
       layer = keras.layers.Cropping2D(
           cropping=cropping, data_format=data_format)
@@ -990,7 +987,7 @@ class CroppingTest(test.TestCase):
     inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                             input_len_dim3, stack_size)
     # basic test
-    with self.test_session(use_gpu=True):
+    with self.cached_session(use_gpu=True):
       testing_utils.layer_test(
           keras.layers.Cropping3D,
           kwargs={'cropping': cropping,
@@ -999,7 +996,7 @@ class CroppingTest(test.TestCase):

     if len(croppings) == 3 and len(croppings[0]) == 2:
       # correctness test
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         layer = keras.layers.Cropping3D(
             cropping=cropping, data_format=data_format)
         layer.build(inputs.shape)
@@ -1039,7 +1036,7 @@ class DepthwiseConv2DTest(test.TestCase):
     test_kwargs = copy.copy(kwargs)
     for value in values:
       test_kwargs[arg] = value
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         testing_utils.layer_test(
             keras.layers.DepthwiseConv2D,
             kwargs=test_kwargs,
@@ -36,7 +36,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_cudnn_rnn_basics(self):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         timesteps = 6
         units = 2
@@ -64,7 +64,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_trainability(self):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         units = 2
         for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
@@ -88,7 +88,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
   )
   def test_regularizer(self, layer_class):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         timesteps = 6
         units = 2
@@ -120,7 +120,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
   )
   def test_return_state(self, layer_class):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         timesteps = 6
         units = 2
@@ -171,7 +171,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
   )
   def test_specify_initial_state_keras_tensor(self, layer_class):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         timesteps = 6
         units = 2
@@ -203,7 +203,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
   )
   def test_statefulness(self, layer_class):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         timesteps = 6
         units = 2
@@ -255,7 +255,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
                                            bidirectional, implementation,
                                            model_nest_level, model_type):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         timesteps = 6
         input_shape = (timesteps, input_size)
@@ -335,7 +335,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
     # Similar test as test_load_weights_between_noncudnn_rnn() but has different
     # rank of input due to usage of TimeDistributed. Issue: #10356.
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_size = 10
         steps = 6
         timesteps = 6
@@ -377,7 +377,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_cudnnrnn_bidirectional(self):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         rnn = keras.layers.CuDNNGRU
         samples = 2
         dim = 2
@@ -441,7 +441,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
     Should fail fast with an exception.
     """
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         input_shape = (3, 5)

         def gru(cudnn=False, **kwargs):
@@ -115,7 +115,7 @@ class NormalizationLayersTest(test.TestCase):

   def test_batchnorm_convnet(self):
     if test.is_gpu_available(cuda_only=True):
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         model = keras.models.Sequential()
         norm = keras.layers.BatchNormalization(
             axis=1, input_shape=(3, 4, 4), momentum=0.8)
@@ -879,7 +879,7 @@ class SoftplusTest(test.TestCase):
   def _testSoftplus(self, np_features, use_gpu=False):
     np_features = np.asarray(np_features)
     np_softplus = self._npSoftplus(np_features)
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.session(use_gpu=use_gpu) as sess:
       softplus = nn_ops.softplus(np_features)
       softplus_inverse = du.softplus_inverse(softplus)
       [tf_softplus, tf_softplus_inverse] = sess.run([
@@ -43,7 +43,7 @@ class RandomGammaTest(test.TestCase):
   def _Sampler(self, num, alpha, beta, dtype, use_gpu, seed=None):

     def func():
-      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+      with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
         rng = random_ops.random_gamma(
             [num], alpha, beta=beta, dtype=dtype, seed=seed)
         ret = np.empty([10, num])
@@ -216,7 +216,7 @@ class RandomGammaTest(test.TestCase):
     """
     for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
       for use_gpu in [False, True]:
-        with self.test_session(use_gpu=use_gpu):
+        with self.cached_session(use_gpu=use_gpu):
          rnd1 = random_ops.random_gamma([24], 2.0, dtype=dtype)
          rnd2 = random_ops.random_gamma([24], 2.0, dtype=dtype)
          diff = rnd2 - rnd1
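The hunk above shows the other half of the rule: test_session() calls sitting inside loops become cached_session(), because an uncached session() would open and close a fresh session on every iteration. A minimal sketch of the pattern (the dtype list and op mirror the test but are illustrative):

    for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
      with self.cached_session(use_gpu=True):
        # Every iteration re-enters the same cached session.
        rnd = random_ops.random_gamma([24], 2.0, dtype=dtype)
        self.assertEqual(24, rnd.get_shape()[0])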
@@ -44,7 +44,7 @@ class RandomOpTestCommon(test.TestCase):
                     use_gpu,
                     op_seed=None,
                     graph_seed=None):
-    with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+    with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
       if graph_seed is not None:
         random_seed.set_random_seed(graph_seed)
       x = rng_func([num], min_or_mean, max_or_stddev, dtype=dtype, seed=op_seed)
@@ -64,7 +64,7 @@ class RandomNormalTest(RandomOpTestCommon):
   def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):

     def func():
-      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+      with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
         rng = random_ops.random_normal(
             [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
         ret = np.empty([10, num])
@@ -112,7 +112,7 @@ class RandomNormalTest(RandomOpTestCommon):

   def testNoCSE(self):
     for use_gpu in [False, True]:
-      with self.test_session(use_gpu=use_gpu):
+      with self.session(use_gpu=use_gpu):
         shape = [2, 3, 4]
         rnd1 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
         rnd2 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
@@ -155,7 +155,7 @@ class TruncatedNormalTest(test.TestCase):
   def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):

     def func():
-      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+      with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
         rng = random_ops.truncated_normal(
             [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
         ret = np.empty([10, num])
@@ -220,14 +220,14 @@ class TruncatedNormalTest(test.TestCase):
     self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)

   def testLargeShape(self):
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       v = variables.Variable(
           array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1]))
       n = random_ops.truncated_normal(v.shape)
       self.assertEqual([8589934592, 1], n.shape.as_list())

   def testNoCSE(self):
-    with self.test_session(use_gpu=True):
+    with self.session(use_gpu=True):
       shape = [2, 3, 4]
       rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
       rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
@@ -251,7 +251,7 @@ class RandomUniformTest(RandomOpTestCommon):
   def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):

     def func():
-      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+      with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
         rng = random_ops.random_uniform(
             [num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
         ret = np.empty([10, num])
@@ -353,7 +353,7 @@ class RandomUniformTest(RandomOpTestCommon):
   def testNoCSE(self):
     shape = [2, 3, 4]
     for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
-      with self.test_session(use_gpu=True):
+      with self.session(use_gpu=True):
         rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
         rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
         diff = (rnd2 - rnd1).eval()
@@ -39,7 +39,7 @@ class RandomPoissonTest(test.TestCase):
   def _Sampler(self, num, lam, dtype, use_gpu, seed=None):

     def func():
-      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
+      with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
         rng = random_ops.random_poisson(lam, [num], dtype=dtype, seed=seed)
         ret = np.empty([10, num])
         for i in xrange(10):
@@ -128,7 +128,7 @@ class RandomPoissonTest(test.TestCase):
     merged.
     """
     for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         rnd1 = random_ops.random_poisson(2.0, [24], dtype=dtype)
         rnd2 = random_ops.random_poisson(2.0, [24], dtype=dtype)
         diff = rnd2 - rnd1
@@ -402,7 +402,7 @@ class BNTest(test.TestCase):
     training = array_ops.placeholder(dtype='bool')
     outputs = bn.apply(inputs, training=training)

-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       # Test training with placeholder learning phase.
       sess.run(variables.global_variables_initializer())
       np_gamma, np_beta = sess.run([bn.gamma, bn.beta])
@@ -884,7 +884,7 @@ class BNTest(test.TestCase):
     moving_variance = 1.
     renorm_mean = renorm_stddev = 0.
     renorm_weight = 0.
-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -936,7 +936,7 @@ class BNTest(test.TestCase):

     moving_mean = 0.
     moving_variance = 1.
-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -989,7 +989,7 @@ class BNTest(test.TestCase):
     moving_variance = 1.
     renorm_mean = renorm_stddev = 0.
     renorm_weight = 0.
-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1039,7 +1039,7 @@ class BNTest(test.TestCase):
     self.assertListEqual(
         out1.shape.as_list(), out2.shape.as_list())

-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())

       x = np.random.random(shape)
@@ -1061,7 +1061,7 @@ class BNTest(test.TestCase):
     out = normalization_layers.batch_normalization(
         inp, virtual_batch_size=2)

-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())

       x = np.random.random(np_shape)
@@ -1092,7 +1092,7 @@ class BNTest(test.TestCase):
                          shape[0] // virtual_batch_size,
                          shape[1]])

-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1145,7 +1145,7 @@ class BNTest(test.TestCase):
     ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
                    shape[1:])

-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1199,7 +1199,7 @@ class BNTest(test.TestCase):
     ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
                    shape[1:])

-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -1349,7 +1349,7 @@ class BNTest(test.TestCase):
     ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] +
                    shape[1:])

-    with self.test_session(use_gpu=True) as sess:
+    with self.session(use_gpu=True) as sess:
       sess.run(variables.global_variables_initializer())
       for _ in range(5):
         x = np.random.random(shape)
@@ -173,7 +173,7 @@ class OptimizeForInferenceTest(test.TestCase):

   def testFoldFusedBatchNorms(self):
     for data_format, use_gpu in [("NHWC", False), ("NCHW", True)]:
-      with self.test_session(use_gpu=use_gpu) as sess:
+      with self.cached_session(use_gpu=use_gpu) as sess:
         inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
         input_op = constant_op.constant(
             np.array(inputs),
@@ -212,7 +212,6 @@ class OptimizeForInferenceTest(test.TestCase):
         optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
             original_graph_def)

-      with self.test_session(use_gpu=use_gpu) as sess:
         _ = importer.import_graph_def(
             optimized_graph_def, input_map={}, name="optimized")
         optimized_result = sess.run(["optimized/output:0"])
@@ -109,7 +109,7 @@ class AdamOptimizerTest(test.TestCase):

   def testSparseDevicePlacement(self):
     for index_dtype in [dtypes.int32, dtypes.int64]:
-      with self.test_session(force_gpu=test.is_gpu_available()):
+      with self.cached_session(force_gpu=test.is_gpu_available()):
         # If a GPU is available, tests that all optimizer ops can be placed on
         # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
@@ -534,7 +534,7 @@ class CheckpointingTests(test.TestCase):
     num_training_steps = 10
     checkpoint_directory = self.get_temp_dir()
     for training_continuation in range(3):
-      with ops.Graph().as_default(), self.test_session(
+      with ops.Graph().as_default(), self.session(
          graph=ops.get_default_graph()), test_util.device(use_gpu=True):
        model = MyModel()
        optimizer = adam.AdamOptimizer(0.001)
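In the checkpointing tests, test_session() becomes session() even inside a loop, because each continuation deliberately builds a fresh ops.Graph(); a cached session would be bound to the wrong graph. A sketch of the idiom (MyModel comes from the surrounding test file and is assumed here):

    for training_continuation in range(3):
      with ops.Graph().as_default(), self.session(
          graph=ops.get_default_graph()), test_util.device(use_gpu=True):
        model = MyModel()  # new graph, new session, per continuation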
@ -621,7 +621,7 @@ class CheckpointingTests(test.TestCase):
|
|||||||
checkpoint_directory = self.get_temp_dir()
|
checkpoint_directory = self.get_temp_dir()
|
||||||
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
|
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
|
||||||
for training_continuation in range(3):
|
for training_continuation in range(3):
|
||||||
with ops.Graph().as_default(), self.test_session(
|
with ops.Graph().as_default(), self.session(
|
||||||
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
|
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
|
||||||
model = MyModel()
|
model = MyModel()
|
||||||
# Don't actually train so we can test variable values
|
# Don't actually train so we can test variable values
|
||||||
@ -1018,7 +1018,7 @@ class CheckpointingTests(test.TestCase):
|
|||||||
checkpoint_directory = self.get_temp_dir()
|
checkpoint_directory = self.get_temp_dir()
|
||||||
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
|
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
|
||||||
save_graph = ops.Graph()
|
save_graph = ops.Graph()
|
||||||
with save_graph.as_default(), self.test_session(save_graph):
|
with save_graph.as_default(), self.session(save_graph):
|
||||||
first = tracking.Checkpointable()
|
first = tracking.Checkpointable()
|
||||||
first.var1 = variable_scope.get_variable(
|
first.var1 = variable_scope.get_variable(
|
||||||
name="outside_var", initializer=0.)
|
name="outside_var", initializer=0.)
|
||||||
@@ -1029,7 +1029,7 @@ class CheckpointingTests(test.TestCase):
     save_path = checkpointable_utils.CheckpointableSaver(first).save(
         checkpoint_prefix)
     restore_graph = ops.Graph()
-    with restore_graph.as_default(), self.test_session(restore_graph):
+    with restore_graph.as_default(), self.session(restore_graph):
       second = tracking.Checkpointable()
       second.var2 = variable_scope.get_variable(
           name="blah", initializer=0.)
@@ -1248,7 +1248,7 @@ class CheckpointingTests(test.TestCase):
     checkpoint_directory = self.get_temp_dir()
     checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
     optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
-    with ops.Graph().as_default(), self.test_session(
+    with ops.Graph().as_default(), self.session(
         graph=ops.get_default_graph()), test_util.device(use_gpu=True):
       model = MyModel()
       optimizer = adam.AdamOptimizer(0.001)
@@ -1276,7 +1276,7 @@ class CheckpointingTests(test.TestCase):
       optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
 
     # Restore into a graph with the optimizer
-    with ops.Graph().as_default(), self.test_session(
+    with ops.Graph().as_default(), self.session(
         graph=ops.get_default_graph()), test_util.device(use_gpu=True):
       model = MyModel()
       optimizer = adam.AdamOptimizer(0.001)
@@ -1299,7 +1299,7 @@ class CheckpointingTests(test.TestCase):
       status.assert_consumed()
 
     # Make sure initialization doesn't clobber later restores
-    with ops.Graph().as_default(), self.test_session(
+    with ops.Graph().as_default(), self.session(
         graph=ops.get_default_graph()), test_util.device(use_gpu=True):
       model = MyModel()
       optimizer = adam.AdamOptimizer(0.001, beta1=1.0)
@@ -1483,7 +1483,7 @@ class CheckpointCompatibilityTests(test.TestCase):
     checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
     with context.graph_mode():
       save_graph = ops.Graph()
-      with save_graph.as_default(), self.test_session(
+      with save_graph.as_default(), self.session(
           graph=save_graph) as session:
         root = self._initialized_model()
         name_saver = saver_lib.Saver()
@@ -1539,7 +1539,7 @@ class CheckpointCompatibilityTests(test.TestCase):
     checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
     with context.graph_mode():
       save_graph = ops.Graph()
-      with save_graph.as_default(), self.test_session(
+      with save_graph.as_default(), self.session(
           graph=save_graph) as session:
         root = self._initialized_model()
         save_path = root.save(session=session, file_prefix=checkpoint_prefix)
@@ -1557,7 +1557,7 @@ class CheckpointCompatibilityTests(test.TestCase):
       save_path = root.save(file_prefix=checkpoint_prefix)
     with context.graph_mode():
       save_graph = ops.Graph()
-      with save_graph.as_default(), self.test_session(
+      with save_graph.as_default(), self.session(
          graph=save_graph):
        root = self._initialized_model()
        self._set_sentinels(root)
@@ -92,7 +92,7 @@ class RMSPropOptimizerTest(test.TestCase):
     # TODO(yori): Use ParameterizedTest when available
     for (dtype, learning_rate, decay, momentum,
          epsilon, centered, use_resource) in _TESTPARAMS:
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         # Initialize variables for numpy implementation.
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
         grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
@@ -211,7 +211,7 @@ class RMSPropOptimizerTest(test.TestCase):
     # TODO(yori): Use ParameterizedTest when available
     for (dtype, learning_rate, decay,
          momentum, epsilon, centered, _) in _TESTPARAMS:
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         # Initialize variables for numpy implementation.
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
         grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
@@ -285,7 +285,7 @@ class RMSPropOptimizerTest(test.TestCase):
 
   def testWithoutMomentum(self):
     for dtype in [dtypes.half, dtypes.float32]:
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([3.0, 4.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
@@ -351,7 +351,7 @@ class RMSPropOptimizerTest(test.TestCase):
 
   def testWithMomentum(self):
     for dtype in [dtypes.half, dtypes.float32]:
-      with self.test_session(use_gpu=True):
+      with self.cached_session(use_gpu=True):
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([3.0, 4.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
@@ -831,7 +831,7 @@ class SaverTest(test.TestCase):
       orig_vals = sess.run(orig_vars)
 
     restore_graph = ops_lib.Graph()
-    with restore_graph.as_default(), self.test_session(
+    with restore_graph.as_default(), self.session(
         graph=restore_graph) as sess:
       restored_vars = _model()
       save = saver_module.Saver(max_to_keep=1)
@@ -3015,7 +3015,7 @@ class CheckpointableCompatibilityTests(test.TestCase):
           checkpoint_directory, "second"))
 
     restore_graph = ops_lib.Graph()
-    with restore_graph.as_default(), self.test_session(
+    with restore_graph.as_default(), self.session(
         graph=restore_graph) as sess:
       root = self._initialized_model()
       self._set_sentinels(root)
@@ -50,7 +50,7 @@ class TrainingOpsTest(TensorFlowTestCase):
 
   def _testTypes(self, x, alpha, delta, use_gpu=None):
     self.setUp()
-    with self.test_session(use_gpu=use_gpu):
+    with self.session(use_gpu=use_gpu):
       var = variables.VariableV1(x)
       variables.global_variables_initializer().run()
       self.assertAllCloseAccordingToType(x, var.eval())
@@ -69,7 +69,7 @@ class TrainingOpsTest(TensorFlowTestCase):
 
   def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
     self.setUp()
-    with self.test_session(use_gpu=use_gpu):
+    with self.session(use_gpu=use_gpu):
       var = variables.VariableV1(x)
       accum = variables.VariableV1(y)
       variables.global_variables_initializer().run()
@@ -93,7 +93,7 @@ class TrainingOpsTest(TensorFlowTestCase):
                      l2=0.0,
                      lr_power=-0.5):
     self.setUp()
-    with self.test_session(use_gpu=use_gpu):
+    with self.session(use_gpu=use_gpu):
       var = variables.VariableV1(x)
       accum = variables.VariableV1(y)
       linear = variables.VariableV1(z)
@@ -147,7 +147,7 @@ class TrainingOpsTest(TensorFlowTestCase):
 
   def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):
     self.setUp()
-    with self.test_session(use_gpu=False):
+    with self.session(use_gpu=False):
       var = variables.VariableV1(x)
       accum = variables.VariableV1(y)
       variables.global_variables_initializer().run()
@@ -177,7 +177,7 @@ class TrainingOpsTest(TensorFlowTestCase):
                      l2=0.0,
                      lr_power=-0.5):
     self.setUp()
-    with self.test_session(use_gpu=False):
+    with self.session(use_gpu=False):
       var = variables.VariableV1(x)
       accum = variables.VariableV1(y)
       linear = variables.VariableV1(z)
@@ -256,7 +256,7 @@ class TrainingOpsTest(TensorFlowTestCase):
 
   def _testTypesForAdam(self, var, m, v, grad, use_gpu):
    self.setUp()
-    with self.test_session(use_gpu=use_gpu):
+    with self.session(use_gpu=use_gpu):
      var_t = variables.VariableV1(var)
      m_t = variables.VariableV1(m)
      v_t = variables.VariableV1(v)
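
Every hunk above is an instance of the same two replacements. As a minimal sketch of the pattern (a hypothetical test class with invented names; session(), cached_session(), evaluate() and tf.test.main() are the real tf.test APIs), session() suits a test that enters one session, while cached_session() suits a body that re-enters the session, e.g. once per dtype in a loop:

# Hypothetical illustration of the migration pattern; not part of this commit.
import tensorflow as tf

class MigrationSketchTest(tf.test.TestCase):

  def testSingleUse(self):
    # session(): a fresh session, closed when the "with" block exits.
    with self.session() as sess:
      total = tf.reduce_sum(tf.constant([1.0, 2.0]))
      self.assertEqual(sess.run(total), 3.0)

  def testReenteredInLoop(self):
    # cached_session(): the same underlying session is returned on each
    # iteration, so the loop does not rebuild a session every pass.
    for dtype in [tf.float32, tf.float64]:
      with self.cached_session():
        x = tf.constant([1.0, 2.0], dtype=dtype)
        self.assertAllEqual(self.evaluate(x * 2.0), [2.0, 4.0])

if __name__ == "__main__":
  tf.test.main()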