Remove unused "sess" variables from tests; NFC

PiperOrigin-RevId: 357594586
Change-Id: Ia8c8274d12181aa59f2c46f8d7c8c3599aaffe74
Sanjoy Das 2021-02-15 11:10:09 -08:00 committed by TensorFlower Gardener
parent 1859e99359
commit 0d2c3d19ac
26 changed files with 88 additions and 88 deletions
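
The cleanup is purely mechanical: each affected test opened a session with "with self.session() as sess:" (or cached_session / test_session) but never read "sess", because the test body goes through self.evaluate(), which runs against the default session installed by the context manager. Below is a minimal before/after sketch of the pattern; the ExampleTest class is hypothetical and not taken from this commit.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class ExampleTest(test.TestCase):
  """Hypothetical illustration of the pattern; not part of this commit."""

  @test_util.run_deprecated_v1
  def testBefore(self):
    # "sess" is bound but never used: self.evaluate() already runs against
    # the default session set up by the context manager.
    with self.cached_session() as sess:  # pylint: disable=unused-variable
      x = constant_op.constant([1.0, 2.0])
      self.assertAllEqual(self.evaluate(x), [1.0, 2.0])

  @test_util.run_deprecated_v1
  def testAfter(self):
    # Identical behavior, without the dead variable.
    with self.cached_session():
      x = constant_op.constant([1.0, 2.0])
      self.assertAllEqual(self.evaluate(x), [1.0, 2.0])


if __name__ == "__main__":
  test.main()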


@ -462,7 +462,7 @@ class FunctionTest(test.TestCase):
@test_util.run_deprecated_v1
def testWhileLoopCallsFunc(self):
-with self.session() as sess:
+with self.session():
@function.Defun(dtypes.float32)
def Times2(x):


@ -58,7 +58,7 @@ class AddNTest(test.TestCase):
def testAddN(self):
np.random.seed(12345)
-with self.session() as sess:
+with self.session():
for dtype in self._supported_types():
for count in range(1, self._MAX_N + 1):
data = [self._buildData((2, 2), dtype) for _ in range(count)]


@ -211,7 +211,7 @@ class StatefulScatterNdTest(test.TestCase):
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
-with self.session() as sess:
+with self.session():
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
@ -225,7 +225,7 @@ class StatefulScatterNdTest(test.TestCase):
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
-with self.session() as sess:
+with self.session():
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)


@ -1001,7 +1001,7 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testInt64Indices(self):
-with self.session() as sess:
+with self.session():
a = math_ops.range(3, dtype=dtypes.float32)
index = constant_op.constant(1, dtype=dtypes.int64)
b = 2. * a[index]


@ -40,13 +40,13 @@ from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
-with self.cached_session() as sess:
+with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
-with self.cached_session(use_gpu=False) as sess:
+with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
@ -143,7 +143,7 @@ class MathBuiltinUnaryTest(test.TestCase):
np_out = np.floor_divide(x, y + 0.1)
-with self.session() as sess:
+with self.session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny


@ -36,7 +36,7 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
-with self.session() as sess:
+with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
def testEmptyFloat(self):
@ -51,7 +51,7 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
-with self.session() as sess:
+with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
def test2DInput(self):
@ -59,14 +59,14 @@ class BucketizationOpTest(test.TestCase):
constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
-with self.session() as sess:
+with self.session():
self.assertAllEqual(expected_out, self.evaluate(op))
@test_util.run_deprecated_v1
def testInvalidBoundariesOrder(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
-with self.session() as sess:
+with self.session():
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
self.evaluate(op)


@ -176,7 +176,7 @@ class ClipTest(test.TestCase):
def _testClipIndexedSlicesByValue(self, values, indices, shape,
clip_value_min, clip_value_max, expected):
-with self.session() as sess:
+with self.session():
values = constant_op.constant(values)
indices = constant_op.constant(indices)
shape = constant_op.constant(shape)


@ -2787,7 +2787,7 @@ class SeparableConv2DTest(test.TestCase):
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
-with self.cached_session() as sess:
+with self.cached_session():
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)


@ -37,7 +37,7 @@ class DecodeImageOpTest(test.TestCase):
def testBmp(self):
# Read a real bmp and verify shape
path = os.path.join(prefix_path, "bmp", "testdata", "lena.bmp")
-with self.session() as sess:
+with self.session():
bmp0 = io_ops.read_file(path)
image0 = image_ops.decode_image(bmp0)
image1 = image_ops.decode_bmp(bmp0)
@ -53,7 +53,7 @@ class DecodeImageOpTest(test.TestCase):
stride = 5
shape = (12, height, width, 3)
-with self.session() as sess:
+with self.session():
gif0 = io_ops.read_file(path)
image0 = image_ops.decode_image(gif0)
image1 = image_ops.decode_gif(gif0)
@ -82,7 +82,7 @@ class DecodeImageOpTest(test.TestCase):
def testJpeg(self):
# Read a real jpeg and verify shape
path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
-with self.session() as sess:
+with self.session():
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_image(jpeg0)
image1 = image_ops.decode_jpeg(jpeg0)


@ -154,7 +154,7 @@ class DeterminantOpTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testConcurrentExecutesWithoutError(self):
-with self.session() as sess:
+with self.session():
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
det1 = linalg_ops.matrix_determinant(matrix1)


@ -39,7 +39,7 @@ class DynamicPartitionTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimpleOneDimensional(self):
-with self.session() as sess:
+with self.session():
data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
@ -60,7 +60,7 @@ class DynamicPartitionTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimpleTwoDimensional(self):
-with self.session() as sess:
+with self.session():
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14], [15, 16, 17]],
dtype=dtypes.float32)
@ -87,7 +87,7 @@ class DynamicPartitionTest(test.TestCase):
indices_list = [x % 2 for x in range(num)]
part1 = [x for x in range(num) if x % 2 == 0]
part2 = [x for x in range(num) if x % 2 == 1]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -109,7 +109,7 @@ class DynamicPartitionTest(test.TestCase):
parts = [[] for _ in range(num_partitions)]
for i in range(rows):
parts[(i ** 2) % num_partitions].append(data_list[i])
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -125,7 +125,7 @@ class DynamicPartitionTest(test.TestCase):
def testSimpleComplex(self):
data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
indices_list = [1, 0, 1, 0]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.complex64)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -138,7 +138,7 @@ class DynamicPartitionTest(test.TestCase):
def testScalarPartitions(self):
data_list = [10, 13, 12, 11]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float64)
indices = 3
partitions = data_flow_ops.dynamic_partition(
@ -184,7 +184,7 @@ class DynamicPartitionTest(test.TestCase):
def testEmptyParts(self):
data_list = [1, 2, 3, 4]
indices_list = [1, 3, 1, 3]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -200,7 +200,7 @@ class DynamicPartitionTest(test.TestCase):
def testEmptyDataTwoDimensional(self):
data_list = [[], []]
indices_list = [0, 1]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -216,7 +216,7 @@ class DynamicPartitionTest(test.TestCase):
def testEmptyPartitions(self):
data_list = []
indices_list = []
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -237,7 +237,7 @@ class DynamicPartitionTest(test.TestCase):
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [6, 5, 4, 3, 1, 0]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -258,7 +258,7 @@ class DynamicPartitionTest(test.TestCase):
data_list = [1, 2, 3, 4, 5, 6]
indices_list = [10, 11, 2, 12, 0, 1000]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -282,7 +282,7 @@ class DynamicPartitionTest(test.TestCase):
data_list = [1.1, 2.1, 3.1, 4.1, 5.1, 6.1]
indices_list = [90, 70, 60, 100, 110, 40]
-with self.session() as sess:
+with self.session():
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
@ -295,7 +295,7 @@ class DynamicPartitionTest(test.TestCase):
@test_util.run_deprecated_v1
def testErrorIndexOutOfRange(self):
-with self.cached_session() as sess:
+with self.cached_session():
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14]])
indices = constant_op.constant([0, 2, 99, 2, 2])
@ -346,7 +346,7 @@ class DynamicPartitionTest(test.TestCase):
inds += [13]*194 + [14]*194 + [15]*192
self.assertEqual(len(inds), x.shape[0])
partitioned = data_flow_ops.dynamic_partition(x, inds, 16)
-with self.cached_session() as sess:
+with self.cached_session():
res = self.evaluate(partitioned)
self.assertEqual(res[-1].shape[0], 192)


@ -55,7 +55,7 @@ class EigTest(test.TestCase):
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
all_ops = []
-with self.session() as sess:
+with self.session():
for compute_v_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)


@ -340,7 +340,7 @@ class FunctionalOpsTest(test.TestCase):
lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
-with self.test_session() as sess:
+with self.test_session():
self.evaluate(variables.global_variables_initializer())
self.evaluate(grad)


@ -910,7 +910,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
-with self.session() as sess:
+with self.session():
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
@ -1063,7 +1063,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
-with self.session() as sess:
+with self.session():
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
@ -1167,7 +1167,7 @@ class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
-with self.session() as sess:
+with self.session():
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)
@ -1302,7 +1302,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
outputs_2norm = linalg_ops.norm(outputs)
ratio = outputs_2norm / inputs_2norm
my_ops = variables.global_variables_initializer()
-with self.cached_session() as sess:
+with self.cached_session():
self.evaluate(my_ops)
# Check the shape of the outputs
t = self.evaluate(outputs)


@ -157,7 +157,7 @@ class ExponentialOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
-with self.session() as sess:
+with self.session():
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
expm1 = linalg_impl.matrix_exponential(matrix1)


@ -34,7 +34,7 @@ class MultinomialTest(test.TestCase):
def testLargeDynamicRange(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
-with self.test_session() as sess:
+with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[-30, 0]], dtype=dtypes.float32),
num_samples=1000000,
@ -52,7 +52,7 @@ class MultinomialTest(test.TestCase):
def testLargeDynamicRange2(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
-with self.test_session() as sess:
+with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[0, -30]], dtype=dtypes.float32),
num_samples=1000000,
@ -72,7 +72,7 @@ class MultinomialTest(test.TestCase):
random_seed.set_random_seed(10)
counts_by_indices = {}
# here the cpu undersamples and won't pass this test either
-with self.test_session() as sess:
+with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[0, -17]], dtype=dtypes.float32),
num_samples=1000000,


@ -208,7 +208,7 @@ class SumReductionTest(BaseReductionTest):
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session() as sess:
+with self.cached_session():
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -446,7 +446,7 @@ class MeanReductionTest(BaseReductionTest):
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session() as sess:
+with self.cached_session():
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -640,7 +640,7 @@ class ProdReductionTest(BaseReductionTest):
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session() as sess:
+with self.cached_session():
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -750,7 +750,7 @@ class MinReductionTest(test.TestCase):
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session() as sess:
+with self.cached_session():
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -866,7 +866,7 @@ class MaxReductionTest(test.TestCase):
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.cached_session() as sess:
+with self.cached_session():
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@ -998,7 +998,7 @@ class AllReductionTest(test.TestCase):
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.session() as sess:
+with self.session():
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
@ -1047,7 +1047,7 @@ class AnyReductionTest(test.TestCase):
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
-with self.session() as sess:
+with self.session():
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)


@ -55,7 +55,7 @@ class SelfAdjointEigTest(test.TestCase):
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
all_ops = []
-with self.session() as sess:
+with self.session():
for compute_v_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
@ -84,7 +84,7 @@ class SelfAdjointEigTest(test.TestCase):
"self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
self.assertEqual(matrix.shape, (32, 32))
matrix_tensor = constant_op.constant(matrix)
-with self.session() as sess:
+with self.session():
(e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
self.assertEqual(e.size, 32)
self.assertAllClose(


@ -64,7 +64,7 @@ class SparseXentTest(test.TestCase):
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
-with self.cached_session() as sess:
+with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
@ -73,7 +73,7 @@ class SparseXentTest(test.TestCase):
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
-with self.cached_session() as sess:
+with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))


@ -801,7 +801,7 @@ class TensorArrayTest(test.TestCase):
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
-with self.cached_session() as sess:
+with self.cached_session():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
@ -1371,7 +1371,7 @@ class TensorArrayTest(test.TestCase):
self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0])
def _testTensorArrayUnpackDynamic(self):
-with self.cached_session() as sess:
+with self.cached_session():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
@ -1388,7 +1388,7 @@ class TensorArrayTest(test.TestCase):
@test_util.run_deprecated_v1
def testSkipEagerTensorArraySplitDynamic(self):
-with self.session() as sess:
+with self.session():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
@ -1771,7 +1771,7 @@ class TensorArrayTest(test.TestCase):
# dy is outside of the gradients name scope; tf.gradients must
# wrap it in the correct name scope.
dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])
-with self.cached_session() as sess:
+with self.cached_session():
vdx, vdy = self.evaluate([dx, dy])
self.assertAllClose(vdx, vdy)


@ -47,7 +47,7 @@ class TopKTest(test.TestCase):
sorted=True): # pylint: disable=redefined-builtin
np_expected_values = np.array(expected_values)
np_expected_indices = np.array(expected_indices)
-with self.cached_session() as sess:
+with self.cached_session():
values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
values, indices = self.evaluate([values_op, indices_op])


@ -319,7 +319,7 @@ class XentTest(test.TestCase):
features = np.zeros([0, 2, 4]).astype(np.float32)
labels = np.zeros([0, 2, 4]).astype(np.float32)
np_loss, _ = self._npXent(features, labels)
-with self.session() as sess:
+with self.session():
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=features)
tf_loss = self.evaluate(loss)


@ -62,7 +62,7 @@ class BitwiseOpTest(test_util.TensorFlowTestCase):
def count_bits(x):
return sum(bin(z).count("1") for z in six.iterbytes(x.tobytes()))
for dtype in dtype_list:
-with self.cached_session() as sess:
+with self.cached_session():
print("PopulationCount test: ", dtype)
inputs = np.array(raw_inputs, dtype=dtype.as_numpy_dtype)
truth = [count_bits(x) for x in inputs]


@ -444,7 +444,7 @@ class CropAndResizeOpTestBase(test.TestCase):
constant_op.constant(boxes, shape=[num_boxes, 4]),
constant_op.constant(box_ind, shape=[num_boxes]),
constant_op.constant(crop_size, shape=[2]))
-with self.session() as sess:
+with self.session():
self.assertEqual(crops_shape, list(crops.get_shape()))
crops = self.evaluate(crops)
self.assertEqual(crops_shape, list(crops.shape))


@ -71,7 +71,7 @@ class RGBToHSVTest(test_util.TensorFlowTestCase):
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
-with self.cached_session() as sess:
+with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
@ -113,7 +113,7 @@ class RGBToYIQTest(test_util.TensorFlowTestCase):
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
-with self.cached_session() as sess:
+with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
@ -145,7 +145,7 @@ class RGBToYUVTest(test_util.TensorFlowTestCase):
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
-with self.cached_session() as sess:
+with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
@ -3199,7 +3199,7 @@ class ResizeImagesTest(test_util.TensorFlowTestCase,
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
-with self.cached_session() as sess:
+with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
@ -4176,7 +4176,7 @@ class JpegTest(test_util.TensorFlowTestCase):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
-with self.cached_session() as sess:
+with self.cached_session():
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
@ -4192,7 +4192,7 @@ class JpegTest(test_util.TensorFlowTestCase):
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
-with self.cached_session() as sess:
+with self.cached_session():
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
@ -4248,7 +4248,7 @@ class JpegTest(test_util.TensorFlowTestCase):
self.evaluate(result)
def testSynthetic(self):
-with self.cached_session() as sess:
+with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
@ -4269,7 +4269,7 @@ class JpegTest(test_util.TensorFlowTestCase):
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
-with self.cached_session() as sess:
+with self.cached_session():
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
@ -4293,7 +4293,7 @@ class JpegTest(test_util.TensorFlowTestCase):
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
-with self.cached_session() as sess:
+with self.cached_session():
# Compare decoding with both dct_option=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(simple_color_ramp())
@ -4308,7 +4308,7 @@ class JpegTest(test_util.TensorFlowTestCase):
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
-with self.cached_session() as sess:
+with self.cached_session():
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
@ -4418,7 +4418,7 @@ class PngTest(test_util.TensorFlowTestCase):
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
-with self.cached_session() as sess:
+with self.cached_session():
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = self.evaluate([png0, image0])
@ -4428,7 +4428,7 @@ class PngTest(test_util.TensorFlowTestCase):
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
-with self.cached_session() as sess:
+with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp())
png0 = image_ops.encode_png(image0, compression=7)
@ -4443,7 +4443,7 @@ class PngTest(test_util.TensorFlowTestCase):
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
-with self.cached_session() as sess:
+with self.cached_session():
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
@ -4458,7 +4458,7 @@ class PngTest(test_util.TensorFlowTestCase):
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
-with self.cached_session() as sess:
+with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
@ -4469,7 +4469,7 @@ class PngTest(test_util.TensorFlowTestCase):
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
-with self.cached_session() as sess:
+with self.cached_session():
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
@ -4500,7 +4500,7 @@ class GifTest(test_util.TensorFlowTestCase):
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
-with self.cached_session() as sess:
+with self.cached_session():
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = self.evaluate([gif0, image0])
@ -4528,7 +4528,7 @@ class GifTest(test_util.TensorFlowTestCase):
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
-with self.cached_session() as sess:
+with self.cached_session():
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
@ -5842,7 +5842,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testJpegUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
@ -5854,7 +5854,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testPngUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
@ -5873,7 +5873,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testGifUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
@ -5885,7 +5885,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testBmpUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
@ -5897,7 +5897,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testJpegFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
@ -5909,7 +5909,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testPngFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
@ -5921,7 +5921,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testGifFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
@ -5933,7 +5933,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testBmpFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
@ -5945,7 +5945,7 @@ class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testExpandAnimations(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
-with self.cached_session() as sess:
+with self.cached_session():
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))


@ -76,7 +76,7 @@ class NcclTestCase(test.TestCase):
for dtype in [np.float16, np.float32, np.int32, np.int64, np.float64]:
# Create session inside outer loop to test use of
# same communicator across multiple sessions.
-with self.test_session() as sess:
+with self.test_session():
for devices in device_sets:
shape = (3, 4)