Merge pull request #45386 from ROCmSoftwarePlatform:google_upstream_rocm_misc_update_201203
PiperOrigin-RevId: 346546025
Change-Id: I5f4639b9e68a915cb35420789edb841b916e08a5
commit d41295260e
@@ -631,13 +631,6 @@ TEST_F(ConstantFoldingTest, ConstShapeKnown) {
  }
}

// Disabling the following test on the ROCm platform because it relies on the
// "topK" operator being supported on the ROCm platform (which is currently not
// the case)
// TODO(rocm) :
// re-enable this test once support for "topK" operator is available on ROCm

#ifndef TENSORFLOW_USE_ROCM
TEST_F(ConstantFoldingTest, NoReplacePartialOutput) {
  Graph g(OpRegistry::Global());
  {
@@ -662,7 +655,6 @@ TEST_F(ConstantFoldingTest, NoReplacePartialOutput) {
                               &g, &was_mutated));
  EXPECT_FALSE(was_mutated);
}
#endif  // TENSORFLOW_USE_ROCM

namespace {

@@ -102,7 +102,7 @@ TEST_F(DepthwiseConvOpTest, DepthwiseConvHalfCpu) {
  Run<Eigen::half>(Device::CPU);
}

#ifdef GOOGLE_CUDA
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST_F(DepthwiseConvOpTest, DepthwiseConvFloatGpu) { Run<float>(Device::GPU); }
TEST_F(DepthwiseConvOpTest, DepthwiseConvDoubleGpu) {
  Run<double>(Device::GPU);

@@ -533,7 +533,7 @@ INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestCpu,
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestCpu,
                         ResizeBilinearOpAlignCornersTest,
                         ::testing::Values(TestDevice::CPU));
#if GOOGLE_CUDA
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
// Instantiate tests for GPU.
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestGpu, ResizeBilinearOpTest,
                         ::testing::Values(TestDevice::GPU));
@@ -543,7 +543,7 @@ INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestGpu,
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestGpu,
                         ResizeBilinearOpAlignCornersTest,
                         ::testing::Values(TestDevice::GPU));
#endif  // GOOGLE_CUDA
#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM

class ResizeBM : public ResizeBilinearOpTest {
 public:

@@ -37,20 +37,14 @@ class JitCompileTest(test.TestCase):

      xla_func = def_function.function(fn, jit_compile=True)
      inputs = array_ops.placeholder(dtypes.float32, [5])
      # XLA support is not yet enabled for TF ROCm
      if not test.is_built_with_rocm():
        x = xla_func(inputs, 1)
        with session.Session(graph=g) as sess:
          y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
          self.assertTrue(x.graph.as_graph_def().library.function[0]
                          .attr["_XlaMustCompile"].b)
          self.assertAllClose([2, 3, 3, 4, 4], y)
      x = xla_func(inputs, 1)
      with session.Session(graph=g) as sess:
        y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
        self.assertTrue(x.graph.as_graph_def().library.function[0]
                        .attr["_XlaMustCompile"].b)
        self.assertAllClose([2, 3, 3, 4, 4], y)

  def testDerivative(self):
    # XLA support is not yet enabled for TF ROCm
    if test.is_built_with_rocm():
      return

    def fn(x, a):
      return 2 * x + a

@@ -81,14 +75,12 @@ class JitCompileTest(test.TestCase):

      xla_func = def_function.function(fn, jit_compile=True)
      inputs = array_ops.placeholder(dtypes.int32, [5])
      # XLA support is not yet enabled for TF ROCm
      if not test.is_built_with_rocm():
        x = xla_func(inputs, 1)
        with session.Session(graph=g) as sess:
          y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
          self.assertTrue(x.graph.as_graph_def().library.function[0]
                          .attr["_XlaMustCompile"].b)
          self.assertAllClose([2, 3, 3, 4, 4], y)
      x = xla_func(inputs, 1)
      with session.Session(graph=g) as sess:
        y = sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
        self.assertTrue(x.graph.as_graph_def().library.function[0]
                        .attr["_XlaMustCompile"].b)
        self.assertAllClose([2, 3, 3, 4, 4], y)

  # Checking that we crash on an unsupported operation lets us test that the XLA
  # compiler was actually invoked.
@@ -101,12 +93,10 @@ class JitCompileTest(test.TestCase):
      xla_func = def_function.function(fn, jit_compile=True)
      inputs = array_ops.placeholder(dtypes.float32, [5])
      x = xla_func(inputs)
      # XLA support is not yet enabled for TF ROCm
      if not test.is_built_with_rocm():
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    "not compilable"):
          with session.Session(graph=g) as sess:
            sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  "not compilable"):
        with session.Session(graph=g) as sess:
          sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})


if __name__ == "__main__":
@@ -50,12 +50,9 @@ from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import test
from tensorflow.python.util import nest


_X11_35_DERIVATIVES = [
    1.1 ** 3.5,
    3.5 * 1.1 ** 2.5,
    3.5 * 2.5 * 1.1 ** 1.5,
    3.5 * 2.5 * 1.5 * 1.1 ** 0.5]
    1.1**3.5, 3.5 * 1.1**2.5, 3.5 * 2.5 * 1.1**1.5, 3.5 * 2.5 * 1.5 * 1.1**0.5
]


# TODO(allenl): Move this somewhere useful once forward gradients are stable.
@@ -83,8 +80,8 @@ def _jacfwd(f, primals):
      jac_columns.append(
          nest.map_structure(
              functools.partial(array_ops.reshape, shape=[-1]),
              _jvp(f, primals,
                   nest.pack_sequence_as(primals, tangent_mask))[1]))
              _jvp(f, primals, nest.pack_sequence_as(primals,
                                                     tangent_mask))[1]))
    jac_flat.append(array_ops.stack(jac_columns, axis=1))
    tangent_mask[primal_index] = array_ops.zeros_like(primal)
  return nest.pack_sequence_as(primals, jac_flat)
@@ -129,15 +126,18 @@ def _gradfwd(f, argnums=0, f_out_dtypes=dtypes.float32):
  """Return a function which computes the gradient of `f` in forward mode."""

  def _f(*params):

    def _single_jvp(param_mask):
      with forwardprop.ForwardAccumulator(primals=[params[argnums]],
                                          tangents=param_mask) as acc:
      with forwardprop.ForwardAccumulator(
          primals=[params[argnums]], tangents=param_mask) as acc:
        primals_out = f(*params)
      return acc.jvp(primals_out)

    # Building up a function to run with pfor takes a bit too long since we're
    # only running it a handful of times.
    return _vectorize_parameters(_single_jvp, [params[argnums]],
                                 use_pfor=False, dtype=f_out_dtypes)
    return _vectorize_parameters(
        _single_jvp, [params[argnums]], use_pfor=False, dtype=f_out_dtypes)

  return _f


@@ -159,8 +159,10 @@ def _vectorize_parameters(f, params, use_pfor, dtype):
  def _wrapper(index):
    full_onehot = array_ops.one_hot(index, total_size)
    split_onehot = array_ops.split(full_onehot, parameter_sizes)
    tangents = [array_ops.reshape(v, array_ops.shape(param))
                for param, v in zip(params, split_onehot)]
    tangents = [
        array_ops.reshape(v, array_ops.shape(param))
        for param, v in zip(params, split_onehot)
    ]
    return f(tangents)

  if use_pfor:
@@ -188,7 +190,9 @@ def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
  """
  return _vectorize_parameters(
      functools.partial(_hvp, f, params),
      params, use_pfor=use_pfor, dtype=dtype)
      params,
      use_pfor=use_pfor,
      dtype=dtype)


def _test_gradients(testcase,
@@ -335,8 +339,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
    execution_count = getattr(self, "_execution_count", 0)
    self._execution_count = execution_count + 1
    x = array_ops.zeros([execution_count])
    with forwardprop.ForwardAccumulator(
        x, array_ops.ones_like(x)) as acc:
    with forwardprop.ForwardAccumulator(x, array_ops.ones_like(x)) as acc:
      y = x + x
    self.assertAllClose(2. * array_ops.ones_like(x), acc.jvp(y))

@@ -353,11 +356,9 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
  def testMultipleWatchesAdd(self):
    x = constant_op.constant(-2.)
    with self.assertRaisesRegex(ValueError, "multiple times"):
      with forwardprop.ForwardAccumulator(
          [x, x], [1., 2.]):
      with forwardprop.ForwardAccumulator([x, x], [1., 2.]):
        pass
    with forwardprop.ForwardAccumulator(
        [x], [3.]) as acc:
    with forwardprop.ForwardAccumulator([x], [3.]) as acc:
      self.assertAllClose(3., acc.jvp(x))
      acc._watch(x, constant_op.constant(10.))
      self.assertAllClose(13., acc.jvp(x))
@@ -452,8 +453,10 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):

    @custom_gradient.custom_gradient
    def f(unused_x):

      def grad(unused_dy):
        raise ValueError("test_error_string")

      return 1., grad

    c = constant_op.constant(1.)
@@ -462,22 +465,15 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      with self.assertRaisesRegex(ValueError, "test_error_string"):
        f(c)

  @parameterized.named_parameters(
      [("EluM5", -0.5, nn_ops.elu),
       ("EluP5", [0.5], nn_ops.elu),
       ("SwishP5", 0.5, nn_impl.swish),
       ("SwishM5", [-0.5], nn_impl.swish)])
  @parameterized.named_parameters([("EluM5", -0.5, nn_ops.elu),
                                   ("EluP5", [0.5], nn_ops.elu),
                                   ("SwishP5", 0.5, nn_impl.swish),
                                   ("SwishM5", [-0.5], nn_impl.swish)])
  def testElementwiseNNOps(self, value, op_fn):
    _test_gradients(self, op_fn, [constant_op.constant(value)], order=3)

  def testFusedBatchNormGradsInference(self):

    if test.is_built_with_rocm():
      # This test was added recently and has been failing on the ROCm
      # platform, since it was added.
      # TODO(rocm): do root cause analysis of test failure and fix it.
      self.skipTest("Test fails on ROCm platform, needs further analysis")

    x_shape = [4, 10, 10, 2]
    increment = 3. / math_ops.reduce_prod(
        constant_op.constant(x_shape, dtype=dtypes.float32))
@@ -489,11 +485,16 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
    epsilon = 0.001

    def _bn_fused(x_arg, scale_arg, offset_arg):
      return nn_impl.fused_batch_norm(x_arg, scale_arg, offset_arg,
                                      mean, variance,
                                      epsilon=epsilon, is_training=False)[0]
    _test_gradients(self, _bn_fused, [x, scale, offset],
                    order=2, atol=1e-2)
      return nn_impl.fused_batch_norm(
          x_arg,
          scale_arg,
          offset_arg,
          mean,
          variance,
          epsilon=epsilon,
          is_training=False)[0]

    _test_gradients(self, _bn_fused, [x, scale, offset], order=2, atol=1e-2)

  def testPushPopAccumulatorState(self):
    # Note that this example is somewhat contrived. push_forwardprop_state is
@@ -519,22 +520,25 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      output = f(c)
      self.assertAllClose(d * math_ops.cos(c), acc.jvp(output))

  @parameterized.named_parameters(
      [("Order{}".format(order), order, expected)
       for order, expected in enumerate(_X11_35_DERIVATIVES)])
  @parameterized.named_parameters([
      ("Order{}".format(order), order, expected)
      for order, expected in enumerate(_X11_35_DERIVATIVES)
  ])
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testHigherOrderPureForward(self, order, expected):

    def _forwardgrad(f):

      def _compute_forwardgrad(primal):
        tangent = constant_op.constant(1.)
        with forwardprop.ForwardAccumulator(primal, tangent) as acc:
          primal_out = f(primal)
        return acc.jvp(primal_out)

      return _compute_forwardgrad

    def _forward(x):
      return x ** 3.5
      return x**3.5

    f = _forward
    primal = constant_op.constant(1.1)
@@ -542,26 +546,25 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      f = _forwardgrad(f)
    self.assertAllClose(expected, f(primal))

  @parameterized.named_parameters(
      [("Function", def_function.function),
       ("NoFunction", lambda f: f)])
  @parameterized.named_parameters([("Function", def_function.function),
                                   ("NoFunction", lambda f: f)])
  def testGradPureForward(self, decorator):

    @decorator
    def f(x):
      return x ** 3.5
      return x**3.5

    primal = constant_op.constant(1.1)
    with forwardprop.ForwardAccumulator(
        primal, constant_op.constant(1.)) as outer_acc:
      with forwardprop.ForwardAccumulator(
          primal, constant_op.constant(1.)) as acc:
    with forwardprop.ForwardAccumulator(primal,
                                        constant_op.constant(1.)) as outer_acc:
      with forwardprop.ForwardAccumulator(primal,
                                          constant_op.constant(1.)) as acc:
        primal_out = f(primal)
    inner_jvp = acc.jvp(primal_out)
    outer_jvp = outer_acc.jvp(inner_jvp)
    self.assertAllClose(1.1 ** 3.5, primal_out)
    self.assertAllClose(3.5 * 1.1 ** 2.5, inner_jvp)
    self.assertAllClose(3.5 * 2.5 * 1.1 ** 1.5, outer_jvp)
    self.assertAllClose(1.1**3.5, primal_out)
    self.assertAllClose(3.5 * 1.1**2.5, inner_jvp)
    self.assertAllClose(3.5 * 2.5 * 1.1**1.5, outer_jvp)
    self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out)))

  @test_util.assert_no_new_pyobjects_executing_eagerly
@@ -571,18 +574,18 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
    inner_jvp = constant_op.constant(3.)
    with forwardprop.ForwardAccumulator(
        [primal_in, inner_jvp],
        [constant_op.constant(2.), constant_op.constant(4.)]) as outer_acc:
      with forwardprop.ForwardAccumulator(
          primal_in, inner_jvp) as inner_acc:
        [constant_op.constant(2.),
         constant_op.constant(4.)]) as outer_acc:
      with forwardprop.ForwardAccumulator(primal_in, inner_jvp) as inner_acc:
        packed_input_indices, packed_input_tangents = (
            forwardprop_util.pack_tangents([primal_in]))
        self.assertAllClose([3., 2., 4.], packed_input_tangents)
        expected_indices = (
            # inner_acc watches primal_in
            ((0, 1),),
            (
                (0, 1),),
            # outer_acc watches primal_in and inner_jvp
            ((0, 2),
             (1, 3)))
            ((0, 2), (1, 3)))
        self.assertAllEqual(expected_indices, packed_input_indices)
        primal_out = primal_in * two
        self.assertAllClose(6., inner_acc.jvp(primal_out))
@@ -597,15 +600,16 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):

    @def_function.function
    def take_gradients():

      @def_function.function
      def f(x):
        return x ** 3.5
        return x**3.5

      primal = constant_op.constant(1.1)
      with forwardprop.ForwardAccumulator(
          primal, constant_op.constant(1.)) as outer_acc:
        with forwardprop.ForwardAccumulator(
            primal, constant_op.constant(1.)) as acc:
        with forwardprop.ForwardAccumulator(primal,
                                            constant_op.constant(1.)) as acc:
          primal_out = f(primal)
      inner_jvp = acc.jvp(primal_out)
      outer_jvp = outer_acc.jvp(inner_jvp)
@@ -613,9 +617,9 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      return primal_out, inner_jvp, outer_jvp

    primal_out, inner_jvp, outer_jvp = take_gradients()
    self.assertAllClose(1.1 ** 3.5, primal_out)
    self.assertAllClose(3.5 * 1.1 ** 2.5, inner_jvp)
    self.assertAllClose(3.5 * 2.5 * 1.1 ** 1.5, outer_jvp)
    self.assertAllClose(1.1**3.5, primal_out)
    self.assertAllClose(3.5 * 1.1**2.5, inner_jvp)
    self.assertAllClose(3.5 * 2.5 * 1.1**1.5, outer_jvp)

  def testFunctionGrad(self):

@@ -623,11 +627,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
    def f(x):
      return math_ops.reduce_prod(math_ops.tanh(x)**2)

    _test_gradients(
        self,
        f,
        [constant_op.constant([1., 2.])],
        order=3)
    _test_gradients(self, f, [constant_op.constant([1., 2.])], order=3)

  def testReusingJVP(self):
    m1 = random_ops.random_uniform((256, 2096))
@@ -642,8 +642,8 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      result2 = matmul(m2, m2, transpose_b=True)

    def _expected(mat, tangent):
      return (math_ops.matmul(tangent, mat, transpose_b=True)
              + math_ops.matmul(mat, tangent, transpose_b=True))
      return (math_ops.matmul(tangent, mat, transpose_b=True) +
              math_ops.matmul(mat, tangent, transpose_b=True))

    self.assertAllClose(result1, result2)
    self.assertAllClose(_expected(m1, tangent1), acc.jvp(result1))
@@ -693,19 +693,16 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
    with forwardprop.ForwardAccumulator(c, c_tangent) as acc:
      with backprop.GradientTape() as tape:
        self.assertFalse(tape_lib.should_record_backprop([c]))
        self.assertEqual(1,
                         pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        self.assertEqual(1, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        tape.watch(c)
        self.assertEqual(2,
                         pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        self.assertEqual(2, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        self.assertTrue(tape_lib.should_record_backprop([c]))
        with tape_lib.stop_recording():
          self.assertEqual(0,
                           pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
          self.assertFalse(tape_lib.should_record_backprop([c]))
          d = c * 2.
        self.assertEqual(2,
                         pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        self.assertEqual(2, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        self.assertTrue(tape_lib.should_record_backprop([c]))
        self.assertFalse(tape_lib.should_record_backprop([d]))
        self.assertIsNone(acc.jvp(d))
@@ -728,11 +725,11 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
        self.assertIsNone(tape.gradient(d, c))
        self.assertIsNone(tape.gradient(e, c))
        tape_lib.record_operation_forwardprop_only(
            "CustomForwardMul", [d], [c, two],
            lambda dd: (two * dd, c * dd), None)
        tape_lib.record_operation_backprop_only(
            "CustomBackwardMul", [e], [c, three],
            lambda de: (three * de, c * de))
            "CustomForwardMul", [d], [c, two], lambda dd: (two * dd, c * dd),
            None)
        tape_lib.record_operation_backprop_only("CustomBackwardMul", [e],
                                                [c, three], lambda de:
                                                (three * de, c * de))
        self.assertAllClose(4., acc.jvp(d))
        self.assertIsNone(acc.jvp(e))
        self.assertIsNone(tape.gradient(d, c))
@@ -749,16 +746,17 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
  def testVariableReadInFunction(self):
    v = variables.Variable(1.)
    with forwardprop.ForwardAccumulator(v, 11.) as acc:

      @def_function.function
      def f():
        return v.read_value(), 2. * v.read_value()

      result = f()
      self.assertAllClose((1.0, 2.), result)
      self.assertAllClose((11., 22.), acc.jvp(result))

  @parameterized.named_parameters(
      [("ForwardPropFirst", True),
       ("TapeFirst", False)])
  @parameterized.named_parameters([("ForwardPropFirst", True),
                                   ("TapeFirst", False)])
  def testForwardOverBackwardMemoryEfficiency(self, forward_prop_first):
    # Watching depends on nesting, not creation order
    c = constant_op.constant(1.)
@@ -788,9 +786,8 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
    finally:
      gc.enable()

  @parameterized.named_parameters(
      [("ForwardPropFirst", True),
       ("TapeFirst", False)])
  @parameterized.named_parameters([("ForwardPropFirst", True),
                                   ("TapeFirst", False)])
  def testBackwardOverForward(self, forward_prop_first):
    c = constant_op.constant(1.)
    # Watching depends on nesting, not creation order
@@ -805,8 +802,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      tape.watch(c)
      d = math_ops.cos(c)
      self.assertTrue(tape_lib.should_record_backprop((acc.jvp(d),)))
      self.assertAllClose(-.1 * math_ops.cos(1.),
                          tape.gradient(acc.jvp(d), c))
      self.assertAllClose(-.1 * math_ops.cos(1.), tape.gradient(acc.jvp(d), c))

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testRecordingWithJVPIndices(self):
@@ -816,11 +812,10 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      self.assertAllClose([10.], packed_input_tangents)
      d = constant_op.constant(2.)
      d_tangent = constant_op.constant(3.)
      tape_lib.record_operation_forwardprop_only(
          "FunctionWithInlineJVPs",
          [d] + [d_tangent],
          [c] + packed_input_tangents,
          None, (((0, 1),),))
      tape_lib.record_operation_forwardprop_only("FunctionWithInlineJVPs",
                                                 [d] + [d_tangent],
                                                 [c] + packed_input_tangents,
                                                 None, (((0, 1),),))
      self.assertAllClose(3., acc.jvp(d))

  @test_util.assert_no_new_pyobjects_executing_eagerly
@@ -829,26 +824,19 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
    d = constant_op.constant(2.)
    e = constant_op.constant(3.)
    with forwardprop.ForwardAccumulator(c, 10.) as acc:
      tape_lib.record_operation(
          "ForwardIsSpecial",
          [d], [c],
          None, lambda jvp: [-2. * jvp])
      tape_lib.record_operation("ForwardIsSpecial", [d], [c], None,
                                lambda jvp: [-2. * jvp])
      self.assertAllClose(-20., acc.jvp(d))
      tape_lib.record_operation(
          "ForwardIsSpecial2",
          [], [],
          None, lambda: [])
      tape_lib.record_operation(
          "ForwardIsSpecial3",
          [e], [d],
          None, lambda x: [x])
      tape_lib.record_operation("ForwardIsSpecial2", [], [], None, lambda: [])
      tape_lib.record_operation("ForwardIsSpecial3", [e], [d], None,
                                lambda x: [x])
      self.assertAllClose(-20., acc.jvp(e))

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testVariableWatched(self):
    v = variables.Variable([1., 2., 3.])
    with forwardprop.ForwardAccumulator(
        v, constant_op.constant([.1, -.2, .3])) as acc:
    with forwardprop.ForwardAccumulator(v, constant_op.constant([.1, -.2,
                                                                 .3])) as acc:
      self.assertAllClose([.1, -.2, .3], acc.jvp(v))
      x = v * 2.
      self.assertAllClose([.2, -.4, .6], acc.jvp(x))
@@ -878,8 +866,9 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
  def compute_jvps(self):
    if self._v is None:
      self._v = variables.Variable([1., 2., 3.])
    with forwardprop.ForwardAccumulator(
        self._v, constant_op.constant([.1, -.2, .3])) as acc:
    with forwardprop.ForwardAccumulator(self._v,
                                        constant_op.constant([.1, -.2,
                                                              .3])) as acc:
      x = self._v * 2.
      x2 = self._v + .1
    return acc.jvp((self._v, x, x2))
@@ -898,6 +887,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
      self.assertAllClose(3., acc.jvp(y))

  def testIndexSlicesGradInFunction(self):

    @def_function.function
    def f(a):
      return array_ops.gather(a, 0)
@@ -983,17 +973,14 @@ class ControlFlowTests(test.TestCase):
  def testOfFunctionWhile(self):
    y = constant_op.constant(1.)
    with forwardprop.ForwardAccumulator(y, 1.) as acc:
      self.assertAllClose(
          10., acc.jvp(_has_loop(constant_op.constant(5), y)))
      self.assertAllClose(10., acc.jvp(_has_loop(constant_op.constant(5), y)))

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testOfFunctionCond(self):
    y = constant_op.constant(1.)
    with forwardprop.ForwardAccumulator(y, 1.) as acc:
      self.assertAllClose(
          3., acc.jvp(_has_cond(constant_op.constant(5), y)))
      self.assertAllClose(
          0., acc.jvp(_has_cond(constant_op.constant(0), y)))
      self.assertAllClose(3., acc.jvp(_has_cond(constant_op.constant(5), y)))
      self.assertAllClose(0., acc.jvp(_has_cond(constant_op.constant(0), y)))

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testInFunctionWhile(self):
@@ -1024,15 +1011,18 @@ class HessianTests(test.TestCase, parameterized.TestCase):

    hessian_eager, = _forward_over_back_hessian(
        _f, [constant_op.constant(x_value)],
        use_pfor=False, dtype=[dtypes.float32])
        use_pfor=False,
        dtype=[dtypes.float32])
    self.assertAllClose(hess_value, hessian_eager)
    hessian_function, = def_function.function(_forward_over_back_hessian)(
        _f, [constant_op.constant(x_value)],
        use_pfor=False, dtype=[dtypes.float32])
        use_pfor=False,
        dtype=[dtypes.float32])
    self.assertAllClose(hess_value, hessian_function)
    hessian_pfor, = def_function.function(_forward_over_back_hessian)(
        _f, [constant_op.constant(x_value)],
        use_pfor=True, dtype=[dtypes.float32])
        use_pfor=True,
        dtype=[dtypes.float32])
    self.assertAllClose(hess_value, hessian_pfor)

@@ -93,8 +93,8 @@ def compare_two_inputs_op_to_numpy(keras_op,
      backend.variable(input_a, dtype=dtype),
      backend.variable(input_b, dtype=dtype), *keras_args, **keras_kwargs)
  keras_output = backend.eval(keras_output)
  np_output = np_op(input_a.astype(dtype), input_b.astype(dtype),
                    *np_args, **np_kwargs)
  np_output = np_op(
      input_a.astype(dtype), input_b.astype(dtype), *np_args, **np_kwargs)
  try:
    np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
  except AssertionError:
@@ -425,19 +425,31 @@ class BackendLinearAlgebraTest(test.TestCase, parameterized.TestCase):
        (backend.argmax, np.argmax),
    ]
    for keras_op, np_op in ops_to_test:
      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
                                       keras_kwargs={'axis': 1},
                                       np_kwargs={'axis': 1})
      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
                                       keras_kwargs={'axis': -1},
                                       np_kwargs={'axis': -1})
      compare_single_input_op_to_numpy(
          keras_op,
          np_op,
          input_shape=(4, 7, 5),
          keras_kwargs={'axis': 1},
          np_kwargs={'axis': 1})
      compare_single_input_op_to_numpy(
          keras_op,
          np_op,
          input_shape=(4, 7, 5),
          keras_kwargs={'axis': -1},
          np_kwargs={'axis': -1})
      if 'keepdims' in tf_inspect.getargspec(keras_op).args:
        compare_single_input_op_to_numpy(keras_op, np_op,
                                         input_shape=(4, 7, 5),
                                         keras_kwargs={'axis': 1,
                                                       'keepdims': True},
                                         np_kwargs={'axis': 1,
                                                    'keepdims': True})
        compare_single_input_op_to_numpy(
            keras_op,
            np_op,
            input_shape=(4, 7, 5),
            keras_kwargs={
                'axis': 1,
                'keepdims': True
            },
            np_kwargs={
                'axis': 1,
                'keepdims': True
            })

  def test_elementwise_ops(self):
    ops_to_test = [
@@ -457,9 +469,8 @@ class BackendLinearAlgebraTest(test.TestCase, parameterized.TestCase):
        (backend.log, np.log),
    ]
    for keras_op, np_op in ops_to_test:
      compare_single_input_op_to_numpy(keras_op, np_op,
                                       input_shape=(4, 7),
                                       negative_values=False)
      compare_single_input_op_to_numpy(
          keras_op, np_op, input_shape=(4, 7), negative_values=False)

    compare_single_input_op_to_numpy(
        backend.clip,
@@ -489,9 +500,8 @@ class BackendLinearAlgebraTest(test.TestCase, parameterized.TestCase):
        (backend.minimum, np.minimum),
    ]
    for keras_op, np_op in ops_to_test:
      compare_two_inputs_op_to_numpy(keras_op, np_op,
                                     input_shape_a=(4, 7),
                                     input_shape_b=(4, 7))
      compare_two_inputs_op_to_numpy(
          keras_op, np_op, input_shape_a=(4, 7), input_shape_b=(4, 7))

  def test_relu(self):
    x = ops.convert_to_tensor_v2_with_dispatch([[-4, 0], [2, 7]], 'float32')
@@ -713,19 +723,14 @@ class BackendShapeOpsTest(test.TestCase):
      shape[2] += padding[1][0] + padding[1][1]
      shape[3] += padding[2][0] + padding[2][1]
      y = np.zeros(tuple(shape))
      y[:,
        padding[0][0]:-padding[0][1],
        padding[1][0]:-padding[1][1],
        padding[2][0]:-padding[2][1],
        :] = x
      y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1],
        padding[2][0]:-padding[2][1], :] = x
    else:
      shape[2] += padding[0][0] + padding[0][1]
      shape[3] += padding[1][0] + padding[1][1]
      shape[4] += padding[2][0] + padding[2][1]
      y = np.zeros(tuple(shape))
      y[:, :,
        padding[0][0]:-padding[0][1],
        padding[1][0]:-padding[1][1],
      y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1],
        padding[2][0]:-padding[2][1]] = x
    return y

@@ -753,18 +758,14 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
  def test_bias_add(self):
    keras_op = backend.bias_add
    np_op = np.add
    compare_two_inputs_op_to_numpy(keras_op, np_op,
                                   input_shape_a=(4, 7),
                                   input_shape_b=(7,))
    compare_two_inputs_op_to_numpy(keras_op, np_op,
                                   input_shape_a=(4, 3, 7),
                                   input_shape_b=(7,))
    compare_two_inputs_op_to_numpy(keras_op, np_op,
                                   input_shape_a=(4, 3, 5, 7),
                                   input_shape_b=(7,))
    compare_two_inputs_op_to_numpy(keras_op, np_op,
                                   input_shape_a=(4, 3, 5, 2, 7),
                                   input_shape_b=(7,))
    compare_two_inputs_op_to_numpy(
        keras_op, np_op, input_shape_a=(4, 7), input_shape_b=(7,))
    compare_two_inputs_op_to_numpy(
        keras_op, np_op, input_shape_a=(4, 3, 7), input_shape_b=(7,))
    compare_two_inputs_op_to_numpy(
        keras_op, np_op, input_shape_a=(4, 3, 5, 7), input_shape_b=(7,))
    compare_two_inputs_op_to_numpy(
        keras_op, np_op, input_shape_a=(4, 3, 5, 2, 7), input_shape_b=(7,))

    with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
      x = backend.variable((3, 4))
@@ -787,12 +788,10 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
      b = b.reshape((1, b.shape[0], 1, 1))
      return x + b

    compare_two_inputs_op_to_numpy(keras_op, np_op,
                                   input_shape_a=(4, 3, 7),
                                   input_shape_b=(3,))
    compare_two_inputs_op_to_numpy(keras_op, np_op,
                                   input_shape_a=(4, 3, 5, 7),
                                   input_shape_b=(3,))
    compare_two_inputs_op_to_numpy(
        keras_op, np_op, input_shape_a=(4, 3, 7), input_shape_b=(3,))
    compare_two_inputs_op_to_numpy(
        keras_op, np_op, input_shape_a=(4, 3, 5, 7), input_shape_b=(3,))

  def test_pool2d(self):
    val = np.random.random((10, 3, 10, 10))
@@ -847,8 +846,6 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
      y = backend.pool2d(x, (2, 2), strides=(2, 2), pool_mode='other')

  def test_pool3d(self):
    if test.is_built_with_rocm():
      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
    val = np.random.random((10, 3, 10, 10, 10))
    x = backend.variable(val)
    y = backend.pool3d(
@@ -938,18 +935,16 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
    kernel_sizes = (kernel_size,) * dim
    strides = (stride,) * dim

    output_shape = tuple([(i - kernel_size + stride) // stride
                          for i in input_spatial_shape])
    output_shape = tuple([
        (i - kernel_size + stride) // stride for i in input_spatial_shape
    ])

    kernel_shape = (np.prod(output_shape),
                    np.prod(kernel_sizes) * channels_in,
                    filters)
                    np.prod(kernel_sizes) * channels_in, filters)

    kernel = np.random.normal(
        0,
        1,
        output_shape + (channels_in, np.prod(kernel_sizes), filters)
    )
        0, 1,
        output_shape + (channels_in, np.prod(kernel_sizes), filters))

    kernel_cf = np.reshape(kernel, kernel_shape)
    kernel_cf = backend.variable(kernel_cf)
@@ -957,14 +952,14 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
    conv_cf = backend.local_conv(inputs_cf, kernel_cf, kernel_sizes,
                                 strides, output_shape, 'channels_first')

    inputs_cl = np.transpose(inputs, [0, 2] + list(range(3, dim + 2)) +
                             [1])
    inputs_cl = np.transpose(inputs,
                             [0, 2] + list(range(3, dim + 2)) + [1])
    inputs_cl = backend.variable(inputs_cl)

    kernel_cl = np.reshape(
        np.transpose(kernel, list(range(dim)) + [dim + 1, dim, dim + 2]),
        kernel_shape
    )
        np.transpose(kernel,
                     list(range(dim)) + [dim + 1, dim, dim + 2]),
        kernel_shape)
    kernel_cl = backend.variable(kernel_cl)

    conv_cl = backend.local_conv(inputs_cl, kernel_cl, kernel_sizes,
@@ -975,18 +970,13 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):

    self.assertAllCloseAccordingToType(
        conv_cf,
        np.transpose(conv_cl,
                     [0, dim + 1] + list(range(1, dim + 1))),
        atol=1e-5
    )
        np.transpose(conv_cl, [0, dim + 1] + list(range(1, dim + 1))),
        atol=1e-5)

  @parameterized.named_parameters(
      ('local_conv1d', (5, 6), (3,), (1,), (3,)),
      ('local_conv2d', (4, 5, 6), (3, 3), (1, 1), (2, 3)))
  def test_local_conv_1d_and_2d(self,
                                input_shape,
                                kernel_sizes,
                                strides,
  def test_local_conv_1d_and_2d(self, input_shape, kernel_sizes, strides,
                                output_shape):
    filters = 3
    batch_size = 2
@@ -994,9 +984,9 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
    inputs = np.random.normal(0, 1, (batch_size,) + input_shape)
    inputs = backend.variable(inputs)

    kernel = np.random.normal(0, 1, (np.prod(output_shape),
                                     np.prod(kernel_sizes) * input_shape[-1],
                                     filters))
    kernel = np.random.normal(0, 1,
                              (np.prod(output_shape), np.prod(kernel_sizes) *
                               input_shape[-1], filters))
    kernel = backend.variable(kernel)

    local_conv = backend.local_conv(inputs, kernel, kernel_sizes, strides,
@@ -1225,12 +1215,33 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
    mask = backend.variable(np_mask)

    kwargs_list = [
        {'go_backwards': False, 'mask': None},
        {'go_backwards': False, 'mask': None, 'unroll': True},
        {'go_backwards': True, 'mask': None},
        {'go_backwards': True, 'mask': None, 'unroll': True},
        {'go_backwards': False, 'mask': mask},
        {'go_backwards': False, 'mask': mask, 'unroll': True},
        {
            'go_backwards': False,
            'mask': None
        },
        {
            'go_backwards': False,
            'mask': None,
            'unroll': True
        },
        {
            'go_backwards': True,
            'mask': None
        },
        {
            'go_backwards': True,
            'mask': None,
            'unroll': True
        },
        {
            'go_backwards': False,
            'mask': mask
        },
        {
            'go_backwards': False,
            'mask': mask,
            'unroll': True
        },
    ]
    for i, kwargs in enumerate(kwargs_list):
      last_output, outputs, new_states = backend.rnn(rnn_fn, inputs,
@@ -1319,12 +1330,33 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
    mask = backend.variable(np_mask)

    kwargs_list = [
        {'go_backwards': False, 'mask': None},
        {'go_backwards': False, 'mask': None, 'unroll': True},
        {'go_backwards': True, 'mask': None},
        {'go_backwards': True, 'mask': None, 'unroll': True},
        {'go_backwards': False, 'mask': mask},
        {'go_backwards': False, 'mask': mask, 'unroll': True},
        {
            'go_backwards': False,
            'mask': None
        },
        {
            'go_backwards': False,
            'mask': None,
            'unroll': True
        },
        {
            'go_backwards': True,
            'mask': None
        },
        {
            'go_backwards': True,
            'mask': None,
            'unroll': True
        },
        {
            'go_backwards': False,
            'mask': mask
        },
        {
            'go_backwards': False,
            'mask': mask,
            'unroll': True
        },
    ]
    for i, kwargs in enumerate(kwargs_list):
      last_output, outputs, new_states = backend.rnn(rnn_fn, inputs,
@@ -1394,8 +1426,8 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
    def step_function(inputs, states):
      return inputs, [s + 1 for s in states]

    inputs_vals = np.random.random((num_samples, num_timesteps,
                                    state_and_io_size))
    inputs_vals = np.random.random(
        (num_samples, num_timesteps, state_and_io_size))
    initial_state_vals = np.random.random((num_samples, state_and_io_size))
    # masking of two last timesteps for second sample only
    mask_vals = np.ones((num_samples, num_timesteps))
@@ -1785,29 +1817,34 @@ class TestCTC(test.TestCase):
      depth = 6
      seq_len_0 = 5
      input_prob_matrix_0 = np.asarray(
          [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
           [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
           [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
           [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
           [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
           # Random entry added in at time=5
           [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],
          [
              [0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
              [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
              [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
              [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
              [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
              # Random entry added in at time=5
              [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]
          ],
          dtype=np.float32)

      # len max_time_steps array of batch_size x depth matrices
      inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]
                 for t in range(seq_len_0)] +  # Pad to max_time_steps = 8
                2 * [np.zeros((1, depth), dtype=np.float32)])
      inputs = (
          [input_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0)
          ] +  # Pad to max_time_steps = 8
          2 * [np.zeros((1, depth), dtype=np.float32)])

      inputs = backend.variable(np.asarray(inputs).transpose((1, 0, 2)))

      # batch_size length vector of sequence_lengths
      input_length = backend.variable(np.array([seq_len_0], dtype=np.int32))
      # batch_size length vector of negative log probabilities
      log_prob_truth = np.array([
          -3.5821197,  # output beam 0
          -3.777835  # output beam 1
      ], np.float32)[np.newaxis, :]
      log_prob_truth = np.array(
          [
              -3.5821197,  # output beam 0
              -3.777835  # output beam 1
          ],
          np.float32)[np.newaxis, :]

      decode_truth = [
          np.array([1, 0, -1, -1, -1, -1, -1]),
@@ -1866,9 +1903,9 @@ class TestCTC(test.TestCase):

    labels = np.asarray([[0, 1, 2, 1, 0]])
    inputs = np.asarray(
        [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [
            0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436
        ], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
        [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
          [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
          [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
         [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
         [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]]
        ],
@@ -1975,12 +2012,12 @@ class FunctionTest(test.TestCase):

    x_ph = backend.placeholder(ndim=2)
    v = backend.variable(np.ones((4, 2)))
    output = x_ph ** 2 + v
    output = x_ph**2 + v
    new_v = v + x_ph
    f = backend.function(x_ph, output, updates=[(v, new_v)])
    input_val = np.random.random((4, 2))
    result = f(input_val)
    self.assertAllClose(result, input_val ** 2 + 1)
    self.assertAllClose(result, input_val**2 + 1)
    self.assertAllClose(backend.get_value(v), np.ones((4, 2)) + input_val)

@@ -118,7 +118,9 @@ def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
  """
  return _vectorize_parameters(
      functools.partial(_hvp, f, params),
      params, use_pfor=use_pfor, dtype=dtype)
      params,
      use_pfor=use_pfor,
      dtype=dtype)


def _test_gradients(testcase,
@@ -173,7 +175,10 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
            2. / tf.size(v, out_type=tf.float32),
            dtype=tf.float32), v.shape))
    _test_gradients(
        self, layer, [input_value], atol=atol,
        self,
        layer,
        [input_value],
        atol=atol,
        # These are linear, so second-order is pretty boring.
        order=2)

@@ -189,8 +194,10 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
    input_value = tf.constant(value, dtype=tf.float32)
    layer.build(input_value.shape)
    _test_gradients(
        self, functools.partial(layer, training=training), [input_value],
        order=2, atol=1e-3)
        self,
        functools.partial(layer, training=training), [input_value],
        order=2,
        atol=1e-3)

  @parameterized.named_parameters([
      ("NonFused", [[0.1], [0.2], [-0.3]],
@@ -205,8 +212,8 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
      input_value = tf.constant(value, dtype=tf.float32)
      tape.watch(input_value)
      output = layer(input_value, training=training)
      jac_back = tape.jacobian(
          output, [input_value] + layer.trainable_variables)
      jac_back = tape.jacobian(output,
                               [input_value] + layer.trainable_variables)
      jac_forward = _jacfwd(
          lambda *args: layer(args[0], training=training),  # pylint:disable=cell-var-from-loop
          [input_value] + layer.trainable_variables)
@@ -218,12 +225,6 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
                                   ("NoFunction", lambda f: f)])
  def testVariablesHVP(self, decorator):

    if tf.test.is_built_with_rocm():
      # TODO(rocm)
      # This test was recently added and has never passed on the
      # ROCm platform. Remove this skip once the test is passing again
      self.skipTest("NoFunction decorator test fails on the ROCm platform")

    class _Model(tf.Module):

      def __init__(self):
@@ -240,6 +241,7 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
        return self._second_dense(x)

    model = _Model()

    def _loss():
      input_value = tf.constant([[-0.5, 1.], [0.5, -1.]])
      target = tf.constant([[-1.], [2.]])
@@ -251,8 +253,8 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
      loss = _loss()
      vector = tape.gradient(loss, model.trainable_variables)
      variable_input_fn = lambda unused_variables: _loss()
      forward_over_back_hvp, = _hvp(
          variable_input_fn, [model.trainable_variables], [vector])
      forward_over_back_hvp, = _hvp(variable_input_fn,
                                    [model.trainable_variables], [vector])
      with tf.GradientTape(persistent=True) as tape:
        tape.watch(model.trainable_variables)
        loss = _loss()
@@ -260,6 +262,7 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
      back_over_back_hvp = tape.gradient(
          first_grads, model.trainable_variables, output_gradients=vector)
      return forward_over_back_hvp, back_over_back_hvp

    self.assertAllClose(*_compute_hvps(), rtol=1e-5, atol=1e-5)

  def testEmbeddingLayerInFunction(self):
@@ -288,9 +291,7 @@ class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):

class HessianTests(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(
      [("PFor", True),
       ("MapFn", False)])
  @parameterized.named_parameters([("PFor", True), ("MapFn", False)])
  def testHessianOfVariables(self, use_pfor):
    model = tf.keras.layers.Dense(1)
    model.build([None, 2])

@@ -34,16 +34,18 @@ from tensorflow.python.platform import test
class GlobalPoolingTest(test.TestCase, parameterized.TestCase):

  def test_globalpooling_1d(self):
    testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
                             input_shape=(3, 4, 5))
    testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
                             kwargs={'data_format': 'channels_first'},
                             input_shape=(3, 4, 5))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalMaxPooling1D, input_shape=(3, 4, 5))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalMaxPooling1D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 4, 5))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalAveragePooling1D, input_shape=(3, 4, 5))
    testing_utils.layer_test(keras.layers.pooling.GlobalAveragePooling1D,
                             kwargs={'data_format': 'channels_first'},
                             input_shape=(3, 4, 5))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalAveragePooling1D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 4, 5))

  def test_globalpooling_1d_masking_support(self):
    model = keras.Sequential()
@@ -57,9 +59,9 @@ class GlobalPoolingTest(test.TestCase, parameterized.TestCase):
    self.assertAllClose(output[0], model_input[0, 0, :])

  def test_globalpooling_1d_with_ragged(self):
    ragged_data = ragged_factory_ops.constant([
        [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
        [[1.0, 1.0], [2.0, 2.0]]], ragged_rank=1)
    ragged_data = ragged_factory_ops.constant(
        [[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], [[1.0, 1.0], [2.0, 2.0]]],
        ragged_rank=1)
    dense_data = ragged_data.to_tensor()

    inputs = keras.Input(shape=(None, 2), dtype='float32', ragged=True)
@@ -76,9 +78,10 @@ class GlobalPoolingTest(test.TestCase, parameterized.TestCase):
    self.assertAllEqual(output_ragged, output_dense)

  def test_globalpooling_2d_with_ragged(self):
    ragged_data = ragged_factory_ops.constant([
        [[[1.0], [1.0]], [[2.0], [2.0]], [[3.0], [3.0]]],
        [[[1.0], [1.0]], [[2.0], [2.0]]]], ragged_rank=1)
    ragged_data = ragged_factory_ops.constant(
        [[[[1.0], [1.0]], [[2.0], [2.0]], [[3.0], [3.0]]],
         [[[1.0], [1.0]], [[2.0], [2.0]]]],
        ragged_rank=1)
    dense_data = ragged_data.to_tensor()

    inputs = keras.Input(shape=(None, 2, 1), dtype='float32', ragged=True)
@@ -94,9 +97,10 @@ class GlobalPoolingTest(test.TestCase, parameterized.TestCase):
    self.assertAllEqual(output_ragged, output_dense)

  def test_globalpooling_3d_with_ragged(self):
    ragged_data = ragged_factory_ops.constant([
        [[[[1.0]], [[1.0]]], [[[2.0]], [[2.0]]], [[[3.0]], [[3.0]]]],
        [[[[1.0]], [[1.0]]], [[[2.0]], [[2.0]]]]], ragged_rank=1)
    ragged_data = ragged_factory_ops.constant(
        [[[[[1.0]], [[1.0]]], [[[2.0]], [[2.0]]], [[[3.0]], [[3.0]]]],
         [[[[1.0]], [[1.0]]], [[[2.0]], [[2.0]]]]],
        ragged_rank=1)

    inputs = keras.Input(shape=(None, 2, 1, 1), dtype='float32', ragged=True)
    out = keras.layers.GlobalAveragePooling3D()(inputs)
@@ -162,15 +166,19 @@ class Pooling2DTest(test.TestCase, parameterized.TestCase):
  def test_averagepooling_2d(self):
    testing_utils.layer_test(
        keras.layers.AveragePooling2D,
        kwargs={'strides': (2, 2),
                'padding': 'same',
                'pool_size': (2, 2)},
        kwargs={
            'strides': (2, 2),
            'padding': 'same',
            'pool_size': (2, 2)
        },
        input_shape=(3, 5, 6, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling2D,
        kwargs={'strides': (2, 2),
                'padding': 'valid',
                'pool_size': (3, 3)},
        kwargs={
            'strides': (2, 2),
            'padding': 'valid',
            'pool_size': (3, 3)
        },
        input_shape=(3, 5, 6, 4))

    # This part of the test can only run on GPU but doesn't appear
@@ -194,14 +202,14 @@ class Pooling2DTest(test.TestCase, parameterized.TestCase):
class Pooling3DTest(test.TestCase, parameterized.TestCase):

  def test_maxpooling_3d(self):
    if test.is_built_with_rocm():
      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
    pool_size = (3, 3, 3)
    testing_utils.layer_test(
        keras.layers.MaxPooling3D,
        kwargs={'strides': 2,
                'padding': 'valid',
                'pool_size': pool_size},
        kwargs={
            'strides': 2,
            'padding': 'valid',
            'pool_size': pool_size
        },
        input_shape=(3, 11, 12, 10, 4))
    testing_utils.layer_test(
        keras.layers.MaxPooling3D,
@@ -214,14 +222,14 @@ class Pooling3DTest(test.TestCase, parameterized.TestCase):
        input_shape=(3, 4, 11, 12, 10))

  def test_averagepooling_3d(self):
    if test.is_built_with_rocm():
      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
    pool_size = (3, 3, 3)
    testing_utils.layer_test(
        keras.layers.AveragePooling3D,
        kwargs={'strides': 2,
                'padding': 'valid',
                'pool_size': pool_size},
        kwargs={
            'strides': 2,
            'padding': 'valid',
            'pool_size': pool_size
        },
        input_shape=(3, 11, 12, 10, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling3D,
@@ -242,8 +250,10 @@ class Pooling1DTest(test.TestCase, parameterized.TestCase):
    for stride in [1, 2]:
      testing_utils.layer_test(
          keras.layers.MaxPooling1D,
          kwargs={'strides': stride,
                  'padding': padding},
          kwargs={
              'strides': stride,
              'padding': padding
          },
          input_shape=(3, 5, 4))
    testing_utils.layer_test(
        keras.layers.MaxPooling1D,
@@ -255,8 +265,10 @@ class Pooling1DTest(test.TestCase, parameterized.TestCase):
    for stride in [1, 2]:
      testing_utils.layer_test(
          keras.layers.AveragePooling1D,
          kwargs={'strides': stride,
                  'padding': padding},
          kwargs={
              'strides': stride,
              'padding': padding
          },
          input_shape=(3, 5, 4))

    testing_utils.layer_test(

@ -56,24 +56,36 @@ class ResizingTest(keras_parameterized.TestCase):
|
||||
expected_output_shape=(None, expected_height, expected_width,
|
||||
channels))
|
||||
|
||||
@parameterized.named_parameters(
|
||||
('down_sample_bilinear_2_by_2', {'interpolation': 'bilinear'}, 2, 2),
|
||||
('down_sample_bilinear_3_by_2', {'interpolation': 'bilinear'}, 3, 2),
|
||||
('down_sample_nearest_2_by_2', {'interpolation': 'nearest'}, 2, 2),
|
||||
('down_sample_nearest_3_by_2', {'interpolation': 'nearest'}, 3, 2),
|
||||
('down_sample_area_2_by_2', {'interpolation': 'area'}, 2, 2),
|
||||
('down_sample_area_3_by_2', {'interpolation': 'area'}, 3, 2))
|
||||
@parameterized.named_parameters(('down_sample_bilinear_2_by_2', {
|
||||
'interpolation': 'bilinear'
|
||||
}, 2, 2), ('down_sample_bilinear_3_by_2', {
|
||||
'interpolation': 'bilinear'
|
||||
}, 3, 2), ('down_sample_nearest_2_by_2', {
|
||||
'interpolation': 'nearest'
|
||||
}, 2, 2), ('down_sample_nearest_3_by_2', {
|
||||
'interpolation': 'nearest'
|
||||
}, 3, 2), ('down_sample_area_2_by_2', {
|
||||
'interpolation': 'area'
|
||||
}, 2, 2), ('down_sample_area_3_by_2', {
|
||||
'interpolation': 'area'
|
||||
}, 3, 2))
|
||||
def test_down_sampling(self, kwargs, expected_height, expected_width):
|
||||
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
|
||||
self._run_test(kwargs, expected_height, expected_width)
|
||||
|
||||
@parameterized.named_parameters(
|
||||
('up_sample_bilinear_10_by_12', {'interpolation': 'bilinear'}, 10, 12),
|
||||
('up_sample_bilinear_12_by_12', {'interpolation': 'bilinear'}, 12, 12),
|
||||
('up_sample_nearest_10_by_12', {'interpolation': 'nearest'}, 10, 12),
|
||||
('up_sample_nearest_12_by_12', {'interpolation': 'nearest'}, 12, 12),
|
||||
('up_sample_area_10_by_12', {'interpolation': 'area'}, 10, 12),
|
||||
('up_sample_area_12_by_12', {'interpolation': 'area'}, 12, 12))
|
||||
@parameterized.named_parameters(('up_sample_bilinear_10_by_12', {
|
||||
'interpolation': 'bilinear'
|
||||
}, 10, 12), ('up_sample_bilinear_12_by_12', {
|
||||
'interpolation': 'bilinear'
|
||||
}, 12, 12), ('up_sample_nearest_10_by_12', {
|
||||
'interpolation': 'nearest'
|
||||
}, 10, 12), ('up_sample_nearest_12_by_12', {
|
||||
'interpolation': 'nearest'
|
||||
}, 12, 12), ('up_sample_area_10_by_12', {
|
||||
'interpolation': 'area'
|
||||
}, 10, 12), ('up_sample_area_12_by_12', {
|
||||
'interpolation': 'area'
|
||||
}, 12, 12))
|
||||
def test_up_sampling(self, kwargs, expected_height, expected_width):
|
||||
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
|
||||
self._run_test(kwargs, expected_height, expected_width)
|
||||
@ -112,8 +124,9 @@ class ResizingTest(keras_parameterized.TestCase):
|
||||
expected_output = np.reshape(expected_output, (1, 4, 4, 1))
|
||||
self.assertAllEqual(expected_output, output_image)
|
||||
|
||||
@parameterized.named_parameters(
|
||||
('reshape_bilinear_10_by_4', {'interpolation': 'bilinear'}, 10, 4))
|
||||
@parameterized.named_parameters(('reshape_bilinear_10_by_4', {
|
||||
'interpolation': 'bilinear'
|
||||
}, 10, 4))
|
||||
def test_reshaping(self, kwargs, expected_height, expected_width):
|
||||
with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
|
||||
self._run_test(kwargs, expected_height, expected_width)
|
||||
@ -151,8 +164,8 @@ class CenterCropTest(keras_parameterized.TestCase):
|
||||
kwargs = {'height': expected_height, 'width': expected_width}
|
||||
input_images = np.random.random(
|
||||
(num_samples, orig_height, orig_width, channels)).astype(np.float32)
|
||||
expected_output = get_numpy_center_crop(
|
||||
input_images, expected_height, expected_width)
|
||||
expected_output = get_numpy_center_crop(input_images, expected_height,
|
||||
expected_width)
|
||||
with testing_utils.use_gpu():
|
||||
testing_utils.layer_test(
|
||||
image_preprocessing.CenterCrop,
|
||||
@ -163,31 +176,27 @@ class CenterCropTest(keras_parameterized.TestCase):
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))

  @parameterized.named_parameters(
      ('center_crop_3_by_4', 3, 4),
      ('center_crop_3_by_2', 3, 2))
  @parameterized.named_parameters(('center_crop_3_by_4', 3, 4),
                                  ('center_crop_3_by_2', 3, 2))
  def test_center_crop_aligned(self, expected_height, expected_width):
    with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
      self._run_test(expected_height, expected_width)

  @parameterized.named_parameters(
      ('center_crop_4_by_5', 4, 5),
      ('center_crop_4_by_3', 4, 3))
  @parameterized.named_parameters(('center_crop_4_by_5', 4, 5),
                                  ('center_crop_4_by_3', 4, 3))
  def test_center_crop_mis_aligned(self, expected_height, expected_width):
    with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
      self._run_test(expected_height, expected_width)

  @parameterized.named_parameters(
      ('center_crop_4_by_6', 4, 6),
      ('center_crop_3_by_2', 3, 2))
  @parameterized.named_parameters(('center_crop_4_by_6', 4, 6),
                                  ('center_crop_3_by_2', 3, 2))
  def test_center_crop_half_mis_aligned(self, expected_height, expected_width):
    with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
      self._run_test(expected_height, expected_width)

  @parameterized.named_parameters(
      ('center_crop_5_by_12', 5, 12),
      ('center_crop_10_by_8', 10, 8),
      ('center_crop_10_by_12', 10, 12))
  @parameterized.named_parameters(('center_crop_5_by_12', 5, 12),
                                  ('center_crop_10_by_8', 10, 8),
                                  ('center_crop_10_by_12', 10, 12))
  def test_invalid_center_crop(self, expected_height, expected_width):
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                r'assertion failed'):
@ -218,28 +227,23 @@ class RandomCropTest(keras_parameterized.TestCase):
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))

  @parameterized.named_parameters(
      ('random_crop_5_by_12', 5, 12),
      ('random_crop_10_by_8', 10, 8),
      ('random_crop_10_by_12', 10, 12))
  @parameterized.named_parameters(('random_crop_5_by_12', 5, 12),
                                  ('random_crop_10_by_8', 10, 8),
                                  ('random_crop_10_by_12', 10, 12))
  def test_invalid_random_crop(self, expected_height, expected_width):
    with self.assertRaises(errors.InvalidArgumentError):
      with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
        self._run_test(expected_height, expected_width)

  def test_training_with_mock(self):
    if test.is_built_with_rocm():
      # TODO(rocm):
      # re-enable this test once ROCm adds support for
      # the StatefulUniformFullInt Op (on the GPU)
      self.skipTest('Feature not supported on ROCm')
    np.random.seed(1337)
    height, width = 3, 4
    height_offset = np.random.randint(low=0, high=3)
    width_offset = np.random.randint(low=0, high=5)
    mock_offset = [0, height_offset, width_offset, 0]
    with test.mock.patch.object(
        stateless_random_ops, 'stateless_random_uniform',
        stateless_random_ops,
        'stateless_random_uniform',
        return_value=mock_offset):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomCrop(height, width)
@ -249,15 +253,9 @@ class RandomCropTest(keras_parameterized.TestCase):
                            width_offset:(width_offset + width), :]
    self.assertAllClose(expected_output, actual_output)

  @parameterized.named_parameters(
      ('random_crop_4_by_6', 4, 6),
      ('random_crop_3_by_2', 3, 2))
  @parameterized.named_parameters(('random_crop_4_by_6', 4, 6),
                                  ('random_crop_3_by_2', 3, 2))
  def test_random_crop_output_shape(self, expected_height, expected_width):
    if test.is_built_with_rocm():
      # TODO(rocm):
      # re-enable this test once ROCm adds support for
      # the StatefulUniformFullInt Op (on the GPU)
      self.skipTest('Feature not supported on ROCm')
    with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
      self._run_test(expected_height, expected_width)

@ -283,8 +281,7 @@ class RandomCropTest(keras_parameterized.TestCase):
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomCrop(height, width)
      actual_output = layer(inp, training=0)
      resized_inp = image_ops.resize_images_v2(
          inp, size=[5, 3])
      resized_inp = image_ops.resize_images_v2(inp, size=[5, 3])
      expected_output = resized_inp[:, 1:4, :, :]
    self.assertAllClose(expected_output, actual_output)
|
||||
@ -310,7 +307,7 @@ class RescalingTest(keras_parameterized.TestCase):
|
||||
|
||||
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
|
||||
def test_rescaling_base(self):
|
||||
kwargs = {'scale': 1./127.5, 'offset': -1.}
|
||||
kwargs = {'scale': 1. / 127.5, 'offset': -1.}
|
||||
testing_utils.layer_test(
|
||||
image_preprocessing.Rescaling,
|
||||
kwargs=kwargs,
|
||||
@ -319,18 +316,18 @@ class RescalingTest(keras_parameterized.TestCase):
|
||||
|
||||
@testing_utils.run_v2_only
|
||||
def test_rescaling_correctness_float(self):
|
||||
layer = image_preprocessing.Rescaling(scale=1./127.5, offset=-1.)
|
||||
layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1.)
|
||||
inputs = random_ops.random_uniform((2, 4, 5, 3))
|
||||
outputs = layer(inputs)
|
||||
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1./127.5) - 1)
|
||||
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)
|
||||
|
||||
@testing_utils.run_v2_only
|
||||
def test_rescaling_correctness_int(self):
|
||||
layer = image_preprocessing.Rescaling(scale=1./127.5, offset=-1)
|
||||
layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1)
|
||||
inputs = random_ops.random_uniform((2, 4, 5, 3), 0, 100, dtype='int32')
|
||||
outputs = layer(inputs)
|
||||
self.assertEqual(outputs.dtype.name, 'float32')
|
||||
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1./127.5) - 1)
|
||||
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)
|
||||
|
||||
def test_config_with_custom_name(self):
|
||||
layer = image_preprocessing.Rescaling(0.5, name='rescaling')
|
||||
@ -426,11 +423,7 @@ class RandomFlipTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomContrastTest(keras_parameterized.TestCase):

  def _run_test(self,
                lower,
                upper,
                expected_output=None,
                mock_random=None):
  def _run_test(self, lower, upper, expected_output=None, mock_random=None):
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
@ -452,18 +445,16 @@ class RandomContrastTest(keras_parameterized.TestCase):
      actual_output = layer(inp, training=True)
      self.assertAllClose(expected_output, actual_output)

  @parameterized.named_parameters(
      ('random_contrast_2_by_5', 0.2, 0.5),
      ('random_contrast_2_by_13', 0.2, 1.3),
      ('random_contrast_5_by_2', 0.5, 0.2))
  @parameterized.named_parameters(('random_contrast_2_by_5', 0.2, 0.5),
                                  ('random_contrast_2_by_13', 0.2, 1.3),
                                  ('random_contrast_5_by_2', 0.5, 0.2))
  def test_random_contrast(self, lower, upper):
    with CustomObjectScope(
        {'RandomContrast': image_preprocessing.RandomContrast}):
      self._run_test(lower, upper)

  @parameterized.named_parameters(
      ('random_contrast_amplitude_2', 0.2),
      ('random_contrast_amplitude_5', 0.5))
  @parameterized.named_parameters(('random_contrast_amplitude_2', 0.2),
                                  ('random_contrast_amplitude_5', 0.5))
  def test_random_contrast_amplitude(self, amplitude):
    with CustomObjectScope(
        {'RandomContrast': image_preprocessing.RandomContrast}):
@ -1002,8 +993,10 @@ class RandomTransformTest(keras_parameterized.TestCase):
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
        transform_matrix,
        expected_output,
        mode='constant',
        interpolation='nearest')

    # Test up shift by 1.
    # pyformat: disable
@ -1016,8 +1009,10 @@ class RandomTransformTest(keras_parameterized.TestCase):
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
        transform_matrix,
        expected_output,
        mode='constant',
        interpolation='nearest')

    # Test left shift by 1.
    # pyformat: disable
@ -1030,8 +1025,10 @@ class RandomTransformTest(keras_parameterized.TestCase):
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
        transform_matrix,
        expected_output,
        mode='constant',
        interpolation='nearest')

    # Test right shift by 1.
    # pyformat: disable
@ -1044,8 +1041,10 @@ class RandomTransformTest(keras_parameterized.TestCase):
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
        transform_matrix,
        expected_output,
        mode='constant',
        interpolation='nearest')


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@ -1193,8 +1192,7 @@ class RandomZoomTest(keras_parameterized.TestCase):
      self.assertAllEqual(expected_output, output_image)

  def test_random_zoom_inference(self):
    with CustomObjectScope(
        {'RandomZoom': image_preprocessing.RandomZoom}):
    with CustomObjectScope({'RandomZoom': image_preprocessing.RandomZoom}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with testing_utils.use_gpu():
@ -1239,7 +1237,8 @@ class RandomHeightTest(keras_parameterized.TestCase):
    with test.mock.patch.object(
        gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
    with test.mock.patch.object(
        gen_stateless_random_ops_v2, 'stateless_random_uniform_v2',
        gen_stateless_random_ops_v2,
        'stateless_random_uniform_v2',
        return_value=mock_factor):
      with testing_utils.use_gpu():
        img = np.random.random((12, 5, 8, 3))
@ -1254,8 +1253,8 @@ class RandomHeightTest(keras_parameterized.TestCase):
      layer = image_preprocessing.RandomHeight(factor=(1., 1.))
      # Return type of RandomHeight() is float32 if `interpolation` is not
      # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired dtype.
      output_image = math_ops.cast(layer(np.expand_dims(input_image, axis=0)),
                                   dtype=dtype)
      output_image = math_ops.cast(
          layer(np.expand_dims(input_image, axis=0)), dtype=dtype)
      # pyformat: disable
      expected_output = np.asarray([
          [0, 1, 2],
@ -1333,7 +1332,8 @@ class RandomWidthTest(keras_parameterized.TestCase):
    with test.mock.patch.object(
        gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
    with test.mock.patch.object(
        gen_stateless_random_ops_v2, 'stateless_random_uniform_v2',
        gen_stateless_random_ops_v2,
        'stateless_random_uniform_v2',
        return_value=mock_factor):
      with testing_utils.use_gpu():
        img = np.random.random((12, 8, 5, 3))
@ -1348,8 +1348,8 @@ class RandomWidthTest(keras_parameterized.TestCase):
      layer = image_preprocessing.RandomWidth(factor=(1., 1.))
      # Return type of RandomWidth() is float32 if `interpolation` is not
      # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired dtype.
      output_image = math_ops.cast(layer(np.expand_dims(input_image, axis=0)),
                                   dtype=dtype)
      output_image = math_ops.cast(
          layer(np.expand_dims(input_image, axis=0)), dtype=dtype)
      # pyformat: disable
      expected_output = np.asarray([
          [0, 0.25, 0.75, 1],

@ -192,30 +192,25 @@ if __name__ == '__main__':
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        for adjoint in False, True:
          shape = extra + (size, size)
          name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                                   str(adjoint))
          _AddTest(MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
                   _GetMatrixBinaryFunctorGradientTest(
                       linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))
          name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(
              str, shape)), str(adjoint))
          _AddTest(
              MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
              _GetMatrixBinaryFunctorGradientTest(
                  linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))

          for lower in True, False:
            name = '%s_low_%s' % (name, lower)
            if (name == 'float32_10_10_adj_False_low_True') and \
               test_lib.is_built_with_rocm():
              # Skip this one particular subtest on the ROCm platform
              # It will fail because of 1 element in 10,000 mismatch,
              # and the mismatch is minor (tolerance is 0.20, mismatch is 0.22)
              # TODO(rocm) : investigate cause of mismatch and fix
              continue
            _AddTest(MatrixBinaryFunctorGradientTest,
                     'MatrixTriangularSolveGradient', name,
                     _GetMatrixBinaryFunctorGradientTest(
                         linalg_ops.matrix_triangular_solve,
                         dtype,
                         shape,
                         float32_tol_fudge=4.0,
                         adjoint=adjoint,
                         lower=lower))
            _AddTest(
                MatrixBinaryFunctorGradientTest,
                'MatrixTriangularSolveGradient', name,
                _GetMatrixBinaryFunctorGradientTest(
                    linalg_ops.matrix_triangular_solve,
                    dtype,
                    shape,
                    float32_tol_fudge=4.0,
                    adjoint=adjoint,
                    lower=lower))

      band_shape = extra + (size // 2 + 1, size)
      name = '%s_%s_adj_%s_low_%s' % (dtype.__name__, '_'.join(
@ -239,9 +234,10 @@ if __name__ == '__main__':
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        shape = extra + (size, size)
        name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
                 _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
                                                    dtype, shape))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
            _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse, dtype,
                                               shape))
        if not test_lib.is_built_with_rocm():
          # TODO(rocm) :
          # re-enable this test when upstream issues are resolved
@ -258,8 +254,8 @@ if __name__ == '__main__':
              MatrixUnaryFunctorGradientTest, 'LogMatrixDeterminantGradient',
              name,
              _GetMatrixUnaryFunctorGradientTest(
                  lambda x: linalg_ops.log_matrix_determinant(x)[1],
                  dtype, shape))
                  lambda x: linalg_ops.log_matrix_determinant(x)[1], dtype,
                  shape))

  # The numerical Jacobian is consistently invalid for these four shapes
  # because the matrix square root of the perturbed input doesn't exist
@ -278,8 +274,8 @@ if __name__ == '__main__':
      for cols in 2, 5, 10:
        for l2_regularization in 1e-6, 0.001, 1.0:
          shape = (rows, cols)
          name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                               l2_regularization)
          name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(
              str, shape)), l2_regularization)
          float32_tol_fudge = 5.1 if l2_regularization == 1e-6 else 4.0
          _AddTest(
              MatrixBinaryFunctorGradientTest,
@ -287,10 +283,7 @@ if __name__ == '__main__':
              name,
              # pylint: disable=long-lambda,g-long-lambda
              _GetMatrixBinaryFunctorGradientTest(
                  (lambda a, b, l=l2_regularization:
                   linalg_ops.matrix_solve_ls(a, b, l)),
                  dtype,
                  shape,
                  float32_tol_fudge))
                  (lambda a, b, l=l2_regularization: linalg_ops.matrix_solve_ls(
                      a, b, l)), dtype, shape, float32_tol_fudge))

  test_lib.main()

@ -274,9 +274,6 @@ class PoolingTest(test.TestCase):
          strides=[1, 2],
          dilation_rate=[1, 1],
          data_format="NCHW")
      if test.is_built_with_rocm():
        # Pooling with 3D tensors is not supported in ROCm
        continue
      self._test(
          input_shape=[2, 2, 7, 5, 3],
          window_shape=[2, 2, 2],

@ -57,11 +57,9 @@ from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad  # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.util import nest


TestTuple = collections.namedtuple("TestTuple", "a b")
SingletonTestTuple = collections.namedtuple("SingletonTestTuple", "a")

@ -85,7 +83,8 @@ class GroupTestCase(test_util.TensorFlowTestCase):
      c = constant_op.constant(0, name="c")
      control_flow_ops.group(a.op, b.op, c.op, name="root")
    gd = g.as_graph_def()
    self.assertProtoEquals("""
    self.assertProtoEquals(
        """
      node { name: "a" op: "Const"}
      node { name: "b" op: "Const"}
      node { name: "c" op: "Const"}
@ -99,7 +98,8 @@ class GroupTestCase(test_util.TensorFlowTestCase):
        b = constant_op.constant(0, name="b")
        control_flow_ops.group(a.op, b.op, name="root")
    gd = g.as_graph_def()
    self.assertProtoEquals("""
    self.assertProtoEquals(
        """
      node { name: "a" op: "Const" device: "/task:0" }
      node { name: "b" op: "Const" device: "/task:0" }
      node { name: "root" op: "NoOp" input: "^a" input: "^b" device: "/task:0" }
@ -116,7 +116,8 @@ class GroupTestCase(test_util.TensorFlowTestCase):
      with g.device("/task:2"):
        control_flow_ops.group(a.op, b.op, c.op, d.op, name="root")
    gd = g.as_graph_def()
    self.assertProtoEquals("""
    self.assertProtoEquals(
        """
      node { name: "a" op: "Const" device: "/task:0"}
      node { name: "b" op: "Const" device: "/task:0"}
      node { name: "c" op: "Const" device: "/task:1"}
@ -135,7 +136,8 @@ class GroupTestCase(test_util.TensorFlowTestCase):
      b = constant_op.constant(0, name="b")
      control_flow_ops.group([a.op, b.op], name="root")
    gd = g.as_graph_def()
    self.assertProtoEquals("""
    self.assertProtoEquals(
        """
      node { name: "a" op: "Const"}
      node { name: "b" op: "Const"}
      node { name: "root" op: "NoOp" input: "^a" input: "^b" }
@ -165,8 +167,7 @@ class WithDependenciesTestCase(test_util.TensorFlowTestCase):
          "my_counter", shape=[], initializer=init_ops.zeros_initializer())
      increment_counter = state_ops.assign_add(counter, 1)
      const_with_dep = control_flow_ops.with_dependencies(
          (increment_counter, constant_op.constant(42)),
          constant_op.constant(7))
          (increment_counter, constant_op.constant(42)), constant_op.constant(7))

      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(0, self.evaluate(counter))
@ -179,8 +180,7 @@ class WithDependenciesTestCase(test_util.TensorFlowTestCase):
          "my_counter", shape=[], initializer=init_ops.zeros_initializer())
      increment_counter = state_ops.assign_add(counter, 1)
      const_with_dep = control_flow_ops.with_dependencies(
          [increment_counter, constant_op.constant(42)],
          constant_op.constant(7))
          [increment_counter, constant_op.constant(42)], constant_op.constant(7))

      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(0, self.evaluate(counter))
@ -364,18 +364,16 @@ class CondTest(test_util.TensorFlowTestCase):
    x = constant_op.constant(2)
    y = constant_op.constant(5)
    z = control_flow_ops.cond(
        math_ops.less(
            x,
            y), lambda: math_ops.multiply(x, 17), lambda: math_ops.add(y, 23))
        math_ops.less(x, y), lambda: math_ops.multiply(x, 17),
        lambda: math_ops.add(y, 23))
    self.assertEqual(self.evaluate(z), 34)

  def testCondFalse(self):
    x = constant_op.constant(2)
    y = constant_op.constant(1)
    z = control_flow_ops.cond(
        math_ops.less(
            x,
            y), lambda: math_ops.multiply(x, 17), lambda: math_ops.add(y, 23))
        math_ops.less(x, y), lambda: math_ops.multiply(x, 17),
        lambda: math_ops.add(y, 23))
    self.assertEqual(self.evaluate(z), 24)

  def testCondTrueLegacy(self):
@ -508,16 +506,18 @@ class ContextTest(test_util.TensorFlowTestCase):

  @test_util.run_deprecated_v1
  def testControlContextImportScope(self):

    class NoABCControlFlowContext(control_flow_ops.ControlFlowContext):
      """A noop wrapper around `ControlFlowContext`.

      `ControlFlowContext` is an ABC and therefore cannot be instantiated.
      """

      # pylint: disable=useless-super-delegation

      def to_control_flow_context_def(self, context_def, export_scope=None):
        super(NoABCControlFlowContext, self).to_control_flow_context_def(
            context_def, export_scope)
        super(NoABCControlFlowContext,
              self).to_control_flow_context_def(context_def, export_scope)

    with self.cached_session():
      constant_op.constant(0, name="a")
@ -557,8 +557,8 @@ def _get_nested_shape(nested):


def _create_tensor_array(size, shape):
  ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=size,
                                    clear_after_read=False)
  ta = tensor_array_ops.TensorArray(
      dtype=dtypes.float32, size=size, clear_after_read=False)
  for i in range(size):
    ta = ta.write(i, array_ops.zeros(shape))
  return ta
@ -585,30 +585,37 @@ class DataTypesTest(test_util.TensorFlowTestCase):
    else:
      self.assertAllEqual(a, b)

  def _testShape(self, fn_true, fn_false, expected_shape,
                 strict=False):
  def _testShape(self, fn_true, fn_false, expected_shape, strict=False):
    condition = array_ops.placeholder(dtypes.bool)
    output_cond = control_flow_ops.cond(condition, fn_true, fn_false,
                                        strict=strict)
    output_cond = control_flow_ops.cond(
        condition, fn_true, fn_false, strict=strict)
    self.assertEqual(
        _raw_nested_shape(_get_nested_shape(output_cond)),
        _raw_nested_shape(expected_shape))

    output_case = control_flow_ops.case([(condition, fn_true)], fn_false,
    output_case = control_flow_ops.case([(condition, fn_true)],
                                        fn_false,
                                        strict=strict)
    self.assertEqual(
        _raw_nested_shape(_get_nested_shape(output_case)),
        _raw_nested_shape(expected_shape))

  def _testReturnValues(self, fn_true, fn_false, expected_value_true,
                        expected_value_false, strict=False,
                        check_cond=True, feed_dict=None):
    if feed_dict is None: feed_dict = {}
  def _testReturnValues(self,
                        fn_true,
                        fn_false,
                        expected_value_true,
                        expected_value_false,
                        strict=False,
                        check_cond=True,
                        feed_dict=None):
    if feed_dict is None:
      feed_dict = {}

    condition = array_ops.placeholder(dtypes.bool)
    output_cond = control_flow_ops.cond(condition, fn_true, fn_false,
                                        strict=strict)
    output_case = control_flow_ops.case([(condition, fn_true)], fn_false,
    output_cond = control_flow_ops.cond(
        condition, fn_true, fn_false, strict=strict)
    output_case = control_flow_ops.case([(condition, fn_true)],
                                        fn_false,
                                        strict=strict)

    with self.cached_session() as sess:
@ -650,8 +657,12 @@ class DataTypesTest(test_util.TensorFlowTestCase):
  def test_noop(self):
    shape = tensor_shape.TensorShape(None)
    self._testShape(control_flow_ops.no_op, control_flow_ops.no_op, shape)
    self._testReturnValues(control_flow_ops.no_op, control_flow_ops.no_op,
                           True, False, check_cond=False)
    self._testReturnValues(
        control_flow_ops.no_op,
        control_flow_ops.no_op,
        True,
        False,
        check_cond=False)

  @test_util.run_deprecated_v1
  def test_string(self):
@ -686,22 +697,24 @@ class DataTypesTest(test_util.TensorFlowTestCase):
    def _build_true_branch(dtype):

      def _build():
        return (array_ops.zeros([2, 2], dtype=dtype),
                array_ops.ones([3, 3], dtype=dtype))
        return (array_ops.zeros([2, 2],
                                dtype=dtype), array_ops.ones([3, 3],
                                                             dtype=dtype))

      return _build

    def _build_false_branch(dtype):

      def _build():
        return (array_ops.ones([2, 2], dtype=dtype),
                array_ops.zeros([3, 3], dtype=dtype))
        return (array_ops.ones([2, 2],
                               dtype=dtype), array_ops.zeros([3, 3],
                                                             dtype=dtype))

      return _build

    for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8):
      shape = (tensor_shape.TensorShape([2, 2]),
               tensor_shape.TensorShape([3, 3]))
      shape = (tensor_shape.TensorShape([2,
                                         2]), tensor_shape.TensorShape([3, 3]))
      fn_true = _build_true_branch(dtype)
      fn_false = _build_false_branch(dtype)
      self._testShape(fn_true, fn_false, shape)
@ -733,27 +746,36 @@ class DataTypesTest(test_util.TensorFlowTestCase):
      fn_true, true_tensor = _build_true_branch(dtype)
      fn_false, false_tensor = _build_false_branch(dtype)
      self._testShape(fn_true, fn_false, shape)
      self._testReturnValues(fn_true, fn_false,
                             np.zeros([2, 2]), np.ones([2, 2]),
                             feed_dict={true_tensor: np.zeros([2, 2]),
                                        false_tensor: np.ones([2, 2])})
      self._testReturnValues(
          fn_true,
          fn_false,
          np.zeros([2, 2]),
          np.ones([2, 2]),
          feed_dict={
              true_tensor: np.zeros([2, 2]),
              false_tensor: np.ones([2, 2])
          })

  @test_util.run_deprecated_v1
  def test_sparse_tensors(self):
    shape = tensor_shape.TensorShape([None, None])

    def true_fn():
      return [sparse_tensor.SparseTensor(indices=[[0, 0], [1, 2]],
                                         values=[1, 2], dense_shape=[3, 4])]
      return [
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
      ]

    def false_fn():
      return [sparse_tensor.SparseTensor(indices=[[0, 0], [2, 1]],
                                         values=[3, 4], dense_shape=[3, 4])]
      return [
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [2, 1]], values=[3, 4], dense_shape=[3, 4])
      ]

    value1 = sparse_tensor.SparseTensorValue(indices=[[0, 0], [1, 2]],
                                             values=[1, 2], dense_shape=[3, 4])
    value2 = sparse_tensor.SparseTensorValue(indices=[[0, 0], [2, 1]],
                                             values=[3, 4], dense_shape=[3, 4])
    value1 = sparse_tensor.SparseTensorValue(
        indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    value2 = sparse_tensor.SparseTensorValue(
        indices=[[0, 0], [2, 1]], values=[3, 4], dense_shape=[3, 4])
    # Non-strict cond is only available in v1
    if not tf2.enabled():
      self._testShape(true_fn, false_fn, shape)
@ -775,21 +797,24 @@ class DataTypesTest(test_util.TensorFlowTestCase):
      return _build, (a, b, c)

    for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8):
      shape = (tensor_shape.TensorShape([None, 2]),
               tensor_shape.TensorShape([None]),
      shape = (tensor_shape.TensorShape([None,
                                         2]), tensor_shape.TensorShape([None]),
               tensor_shape.TensorShape([3, None]))
      fn_true, true_tensors = _build_branch(dtype, shape)
      fn_false, false_tensors = _build_branch(dtype, shape)
      self._testShape(fn_true, fn_false, shape)
      self._testReturnValues(fn_true, fn_false,
                             (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])),
                             (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])),
                             feed_dict={true_tensors[0]: np.zeros([2, 2]),
                                        false_tensors[0]: np.zeros([2, 2]),
                                        true_tensors[1]: np.zeros([5]),
                                        false_tensors[1]: np.zeros([5]),
                                        true_tensors[2]: np.ones([3, 3]),
                                        false_tensors[2]: np.ones([3, 3])})
      self._testReturnValues(
          fn_true,
          fn_false, (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])),
          (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])),
          feed_dict={
              true_tensors[0]: np.zeros([2, 2]),
              false_tensors[0]: np.zeros([2, 2]),
              true_tensors[1]: np.zeros([5]),
              false_tensors[1]: np.zeros([5]),
              true_tensors[2]: np.ones([3, 3]),
              false_tensors[2]: np.ones([3, 3])
          })

  @test_util.run_deprecated_v1
  def test_tensor_arrays(self):
@ -811,8 +836,11 @@ class DataTypesTest(test_util.TensorFlowTestCase):

  @test_util.run_v1_only("b/138741991")
  def test_list(self):
    shape = [tensor_shape.TensorShape([]), tensor_shape.TensorShape([]),
             tensor_shape.TensorShape([])]
    shape = [
        tensor_shape.TensorShape([]),
        tensor_shape.TensorShape([]),
        tensor_shape.TensorShape([])
    ]
    fn_true = lambda: [constant_op.constant(1), 2, variables.Variable(3.0)]
    fn_false = lambda: [constant_op.constant(3), 4, variables.Variable(5.0)]
    self._testShape(fn_true, fn_false, shape)
@ -838,19 +866,21 @@ class DataTypesTest(test_util.TensorFlowTestCase):
    fn_tuple = lambda: (constant_op.constant(3),)

    with self.assertRaises(ValueError):
      control_flow_ops.cond(constant_op.constant(True), fn_tensor, fn_list,
                            strict=True)
      control_flow_ops.cond(
          constant_op.constant(True), fn_tensor, fn_list, strict=True)

    with self.assertRaises(TypeError):
      control_flow_ops.cond(constant_op.constant(True), fn_list, fn_tuple,
                            strict=True)
      control_flow_ops.cond(
          constant_op.constant(True), fn_list, fn_tuple, strict=True)

    with self.assertRaises(ValueError):
      control_flow_ops.case([(constant_op.constant(True), fn_tensor)], fn_list,
      control_flow_ops.case([(constant_op.constant(True), fn_tensor)],
                            fn_list,
                            strict=True)

    with self.assertRaises(TypeError):
      control_flow_ops.case([(constant_op.constant(True), fn_list)], fn_tuple,
      control_flow_ops.case([(constant_op.constant(True), fn_list)],
                            fn_tuple,
                            strict=True)

  @test_util.run_deprecated_v1
@ -875,8 +905,7 @@ class DataTypesTest(test_util.TensorFlowTestCase):
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 1, 3)
    self._testShape(fn_true, fn_false, (shape,), strict=True)
    self._testReturnValues(fn_true, fn_false, (1,), (3,),
                           strict=True)
    self._testReturnValues(fn_true, fn_false, (1,), (3,), strict=True)

  @test_util.run_deprecated_v1
  def test_singleton_namedtuple(self):
@ -887,10 +916,13 @@ class DataTypesTest(test_util.TensorFlowTestCase):
    if not tf2.enabled():
      self._testShape(fn_true, fn_false, shape)
      self._testReturnValues(fn_true, fn_false, 1, 3)
    self._testShape(fn_true, fn_false, SingletonTestTuple(shape),
                    strict=True)
    self._testReturnValues(fn_true, fn_false, SingletonTestTuple(1),
                           SingletonTestTuple(3), strict=True)
    self._testShape(fn_true, fn_false, SingletonTestTuple(shape), strict=True)
    self._testReturnValues(
        fn_true,
        fn_false,
        SingletonTestTuple(1),
        SingletonTestTuple(3),
        strict=True)

  @test_util.run_deprecated_v1
  def test_tuple(self):
@ -902,8 +934,8 @@ class DataTypesTest(test_util.TensorFlowTestCase):

  @test_util.run_deprecated_v1
  def test_namedtuple(self):
    shape = TestTuple(tensor_shape.TensorShape([]),
                      tensor_shape.TensorShape([]))
    shape = TestTuple(
        tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
    fn_true = lambda: TestTuple(constant_op.constant(1), 2)
    fn_false = lambda: TestTuple(constant_op.constant(3), 4)
    self._testShape(fn_true, fn_false, shape)
@ -911,22 +943,29 @@ class DataTypesTest(test_util.TensorFlowTestCase):

  @test_util.run_deprecated_v1
  def test_nested(self):
    shape = [tensor_shape.TensorShape([]),
             TestTuple(tensor_shape.TensorShape([]),
                       [tensor_shape.TensorShape([]),
                        tensor_shape.TensorShape([])]),
             tensor_shape.TensorShape([5, 5]),
             tensor_shape.TensorShape([])]
    shape = [
        tensor_shape.TensorShape([]),
        TestTuple(
            tensor_shape.TensorShape([]),
            [tensor_shape.TensorShape([]),
             tensor_shape.TensorShape([])]),
        tensor_shape.TensorShape([5, 5]),
        tensor_shape.TensorShape([])
    ]

    def true_fn():
      return [constant_op.constant(1),
              TestTuple(constant_op.constant(2), [3, 4]),
              array_ops.zeros([5, 5]), 6]
      return [
          constant_op.constant(1),
          TestTuple(constant_op.constant(2), [3, 4]),
          array_ops.zeros([5, 5]), 6
      ]

    def false_fn():
      return [constant_op.constant(11),
              TestTuple(constant_op.constant(12), [13, 14]),
              array_ops.ones([5, 5]), 16]
      return [
          constant_op.constant(11),
          TestTuple(constant_op.constant(12), [13, 14]),
          array_ops.ones([5, 5]), 16
      ]

    self._testShape(true_fn, false_fn, shape)
    self._testReturnValues(
@ -940,10 +979,10 @@ class DataTypesTest(test_util.TensorFlowTestCase):

    def body(i, matrix):
      result_tuple, unused_matrix = control_flow_ops.cond(
          constant_op.constant(True),
          lambda: (TestTuple(matrix * 2, matrix * 4), matrix),
          lambda: (TestTuple(matrix * 4, matrix * 2), matrix))
      return [i+1, result_tuple.a]
          constant_op.constant(True), lambda:
          (TestTuple(matrix * 2, matrix * 4), matrix), lambda:
          (TestTuple(matrix * 4, matrix * 2), matrix))
      return [i + 1, result_tuple.a]

    iteration, matrix = control_flow_ops.while_loop(
        lambda i, matrix: i < 10,
@ -1113,9 +1152,6 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
    """Verify disjoint branches across while iterations are run in parallel."""
    if control_flow_v2_toggles.control_flow_v2_enabled():
      self.skipTest("b/138870290")
    if test.is_built_with_rocm():
      self.skipTest(
          "Disable subtest on ROCm due to missing Cholesky op support")

    with ops.Graph().as_default() as g:
      nbranches = 7
@ -1124,16 +1160,20 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
              random_ops.random_uniform([nbranches, 8, 512]) + 1e-3))

      def make_branch(i, mat, name):

        def branch_fn():
          next_i = i + 1
          with ops.device("gpu:0"):
            return next_i, math_ops.reduce_sum(
                linalg_ops.cholesky(mat, name=name + "_Cholesky"))

        return branch_fn

      def make_branches(i):
        return [make_branch(i, matrices[bi], "br{}".format(bi))
                for bi in range(nbranches)]
        return [
            make_branch(i, matrices[bi], "br{}".format(bi))
            for bi in range(nbranches)
        ]

      def cond(i, _):
        return i < nbranches
@ -1163,9 +1203,7 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
    self.assertLen(chol_node_stats, nbranches)

    chol_node_stats = sorted(chol_node_stats, key=lambda stats: stats.node_name)
    op_start_nanos = [
        stats.all_start_nanos for stats in chol_node_stats
    ]
    op_start_nanos = [stats.all_start_nanos for stats in chol_node_stats]
    op_end_nanos = [
        stats.all_start_nanos + stats.op_end_rel_nanos
        for stats in chol_node_stats
@ -1494,20 +1532,26 @@ class WhileLoopTestCase(test_util.TensorFlowTestCase):
  @test_util.enable_control_flow_v2
  @test_util.run_in_graph_and_eager_modes
  def testSkipsUnnecessaryCaptureGradients(self):

    @custom_gradient.custom_gradient
    def gradient_trap(t):

      def grad(w):
        # Computing this gradient should fail the test
        check_ops.assert_equal(0, 1)
        return w

      return t, grad

    x = array_ops.constant(0.0, name="x")
    y = array_ops.constant(1.0, name="y")

    def cond(s):
      return s < 10.0

    def body(s):
      return s + 2*x + gradient_trap(y)
      return s + 2 * x + gradient_trap(y)

    with backprop.GradientTape() as tape:
      tape.watch(x)
      out = control_flow_ops.while_loop(cond, body, (array_ops.constant(0.0),))
@ -1548,5 +1592,6 @@ class AssertTest(test_util.TensorFlowTestCase):

    self.assertAllEqual(whiny(True), 5)


if __name__ == "__main__":
  googletest.main()

@ -47,10 +47,7 @@ class InitializersTest(test.TestCase):
    self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
    self.assertEqual(assertion, np.allclose(t1, t2, rtol=1e-15, atol=1e-15))

  def _duplicated_test(self,
                       init,
                       shape=None,
                       dtype=dtypes.float32):
  def _duplicated_test(self, init, shape=None, dtype=dtypes.float32):
    if shape is None:
      shape = [100]
    t1 = self.evaluate(init(shape, dtype))
@ -98,8 +95,8 @@ class ConstantInitializersTest(InitializersTest):

  @test_util.run_in_graph_and_eager_modes
  def testZeros(self):
    self._range_test(init_ops_v2.Zeros(), shape=(4, 5),
                     target_mean=0., target_max=0.)
    self._range_test(
        init_ops_v2.Zeros(), shape=(4, 5), target_mean=0., target_max=0.)

  @test_util.run_in_graph_and_eager_modes
  def testZerosPartition(self):
@ -115,8 +112,8 @@ class ConstantInitializersTest(InitializersTest):

  @test_util.run_in_graph_and_eager_modes
  def testOnes(self):
    self._range_test(init_ops_v2.Ones(), shape=(4, 5),
                     target_mean=1., target_max=1.)
    self._range_test(
        init_ops_v2.Ones(), shape=(4, 5), target_mean=1., target_max=1.)

  @test_util.run_in_graph_and_eager_modes
  def testOnesPartition(self):
@ -176,15 +173,13 @@ class ConstantInitializersTest(InitializersTest):

    self._testNDimConstantInitializer(value, shape, expected)
    self._testNDimConstantInitializer(np.asarray(value), shape, expected)
    self._testNDimConstantInitializer(np.asarray(value).reshape(tuple(shape)),
                                      shape, expected)
    self._testNDimConstantInitializer(
        np.asarray(value).reshape(tuple(shape)), shape, expected)

  def _testNDimConstantInitializerIncorrectNumberValues(self, value, shape):
    with test_util.use_gpu():
      init = init_ops_v2.constant_initializer(value)
      self.assertRaises(TypeError,
                        init,
                        shape=shape)
      self.assertRaises(TypeError, init, shape=shape)

  @test_util.run_in_graph_and_eager_modes
  def testNDimConstantInitializerIncorrectNumberValues(self):
@ -192,8 +187,8 @@ class ConstantInitializersTest(InitializersTest):

    for shape in [[2, 4], [2, 2]]:
      self._testNDimConstantInitializerIncorrectNumberValues(value, shape)
      self._testNDimConstantInitializerIncorrectNumberValues(np.asarray(value),
                                                             shape)
      self._testNDimConstantInitializerIncorrectNumberValues(
          np.asarray(value), shape)
      self._testNDimConstantInitializerIncorrectNumberValues(
          np.asarray(value).reshape(tuple([2, 3])), shape)

@ -351,8 +346,7 @@ class VarianceScalingInitializerTest(InitializersTest):
    shape = [100, 100]
    expect_mean = 0.
    expect_var = 1. / shape[0]
    init = init_ops_v2.VarianceScaling(
        distribution="untruncated_normal")
    init = init_ops_v2.VarianceScaling(distribution="untruncated_normal")

    with test_util.use_gpu(), test.mock.patch.object(
        random_ops, "random_normal",
@ -399,8 +393,8 @@ class OrthogonalInitializerTest(InitializersTest):

  @test_util.run_in_graph_and_eager_modes
  def testRangeInitializer(self):
    self._range_test(init_ops_v2.Orthogonal(seed=123), shape=(20, 20),
                     target_mean=0.)
    self._range_test(
        init_ops_v2.Orthogonal(seed=123), shape=(20, 20), target_mean=0.)

  @test_util.run_in_graph_and_eager_modes
  def testInitializerIdentical(self):
@ -443,10 +437,6 @@ class OrthogonalInitializerTest(InitializersTest):

  @test_util.run_in_graph_and_eager_modes
  def testShapesValues(self):

    if test.is_built_with_rocm():
      self.skipTest("Disable subtest on ROCm due to missing QR op support")

    for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
      init = init_ops_v2.Orthogonal()
      tol = 1e-5
@ -518,11 +508,12 @@ class IdentityInitializerTest(InitializersTest):
    init_default = init_ops_v2.Identity()
    init_custom = init_ops_v2.Identity(gain=0.9)
    with test_util.use_gpu():
      self.assertAllClose(self.evaluate(init_default(shape, dtype=dtype)),
                          np.eye(*shape))
      self.assertAllClose(
          self.evaluate(init_default(shape, dtype=dtype)), np.eye(*shape))
    with test_util.use_gpu():
      self.assertAllClose(self.evaluate(init_custom(shape, dtype=dtype)),
                          np.eye(*shape) * 0.9)
      self.assertAllClose(
          self.evaluate(init_custom(shape, dtype=dtype)),
          np.eye(*shape) * 0.9)

  @test_util.run_in_graph_and_eager_modes
  def testPartition(self):
@ -577,10 +568,7 @@ class MethodInitializers(InitializersTest):
    fan_in, _ = init_ops_v2._compute_fans(shape)
    std = np.sqrt(2. / fan_in)
    self._range_test(
        init_ops_v2.he_uniform(seed=123),
        shape,
        target_mean=0.,
        target_std=std)
        init_ops_v2.he_uniform(seed=123), shape, target_mean=0., target_std=std)

  @test_util.run_in_graph_and_eager_modes
  def testLecunNormal(self):
@ -599,10 +587,7 @@ class MethodInitializers(InitializersTest):
    fan_in, _ = init_ops_v2._compute_fans(shape)
    std = np.sqrt(2. / fan_in)
    self._range_test(
        init_ops_v2.he_normal(seed=123),
        shape,
        target_mean=0.,
        target_std=std)
        init_ops_v2.he_normal(seed=123), shape, target_mean=0., target_std=std)


if __name__ == "__main__":