Fix Conv3DBackpropFilterOp with int64 input_sizes on GPU.
Re-enable the test that was failing before.

PiperOrigin-RevId: 236230029
parent e91d746e0b
commit f86747fe82
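The one-line replacement in both GPU hunks below swaps an unconditional int32 read, TensorShapeUtils::MakeShape(input_sizes.vec<int32>(), ...), for a call that receives the size tensor itself, MakeShape(input_sizes, &input_shape), so the kernel no longer assumes the sizes tensor is int32. The helper's body is not part of this diff; what follows is only a minimal sketch of the dtype dispatch it is assumed to perform, using the int32 and int64 overloads of TensorShapeUtils::MakeShape (the IsVector check and the error messages are illustrative, not taken from the TensorFlow source).

// Sketch only: the real MakeShape helper is not shown in this diff, and its
// exact implementation in TensorFlow may differ. This illustrates the dtype
// dispatch it is assumed to perform.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {
namespace {

// Builds a TensorShape from a 1-D size tensor, accepting either int32 or
// int64 elements instead of unconditionally reading int32 values.
Status MakeShape(const Tensor& shape_tensor, TensorShape* out) {
  if (!TensorShapeUtils::IsVector(shape_tensor.shape())) {
    return errors::InvalidArgument("shape tensor must be a vector, got shape ",
                                   shape_tensor.shape().DebugString());
  }
  switch (shape_tensor.dtype()) {
    case DT_INT32:
      return TensorShapeUtils::MakeShape(shape_tensor.vec<int32>(), out);
    case DT_INT64:
      return TensorShapeUtils::MakeShape(shape_tensor.vec<int64>(), out);
    default:
      return errors::InvalidArgument(
          "shape tensor must have dtype int32 or int64, got ",
          DataTypeString(shape_tensor.dtype()));
  }
}

}  // namespace
}  // namespace tensorflow

Because both the input-shape path (Conv3DBackpropInputOp) and the filter-shape path (Conv3DBackpropFilterOp) now route their size tensor through the same helper, the int64 case of the conv3d_transpose output-shape-type test can be turned back on below.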
@@ -1145,8 +1145,7 @@ class Conv3DBackpropInputOp<GPUDevice, T> : public OpKernel {
     TensorShape input_shape;
     if (takes_shape_) {
       const Tensor& input_sizes = context->input(0);
-      OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
-                                  input_sizes.vec<int32>(), &input_shape));
+      OP_REQUIRES_OK(context, MakeShape(input_sizes, &input_shape));
     } else {
       input_shape = context->input(0).shape();
     }
@@ -1530,8 +1529,7 @@ class Conv3DBackpropFilterOp<GPUDevice, T> : public OpKernel {
     TensorShape filter_shape;
     if (takes_shape_) {
       const Tensor& filter_sizes = context->input(1);
-      OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
-                                  filter_sizes.vec<int32>(), &filter_shape));
+      OP_REQUIRES_OK(context, MakeShape(filter_sizes, &filter_shape));
     } else {
       filter_shape = context->input(1).shape();
     }
@@ -135,19 +135,20 @@ class Conv3DTransposeTest(test.TestCase):

   def testConv3DTransposeOutputShapeType(self):
     # Test case for GitHub issue 18887
-    for dtype in [dtypes.int32]:  # b/126733996 fails with dtypes.int64 in tf2
-      x_shape = [2, 5, 6, 4, 3]
-      y_shape = [2, 5, 6, 4, 2]
-      f_shape = [3, 3, 3, 2, 3]
-      strides = [1, 1, 1, 1, 1]
-      x_value = constant_op.constant(
-          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
-      f_value = constant_op.constant(
-          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
-      output = nn_ops.conv3d_transpose(
-          x_value, f_value, constant_op.constant(y_shape, dtype=dtype),
-          strides=strides, padding="SAME")
-      self.evaluate(output)
+    for dtype in [dtypes.int32, dtypes.int64]:
+      with self.cached_session():
+        x_shape = [2, 5, 6, 4, 3]
+        y_shape = [2, 5, 6, 4, 2]
+        f_shape = [3, 3, 3, 2, 3]
+        strides = [1, 1, 1, 1, 1]
+        x_value = constant_op.constant(
+            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
+        f_value = constant_op.constant(
+            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
+        output = nn_ops.conv3d_transpose(
+            x_value, f_value, constant_op.constant(y_shape, dtype=dtype),
+            strides=strides, padding="SAME")
+        self.evaluate(output)

   def testConv3DTransposeValid(self):
     with self.cached_session():
@@ -475,7 +475,6 @@ class FunctionalOpsTest(test.TestCase):
       mul = self.evaluate(remote_op)
       self.assertEqual(mul, [6])

-  @test_util.run_deprecated_v1
   def testRemoteFunctionCPUGPU(self):
     if not test_util.is_gpu_available():
       self.skipTest("No GPU available")
@@ -500,7 +499,6 @@ class FunctionalOpsTest(test.TestCase):
       mul = self.evaluate(remote_op)
       self.assertEqual(mul, 9.0)

-  @test_util.run_deprecated_v1
   def testRemoteFunctionGPUCPU(self):
     if not test_util.is_gpu_available():
       self.skipTest("No GPU available")
@@ -525,7 +523,6 @@ class FunctionalOpsTest(test.TestCase):
       mul = self.evaluate(remote_op)
       self.assertEqual(mul, 9.0)

-  @test_util.run_deprecated_v1
   def testRemoteFunctionGPUCPUStrings(self):
     if not test_util.is_gpu_available():
       self.skipTest("No GPU available")
@@ -984,7 +981,6 @@ class PartitionedCallTest(test.TestCase):
                 constant_op.constant(2.)], f=Body)
       self.assertEqual(output.eval(), 12.)

-  @test_util.run_deprecated_v1
   def testBasicMultiDeviceGPU(self):
     if not test_util.is_gpu_available():
       return
@@ -1065,7 +1061,6 @@ class PartitionedCallTest(test.TestCase):
       value = self.evaluate(v.read_value())
       self.assertEqual(value, 2.0)

-  @test_util.run_deprecated_v1
   def testFunctionWithResourcesOnDifferentDevices(self):
     if not test_util.is_gpu_available():
       self.skipTest("No GPUs available.")