Clean up error message differences between XLA and TF for convolutions

1. The Conv2DTest.testOpEdgeCases test was enabled for XLA, and the two
   places where the XLA and TF error messages differ now check for
   backend-specific error messages.

2. Change TF to return an Unimplemented error instead of an InvalidArgument
   error for unsupported convolution configurations.  This is what XLA does
   and seems more logical (see the first sketch below).

3. Change the tests to feed their placeholders correctly, since otherwise
   the XLA version throws an exception because of the missing feeds (see
   the second sketch below).
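
A minimal sketch of the new behavior from Python (not part of this change;
assumes TF1-style graph execution via tf.compat.v1): a conv2d with a stride
in the batch dimension now surfaces as tf.errors.UnimplementedError at run
time instead of InvalidArgumentError.

    import numpy as np
    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    with tf.Session() as sess:
      x = tf.placeholder(tf.float32, shape=[1, 4, 4, 1])
      f = tf.placeholder(tf.float32, shape=[2, 2, 1, 1])
      # Stride of 2 in the batch dimension is unsupported by the TF kernels.
      conv = tf.nn.conv2d(x, f, strides=[2, 1, 1, 1], padding="SAME")
      try:
        sess.run(conv, feed_dict={x: np.ones([1, 4, 4, 1]),
                                  f: np.ones([2, 2, 1, 1])})
      except tf.errors.UnimplementedError as e:
        # After this change: "Current implementation does not yet support
        # strides in the batch and depth dimensions."
        print(e.message)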
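And a sketch of the placeholder-plus-feed pattern the tests switch to (same
tf.compat.v1 assumption): every placeholder the fetched op depends on must be
given a concrete value through feed_dict, because the XLA path compiles the
whole computation and fails on a missing feed before the error under test is
ever reached.

    import numpy as np
    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    with tf.Session() as sess:
      inp = tf.placeholder(tf.float32, shape=[1, 4, 4, 1])
      filt = tf.placeholder(tf.float32, shape=[2, 2, 1, 1])
      out = tf.nn.conv2d(inp, filt, strides=[1, 1, 1, 1], padding="SAME")
      # sess.run(out) alone would fail with a missing-feed error;
      # feed every placeholder the fetch depends on.
      result = sess.run(out, feed_dict={
          inp: np.ones([1, 4, 4, 1], dtype=np.float32),
          filt: np.ones([2, 2, 1, 1], dtype=np.float32),
      })
      print(result.shape)  # (1, 4, 4, 1)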

PiperOrigin-RevId: 324338945
Change-Id: Ib6999c31918987cc1db9c0ba220e229097157747
Author: Sanjoy Das, 2020-07-31 19:12:08 -07:00; committed by TensorFlower Gardener
Parent: d033ed5ee8
Commit: 71bb46a15f
4 changed files with 116 additions and 53 deletions


@@ -203,6 +203,10 @@ xla::StatusOr<ConvOpAttrs> ConvOpAttrs::Create(int num_spatial_dims,
     return errors::InvalidArgument("Invalid data format: ", data_format);
   }
+  TF_RETURN_IF_ERROR(CheckValidPadding(attrs.padding, attrs.explicit_paddings,
+                                       /*num_dims=*/num_spatial_dims + 2,
+                                       attrs.data_format));
   return attrs;
 }


@@ -397,8 +397,8 @@ class Conv2DBackpropInputOp : public OpKernel {
     int stride_w = GetTensorDim(strides_, data_format_, 'W');
     OP_REQUIRES(
         context, (stride_n == 1 && stride_c == 1),
-        errors::InvalidArgument("Current implementation does not yet support "
-                                "strides in the batch and depth dimensions."));
+        errors::Unimplemented("Current implementation does not yet support "
+                              "strides in the batch and depth dimensions."));
     OP_REQUIRES(context, stride_h > 0 && stride_w > 0,
                 errors::InvalidArgument(
                     "Row and column strides should be larger than 0."));
@@ -411,10 +411,10 @@ class Conv2DBackpropInputOp : public OpKernel {
     int dilation_c = GetTensorDim(dilations_, data_format_, 'C');
     int dilation_h = GetTensorDim(dilations_, data_format_, 'H');
     int dilation_w = GetTensorDim(dilations_, data_format_, 'W');
-    OP_REQUIRES(context, (dilation_n == 1 && dilation_c == 1),
-                errors::InvalidArgument(
-                    "Current implementation does not yet support "
-                    "dilations in the batch and depth dimensions."));
+    OP_REQUIRES(
+        context, (dilation_n == 1 && dilation_c == 1),
+        errors::Unimplemented("Current implementation does not yet support "
+                              "dilations in the batch and depth dimensions."));
     OP_REQUIRES(
         context, dilation_h > 0 && dilation_w > 0,
         errors::InvalidArgument("Dilated rates should be larger than 0."));
@@ -517,8 +517,8 @@ class Conv2DCustomBackpropInputOp : public OpKernel {
                                         "specify 4 dimensions"));
     OP_REQUIRES(
         context, (strides_[0] == 1 && strides_[3] == 1),
-        errors::InvalidArgument("Current implementation does not yet support "
-                                "strides in the batch and depth dimensions."));
+        errors::Unimplemented("Current implementation does not yet support "
+                              "strides in the batch and depth dimensions."));
     OP_REQUIRES(context, strides_[1] > 0 && strides_[2] > 0,
                 errors::InvalidArgument(
                     "Row and column strides should be larger than 0."));
@@ -527,10 +527,10 @@ class Conv2DCustomBackpropInputOp : public OpKernel {
     OP_REQUIRES(context, dilations_.size() == 4,
                 errors::InvalidArgument("Sliding window dilations field must "
                                         "specify 4 dimensions"));
-    OP_REQUIRES(context, (dilations_[0] == 1 && dilations_[3] == 1),
-                errors::InvalidArgument(
-                    "Current implementation does not yet support "
-                    "dilations in the batch and depth dimensions."));
+    OP_REQUIRES(
+        context, (dilations_[0] == 1 && dilations_[3] == 1),
+        errors::Unimplemented("Current implementation does not yet support "
+                              "dilations in the batch and depth dimensions."));
     // TODO(yangzihao): Add a CPU implementation for dilated convolution.
     OP_REQUIRES(context, (dilations_[1] == 1 && dilations_[2] == 1),
                 errors::InvalidArgument(


@@ -374,8 +374,8 @@ Status InitConv2DParameters(const OpKernelConstruction* context,
   const int64 stride_w = GetTensorDim(strides, data_format, 'W');
   TF_REQUIRES(
       stride_n == 1 && stride_c == 1,
-      errors::InvalidArgument("Current implementation does not yet support "
-                              "strides in the batch and depth dimensions."));
+      errors::Unimplemented("Current implementation does not yet support "
+                            "strides in the batch and depth dimensions."));
   TF_REQUIRES(stride_h > 0 && stride_w > 0,
               errors::InvalidArgument(
                   "Row and column strides should be larger than 0."));
@@ -386,8 +386,8 @@ Status InitConv2DParameters(const OpKernelConstruction* context,
   const int64 dilation_w = GetTensorDim(dilations, data_format, 'W');
   TF_REQUIRES(
       dilation_n == 1 && dilation_c == 1,
-      errors::InvalidArgument("Current implementation does not yet support "
-                              "dilations in the batch and depth dimensions."));
+      errors::Unimplemented("Current implementation does not yet support "
+                            "dilations in the batch and depth dimensions."));
   TF_REQUIRES(
       dilation_h > 0 && dilation_w > 0,
       errors::InvalidArgument("Dilated rates should be larger than 0."));


@@ -2522,79 +2522,138 @@ class Conv2DTest(test.TestCase):
           padding=[0, 0, 0, 0])
 
   @test_util.deprecated_graph_mode_only
-  @test_util.disable_xla("b/123337890")  # Error messages differ
   def testOpEdgeCases(self):
     with self.cached_session() as sess:
       # Illegal strides.
-      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                   "strides in the batch and depth"):
+        input_placeholder = array_ops.placeholder(dtypes.float32)
+        input_val = np.ones([10, 10])
+        filter_placeholder = array_ops.placeholder(dtypes.float32)
+        filter_val = np.ones([10, 10])
         sess.run(
             nn_ops.conv2d(
-                array_ops.placeholder(dtypes.float32),
-                array_ops.placeholder(dtypes.float32),
+                input_placeholder,
+                filter_placeholder,
                 strides=[2, 1, 1, 1],
-                padding="SAME"))
-      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                padding="SAME"),
+            feed_dict={
+                input_placeholder: input_val,
+                filter_placeholder: filter_val
+            })
+      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                   "strides in the batch and depth"):
+        input_placeholder = array_ops.placeholder(dtypes.float32)
+        filter_placeholder = array_ops.placeholder(dtypes.float32)
+        input_val = np.ones([10, 10])
+        filter_val = np.ones([10, 10])
         sess.run(
             nn_ops.conv2d(
-                array_ops.placeholder(dtypes.float32),
-                array_ops.placeholder(dtypes.float32),
+                input_placeholder,
+                filter_placeholder,
                 strides=[1, 1, 1, 2],
-                padding="SAME"))
+                padding="SAME"),
+            feed_dict={
+                input_placeholder: input_val,
+                filter_placeholder: filter_val
+            })
 
       # Filter larger than input.
       with self.assertRaisesRegex(ValueError, "Negative dimension size"):
+        input_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[32, 20, 20, 3])
+        input_val = np.ones([32, 20, 20, 3])
+        filter_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[20, 21, 3, 2])
+        filter_val = np.ones([20, 21, 3, 2])
         sess.run(
             nn_ops.conv2d(
-                array_ops.placeholder(
-                    dtypes.float32, shape=[32, 20, 20, 3]),
-                array_ops.placeholder(
-                    dtypes.float32, shape=[20, 21, 3, 2]),
+                input_placeholder,
+                filter_placeholder,
                 strides=[1, 1, 1, 1],
-                padding="VALID"))
+                padding="VALID"),
+            feed_dict={
+                input_placeholder: input_val,
+                filter_placeholder: filter_val
+            })
       with self.assertRaisesRegex(ValueError, "Negative dimension size"):
+        input_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[32, 20, 20, 3])
+        input_val = np.ones([32, 20, 20, 3])
+        filter_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[21, 20, 3, 2])
+        filter_val = np.ones([21, 20, 3, 2])
         sess.run(
             nn_ops.conv2d(
-                array_ops.placeholder(
-                    dtypes.float32, shape=[32, 20, 20, 3]),
-                array_ops.placeholder(
-                    dtypes.float32, shape=[21, 20, 3, 2]),
+                input_placeholder,
+                filter_placeholder,
                 strides=[1, 1, 1, 1],
-                padding="VALID"))
+                padding="VALID"),
+            feed_dict={
+                input_placeholder: input_val,
+                filter_placeholder: filter_val
+            })
 
       # Filter larger than input + padding.
       with self.assertRaisesRegex(ValueError, "Negative dimension size"):
+        input_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[32, 20, 20, 3])
+        input_val = np.ones([32, 20, 20, 3])
+        filter_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[24, 25, 3, 2])
+        filter_val = np.ones([24, 25, 3, 2])
         sess.run(
             nn_ops.conv2d(
-                array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
-                array_ops.placeholder(dtypes.float32, shape=[24, 25, 3, 2]),
+                input_placeholder,
+                filter_placeholder,
                 strides=[1, 1, 1, 1],
-                padding=[[0, 0], [2, 2], [2, 2], [0, 0]]))
+                padding=[[0, 0], [2, 2], [2, 2], [0, 0]]),
+            feed_dict={
+                input_placeholder: input_val,
+                filter_placeholder: filter_val
+            })
 
       # Negative padding during backprop.
-      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
-                                  "nonnegative"):
+      with self.assertRaisesRegex(
+          errors_impl.InvalidArgumentError,
+          "All elements of explicit_paddings must be nonnegative"):
+        filter_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[18, 18, 3, 2])
+        filter_val = np.ones([18, 18, 3, 2])
+        out_backprop = array_ops.placeholder(
+            dtypes.float32, shape=[32, 3, 2, 2])
+        out_backprop_val = np.ones([32, 3, 2, 2])
         sess.run(
             nn_ops.conv2d_backprop_input([32, 20, 20, 3],
-                                         array_ops.placeholder(
-                                             dtypes.float32,
-                                             shape=[18, 18, 3, 2]),
-                                         array_ops.placeholder(
-                                             dtypes.float32,
-                                             shape=[32, 3, 2, 2]),
+                                         filter_placeholder,
+                                         out_backprop,
                                          strides=[1, 1, 1, 1],
                                          padding=[[0, 0], [-1, 0], [0, 0],
-                                                  [0, 0]]))
-      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
-                                  "nonnegative"):
+                                                  [0, 0]]),
+            feed_dict={
+                filter_placeholder: filter_val,
+                out_backprop: out_backprop_val
+            })
+      with self.assertRaisesRegex(
+          errors_impl.InvalidArgumentError,
+          "All elements of explicit_paddings must be nonnegative"):
+        input_placeholder = array_ops.placeholder(
+            dtypes.float32, shape=[32, 20, 20, 3])
+        input_val = np.ones([32, 20, 20, 3])
+        out_backprop = array_ops.placeholder(
+            dtypes.float32, shape=[32, 3, 2, 2])
+        out_backprop_val = np.ones([32, 3, 2, 2])
         sess.run(
             nn_ops.conv2d_backprop_filter(
-                array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
-                [18, 18, 3, 2],
-                array_ops.placeholder(dtypes.float32, shape=[32, 3, 2, 2]),
+                input_placeholder, [18, 18, 3, 2],
+                out_backprop,
                 strides=[1, 1, 1, 1],
-                padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]))
+                padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]),
+            feed_dict={
+                input_placeholder: input_val,
+                out_backprop: out_backprop_val
+            })
 
 
 class DepthwiseConv2DTest(test.TestCase):