Reuse ComputePaddingHeightWidth, also support dilation != 1 case.

PiperOrigin-RevId: 244797614
Renjie Liu 2019-04-22 22:43:43 -07:00 committed by TensorFlower Gardener
parent a311216a9f
commit 354b95f958
6 changed files with 42 additions and 84 deletions
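In short, the per-kernel output-size lambdas are replaced by the shared ComputePaddingHeightWidth helper, which now takes separate height/width dilation factors and reports the computed output size through out-parameters. A minimal sketch of the new call shape (the include path and variable names are placeholders for illustration, not from any one kernel below):

  #include "tensorflow/lite/kernels/padding.h"

  int out_height, out_width;
  TfLitePaddingValues padding_values = ComputePaddingHeightWidth(
      /*stride_height=*/stride_h, /*stride_width=*/stride_w,
      /*dilation_rate_height=*/dilation_h, /*dilation_rate_width=*/dilation_w,
      in_height, in_width, filter_height, filter_width, kTfLitePaddingSame,
      &out_height, &out_width);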


@@ -50,12 +50,12 @@ struct OpData {
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
                              TfLiteDepthwiseConvParams* params, int width,
                              int height, int filter_width, int filter_height,
-                             int out_width, int out_height,
                              const TfLiteType data_type, OpData* data) {
-  data->padding.height = ComputePadding(params->stride_height, 1, height,
-                                        filter_height, out_height);
-  data->padding.width =
-      ComputePadding(params->stride_width, 1, width, filter_width, out_width);
+  int unused_output_height, unused_output_width;
+  data->padding = ComputePaddingHeightWidth(
+      params->stride_height, params->stride_width, 1, 1, height, width,
+      filter_height, filter_width, params->padding, &unused_output_height,
+      &unused_output_width);
 
   // Note that quantized inference requires that all tensors have their
   // parameters set. This is usually done during quantized training.
@@ -168,15 +168,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   int height = SizeOfDimension(input, 1);
   int filter_width = SizeOfDimension(filter, 2);
   int filter_height = SizeOfDimension(filter, 1);
-  int out_width = ComputeOutSize(params->padding, width, filter_width,
-                                 params->stride_width);
-  int out_height = ComputeOutSize(params->padding, height, filter_height,
-                                  params->stride_height);
 
   OpData local_data_object;
   OpData* data = &local_data_object;
   TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height,
-                                        filter_width, filter_height, out_width,
-                                        out_height, data_type, data));
+                                        filter_width, filter_height, data_type,
+                                        data));
 
   // TODO(aselle): Consider whether float conv and quantized conv should be
   // separate ops to avoid dispatch overhead here.


@@ -284,28 +284,11 @@ TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
   // Matching GetWindowedOutputSize in TensorFlow.
   auto padding = params->padding;
-  auto compute_out_size = [padding](int image_size, int filter_size, int stride,
-                                    int dilation_rate) -> int {
-    int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
-    return padding == kTfLitePaddingSame
-               ? (image_size + stride - 1) / stride
-               : padding == kTfLitePaddingValid
-                     ? (image_size - effective_filter_size + stride) / stride
-                     : 0;
-  };
-  int out_width = compute_out_size(width, filter_width, params->stride_width,
-                                   params->dilation_width_factor);
-  int out_height =
-      compute_out_size(height, filter_height, params->stride_height,
-                       params->dilation_height_factor);
-  data->padding.height =
-      ComputePadding(params->stride_height, params->dilation_height_factor,
-                     height, filter_height, out_height);
-  data->padding.width =
-      ComputePadding(params->stride_width, params->dilation_width_factor, width,
-                     filter_width, out_width);
+  int out_width, out_height;
+  data->padding = ComputePaddingHeightWidth(
+      params->stride_height, params->stride_width,
+      params->dilation_height_factor, params->dilation_width_factor, height,
+      width, filter_height, filter_width, padding, &out_height, &out_width);
 
   TF_LITE_ENSURE(context, has_bias);


@@ -138,28 +138,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   // Matching GetWindowedOutputSize in TensorFlow.
   auto padding = params->padding;
-  auto compute_out_size = [padding](int image_size, int filter_size, int stride,
-                                    int dilation_rate) -> int {
-    int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
-    return padding == kTfLitePaddingSame
-               ? (image_size + stride - 1) / stride
-               : padding == kTfLitePaddingValid
-                     ? (image_size - effective_filter_size + stride) / stride
-                     : 0;
-  };
-  int out_width = compute_out_size(width, filter_width, params->stride_width,
-                                   params->dilation_width_factor);
-  int out_height =
-      compute_out_size(height, filter_height, params->stride_height,
-                       params->dilation_height_factor);
-  data->padding.height =
-      ComputePadding(params->stride_height, params->dilation_height_factor,
-                     height, filter_height, out_height);
-  data->padding.width =
-      ComputePadding(params->stride_width, params->dilation_width_factor, width,
-                     filter_width, out_width);
+  int out_width, out_height;
+  data->padding = ComputePaddingHeightWidth(
+      params->stride_height, params->stride_width,
+      params->dilation_height_factor, params->dilation_width_factor, height,
+      width, filter_height, filter_width, padding, &out_height, &out_width);
 
   // Note that quantized inference requires that all tensors have their
   // parameters set. This is usually done during quantized training or


@@ -42,31 +42,36 @@ inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size,
 // Matching GetWindowedOutputSize in TensorFlow.
 inline int ComputeOutSize(TfLitePadding padding, int image_size,
-                          int filter_size, int stride) {
+                          int filter_size, int stride, int dilation_rate = 1) {
+  int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
   switch (padding) {
     case kTfLitePaddingSame:
       return (image_size + stride - 1) / stride;
     case kTfLitePaddingValid:
-      return (image_size + stride - filter_size) / stride;
+      return (image_size + stride - effective_filter_size) / stride;
     default:
       return 0;
   }
 }
 
 inline TfLitePaddingValues ComputePaddingHeightWidth(
-    int stride_height, int stride_width, int dilation_rate, int in_height,
-    int in_width, int filter_height, int filter_width, TfLitePadding padding) {
-  int out_width = ComputeOutSize(padding, in_width, filter_width, stride_width);
-  int out_height =
-      ComputeOutSize(padding, in_height, filter_height, stride_height);
+    int stride_height, int stride_width, int dilation_rate_height,
+    int dilation_rate_width, int in_height, int in_width, int filter_height,
+    int filter_width, TfLitePadding padding, int* out_height, int* out_width) {
+  *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width,
+                              dilation_rate_width);
+  *out_height = ComputeOutSize(padding, in_height, filter_height,
+                               stride_height, dilation_rate_height);
 
   TfLitePaddingValues padding_values;
   int offset = 0;
-  padding_values.height = ComputePaddingWithOffset(
-      stride_height, 1, in_height, filter_height, out_height, &offset);
+  padding_values.height =
+      ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height,
+                               filter_height, *out_height, &offset);
   padding_values.height_offset = offset;
-  padding_values.width = ComputePaddingWithOffset(
-      stride_width, 1, in_width, filter_width, out_width, &offset);
+  padding_values.width =
+      ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width,
+                               filter_width, *out_width, &offset);
   padding_values.width_offset = offset;
   return padding_values;
 }
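As a quick check of the dilation handling above (numbers are illustrative only, not taken from the change): a 3x3 filter with dilation rate 2 has an effective size of (3 - 1) * 2 + 1 = 5, so a 10-wide input with stride 1 yields (10 + 1 - 5) / 1 = 6 output columns under VALID padding and (10 + 1 - 1) / 1 = 10 under SAME. A standalone sketch of the same formula (padding reduced to a bool for brevity; this is not the TFLite header itself):

  #include <cstdio>

  // Mirrors the dilation-aware ComputeOutSize formula, for illustration only.
  int ComputeOutSizeSketch(bool same_padding, int image_size, int filter_size,
                           int stride, int dilation_rate = 1) {
    int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
    return same_padding
               ? (image_size + stride - 1) / stride
               : (image_size + stride - effective_filter_size) / stride;
  }

  int main() {
    // 3x3 filter, dilation 2 -> effective size 5; 10-wide input, stride 1.
    std::printf("VALID: %d\n", ComputeOutSizeSketch(false, 10, 3, 1, 2));  // 6
    std::printf("SAME:  %d\n", ComputeOutSizeSketch(true, 10, 3, 1, 2));   // 10
  }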


@@ -81,24 +81,12 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
   // Matching GetWindowedOutputSize in TensorFlow.
   auto padding = params->padding;
-  auto compute_out_size = [padding](int image_size, int filter_size,
-                                    int stride) -> int {
-    return padding == kTfLitePaddingSame
-               ? (image_size + stride - 1) / stride
-               : padding == kTfLitePaddingValid
-                     ? (image_size - filter_size + stride) / stride
-                     : 0;
-  };
-  int out_width =
-      compute_out_size(width, params->filter_width, params->stride_width);
-  int out_height =
-      compute_out_size(height, params->filter_height, params->stride_height);
-  data->padding.height = ComputePadding(params->stride_height, 1, height,
-                                        params->filter_height, out_height);
-  data->padding.width = ComputePadding(params->stride_width, 1, width,
-                                       params->filter_width, out_width);
+  int out_width, out_height;
+  data->padding = ComputePaddingHeightWidth(
+      params->stride_height, params->stride_width, 1, 1, height, width,
+      params->filter_height, params->filter_width, padding, &out_height,
+      &out_width);
 
   if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
     if (pool_type == kAverage || pool_type == kMax) {


@@ -404,9 +404,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   const int filter_width = SizeOfDimension(weights, 2);
   const int filter_height = SizeOfDimension(weights, 1);
 
+  int unused_output_height, unused_output_width;
   data->padding = ComputePaddingHeightWidth(
-      params->stride_height, params->stride_width, 1, height, width,
-      filter_height, filter_width, params->padding);
+      params->stride_height, params->stride_width, 1, 1, height, width,
+      filter_height, filter_width, params->padding, &unused_output_height,
+      &unused_output_width);
 
   // Currently support float32 and uint8.
   switch (input->type) {