add support for PadV2
PiperOrigin-RevId: 195503894
parent dd5ef1b9fc
commit 5fb53fe69a
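
PadV2 extends the existing PAD builtin with an optional third input, constant_values: a scalar tensor holding the fill value. Plain PAD keeps filling with zero (or, for quantized tensors, the output zero point). As an illustration of the semantics this change implements — not TFLite API, just a minimal self-contained sketch — padding a 2x2 float matrix by one row and column on every side with the value 5 reproduces the SimpleConstFloat32ValuedTest expectation found later in this diff:

#include <cstdio>
#include <vector>

// Illustrative only: constant-value padding of a rows x cols matrix,
// mirroring what PadV2 does independently per dimension.
std::vector<float> PadWithValue(const std::vector<float>& in, int rows, int cols,
                                int top, int bottom, int left, int right,
                                float pad_value) {
  const int out_rows = rows + top + bottom;
  const int out_cols = cols + left + right;
  std::vector<float> out(out_rows * out_cols, pad_value);  // fill, then copy
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c)
      out[(r + top) * out_cols + (c + left)] = in[r * cols + c];
  return out;
}

int main() {
  // {{1, 2}, {3, 4}} padded with 5 on every side:
  // 5 5 5 5 / 5 1 2 5 / 5 3 4 5 / 5 5 5 5
  const std::vector<float> padded =
      PadWithValue({1, 2, 3, 4}, 2, 2, 1, 1, 1, 1, 5.f);
  for (int r = 0; r < 4; ++r) {
    for (int c = 0; c < 4; ++c) printf("%g ", padded[r * 4 + c]);
    printf("\n");
  }
  return 0;
}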
@@ -161,6 +161,9 @@ typedef struct {
typedef struct {
} TfLitePadParams;

typedef struct {
} TfLitePadV2Params;

typedef struct {
  // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
  // For now we will fix the maximum possible number of dimensions.

@@ -85,6 +85,7 @@ typedef enum {
  kTfLiteBuiltinMinimum = 57,
  kTfLiteBuiltinLess = 58,
  kTfLiteBuiltinNeg = 59,
  kTfLiteBuiltinPadv2 = 60,
} TfLiteBuiltinOperator;

#ifdef __cplusplus

@@ -5851,10 +5851,26 @@ inline void BatchToSpaceND(const T* input_data, const Dims<4>& input_dims,
}

template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims, const int32_t pad_value) {
void TypedMemset(void* ptr, T value, size_t num) {
  // Optimization for common cases where memset() will suffice.
  if (value == 0 || std::is_same<T, uint8_t>::value) {
    memset(ptr, value, num * sizeof(T));
  } else {
    // Default implementation for cases where memset() will not preserve the
    // bytes, e.g., typically when sizeof(T) > sizeof(uint8_t).
    char* pos = static_cast<char*>(ptr);
    for (size_t i = 0; i < num; ++i) {
      memcpy(pos, &value, sizeof(T));
      pos = pos + sizeof(T);
    }
  }
}
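
TypedMemset exists because memset writes a single byte pattern, which is only correct when the pad value is zero or the element type is one byte wide. A standalone illustration (not part of the change) of why a raw memset cannot be kept for non-zero float pad values:

#include <cstdio>
#include <cstring>

int main() {
  float a[2], b[2];
  memset(a, 5, sizeof(a));      // writes byte 0x05 everywhere, not the float 5.0f
  for (float& v : b) v = 5.0f;  // element-wise fill, which is what TypedMemset does
  printf("memset:       %g %g\n", a[0], a[1]);  // prints tiny garbage values
  printf("element-wise: %g %g\n", b[0], b[1]);  // prints 5 5
  return 0;
}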

template <typename T>
inline void PadV2(const T* input_data, const Dims<4>& input_dims,
                  const std::vector<int>& left_paddings,
                  const std::vector<int>& right_paddings, T* output_data,
                  const Dims<4>& output_dims, const T pad_value) {
  gemmlowp::ScopedProfilingLabel label("Pad");
  TFLITE_DCHECK_EQ(left_paddings.size(), 4);
  TFLITE_DCHECK_EQ(right_paddings.size(), 4);

@@ -5877,27 +5893,28 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
  const int input_depth = ArraySize(input_dims, 0);

  if (left_b_padding != 0) {
    memset(output_data, pad_value,
           left_b_padding * output_height * output_width * output_depth *
               sizeof(T));
    TypedMemset<T>(
        output_data, pad_value,
        left_b_padding * output_height * output_width * output_depth);
  }
  for (int out_b = left_b_padding; out_b < output_batch - right_b_padding;
       ++out_b) {
    if (left_h_padding != 0) {
      memset(output_data + Offset(output_dims, 0, 0, 0, out_b), pad_value,
             left_h_padding * output_width * output_depth * sizeof(T));
      TypedMemset<T>(output_data + Offset(output_dims, 0, 0, 0, out_b),
                     pad_value, left_h_padding * output_width * output_depth);
    }
    for (int out_h = left_h_padding; out_h < output_height - right_h_padding;
         ++out_h) {
      if (left_w_padding != 0) {
        memset(output_data + Offset(output_dims, 0, 0, out_h, out_b), pad_value,
               left_w_padding * output_depth * sizeof(T));
        TypedMemset<T>(output_data + Offset(output_dims, 0, 0, out_h, out_b),
                       pad_value, left_w_padding * output_depth);
      }
      for (int out_w = left_w_padding; out_w < output_width - right_w_padding;
           ++out_w) {
        if (left_d_padding != 0) {
          memset(output_data + Offset(output_dims, 0, out_w, out_h, out_b),
                 pad_value, left_d_padding * sizeof(T));
          TypedMemset<T>(
              output_data + Offset(output_dims, 0, out_w, out_h, out_b),
              pad_value, left_d_padding);
        }

        T* out = output_data +

@@ -5908,35 +5925,46 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
        memcpy(out, in, input_depth * sizeof(T));

        if (right_d_padding != 0) {
          memset(
          TypedMemset<T>(
              output_data + Offset(output_dims, output_depth - right_d_padding,
                                   out_w, out_h, out_b),
              pad_value, right_d_padding * sizeof(T));
              pad_value, right_d_padding);
        }
      }
      if (right_w_padding != 0) {
        memset(
        TypedMemset<T>(
            output_data + Offset(output_dims, 0, output_width - right_w_padding,
                                 out_h, out_b),
            pad_value, right_w_padding * output_depth * sizeof(T));
            pad_value, right_w_padding * output_depth);
      }
    }
    if (right_h_padding != 0) {
      memset(output_data + Offset(output_dims, 0, 0,
                                  output_height - right_h_padding, out_b),
             pad_value,
             right_h_padding * output_width * output_depth * sizeof(T));
      TypedMemset<T>(
          output_data +
              Offset(output_dims, 0, 0, output_height - right_h_padding, out_b),
          pad_value, right_h_padding * output_width * output_depth);
    }
  }
  if (right_b_padding != 0) {
    memset(output_data +
               Offset(output_dims, 0, 0, 0, output_batch - right_b_padding),
           0,
           right_b_padding * output_height * output_width * output_depth *
               sizeof(T));
    TypedMemset<T>(
        output_data +
            Offset(output_dims, 0, 0, 0, output_batch - right_b_padding),
        pad_value,
        right_b_padding * output_height * output_width * output_depth);
  }
}

// Legacy Pad() method that casts an int32_t to T before padding.
template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims, const int32_t pad_value) {
  const T converted_pad_value = static_cast<T>(pad_value);
  PadV2<T>(input_data, input_dims, left_paddings, right_paddings, output_data,
           output_dims, converted_pad_value);
}

template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,

@@ -3158,10 +3158,10 @@ inline void BatchToSpaceND(const T* input_data, const Dims<4>& input_dims,
}

template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims, const int32_t pad_value) {
inline void PadV2(const T* input_data, const Dims<4>& input_dims,
                  const std::vector<int>& left_paddings,
                  const std::vector<int>& right_paddings, T* output_data,
                  const Dims<4>& output_dims, const T pad_value) {
  TFLITE_DCHECK_EQ(left_paddings.size(), 4);
  TFLITE_DCHECK_EQ(right_paddings.size(), 4);

@@ -3194,7 +3194,7 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
              out_w >= output_width - right_w_padding ||
              out_d < left_d_padding ||
              out_d >= output_depth - right_d_padding) {
            *out_ptr++ = static_cast<T>(pad_value);
            *out_ptr++ = pad_value;
          } else {
            *out_ptr++ = *in_ptr++;
          }

@@ -3204,6 +3204,17 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
  }
}

// Legacy Pad() method that casts an int32_t to T before padding.
template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims, const int32_t pad_value) {
  const T converted_pad_value = static_cast<T>(pad_value);
  PadV2<T>(input_data, input_dims, left_paddings, right_paddings, output_data,
           output_dims, converted_pad_value);
}

template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,

@@ -37,9 +37,15 @@ struct PadContext {
  PadContext(TfLiteContext* context, TfLiteNode* node) {
    input = GetInput(context, node, 0);
    paddings = GetInput(context, node, 1);
    if (NumInputs(node) == 3) {
      constant_values = GetOptionalInputTensor(context, node, 2);
    } else {
      constant_values = nullptr;
    }
    output = GetOutput(context, node, 0);
    dims = NumDimensions(input);
  }
  TfLiteTensor* constant_values;
  TfLiteTensor* input;
  TfLiteTensor* paddings;
  TfLiteTensor* output;

@@ -76,11 +82,15 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  PadContext op_context(context, node);
  TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
  if (op_context.constant_values != nullptr) {
    TF_LITE_ENSURE_EQ(context, op_context.input->type,
                      op_context.constant_values->type);
  }

  // TODO(nupurgarg): Our current implementations rely on the inputs being 4D.
  TF_LITE_ENSURE_EQ(context, op_context.dims, 4);

@@ -98,6 +108,11 @@ template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  PadContext op_context(context, node);

  if (op_context.constant_values != nullptr) {
    // Ensure that constant_values is a scalar.
    TF_LITE_ENSURE_EQ(context, NumElements(op_context.constant_values), 1);
  }

  // Resize the output tensor if the output tensor is dynamic.
  if (IsDynamicTensor(op_context.output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));

@@ -119,48 +134,70 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
    after_padding.push_back(paddings_data[idx * 2 + 1]);
  }

#define TF_LITE_PAD(type, scalar, pad_value) \
  type::Pad(GetTensorData<scalar>(op_context.input), \
            GetTensorDims(op_context.input), before_padding, after_padding, \
            GetTensorData<scalar>(op_context.output), \
            GetTensorDims(op_context.output), pad_value)
#define TF_LITE_PAD(type, scalar, pad_value) \
  type::PadV2(GetTensorData<scalar>(op_context.input), \
              GetTensorDims(op_context.input), before_padding, after_padding, \
              GetTensorData<scalar>(op_context.output), \
              GetTensorDims(op_context.output), pad_value)

  switch (op_context.input->type) {
    case kTfLiteFloat32:
    case kTfLiteFloat32: {
      float pad_value = op_context.constant_values == nullptr
                            ? 0.f
                            : *GetTensorData<float>(op_context.constant_values);
      if (kernel_type == kReference) {
        TF_LITE_PAD(reference_ops, float, 0);
        TF_LITE_PAD(reference_ops, float, pad_value);
      } else if (kernel_type == kGenericOptimized) {
        TF_LITE_PAD(optimized_ops, float, 0);
        TF_LITE_PAD(optimized_ops, float, pad_value);
      }
    } break;
    case kTfLiteUInt8: {
      uint8_t pad_value;
      if (op_context.constant_values == nullptr) {
        // Quantized Pad requires that 0 is represented in the quantized
        // range.
        TF_LITE_ENSURE(context, op_context.output->params.zero_point >=
                                    std::numeric_limits<uint8_t>::min());
        TF_LITE_ENSURE(context, op_context.output->params.zero_point <=
                                    std::numeric_limits<uint8_t>::max());
        pad_value = static_cast<uint8_t>(op_context.output->params.zero_point);
      } else {
        // Quantized Pad requires that 'constant_values' is represented in the
        // same quantized range as the input and output tensors.
        TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point,
                          op_context.constant_values->params.zero_point);
        TF_LITE_ENSURE_EQ(context, op_context.output->params.scale,
                          op_context.constant_values->params.scale);
        pad_value = *GetTensorData<uint8_t>(op_context.constant_values);
      }
      break;
    case kTfLiteUInt8:
      // Quantized Pad requires that 0 is represented in the quantized range.
      TF_LITE_ENSURE(context, op_context.output->params.zero_point >=
                                  std::numeric_limits<uint8_t>::min());
      TF_LITE_ENSURE(context, op_context.output->params.zero_point <=
                                  std::numeric_limits<uint8_t>::max());
      if (kernel_type == kReference) {
        TF_LITE_PAD(reference_ops, uint8_t,
                    op_context.output->params.zero_point);
        TF_LITE_PAD(reference_ops, uint8_t, pad_value);
      } else if (kernel_type == kGenericOptimized) {
        TF_LITE_PAD(optimized_ops, uint8_t,
                    op_context.output->params.zero_point);
        TF_LITE_PAD(optimized_ops, uint8_t, pad_value);
      }
      break;
    case kTfLiteInt32:
    } break;
    case kTfLiteInt32: {
      int32_t pad_value =
          op_context.constant_values == nullptr
              ? 0
              : *GetTensorData<int32_t>(op_context.constant_values);
      if (kernel_type == kReference) {
        TF_LITE_PAD(reference_ops, int32_t, 0);
        TF_LITE_PAD(reference_ops, int32_t, pad_value);
      } else if (kernel_type == kGenericOptimized) {
        TF_LITE_PAD(optimized_ops, int32_t, 0);
        TF_LITE_PAD(optimized_ops, int32_t, pad_value);
      }
      break;
    case kTfLiteInt64:
    } break;
    case kTfLiteInt64: {
      int64_t pad_value =
          op_context.constant_values == nullptr
              ? 0L
              : *GetTensorData<int64_t>(op_context.constant_values);
      if (kernel_type == kReference) {
        TF_LITE_PAD(reference_ops, int64_t, 0);
        TF_LITE_PAD(reference_ops, int64_t, pad_value);
      } else if (kernel_type == kGenericOptimized) {
        TF_LITE_PAD(optimized_ops, int64_t, 0);
        TF_LITE_PAD(optimized_ops, int64_t, pad_value);
      }
      break;
    } break;
    default:
      context->ReportError(context, "Type is currently not supported by Pad.");
      return kTfLiteError;

@@ -185,6 +222,21 @@ TfLiteRegistration* Register_PAD_GENERIC_OPT() {

TfLiteRegistration* Register_PAD() { return Register_PAD_GENERIC_OPT(); }

// Also register Pad as PadV2.
TfLiteRegistration* Register_PADV2_REF() {
  static TfLiteRegistration r = {nullptr, nullptr, pad::Prepare,
                                 pad::Eval<pad::kReference>};
  return &r;
}

TfLiteRegistration* Register_PADV2_GENERIC_OPT() {
  static TfLiteRegistration r = {nullptr, nullptr, pad::Prepare,
                                 pad::Eval<pad::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_PADV2() { return Register_PADV2_GENERIC_OPT(); }

} // namespace builtin
} // namespace ops
} // namespace tflite
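
With pad::Prepare and pad::Eval shared by both registrations, PADV2 is now resolvable through the standard builtin resolver. A hedged sketch of driving a model that contains the new op through the interpreter (the model file name and tensor layout here are hypothetical, not from this commit):

#include <memory>

#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"

void RunPadV2Model() {
  // Hypothetical model containing a single PADV2 op with a 1x2x2x1 float input.
  auto model = tflite::FlatBufferModel::BuildFromFile("padv2_model.tflite");
  tflite::ops::builtin::BuiltinOpResolver resolver;  // now registers PADV2 too
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::InterpreterBuilder(*model, resolver)(&interpreter);
  interpreter->AllocateTensors();
  float* input = interpreter->typed_input_tensor<float>(0);
  input[0] = 1.f; input[1] = 2.f; input[2] = 3.f; input[3] = 4.f;
  interpreter->Invoke();
  float* output = interpreter->typed_output_tensor<float>(0);  // padded result
  (void)output;
}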

@@ -24,21 +24,26 @@ namespace {
using ::testing::ElementsAreArray;
using ::testing::Matcher;

template <typename T>
class PadOpModel : public SingleOpModel {
 public:
  void SetInput(std::initializer_list<float> data) {
    PopulateTensor<float>(input_, data);
  void SetInput(std::initializer_list<T> data) {
    PopulateTensor<T>(input_, data);
  }

  void SetQuantizedInput(std::initializer_list<float> data) {
    QuantizeAndPopulate<uint8_t>(input_, data);
  }

  void SetQuantizedPadValue(float data) {
    QuantizeAndPopulate<uint8_t>(constant_values_, {data});
  }

  void SetPaddings(std::initializer_list<int> paddings) {
    PopulateTensor<int>(paddings_, paddings);
  }

  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
  std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

  std::vector<float> GetDequantizedOutput() {

@@ -50,6 +55,59 @@ class PadOpModel : public SingleOpModel {
  int input_;
  int output_;
  int paddings_;
  int constant_values_;
};

namespace {

// Returns the corresponding TensorType given the type T.
template <typename T>
TensorType GetTensorType() {
  if (std::is_same<T, float>::value) return TensorType_FLOAT32;
  if (std::is_same<T, int32_t>::value) return TensorType_INT32;
  if (std::is_same<T, uint8_t>::value) return TensorType_UINT8;
  return TensorType_MIN;  // default value
}

} // namespace

// Tests case where paddings is a const tensor. Type T is the dtype.
template <typename T>
class PadV2OpConstModel : public PadOpModel<T> {
 public:
  PadV2OpConstModel(const TensorData& input,
                    std::initializer_list<int> paddings_shape,
                    std::initializer_list<int> paddings, T constant_values,
                    const TensorData& output) {
    this->input_ = this->AddInput(input);
    this->paddings_ =
        this->AddConstInput(TensorType_INT32, paddings, paddings_shape);
    this->constant_values_ =
        this->AddConstInput(GetTensorType<T>(), {constant_values}, {1});

    this->output_ = this->AddOutput(output);

    this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
                       CreatePadV2Options(this->builder_).Union());
    this->BuildInterpreter({input.shape});
  }

  PadV2OpConstModel(const TensorData& input,
                    std::initializer_list<int> paddings_shape,
                    std::initializer_list<int> paddings,
                    const TensorData& constant_values,
                    const TensorData& output) {
    this->input_ = this->AddInput(input);
    this->paddings_ =
        this->AddConstInput(TensorType_INT32, paddings, paddings_shape);
    this->constant_values_ = this->AddInput(constant_values);

    this->output_ = this->AddOutput(output);

    this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
                       CreatePadV2Options(this->builder_).Union());
    this->BuildInterpreter({input.shape});
  }
};

// Tests case where paddings is a const tensor.

@@ -58,7 +116,7 @@ class PadOpModel : public SingleOpModel {
// PadOpDynamicModel m(input_shape, paddings_shape, paddings_data);
// m.SetInput(input_data);
// m.Invoke();
class PadOpConstModel : public PadOpModel {
class PadOpConstModel : public PadOpModel<float> {
 public:
  PadOpConstModel(const TensorData& input,
                  std::initializer_list<int> paddings_shape,

@@ -66,6 +124,7 @@ class PadOpConstModel : public PadOpModel {
                  const TensorData& output) {
    input_ = AddInput(input);
    paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_shape);
    constant_values_ = AddNullInput();
    output_ = AddOutput(output);

    SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,

@@ -74,6 +133,38 @@ class PadOpConstModel : public PadOpModel {
  }
};

// Test case where paddings is a non-const tensor.
template <typename T>
class PadV2OpDynamicModel : public PadOpModel<T> {
 public:
  PadV2OpDynamicModel(const TensorData& input,
                      std::initializer_list<int> paddings_shape,
                      T constant_values, const TensorData& output) {
    this->input_ = this->AddInput(input);
    this->paddings_ = this->AddInput(TensorType_INT32);
    this->constant_values_ =
        this->AddConstInput(GetTensorType<T>(), {constant_values}, {1});
    this->output_ = this->AddOutput(output);

    this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
                       CreatePadV2Options(this->builder_).Union());
    this->BuildInterpreter({input.shape, paddings_shape});
  }
  PadV2OpDynamicModel(const TensorData& input,
                      std::initializer_list<int> paddings_shape,
                      const TensorData& constant_values,
                      const TensorData& output) {
    this->input_ = this->AddInput(input);
    this->paddings_ = this->AddInput(TensorType_INT32);
    this->constant_values_ = this->AddInput(constant_values);
    this->output_ = this->AddOutput(output);

    this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
                       CreatePadV2Options(this->builder_).Union());
    this->BuildInterpreter({input.shape, paddings_shape});
  }
};

// Test case where paddings is a non-const tensor.
//
// Example usage is as follows:

@@ -81,13 +172,14 @@ class PadOpConstModel : public PadOpModel {
// m.SetInput(input_data);
// m.SetPaddings(paddings_data);
// m.Invoke();
class PadOpDynamicModel : public PadOpModel {
class PadOpDynamicModel : public PadOpModel<float> {
 public:
  PadOpDynamicModel(const TensorData& input,
                    std::initializer_list<int> paddings_shape,
                    const TensorData& output) {
    input_ = AddInput(input);
    paddings_ = AddInput(TensorType_INT32);
    constant_values_ = AddNullInput();
    output_ = AddOutput(output);

    SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,

@@ -237,6 +329,272 @@ TEST_F(QuantizedPadOpTest, AdvancedDynamicTest) {
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}

TEST(PadV2OpTest, TooManyDimensions) {
  EXPECT_DEATH(PadV2OpConstModel<float>(
                   {TensorType_FLOAT32, {1, 2, 3, 4, 5, 6, 7, 8, 9}}, {9, 2},
                   {1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9}, 0.0,
                   {TensorType_FLOAT32}),
               "dims != 4");
}

TEST(PadV2OpTest, UnequalDimensions) {
  EXPECT_DEATH(
      PadV2OpConstModel<float>({TensorType_FLOAT32, {1, 1, 2, 1}}, {3, 2},
                               {1, 1, 2, 2, 3, 3}, 0.0, {TensorType_FLOAT32}),
      "3 != 4");
}

TEST(PadV2OpTest, InvalidPadValue) {
  EXPECT_DEATH(PadV2OpConstModel<float>({TensorType_FLOAT32, {1, 1, 2, 1}},
                                        {4, 2}, {0, 0, 1, -1, 2, -1, 0, 0}, 0.0,
                                        {TensorType_FLOAT32}),
               "Pad value has to be greater than equal to 0.");
}

TEST(PadV2OpTest, SimpleConstTest) {
  // Padding is represented as four 2-D lists representing above padding and
  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
  PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
                             {0, 0, 1, 1, 1, 1, 0, 0}, 0.0,
                             {TensorType_FLOAT32});
  m.SetInput({1, 2, 3, 4});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
                                               0, 0, 0, 0, 0}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST(PadV2OpTest, SimpleConstFloat32ValuedTest) {
  // Padding is represented as four 2-D lists representing above padding and
  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
  PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
                             {0, 0, 1, 1, 1, 1, 0, 0}, 5, {TensorType_FLOAT32});
  m.SetInput({1, 2, 3, 4});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
                                               5, 5, 5, 5, 5}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST(PadV2OpTest, Simple4DConstFloat32ValuedTest) {
  // Padding is represented as four 2-D lists representing above padding and
  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
  PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 1, 2, 1}}, {4, 2},
                             {0, 1, 0, 0, 0, 0, 0, 1}, 5, {TensorType_FLOAT32});
  m.SetInput({3, 3});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 5, 3, 5, 5, 5, 5, 5}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 1, 2, 2}));
}

TEST(PadV2OpTest, SimpleConstInt32ValuedTest) {
  // Padding is represented as four 2-D lists representing above padding and
  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
  PadV2OpConstModel<int32_t> m({TensorType_INT32, {1, 2, 2, 1}}, {4, 2},
                               {0, 0, 1, 1, 1, 1, 0, 0}, 5, {TensorType_INT32});
  m.SetInput({1, 2, 3, 4});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
                                               5, 5, 5, 5, 5}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST(PadV2OpTest, SimpleDynamicTest) {
  PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2}, 0.0,
                               {TensorType_FLOAT32});
  m.SetInput({1, 2, 3, 4});
  m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
                                               0, 0, 0, 0, 0}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST(PadV2OpTest, SimpleDynamicValuedTest) {
  PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2}, 5,
                               {TensorType_FLOAT32});
  m.SetInput({1, 2, 3, 4});
  m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
                                               5, 5, 5, 5, 5}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST(PadV2OpTest, AdvancedConstTest) {
  PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2},
                             {0, 0, 0, 2, 1, 3, 0, 0}, 0, {TensorType_FLOAT32});
  m.SetInput({1, 2, 3, 4, 5, 6});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}

TEST(PadV2OpTest, AdvancedDynamicTest) {
  PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2}, 0,
                               {TensorType_FLOAT32});
  m.SetInput({1, 2, 3, 4, 5, 6});
  m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}

class QuantizedPadV2OpTest : public ::testing::Test {
 protected:
  std::vector<Matcher<float>> DequantizedArrayNear(
      const std::vector<float>& values, const float min, const float max) {
    const float quantization_tolerance = (max - min) / 255.0;
    return ArrayFloatNear(values, quantization_tolerance);
  }
};
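
The tolerance used by DequantizedArrayNear is exactly one quantization step; for example (illustrative, not part of the change):

// For the [-1.0, 1.0] ranges used in the tests below:
//   quantization_tolerance = (1.0 - (-1.0)) / 255.0 ≈ 0.00784,
// so each dequantized element only needs to match the expected float value
// to within one quantized step.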

TEST_F(QuantizedPadV2OpTest, ZeroNotInQuantizationRange) {
  // The test_util and actual quantization code currently ensure that the range
  // must include zero, but if that ever changes, this test will catch it.
  EXPECT_DEATH(
      PadV2OpConstModel<float> m({TensorType_UINT8, {1, 2, 2, 1}, 1.0, 2.0},
                                 {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0}, 0,
                                 {TensorType_UINT8, {}, 1.0, 2.0}),
      ".*Check failed: f_min <= 0.*");
}

TEST_F(QuantizedPadV2OpTest, SimpleConstTest) {
  // Padding is represented as four 2-D lists representing above padding and
  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
  PadV2OpConstModel<uint8_t> m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
                               {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0},
                               {TensorType_UINT8, {1}, -1.0, 1.0},
                               {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7});
  m.SetQuantizedPadValue(0);
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST_F(QuantizedPadV2OpTest, SimpleDynamicTest) {
  PadV2OpDynamicModel<uint8_t> m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
                                 {4, 2}, {TensorType_UINT8, {1}, -1.0, 1.0},
                                 {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7});
  m.SetQuantizedPadValue(0);
  m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST_F(QuantizedPadV2OpTest, AdvancedConstTest) {
  PadV2OpConstModel<uint8_t> m({TensorType_UINT8, {1, 2, 3, 1}, -1.0, 1.0},
                               {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0},
                               {TensorType_UINT8, {1}, -1.0, 1.0},
                               {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
  m.SetQuantizedPadValue(0);
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}

TEST_F(QuantizedPadV2OpTest, AdvancedDynamicTest) {
  PadV2OpDynamicModel<uint8_t> m({TensorType_UINT8, {1, 2, 3, 1}, -1.0, 1.0},
                                 {4, 2}, {TensorType_UINT8, {1}, -1.0, 1.0},
                                 {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
  m.SetQuantizedPadValue(0);
  m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}

TEST_F(QuantizedPadV2OpTest, SimpleConstValuedTest) {
  // Padding is represented as four 2-D lists representing above padding and
  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
  PadV2OpConstModel<uint8_t> m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
                               {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0},
                               {TensorType_UINT8, {1}, -1.0, 1.0},
                               {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7});
  m.SetQuantizedPadValue(-0.5);
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {-0.5, -0.5, -0.5, -0.5, -0.5, -0.8, 0.2, -0.5, -0.5, 0.9,
                   0.7, -0.5, -0.5, -0.5, -0.5, -0.5},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST_F(QuantizedPadV2OpTest, SimpleDynamicValuedTest) {
  PadV2OpDynamicModel<uint8_t> m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
                                 {4, 2}, {TensorType_UINT8, {1}, -1.0, 1.0},
                                 {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7});
  m.SetQuantizedPadValue(-0.5);
  m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {-0.5, -0.5, -0.5, -0.5, -0.5, -0.8, 0.2, -0.5, -0.5, 0.9,
                   0.7, -0.5, -0.5, -0.5, -0.5, -0.5},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}

TEST_F(QuantizedPadV2OpTest, AdvancedConstValuedTest) {
  PadV2OpConstModel<uint8_t> m({TensorType_UINT8, {1, 2, 3, 1}, -1.0, 1.0},
                               {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0},
                               {TensorType_UINT8, {1}, -1.0, 1.0},
                               {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
  m.SetQuantizedPadValue(-0.5);
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {-0.5, -0.8, 0.2, 0.9, -0.5, -0.5, -0.5, -0.5, 0.7, 0.1,
                   -0.3, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                   -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}

TEST_F(QuantizedPadV2OpTest, AdvancedDynamicValuedTest) {
  PadV2OpDynamicModel<uint8_t> m({TensorType_UINT8, {1, 2, 3, 1}, -1.0, 1.0},
                                 {4, 2}, {TensorType_UINT8, {1}, -1.0, 1.0},
                                 {TensorType_UINT8, {}, -1.0, 1.0});
  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
  m.SetQuantizedPadValue(-0.5);
  m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(DequantizedArrayNear(
                  {-0.5, -0.8, 0.2, 0.9, -0.5, -0.5, -0.5, -0.5, 0.7, 0.1,
                   -0.3, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                   -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5},
                  -1.0, 1.0)));
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}

} // namespace
} // namespace tflite

@@ -60,6 +60,7 @@ TfLiteRegistration* Register_LSTM();
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_LSTM();
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
TfLiteRegistration* Register_PAD();
TfLiteRegistration* Register_PADV2();
TfLiteRegistration* Register_RESHAPE();
TfLiteRegistration* Register_RESIZE_BILINEAR();
TfLiteRegistration* Register_SKIP_GRAM();

@@ -121,6 +122,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
  AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
             Register_UNIDIRECTIONAL_SEQUENCE_LSTM());
  AddBuiltin(BuiltinOperator_PAD, Register_PAD());
  AddBuiltin(BuiltinOperator_PADV2, Register_PADV2());
  AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
  AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR());
  AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());

@@ -22,23 +22,6 @@ namespace tflite {
using ::testing::FloatNear;
using ::testing::Matcher;

namespace {
template <typename T>
std::pair<float, int32_t> QuantizationParams(float f_min, float f_max) {
  // These are required by many quantized operations.
  CHECK_LE(f_min, 0);
  CHECK_GE(f_max, 0);
  T q_min = std::numeric_limits<T>::min();
  T q_max = std::numeric_limits<T>::max();
  float range = q_max - q_min;
  float scale = (f_max - f_min) / range;
  int32_t zero_point = std::min(
      q_max,
      std::max(q_min, static_cast<T>(std::round(q_min - f_min / scale))));
  return {scale, zero_point};
}
} // namespace

std::vector<Matcher<float>> ArrayFloatNear(const std::vector<float>& values,
                                           float max_abs_error) {
  std::vector<Matcher<float>> matchers;

@@ -49,69 +32,8 @@ std::vector<Matcher<float>> ArrayFloatNear(const std::vector<float>& values,
  return matchers;
}

int SingleOpModel::AddTensor(TensorData t, std::initializer_list<int> data) {
  int id = tensors_.size();

  // This is slightly different depending on whether we are adding a
  // quantized or a regular tensor.
  bool is_quantized = (t.min != 0 || t.max != 0 || t.scale != 0);

  flatbuffers::Offset<QuantizationParameters> q_params = 0;

  if (is_quantized) {
    if (t.min != 0 || t.max != 0) {
      if (t.type == TensorType_UINT8) {
        std::tie(t.scale, t.zero_point) =
            QuantizationParams<uint8_t>(t.min, t.max);
      } else if (t.type == TensorType_INT32) {
        std::tie(t.scale, t.zero_point) =
            QuantizationParams<int32_t>(t.min, t.max);
      } else {
        LOG(FATAL) << "No support for the requested quantized type";
      }
      t.min = 0;
      t.max = 0;
    }

    q_params = CreateQuantizationParameters(
        builder_, /*min=*/0, /*max=*/0, builder_.CreateVector<float>({t.scale}),
        builder_.CreateVector<int64_t>({t.zero_point}));
  }

  int buffer_id = 0;
  if (data.size()) {
    // Initialize buffers list with empty buffer to allow for non-const tensors.
    if (buffers_.empty()) {
      buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector({})));
    }

    // Add data as a Buffer to buffers list.
    buffer_id = buffers_.size();
    auto data_buffer =
        builder_.CreateVector(reinterpret_cast<const uint8_t*>(data.begin()),
                              sizeof(int) * data.size());
    buffers_.push_back(CreateBuffer(builder_, data_buffer));
  }

  tensors_.push_back(CreateTensor(builder_, builder_.CreateVector<int>(t.shape),
                                  t.type, /*buffer=*/buffer_id,
                                  /*name=*/0, q_params));

  tensor_data_[id] = t;

  return id;
}

int SingleOpModel::AddInput(const TensorData& t) {
  int id = AddTensor(t, {});
  inputs_.push_back(id);
  return id;
}

int SingleOpModel::AddConstInput(TensorType type,
                                 std::initializer_list<int> data,
                                 std::initializer_list<int> shape) {
  int id = AddTensor(TensorData{type, shape}, data);
  int id = AddTensor<float>(t, {});
  inputs_.push_back(id);
  return id;
}

@@ -123,7 +45,7 @@ int SingleOpModel::AddNullInput() {
}

int SingleOpModel::AddOutput(const TensorData& t) {
  int id = AddTensor(t, {});
  int id = AddTensor<float>(t, {});
  outputs_.push_back(id);
  return id;
}

@@ -116,9 +116,14 @@ class SingleOpModel {
  int AddInput(TensorType type) { return AddInput(TensorData{type}); }
  int AddInput(const TensorData& t);

  // Add a Tensor containing const data and return the tensor id.
  int AddConstInput(TensorType type, std::initializer_list<int> data,
                    std::initializer_list<int> shape);
  // Templated version of AddConstInput().
  template <typename T>
  int AddConstInput(TensorType type, std::initializer_list<T> data,
                    std::initializer_list<int> shape) {
    int id = AddTensor(TensorData{type, shape}, data);
    inputs_.push_back(id);
    return id;
  }

  // Add a null input tensor (optional input) and return kOptionalTensor.
  int AddNullInput();

@@ -224,7 +229,79 @@ class SingleOpModel {
  std::unique_ptr<OpResolver> resolver_;

 private:
  int AddTensor(TensorData t, std::initializer_list<int> data);
  // TODO(gavinbelson): sync this method with
  // //tensorflow/contrib/lite/kernels/internal/quantization_util.h?l=31
  template <typename T>
  std::pair<float, int32_t> QuantizationParams(float f_min, float f_max) {
    // These are required by many quantized operations.
    CHECK_LE(f_min, 0);
    CHECK_GE(f_max, 0);
    T q_min = std::numeric_limits<T>::min();
    T q_max = std::numeric_limits<T>::max();
    float range = q_max - q_min;
    float scale = (f_max - f_min) / range;
    int32_t zero_point = std::min(
        q_max,
        std::max(q_min, static_cast<T>(std::round(q_min - f_min / scale))));
    return {scale, zero_point};
  }
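
A worked example of these formulas (illustrative, not in the original change) for the uint8 ranges the PadV2 tests use:

// For TensorType_UINT8 with f_min = -1.0f and f_max = 1.0f:
//   scale      = (1.0 - (-1.0)) / 255        ≈ 0.00784
//   zero_point = round(0 - (-1.0) / 0.00784) = 128
// A pad value of -0.5 is therefore stored as round(-0.5 / 0.00784) + 128 = 64.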

  template <typename T>
  int AddTensor(TensorData t, std::initializer_list<T> data) {
    int id = tensors_.size();

    // This is slightly different depending on whether we are adding a
    // quantized or a regular tensor.
    bool is_quantized = (t.min != 0 || t.max != 0 || t.scale != 0);

    flatbuffers::Offset<QuantizationParameters> q_params = 0;

    if (is_quantized) {
      if (t.min != 0 || t.max != 0) {
        if (t.type == TensorType_UINT8) {
          std::tie(t.scale, t.zero_point) =
              QuantizationParams<uint8_t>(t.min, t.max);
        } else if (t.type == TensorType_INT32) {
          std::tie(t.scale, t.zero_point) =
              QuantizationParams<int32_t>(t.min, t.max);
        } else {
          LOG(FATAL) << "No support for the requested quantized type";
        }
        t.min = 0;
        t.max = 0;
      }

      q_params = CreateQuantizationParameters(
          builder_, /*min=*/0, /*max=*/0,
          builder_.CreateVector<float>({t.scale}),
          builder_.CreateVector<int64_t>({t.zero_point}));
    }

    int buffer_id = 0;
    if (data.size()) {
      // Initialize buffers list with empty buffer to allow for non-const
      // tensors.
      if (buffers_.empty()) {
        buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector({})));
      }

      // Add data as a Buffer to buffers list.
      buffer_id = buffers_.size();
      auto data_buffer =
          builder_.CreateVector(reinterpret_cast<const uint8_t*>(data.begin()),
                                sizeof(T) * data.size());
      buffers_.push_back(CreateBuffer(builder_, data_buffer));
    }

    tensors_.push_back(CreateTensor(builder_,
                                    builder_.CreateVector<int>(t.shape), t.type,
                                    /*buffer=*/buffer_id,
                                    /*name=*/0, q_params));

    tensor_data_[id] = t;

    return id;
  }
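
Templating AddTensor (and AddConstInput) is what lets the PadV2 tests build const constant_values tensors of any scalar type: the initializer data is copied into the flatbuffer buffer byte for byte, so the element width has to be sizeof(T) rather than the previously hard-coded sizeof(int). Hypothetical calls showing the serialized sizes:

// Illustrative only: serialized byte counts for a one-element const tensor.
//   AddConstInput(TensorType_INT32,   {int32_t{5}},   {1});  // 1 * sizeof(int32_t) = 4 bytes
//   AddConstInput(TensorType_UINT8,   {uint8_t{128}}, {1});  // 1 * sizeof(uint8_t) = 1 byte
//   AddConstInput(TensorType_FLOAT32, {5.0f},         {1});  // 1 * sizeof(float)   = 4 bytes
// With the old non-templated AddTensor every element was copied as sizeof(int).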

  std::map<int, TensorData> tensor_data_;
  std::vector<int32_t> inputs_;

@@ -569,6 +569,9 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
    case BuiltinOperator_PAD: {
      break;
    }
    case BuiltinOperator_PADV2: {
      break;
    }
    case BuiltinOperator_RESHAPE: {
      auto* params = MallocPOD<TfLiteReshapeParams>();
      if (auto* schema_params = op->builtin_options_as_ReshapeOptions()) {

@@ -347,6 +347,7 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
    case tflite::BuiltinOperator_L2_NORMALIZATION:
    case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION:
    case tflite::BuiltinOperator_PAD:
    case tflite::BuiltinOperator_PADV2:
    case tflite::BuiltinOperator_RESIZE_BILINEAR:
    case tflite::BuiltinOperator_CALL:
    case tflite::BuiltinOperator_SKIP_GRAM:

@@ -137,6 +137,7 @@ enum BuiltinOperator : byte {
  MINIMUM = 57,
  LESS = 58,
  NEG = 59,
  PADV2 = 60,
}

// Options for the builtin operators.

@@ -163,6 +164,7 @@ union BuiltinOptions {
  EmbeddingLookupSparseOptions,
  MulOptions,
  PadOptions,
  PadV2Options,
  GatherOptions,
  BatchToSpaceNDOptions,
  SpaceToBatchNDOptions,

@@ -316,6 +318,9 @@ table CallOptions {
table PadOptions {
}

table PadV2Options {
}

table ReshapeOptions {
  new_shape:[int];
}

@@ -88,6 +88,9 @@ struct CallOptionsT;
struct PadOptions;
struct PadOptionsT;

struct PadV2Options;
struct PadV2OptionsT;

struct ReshapeOptions;
struct ReshapeOptionsT;

@@ -276,11 +279,12 @@ enum BuiltinOperator {
  BuiltinOperator_MINIMUM = 57,
  BuiltinOperator_LESS = 58,
  BuiltinOperator_NEG = 59,
  BuiltinOperator_PADV2 = 60,
  BuiltinOperator_MIN = BuiltinOperator_ADD,
  BuiltinOperator_MAX = BuiltinOperator_NEG
  BuiltinOperator_MAX = BuiltinOperator_PADV2
};

inline BuiltinOperator (&EnumValuesBuiltinOperator())[59] {
inline BuiltinOperator (&EnumValuesBuiltinOperator())[60] {
  static BuiltinOperator values[] = {
    BuiltinOperator_ADD,
    BuiltinOperator_AVERAGE_POOL_2D,

@@ -340,7 +344,8 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[59] {
    BuiltinOperator_ARG_MAX,
    BuiltinOperator_MINIMUM,
    BuiltinOperator_LESS,
    BuiltinOperator_NEG
    BuiltinOperator_NEG,
    BuiltinOperator_PADV2
  };
  return values;
}

@@ -407,6 +412,7 @@ inline const char **EnumNamesBuiltinOperator() {
    "MINIMUM",
    "LESS",
    "NEG",
    "PADV2",
    nullptr
  };
  return names;

@@ -441,31 +447,32 @@ enum BuiltinOptions {
  BuiltinOptions_EmbeddingLookupSparseOptions = 20,
  BuiltinOptions_MulOptions = 21,
  BuiltinOptions_PadOptions = 22,
  BuiltinOptions_GatherOptions = 23,
  BuiltinOptions_BatchToSpaceNDOptions = 24,
  BuiltinOptions_SpaceToBatchNDOptions = 25,
  BuiltinOptions_TransposeOptions = 26,
  BuiltinOptions_MeanOptions = 27,
  BuiltinOptions_SubOptions = 28,
  BuiltinOptions_DivOptions = 29,
  BuiltinOptions_SqueezeOptions = 30,
  BuiltinOptions_SequenceRNNOptions = 31,
  BuiltinOptions_StridedSliceOptions = 32,
  BuiltinOptions_ExpOptions = 33,
  BuiltinOptions_TopKV2Options = 34,
  BuiltinOptions_SplitOptions = 35,
  BuiltinOptions_LogSoftmaxOptions = 36,
  BuiltinOptions_CastOptions = 37,
  BuiltinOptions_DequantizeOptions = 38,
  BuiltinOptions_MaximumMinimumOptions = 39,
  BuiltinOptions_ArgMaxOptions = 40,
  BuiltinOptions_LessOptions = 41,
  BuiltinOptions_NegOptions = 42,
  BuiltinOptions_PadV2Options = 23,
  BuiltinOptions_GatherOptions = 24,
  BuiltinOptions_BatchToSpaceNDOptions = 25,
  BuiltinOptions_SpaceToBatchNDOptions = 26,
  BuiltinOptions_TransposeOptions = 27,
  BuiltinOptions_MeanOptions = 28,
  BuiltinOptions_SubOptions = 29,
  BuiltinOptions_DivOptions = 30,
  BuiltinOptions_SqueezeOptions = 31,
  BuiltinOptions_SequenceRNNOptions = 32,
  BuiltinOptions_StridedSliceOptions = 33,
  BuiltinOptions_ExpOptions = 34,
  BuiltinOptions_TopKV2Options = 35,
  BuiltinOptions_SplitOptions = 36,
  BuiltinOptions_LogSoftmaxOptions = 37,
  BuiltinOptions_CastOptions = 38,
  BuiltinOptions_DequantizeOptions = 39,
  BuiltinOptions_MaximumMinimumOptions = 40,
  BuiltinOptions_ArgMaxOptions = 41,
  BuiltinOptions_LessOptions = 42,
  BuiltinOptions_NegOptions = 43,
  BuiltinOptions_MIN = BuiltinOptions_NONE,
  BuiltinOptions_MAX = BuiltinOptions_NegOptions
};

inline BuiltinOptions (&EnumValuesBuiltinOptions())[43] {
inline BuiltinOptions (&EnumValuesBuiltinOptions())[44] {
  static BuiltinOptions values[] = {
    BuiltinOptions_NONE,
    BuiltinOptions_Conv2DOptions,

@@ -490,6 +497,7 @@ inline BuiltinOptions (&EnumValuesBuiltinOptions())[43] {
    BuiltinOptions_EmbeddingLookupSparseOptions,
    BuiltinOptions_MulOptions,
    BuiltinOptions_PadOptions,
    BuiltinOptions_PadV2Options,
    BuiltinOptions_GatherOptions,
    BuiltinOptions_BatchToSpaceNDOptions,
    BuiltinOptions_SpaceToBatchNDOptions,

@@ -539,6 +547,7 @@ inline const char **EnumNamesBuiltinOptions() {
    "EmbeddingLookupSparseOptions",
    "MulOptions",
    "PadOptions",
    "PadV2Options",
    "GatherOptions",
    "BatchToSpaceNDOptions",
    "SpaceToBatchNDOptions",

@@ -661,6 +670,10 @@ template<> struct BuiltinOptionsTraits<PadOptions> {
  static const BuiltinOptions enum_value = BuiltinOptions_PadOptions;
};

template<> struct BuiltinOptionsTraits<PadV2Options> {
  static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options;
};

template<> struct BuiltinOptionsTraits<GatherOptions> {
  static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions;
};

@@ -948,6 +961,14 @@ struct BuiltinOptionsUnion {
    return type == BuiltinOptions_PadOptions ?
      reinterpret_cast<const PadOptionsT *>(value) : nullptr;
  }
  PadV2OptionsT *AsPadV2Options() {
    return type == BuiltinOptions_PadV2Options ?
      reinterpret_cast<PadV2OptionsT *>(value) : nullptr;
  }
  const PadV2OptionsT *AsPadV2Options() const {
    return type == BuiltinOptions_PadV2Options ?
      reinterpret_cast<const PadV2OptionsT *>(value) : nullptr;
  }
  GatherOptionsT *AsGatherOptions() {
    return type == BuiltinOptions_GatherOptions ?
      reinterpret_cast<GatherOptionsT *>(value) : nullptr;

@@ -2873,6 +2894,46 @@ inline flatbuffers::Offset<PadOptions> CreatePadOptions(

flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);

struct PadV2OptionsT : public flatbuffers::NativeTable {
  typedef PadV2Options TableType;
  PadV2OptionsT() {
  }
};

struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef PadV2OptionsT NativeTableType;
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           verifier.EndTable();
  }
  PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<PadV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

struct PadV2OptionsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &);
  flatbuffers::Offset<PadV2Options> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<PadV2Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(
    flatbuffers::FlatBufferBuilder &_fbb) {
  PadV2OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);

struct ReshapeOptionsT : public flatbuffers::NativeTable {
  typedef ReshapeOptions TableType;
  std::vector<int32_t> new_shape;

@@ -4258,6 +4319,9 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  const PadOptions *builtin_options_as_PadOptions() const {
    return builtin_options_type() == BuiltinOptions_PadOptions ? static_cast<const PadOptions *>(builtin_options()) : nullptr;
  }
  const PadV2Options *builtin_options_as_PadV2Options() const {
    return builtin_options_type() == BuiltinOptions_PadV2Options ? static_cast<const PadV2Options *>(builtin_options()) : nullptr;
  }
  const GatherOptions *builtin_options_as_GatherOptions() const {
    return builtin_options_type() == BuiltinOptions_GatherOptions ? static_cast<const GatherOptions *>(builtin_options()) : nullptr;
  }

@@ -4432,6 +4496,10 @@ template<> inline const PadOptions *Operator::builtin_options_as<PadOptions>() c
  return builtin_options_as_PadOptions();
}

template<> inline const PadV2Options *Operator::builtin_options_as<PadV2Options>() const {
  return builtin_options_as_PadV2Options();
}

template<> inline const GatherOptions *Operator::builtin_options_as<GatherOptions>() const {
  return builtin_options_as_GatherOptions();
}

@@ -5572,6 +5640,29 @@ inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferB
      _fbb);
}

inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new PadV2OptionsT();
  UnPackTo(_o, _resolver);
  return _o;
}

inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
}

inline flatbuffers::Offset<PadV2Options> PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreatePadV2Options(_fbb, _o, _rehasher);
}

inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  return tflite::CreatePadV2Options(
      _fbb);
}

inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new ReshapeOptionsT();
  UnPackTo(_o, _resolver);

@@ -6432,6 +6523,10 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *ob
      auto ptr = reinterpret_cast<const PadOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_PadV2Options: {
      auto ptr = reinterpret_cast<const PadV2Options *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_GatherOptions: {
      auto ptr = reinterpret_cast<const GatherOptions *>(obj);
      return verifier.VerifyTable(ptr);

@@ -6618,6 +6713,10 @@ inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, c
      auto ptr = reinterpret_cast<const PadOptions *>(obj);
      return ptr->UnPack(resolver);
    }
    case BuiltinOptions_PadV2Options: {
      auto ptr = reinterpret_cast<const PadV2Options *>(obj);
      return ptr->UnPack(resolver);
    }
    case BuiltinOptions_GatherOptions: {
      auto ptr = reinterpret_cast<const GatherOptions *>(obj);
      return ptr->UnPack(resolver);

@@ -6792,6 +6891,10 @@ inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBuff
      auto ptr = reinterpret_cast<const PadOptionsT *>(value);
      return CreatePadOptions(_fbb, ptr, _rehasher).Union();
    }
    case BuiltinOptions_PadV2Options: {
      auto ptr = reinterpret_cast<const PadV2OptionsT *>(value);
      return CreatePadV2Options(_fbb, ptr, _rehasher).Union();
    }
    case BuiltinOptions_GatherOptions: {
      auto ptr = reinterpret_cast<const GatherOptionsT *>(value);
      return CreateGatherOptions(_fbb, ptr, _rehasher).Union();

@@ -6966,6 +7069,10 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FL
      value = new PadOptionsT(*reinterpret_cast<PadOptionsT *>(u.value));
      break;
    }
    case BuiltinOptions_PadV2Options: {
      value = new PadV2OptionsT(*reinterpret_cast<PadV2OptionsT *>(u.value));
      break;
    }
    case BuiltinOptions_GatherOptions: {
      value = new GatherOptionsT(*reinterpret_cast<GatherOptionsT *>(u.value));
      break;

@@ -7163,6 +7270,11 @@ inline void BuiltinOptionsUnion::Reset() {
      delete ptr;
      break;
    }
    case BuiltinOptions_PadV2Options: {
      auto ptr = reinterpret_cast<PadV2OptionsT *>(value);
      delete ptr;
      break;
    }
    case BuiltinOptions_GatherOptions: {
      auto ptr = reinterpret_cast<GatherOptionsT *>(value);
      delete ptr;

@@ -45,6 +45,7 @@ gen_zipped_test_files(
        "mul.zip",
        "neg.zip",
        "pad.zip",
        "padv2.zip",
        "relu.zip",
        "relu1.zip",
        "relu6.zip",
@ -1391,6 +1391,60 @@ def make_pad_tests(zip_path):
  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


def make_padv2_tests(zip_path):
  """Make a set of tests to do padv2."""

  # TODO(nupurgarg): Add test for tf.uint8.
  test_parameters = [
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
          "paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
                                                          [0, 0], [2, 3]]],
          "constant_paddings": [True, False],
          "constant_values": [0, 2],
      },
      # Non-4D use case.
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 2], [0, 1, 2]],
          "paddings": [[[0, 1], [2, 3]]],
          "constant_paddings": [True, False],
          "constant_values": [0, 2],
      },
  ]

  def build_graph(parameters):
    """Build a pad graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])

    # Get paddings as either a placeholder or constants.
    if parameters["constant_paddings"]:
      paddings = parameters["paddings"]
      input_tensors = [input_tensor]
    else:
      shape = [len(parameters["paddings"]), 2]
      paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
      input_tensors = [input_tensor, paddings]

    out = tf.pad(input_tensor, paddings=paddings,
                 constant_values=parameters["constant_values"])
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_paddings"]:
      values.append(np.array(parameters["paddings"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
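For reference, a minimal sketch (not part of this change) of what the constant_paddings=False parameter exercises: the paddings tensor stays a placeholder and is fed at run time, so the converter sees a PadV2 whose paddings input is not constant. This assumes the TensorFlow 1.x graph-mode API used by the generator above.

import numpy as np
import tensorflow as tf

input_tensor = tf.placeholder(dtype=tf.float32, name="input", shape=[1, 2])
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=[2, 2])
out = tf.pad(input_tensor, paddings=paddings, constant_values=2)

with tf.Session() as sess:
  result = sess.run(out, feed_dict={
      input_tensor: np.array([[1.0, 2.0]], dtype=np.float32),
      paddings: np.array([[0, 1], [2, 3]], dtype=np.int32),
  })
  # result.shape == (2, 7); every padded entry is 2.0.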


def make_reshape_tests(zip_path):
  """Make a set of tests to do reshape."""

@ -54,9 +54,11 @@ std::map<string, string> kBrokenTests = {
    {R"(^\/div.*int32)", "68808744"},
    {R"(^\/sub.*int32)", "68808744"},

    // Pad only supports 4D tensors.
    // Pad and PadV2 only support 4D tensors.
{R"(^\/pad.*,input_shape=\[.,.\],paddings=\[\[.,.\],\[.,.\]\])",
|
||||
"70527055"},
|
||||
{R"(^\/padv2.*,input_shape=\[.,.\],paddings=\[\[.,.\],\[.,.\]\])",
|
||||
"70527055"},
|
||||
|
||||
// L2Norm only supports tensors with 4D or fewer.
|
||||
{R"(^\/l2normdim=.*,epsilon=.*,input_shape=\[.,.,.,.,.*\])", "67963684"},
|
||||
@ -268,6 +270,7 @@ INSTANTIATE_TESTS(minimum)
|
||||
INSTANTIATE_TESTS(mul)
|
||||
INSTANTIATE_TESTS(neg)
|
||||
INSTANTIATE_TESTS(pad)
|
||||
INSTANTIATE_TESTS(padv2)
|
||||
// INSTANTIATE_TESTS(prelu)
|
||||
INSTANTIATE_TESTS(relu)
|
||||
INSTANTIATE_TESTS(relu1)
|
||||
|
@ -280,6 +280,7 @@ cc_library(
        "graph_transformations/resolve_mean_attributes.cc",
        "graph_transformations/resolve_multiply_by_zero.cc",
        "graph_transformations/resolve_pad_attributes.cc",
        "graph_transformations/resolve_padv2_attributes.cc",
        "graph_transformations/resolve_reorder_axes.cc",
        "graph_transformations/resolve_reshape_attributes.cc",
        "graph_transformations/resolve_slice_attributes.cc",
@ -1492,6 +1492,37 @@ void ConvertPadOperator(const Model& model, const PadOperator& src_op,
  shape->add_dim()->set_size(2);
}

void ConvertPadV2Operator(const Model& model, const PadV2Operator& src_op,
                          GraphDef* tensorflow_graph) {
  auto* new_op = tensorflow_graph->add_node();
  new_op->set_op("PadV2");
  new_op->set_name(src_op.outputs[0]);
  CHECK_EQ(src_op.inputs.size(), 3);
  *new_op->add_input() = src_op.inputs[0];
  *new_op->add_input() = src_op.inputs[1];
  *new_op->add_input() = src_op.inputs[2];

  const auto params_type = GetTensorFlowDataType(model, src_op.inputs[0]);
  (*new_op->mutable_attr())["T"].set_type(params_type);

  // Create the params tensor.
  auto* params_op = tensorflow_graph->add_node();
  params_op->set_op("Const");
  params_op->set_name(src_op.inputs[1]);
  (*params_op->mutable_attr())["dtype"].set_type(DT_INT32);
  auto* tensor = (*params_op->mutable_attr())["value"].mutable_tensor();
  tensor->set_dtype(DT_INT32);

  CHECK_EQ(src_op.left_padding.size(), src_op.right_padding.size());
  for (int i = 0; i < src_op.left_padding.size(); ++i) {
    tensor->add_int_val(src_op.left_padding[i]);
    tensor->add_int_val(src_op.right_padding[i]);
  }
  auto* shape = tensor->mutable_tensor_shape();
  shape->add_dim()->set_size(src_op.left_padding.size());
  shape->add_dim()->set_size(2);
}

void CreateSliceInput(const string& input_name, const std::vector<int>& values,
                      GraphDef* tensorflow_graph) {
  auto* params_op = tensorflow_graph->add_node();
@ -1795,6 +1826,9 @@ void ConvertOperator(const Model& model, const Operator& src_op,
  } else if (src_op.type == OperatorType::kPad) {
    ConvertPadOperator(model, static_cast<const PadOperator&>(src_op),
                       tensorflow_graph);
  } else if (src_op.type == OperatorType::kPadV2) {
    ConvertPadV2Operator(model, static_cast<const PadV2Operator&>(src_op),
                         tensorflow_graph);
  } else if (src_op.type == OperatorType::kStridedSlice) {
    ConvertStridedSliceOperator(
        model, static_cast<const StridedSliceOperator&>(src_op),
@ -174,6 +174,7 @@ DECLARE_GRAPH_TRANSFORMATION(UnrollBatchMatMul)
DECLARE_GRAPH_TRANSFORMATION(ResolveSpaceToBatchNDAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveBatchToSpaceNDAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolvePadAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolvePadV2Attributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveStridedSliceAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveSliceAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveMeanAttributes)
@ -1146,6 +1146,32 @@ void ProcessPadOperator(Model* model, PadOperator* op) {
  output_array.copy_shape(output_shape);
}

void ProcessPadV2Operator(Model* model, PadV2Operator* op) {
  CHECK_EQ(op->inputs.size(), 3);
  CHECK_EQ(op->outputs.size(), 1);

  const auto& input_array = model->GetArray(op->inputs[0]);

  // Yield until input dims have been resolved.
  if (!input_array.has_shape()) return;

  if (op->left_padding.empty()) return;
  CHECK_EQ(op->left_padding.size(), op->right_padding.size());

  auto& output_array = model->GetArray(op->outputs[0]);
  if (output_array.has_shape()) return;

  Shape output_shape = input_array.shape();
  std::vector<int>& dims = *output_shape.mutable_dims();
  CHECK_EQ(op->left_padding.size(), dims.size());

  for (int i = 0; i < op->left_padding.size(); ++i) {
    dims[i] += op->left_padding[i] + op->right_padding[i];
  }

  output_array.copy_shape(output_shape);
}
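As a quick illustration (not part of the diff) of the shape propagation above, here is the same arithmetic for one of the 4D cases from make_padv2_tests: input shape [1, 1, 2, 1] with paddings [[0, 0], [0, 1], [2, 3], [0, 0]].

dims = [1, 1, 2, 1]
left_padding = [0, 0, 2, 0]    # paddings[D, 0] for each dimension D
right_padding = [0, 1, 3, 0]   # paddings[D, 1] for each dimension D
for i in range(len(dims)):
  dims[i] += left_padding[i] + right_padding[i]
print(dims)  # [1, 2, 7, 1]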

void ProcessRankOperator(Model* model, RankOperator* op) {
  CHECK_GE(op->inputs.size(), 1);
  CHECK_EQ(op->outputs.size(), 1);
@ -1628,6 +1654,9 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
    case OperatorType::kPad:
      ProcessPadOperator(model, static_cast<PadOperator*>(op));
      break;
    case OperatorType::kPadV2:
      ProcessPadV2Operator(model, static_cast<PadV2Operator*>(op));
      break;
    case OperatorType::kStridedSlice:
      ProcessStridedSliceOperator(model,
                                  static_cast<StridedSliceOperator*>(op));
@ -48,6 +48,7 @@ bool SupportsQuantization(const Operator& op) {
         type == OperatorType::kLogSoftmax ||
         type == OperatorType::kTensorFlowSplit || type == OperatorType::kSub ||
         type == OperatorType::kSqueeze || type == OperatorType::kPad ||
         type == OperatorType::kPadV2 ||
         type == OperatorType::kTensorFlowReshape ||
         type == OperatorType::kTanh || type == OperatorType::kMul ||
         type == OperatorType::kSpaceToDepth ||
@ -0,0 +1,55 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/contrib/lite/toco/model.h"
#include "tensorflow/contrib/lite/toco/tooling_util.h"
#include "tensorflow/core/platform/logging.h"

namespace toco {

bool ResolvePadV2Attributes::Run(Model* model, std::size_t op_index) {
  const auto pad_it = model->operators.begin() + op_index;
  auto* pad_op = pad_it->get();
  if (pad_op->type != OperatorType::kPadV2) return false;

  auto* op = static_cast<PadV2Operator*>(pad_op);
  if (!op->left_padding.empty()) return false;

  CHECK_EQ(op->inputs.size(), 3);
  if (!IsConstantParameterArray(*model, op->inputs[1])) return false;

  const auto& array = model->GetArray(op->inputs[1]);
  if (!array.has_shape()) return false;

  const std::vector<int>& dims = array.shape().dims();
  CHECK_EQ(dims.size(), 2);

  std::vector<int> buffer = array.GetBuffer<ArrayDataType::kInt32>().data;

  for (int i = 0; i < dims[0]; ++i) {
    op->left_padding.push_back(buffer[i * 2]);
    op->right_padding.push_back(buffer[i * 2 + 1]);
  }

  // TODO(dkalenichenko): Delete the extra input?

  return true;
}
} // namespace toco
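A small sketch (illustration only) of the buffer layout this transformation consumes: the constant paddings array is an [n, 2] int32 tensor stored row-major, so even offsets hold the "before" amounts and odd offsets the "after" amounts.

buffer = [0, 1, 2, 3, 0, 0]   # paddings == [[0, 1], [2, 3], [0, 0]]
left_padding = buffer[0::2]   # [0, 2, 0]
right_padding = buffer[1::2]  # [1, 3, 0]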
@ -925,6 +925,19 @@ void ConvertPadOperator(const NodeDef& node,
  model->operators.emplace_back(op);
}

void ConvertPadV2Operator(const NodeDef& node,
                          const TensorFlowImportFlags& tf_import_flags,
                          Model* model) {
  CHECK_EQ(node.op(), "PadV2");
  CheckInputsCount(node, tf_import_flags, 3);
  auto* op = new PadV2Operator;
  op->inputs.push_back(node.input(0));
  op->inputs.push_back(node.input(1));
  op->inputs.push_back(node.input(2));
  op->outputs.push_back(node.name());
  model->operators.emplace_back(op);
}

void ConvertShapeOperator(const NodeDef& node,
                          const TensorFlowImportFlags& tf_import_flags,
                          Model* model) {
@ -2169,6 +2182,8 @@ Status ImportTensorFlowNode(const tensorflow::NodeDef& node,
    ConvertMergeOperator(node, tf_import_flags, model);
  } else if (node.op() == "Pad") {
    ConvertPadOperator(node, tf_import_flags, model);
  } else if (node.op() == "PadV2") {
    ConvertPadV2Operator(node, tf_import_flags, model);
  } else if (node.op() == "StridedSlice") {
    ConvertStridedSliceOperator(node, tf_import_flags, model);
  } else if (node.op() == "Shape") {
@ -82,6 +82,7 @@ enum class OperatorType {
  kStack,
  kBatchToSpaceND,
  kPad,
  kPadV2,
  kStridedSlice,
  kSlice,
  kSqueeze,
@ -825,6 +826,29 @@ struct PadOperator : Operator {
  std::vector<int> right_padding;
};

// PaddingV2 operator. Pads a tensor with the given constant value.
//
// Inputs:
//   inputs[0]: required: the input array
//   inputs[1]: required: the padding array
//   inputs[2]: required: the scalar constant_values
//
// This operation pads input according to the paddings and constant_values you
// specify. paddings is an integer tensor with shape [Dn, 2], where n is the
// rank of input. For each dimension D of input, paddings[D, 0] indicates how
// many padding values to add before the contents of input in that dimension,
// and paddings[D, 1] indicates how many padding values to add after the
// contents of input in that dimension. constant_values is a scalar tensor of
// the same type as input that indicates the value to use for padding input.
//
// TensorFlow equivalent: PadV2
struct PadV2Operator : Operator {
  PadV2Operator() : Operator(OperatorType::kPadV2) {}

  std::vector<int> left_padding;
  std::vector<int> right_padding;
};
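To make the comment above concrete, a NumPy sketch (illustration only, not part of the commit) of the same semantics for a rank-2 input, paddings [[0, 1], [2, 3]], and constant_values 2:

import numpy as np

x = np.array([[1.0, 2.0]])  # shape [1, 2]
padded = np.pad(x, pad_width=[[0, 1], [2, 3]], mode="constant",
                constant_values=2)
print(padded.shape)  # (2, 7)
print(padded)
# [[2. 2. 1. 2. 2. 2. 2.]
#  [2. 2. 2. 2. 2. 2. 2.]]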

// Strided slice operator.
//
// Inputs:
@ -465,6 +465,21 @@ class Pad : public BuiltinOperator<PadOperator, ::tflite::PadOptions,
                   TocoOperator* op) const override {}
};

class PadV2 : public BuiltinOperator<PadV2Operator, ::tflite::PadV2Options,
                                     ::tflite::BuiltinOptions_PadV2Options> {
 public:
  using BuiltinOperator::BuiltinOperator;

  flatbuffers::Offset<TfLiteOptions> WriteOptions(
      const TocoOperator& op,
      flatbuffers::FlatBufferBuilder* builder) const override {
    return ::tflite::CreatePadV2Options(*builder);
  }

  void ReadOptions(const TfLiteOptions& options,
                   TocoOperator* op) const override {}
};

class Reshape
    : public BuiltinOperator<TensorFlowReshapeOperator,
                             ::tflite::ReshapeOptions,
@ -832,6 +847,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
                                 OperatorType::kMaxPool));
  ops.emplace_back(new Mul(::tflite::BuiltinOperator_MUL, OperatorType::kMul));
  ops.emplace_back(new Pad(::tflite::BuiltinOperator_PAD, OperatorType::kPad));
  ops.emplace_back(
      new PadV2(::tflite::BuiltinOperator_PADV2, OperatorType::kPadV2));
  ops.emplace_back(new Reshape(::tflite::BuiltinOperator_RESHAPE,
                               OperatorType::kTensorFlowReshape));
  ops.emplace_back(
@ -106,6 +106,7 @@ void MakeGeneralGraphTransformationsSet(
  transformations->Add(new ResolveSpaceToBatchNDAttributes);
  transformations->Add(new ResolveBatchToSpaceNDAttributes);
  transformations->Add(new ResolvePadAttributes);
  transformations->Add(new ResolvePadV2Attributes);
  transformations->Add(new ResolveStridedSliceAttributes);
  transformations->Add(new ResolveSliceAttributes);
  transformations->Add(new ResolveMeanAttributes);
@ -356,6 +356,7 @@ const char* OperatorTypeName(OperatorType type) {
    HANDLE_OPERATORTYPENAME_CASE(TensorFlowMinimum)
    HANDLE_OPERATORTYPENAME_CASE(Neg)
    HANDLE_OPERATORTYPENAME_CASE(Pad)
    HANDLE_OPERATORTYPENAME_CASE(PadV2)
    HANDLE_OPERATORTYPENAME_CASE(StridedSlice)
    HANDLE_OPERATORTYPENAME_CASE(Stack)
    HANDLE_OPERATORTYPENAME_CASE(Range)