Change the scratch tensor to a stack array for float SVDF.

Similar to integer SVDF. This makes TFL and TFLM models compatible.

PiperOrigin-RevId: 301893460
Change-Id: I6b279d359ca0985bcc7cfeae908a83214ae8a2e0
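In essence, the float path now mirrors the integer path's scratch handling: instead of fetching the scratch buffer from an extra model input tensor, the kernel carves it out of a fixed-size stack array, so no scratch tensor has to be wired into the serialized model. A minimal sketch of the pattern follows; the value given for kScratchTensorMaxSize and the EvalFloatSVDFSketch wrapper are illustrative assumptions (the real constant is defined in the kernel and must cover batch_size * num_filters floats):

    // Illustrative value only; the kernel defines its own kScratchTensorMaxSize.
    constexpr int kScratchTensorMaxSize = 10 * 10;

    void EvalFloatSVDFSketch(int batch_size, int num_filters) {
      // Before this change the buffer came from a sixth op input:
      //   float* scratch_ptr = GetTensorData<float>(scratch);
      // After it, the buffer lives on the stack for the lifetime of Eval().
      float scratch_tensor[kScratchTensorMaxSize];
      float* scratch_ptr = scratch_tensor;
      // The time-weight/bias/activation math then writes through scratch_ptr,
      // touching at most batch_size * num_filters elements.
      (void)scratch_ptr;
      (void)batch_size;
      (void)num_filters;
    }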
@@ -116,14 +116,11 @@ static inline void ApplyTimeWeightsBiasAndActivation(
   }
 }
 
-inline void EvalFloatSVDF(TfLiteContext* context, TfLiteNode* node,
-                          const TfLiteTensor* input,
-                          const TfLiteTensor* weights_feature,
-                          const TfLiteTensor* weights_time,
-                          const TfLiteTensor* bias,
-                          const TfLiteSVDFParams* params, TfLiteTensor* scratch,
-                          TfLiteTensor* activation_state,
-                          TfLiteTensor* output) {
+inline void EvalFloatSVDF(
+    TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* input,
+    const TfLiteTensor* weights_feature, const TfLiteTensor* weights_time,
+    const TfLiteTensor* bias, const TfLiteSVDFParams* params,
+    TfLiteTensor* activation_state, TfLiteTensor* output) {
   const int rank = params->rank;
   const int batch_size = input->dims->data[0];
   const int input_size = input->dims->data[1];
@@ -137,7 +134,11 @@ inline void EvalFloatSVDF(TfLiteContext* context, TfLiteNode* node,
   const float* input_ptr = GetTensorData<float>(input);
 
   float* state_ptr = GetTensorData<float>(activation_state);
-  float* scratch_ptr = GetTensorData<float>(scratch);
+
+  // TODO(b/132070898): Move this temp variable to the new scratch buffer API
+  // when ready.
+  float scratch_tensor[kScratchTensorMaxSize];
+  float* scratch_ptr = scratch_tensor;
 
   float* output_ptr = GetTensorData<float>(output);
 
@@ -421,7 +422,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     // Validate output tensor:
     TF_LITE_ENSURE_EQ(context, output->type, kTfLiteInt8);
   } else {
-    TF_LITE_ENSURE_EQ(context, node->inputs->size, 6);
+    TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
 
     // Validate Input Tensor dtypes:
     TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32);
@@ -436,15 +437,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     // [0] = Holds dot-product of time-forward calculations in
    //        ApplyTimeWeightsBiasAndActivation():
    //        float/int32, {2, batch_size, num_filters}
-    // TODO(b/132070898): Use input tensor as variable until scratch tensor
-    // allocation has been implemented (b/132070898) TfLiteTensor*
-    // scratch_tensor = GetTemporary(context, node, 0);
-    TfLiteTensor* scratch_tensor = &context->tensors[node->inputs->data[5]];
-    TF_LITE_ENSURE_EQ(context, scratch_tensor->type, kTfLiteFloat32);
-
-    TF_LITE_ENSURE_EQ(context, NumDimensions(scratch_tensor), 2);
-    TF_LITE_ENSURE_EQ(context, scratch_tensor->dims->data[0], batch_size);
-    TF_LITE_ENSURE_EQ(context, scratch_tensor->dims->data[1], num_filters);
+    // TODO(b/132070898): Scratch values are used as stack variables in
+    // EvalIntegerSVDF().
 
     // Full-float SVDF only uses the one shared scratch tensor (see above for
     // usage).
@@ -475,11 +469,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   switch (weights_feature->type) {
     case kTfLiteFloat32: {
       // TODO(b/132070898): Use input tensor as variable until scratch tensor
-      // allocation has been implemented. TfLiteTensor* scratch =
-      // GetTemporary(context, node, /*index=*/0);
-      TfLiteTensor* scratch = &context->tensors[node->inputs->data[5]];
+      // allocation has been implemented.
+      // TfLiteTensor* scratch = GetTemporary(context, node, /*index=*/0);
       EvalFloatSVDF(context, node, input, weights_feature, weights_time, bias,
-                    params, scratch, activation_state, output);
+                    params, activation_state, output);
       return kTfLiteOk;
       break;
     }
@@ -124,7 +124,7 @@ static float svdf_golden_output_rank_2[] = {
 void ValidateSVDFGoldens(const int batch_size, const int num_units,
                          const int input_size, const int rank,
                          TfLiteTensor* tensors, const int tensor_count,
-                         bool is_hybrid_op, float* golden_input_data,
+                         float* golden_input_data,
                          const int golden_input_data_size, float* output_data,
                          float* expected_output, float tolerance = 1e-5f) {
   TfLiteContext context;
@@ -145,30 +145,15 @@ void ValidateSVDFGoldens(const int batch_size, const int num_units,
   }
 
   // Bias is an optional tensor:
-  // TODO(kreeger): Use input tensor as variable until scratch tensor allocation
-  // has been implemented (b/132070898)
-  // int inputs_array_data[] = {5, 0, 1, 2, kTfLiteOptionalTensor, 3};
-  int inputs_array_data[] = {6, 0, 1, 2, kTfLiteOptionalTensor, 3, 5};
+  int inputs_array_data[] = {5, 0, 1, 2, kTfLiteOptionalTensor, 3};
   TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
 
   int outputs_array_data[] = {1, 4};
   TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
 
-  int temporaries_array_data[] = {1, 5};
-  TfLiteIntArray* temporaries_array = IntArrayFromInts(temporaries_array_data);
-
-  int hybrid_temporaries_array_data[] = {4, 5, 6, 7, 8};
-  TfLiteIntArray* hybrid_temporaries_array =
-      IntArrayFromInts(hybrid_temporaries_array_data);
-
   TfLiteNode node;
   node.inputs = inputs_array;
   node.outputs = outputs_array;
-  if (is_hybrid_op) {
-    node.temporaries = hybrid_temporaries_array;
-  } else {
-    node.temporaries = temporaries_array;
-  }
   node.user_data = user_data;
   node.builtin_data = reinterpret_cast<void*>(&params);
   node.custom_initial_data = nullptr;
@@ -294,14 +279,10 @@ void TestSVDF(const int batch_size, const int num_units, const int input_size,
   TfLiteIntArray* activation_state_dims =
       IntArrayFromInts(activation_state_dims_args);
 
-  // Scratch output is the same shape as output:
-  const int scratch_dims_args[] = {2, batch_size, num_filters};
-  TfLiteIntArray* scratch_dims = IntArrayFromInts(scratch_dims_args);
-
   const int output_dims_args[] = {2, batch_size, num_units};
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_args);
 
-  const int tensor_count = 6;  // 4 inputs, 1 output, 1 scratch
+  const int tensor_count = 5;  // 4 inputs, 1 output
   TfLiteTensor tensors[] = {
       CreateFloatTensor(input_data, input_dims, "input"),
       CreateFloatTensor(weights_feature_data, weights_feature_dims,
@@ -310,13 +291,11 @@ void TestSVDF(const int batch_size, const int num_units, const int input_size,
       CreateFloatTensor(activation_state_data, activation_state_dims,
                         "activation_state", true /* is_variable */),
       CreateFloatTensor(output_data, output_dims, "output"),
-      CreateFloatTensor(scratch_data, scratch_dims, "scratch"),
   };
 
   ValidateSVDFGoldens(batch_size, num_units, input_size, rank, tensors,
-                      tensor_count, false /* is_hybrid */, golden_input_data,
-                      golden_input_data_size, output_data, expected_output,
-                      tolerance);
+                      tensor_count, golden_input_data, golden_input_data_size,
+                      output_data, expected_output, tolerance);
 }
 
 inline void TestIntegerSVDF(
@@ -583,12 +562,12 @@ TF_LITE_MICRO_TEST(BlackBoxTestIntegerRank1) {
   const int output_dims_count = batch_size * num_units;
   int8_t output_data[output_dims_count];
 
-  float input_scale = 1.f / INT8_MAX; // Range is [-1, 1]
+  float input_scale = 1.f / INT8_MAX;  // Range is [-1, 1]
   float weights_feature_scale = 0.5f / INT8_MAX;  // Range is [-0.5, 0.5]
   float weights_time_scale = 1.f / INT16_MAX;  // Range is [-1, 1]
-  float activation_scale = 16.f / INT16_MAX; // Range is [-16, 16]
-  float bias_scale = 512.f / INT32_MAX; // Range is [-512, 512]
-  float output_scale = 0.5f / INT8_MAX; // Range is [-0.5, 0.5]
+  float activation_scale = 16.f / INT16_MAX;  // Range is [-16, 16]
+  float bias_scale = 512.f / INT32_MAX;  // Range is [-512, 512]
+  float output_scale = 0.5f / INT8_MAX;  // Range is [-0.5, 0.5]
 
   tflite::testing::TestIntegerSVDF(
       batch_size, num_units, input_size, memory_size, rank, input_data,