Merge pull request #38481 from edgeimpulse:master

PiperOrigin-RevId: 309125746
Change-Id: I63d5ec46c9dfde162c687b933d8361ebd15b7d28
This commit is contained in:
TensorFlower Gardener 2020-04-29 17:06:04 -07:00
commit f9aea1e784
3 changed files with 90 additions and 8 deletions

View File

@ -48,7 +48,17 @@ cp tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/\
arm_math.h mbed-os/cmsis/TARGET_CORTEX_M/arm_math.h
```
This issue will be resolved soon. Now type
There's also a dependency on an old cmsis_gcc.h, which you can fix with the
following:
```
cp tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/Core/Include/\
cmsis_gcc.h mbed-os/cmsis/TARGET_CORTEX_M/cmsis_gcc.h
```
This issue will be resolved soon.
Now type:
```
mbed compile -m DISCO_F746NG -t GCC_ARM

View File

@ -145,7 +145,7 @@ TfLiteStatus AverageEvalInt8(TfLiteContext* context, const TfLiteNode* node,
ARM_MATH_SUCCESS);
#else
#pragma message( \
"CMSIS-NN optimization for depthwise_conv not available for this target. Using reference kernel.")
"CMSIS-NN optimization for avg_pool not available for this target. Using reference kernel.")
PoolParams op_params;
op_params.stride_height = params->stride_height;
@ -165,8 +165,8 @@ TfLiteStatus AverageEvalInt8(TfLiteContext* context, const TfLiteNode* node,
}
void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input, TfLiteTensor* output) {
TfLitePoolParams* params, OpData* data, TfLiteTensor* input,
TfLiteTensor* output) {
float activation_min, activation_max;
CalculateActivationRange(params->activation, &activation_min,
&activation_max);
@ -187,7 +187,7 @@ void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node,
TfLitePoolParams* params, OpData* data,
const TfLiteTensor* input, TfLiteTensor* output) {
TfLiteTensor* input, TfLiteTensor* output) {
int32_t activation_min, activation_max;
(void)CalculateActivationRangeQuantized(context, params->activation, output,
&activation_min, &activation_max);
@ -206,6 +206,74 @@ void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node,
GetTensorData<uint8_t>(output));
}
// Evaluates int8 max pooling for a single node.
// On targets with __ARM_FEATURE_DSP, dispatches to the CMSIS-NN
// arm_max_pool_s8_opt kernel; otherwise falls back to the reference
// integer kernel. Always returns kTfLiteOk (failures on the CMSIS path
// are caught by TF_LITE_ENSURE_EQ / DCHECKs).
TfLiteStatus MaxEvalInt8(TfLiteContext* context, const TfLiteNode* node,
const TfLitePoolParams* params, const OpData* data,
TfLiteTensor* input, TfLiteTensor* output) {
// Output clamp range derived from the fused activation and the output
// tensor's quantization parameters. Return status deliberately ignored.
int32_t activation_min, activation_max;
(void)CalculateActivationRangeQuantized(context, params->activation, output,
&activation_min, &activation_max);
TFLITE_DCHECK_LE(activation_min, activation_max);
#if defined(__ARM_FEATURE_DSP)
// CMSIS-NN optimized path. Both tensors must be rank 4 (presumably NHWC,
// given dims 1/2 are treated as height/width and dim 3 as depth).
RuntimeShape input_shape = GetTensorShape(input);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
RuntimeShape output_shape = GetTensorShape(output);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params->stride_height;
const int stride_width = params->stride_width;
const int filter_height = params->filter_height;
const int filter_width = params->filter_width;
const int padding_height = data->padding.height;
const int padding_width = data->padding.width;
// Scratch buffer for the CMSIS-NN kernel; node->user_data holds the
// index requested earlier (a value of -1 means no buffer was allocated,
// in which case nullptr is passed through to the kernel).
int16_t* scratch_buffer = nullptr;
auto* buffer_idx = reinterpret_cast<int*>(node->user_data);
if (*buffer_idx > -1) {
void* raw = context->GetScratchBuffer(context, *buffer_idx);
scratch_buffer = reinterpret_cast<int16_t*>(raw);
}
// The kernel must report ARM_MATH_SUCCESS; anything else fails the node.
TF_LITE_ENSURE_EQ(
context,
arm_max_pool_s8_opt(input_height, input_width, output_height,
output_width, stride_height, stride_width,
filter_height, filter_width, padding_height,
padding_width, activation_min, activation_max, depth,
GetTensorData<int8_t>(input), scratch_buffer,
GetTensorData<int8_t>(output)),
ARM_MATH_SUCCESS);
#else
// Portable fallback: warn at compile time, then run the reference kernel.
#pragma message( \
"CMSIS-NN optimization for max_pool not available for this target. Using reference kernel.")
PoolParams op_params;
op_params.stride_height = params->stride_height;
op_params.stride_width = params->stride_width;
op_params.filter_height = params->filter_height;
op_params.filter_width = params->filter_width;
op_params.padding_values.height = data->padding.height;
op_params.padding_values.width = data->padding.width;
op_params.quantized_activation_min = activation_min;
op_params.quantized_activation_max = activation_max;
reference_integer_ops::MaxPool(
op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(output), GetTensorData<int8_t>(output));
#endif
return kTfLiteOk;
}
} // namespace
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
@ -278,7 +346,8 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData data;
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* input = &context->tensors[flatbuffers::EndianScalar(
node->inputs->data[kInputTensor])];
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, &data));
@ -290,6 +359,9 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
case kTfLiteUInt8:
MaxEvalQuantizedUInt8(context, node, params, &data, input, output);
break;
case kTfLiteInt8:
MaxEvalInt8(context, node, params, &data, input, output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));

View File

@ -28,8 +28,8 @@ LEON_BCC2_MD5 := "cdf78082be4882da2a92c9baa82fe765"
TSIM_URL := "https://www.gaisler.com/anonftp/tsim/tsim-eval-2.0.63.tar.gz"
TSIM_MD5 := "afa0095d3ed989a949e1467f94e41d2f"
CMSIS_URL := "https://github.com/ARM-software/CMSIS_5/archive/3d8235079ade1e4df06f91be65e0309cc45e1952.zip"
CMSIS_MD5 := "f3e93203e875caf4ba6aff0bccd95d85"
CMSIS_URL := "https://github.com/ARM-software/CMSIS_5/archive/8a4db53f69da06e97565fe2f2e8926d193a5759d.zip"
CMSIS_MD5 := "e9864fb71b65adc4f7d92a9dea6e1aab"
AM_SDK_URL := "http://s3.asia.ambiqmicro.com/downloads/AmbiqSuite-Rel2.2.0.zip"
AM_SDK_MD5 := "7605fa2d4d97e6bb7a1190c92b66b597"