PR #46195: micro: Add Sony Spresense board target

Imported from GitHub PR https://github.com/tensorflow/tensorflow/pull/46195

Add a build target for the Sony Spresense board.
To build it, the Spresense SDK is required.
The hello_world, micro_speech, and person_detection examples are added for the board.

The related issue is #46240
Copybara import of the project:

--
bff4913f03be75a368c6845c448fdd2fe5624bb1 by Takayoshi Koizumi <takayoshi.koizumi@gmail.com>:

Add Sony Spresense board target

Add a build target for the Sony Spresense board.
To build it, the Spresense SDK is required.
The hello_world, micro_speech, and person_detection examples are added for the
board.

COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/tensorflow/pull/46195 from takayoshi-k:add_spresense_target bff4913f03be75a368c6845c448fdd2fe5624bb1
PiperOrigin-RevId: 356357681
Change-Id: I4b62059ef27b47775431168536da563a56343a6d
This commit is contained in:
Takayoshi Koizumi 2021-02-08 14:46:02 -08:00 committed by TensorFlower Gardener
parent 236369d651
commit 885c55ca95
12 changed files with 599 additions and 0 deletions

View File

@ -0,0 +1,18 @@
# Settings for Spresense platform for Hello World example
# This should be read when the EXTERNALS_TENSORFLOW_EXAMPLE_HELLOWORLD option is selected
# in Spresense configuration.
ifeq ($(TARGET), spresense)
ifeq ($(CONFIG_EXTERNALS_TENSORFLOW_EXAMPLE_HELLOWORLD),y)
SPRESENSE_HELLO_WORLD_EXCLUDED_SRCS = \
tensorflow/lite/micro/examples/hello_world/main.cc
SPRESENSE_HELLO_WORLD_SRCS = \
$(filter-out $(SPRESENSE_HELLO_WORLD_EXCLUDED_SRCS),$(HELLO_WORLD_SRCS))
# In the Spresense case, these files should be included in libtensorflow-microlite.
THIRD_PARTY_CC_SRCS += $(SPRESENSE_HELLO_WORLD_SRCS)
endif
endif

View File

@ -0,0 +1,84 @@
# Hello World Example for Spresense
This page explains how to build and run the Hello World example on Spresense.
To try it on the Spresense, the following hardware is required: a Spresense
Main board, which is a microcontroller board.
## Table of contents
- [How to build](#how-to-build)
- [How to run](#how-to-run)
## How to build
The tensorflow.git repository is downloaded by the Spresense build system.
### Initial setup
The Spresense SDK build system is required to build this example. The following
instructions will help you set it up on your PC.
[Spresense SDK Getting Started Guide:EN](https://developer.sony.com/develop/spresense/docs/sdk_set_up_en.html)
[Spresense SDK Getting Started Guide:JA](https://developer.sony.com/develop/spresense/docs/sdk_set_up_ja.html)
[Spresense SDK Getting Started Guide:CN](https://developer.sony.com/develop/spresense/docs/sdk_set_up_zh.html)
After setting up the build system, download the
[Spresense repository](https://github.com/sonydevworld/spresense).
```
git clone --recursive https://github.com/sonydevworld/spresense.git
```
### Configure Spresense for this example
The Spresense SDK uses the Kconfig mechanism to configure its software
components, so you first need to configure it for this example. The Spresense
SDK provides several default configurations, including one for this Hello World
example.
1. Go to sdk/ directory in the repository.
```
cd spresense/sdk
```
2. Execute config.py to configure for this example.
```
./tools/config.py examples/tf_example_hello_world
```
This command creates a .config file in the spresense/nuttx directory.
### Build and Flash the binary into Spresense Main board
After configuration, run make and then flash the built image.
1. Execute the "make" command in the same directory where you ran the configuration.
```
make
```
2. Flash the built image into the Spresense Main board. If the build is
successful, a file named nuttx.spk is created in the current directory; flash
it into the Spresense Main board with the command below. Make sure a USB cable
is connected between the board and your PC. The board is recognized as a
USB/serial device such as /dev/ttyUSB0 on your PC. In this explanation, we
assume that the device is recognized as /dev/ttyUSB0.
```
./tools/flash.sh -c /dev/ttyUSB0 nuttx.spk
```
## How to run
To run the example, connect to the device with terminal software such as
"minicom". You should then see an "nsh>" prompt. (If you don't see the prompt,
try pressing Enter.)
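A minimal way to open the serial console, assuming the board shows up as
/dev/ttyUSB0 and the console uses the default 115200 baud (adjust both for
your setup):
```
minicom -D /dev/ttyUSB0 -b 115200
```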
1. Execute the tf_example command at the prompt.
```
nsh> tf_example
```

View File

@ -0,0 +1,22 @@
# Settings for Spresense platform for Micro Speech example
# This should be read when the EXTERNALS_TENSORFLOW_EXAMPLE_MICROSPEECH option is selected
# in Spresense configuration.
ifeq ($(TARGET), spresense)
ifeq ($(CONFIG_EXTERNALS_TENSORFLOW_EXAMPLE_MICROSPEECH),y)
SPRESENSE_MICRO_SPEECH_EXCLUDED_SRCS = \
tensorflow/lite/micro/examples/micro_speech/main.cc \
tensorflow/lite/micro/examples/micro_speech/audio_provider.cc \
tensorflow/lite/micro/examples/micro_speech/command_responder.cc
SPRESENSE_MICRO_SPEECH_SRCS = \
tensorflow/lite/micro/examples/micro_speech/spresense/src/spresense_audio_provider.cc \
tensorflow/lite/micro/examples/micro_speech/spresense/src/spresense_command_responder.cc \
$(filter-out $(SPRESENSE_MICRO_SPEECH_EXCLUDED_SRCS),$(MICRO_SPEECH_SRCS))
# In the Spresense case, these files should be included in libtensorflow-microlite.
THIRD_PARTY_CC_SRCS += $(SPRESENSE_MICRO_SPEECH_SRCS)
endif
endif

View File

@ -0,0 +1,94 @@
# Micro Speech Example for Spresense
This page explains how to build and run the Micro Speech example on Spresense.
To try it on the Spresense, the following hardware is required: a Spresense
Main board, which is a microcontroller board; a Spresense Extension board, for
connecting a microphone; and an analog microphone such as a MEMS mic.
To connect a microphone to the Spresense Extension board, the following page
helps you:
[How to use microphones](https://developer.sony.com/develop/spresense/docs/hw_docs_en.html#_how_to_use_microphones)
For this example, the Mic-A input is used.
## Table of contents
- [How to build](#how-to-build)
- [How to run](#how-to-run)
## How to build
The tensorflow.git repository is downloaded by the Spresense build system.
### Initial setup
The Spresense SDK build system is required to build this example. The following
instructions will help you set it up on your PC.
[Spresense SDK Getting Started Guide:EN](https://developer.sony.com/develop/spresense/docs/sdk_set_up_en.html)
[Spresense SDK Getting Started Guide:JA](https://developer.sony.com/develop/spresense/docs/sdk_set_up_ja.html)
[Spresense SDK Getting Started Guide:CN](https://developer.sony.com/develop/spresense/docs/sdk_set_up_zh.html)
After setting up the build system, download the
[Spresense repository](https://github.com/sonydevworld/spresense).
```
git clone --recursive https://github.com/sonydevworld/spresense.git
```
### Configure Spresense for this example
The Spresense SDK uses the Kconfig mechanism to configure its software
components, so you first need to configure it for this example. The Spresense
SDK provides several default configurations, including one for this Micro
Speech example.
1. Go to sdk/ directory in the repository.
```
cd spresense/sdk
```
2. Execute config.py to configure for this example.
```
./tools/config.py examples/tf_example_micro_speech
```
This command creates a .config file in the spresense/nuttx directory.
### Build and Flash the binary into Spresense Main board
After configuration, run make and then flash the built image.
1. Execute the "make" command in the same directory where you ran the configuration.
```
make
```
2. Flash the built image into the Spresense Main board. If the build is
successful, a file named nuttx.spk is created in the current directory; flash
it into the Spresense Main board with the command below. Make sure a USB cable
is connected between the board and your PC. The board is recognized as a
USB/serial device such as /dev/ttyUSB0 on your PC. In this explanation, we
assume that the device is recognized as /dev/ttyUSB0.
```
./tools/flash.sh -c /dev/ttyUSB0 nuttx.spk
```
## How to run
To run the example, connect to the device with terminal software such as
"minicom". You should then see an "nsh>" prompt. (If you don't see the prompt,
try pressing Enter.)
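For example, assuming the board appears as /dev/ttyUSB0 and the console runs
at the default 115200 baud, the console can be opened with:
```
minicom -D /dev/ttyUSB0 -b 115200
```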
1. Execute the tf_example command at the prompt.
```
nsh> tf_example
```
2. Say 'yes' or 'no' into the microphone. When Micro Speech recognizes the
word, 'yes' or 'no' is shown in the log.

View File

@ -0,0 +1,66 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// SPRESENSE_CONFIG_H is defined as a compiler option.
// It refers to "nuttx/config.h" from the Spresense SDK so that the configured
// parameters are visible here.
#include SPRESENSE_CONFIG_H
#include "spresense_audio_provider.h"
#include "tensorflow/lite/micro/examples/micro_speech/audio_provider.h"
#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_model_settings.h"
// The definition below is for dumping captured audio data for debugging.
// #define CAPTURE_DATA
#ifdef CAPTURE_DATA
#include <stdio.h>
#include <string.h>
static int16_t tmp_data[16000];
static int data_cnt;
static bool is_printed = false;
#endif
TfLiteStatus GetAudioSamples(tflite::ErrorReporter* error_reporter,
                             int start_ms, int duration_ms,
                             int* audio_samples_size, int16_t** audio_samples) {
  if (spresense_audio_getsamples(start_ms, duration_ms, kAudioSampleFrequency,
                                 audio_samples_size, audio_samples) < 0) {
    return kTfLiteError;
  } else {
#ifdef CAPTURE_DATA
    if (start_ms >= 10000) {
      if (data_cnt == 0) printf("=========== Start Recording ==============\n");
      if (data_cnt < 16000) {
        int sz = (16000 - data_cnt) > *audio_samples_size ? *audio_samples_size
                                                          : (16000 - data_cnt);
        memcpy(&tmp_data[data_cnt], *audio_samples, sz * 2);
        data_cnt += sz;
      }
      if (!is_printed && data_cnt >= 16000) {
        printf("============ Stop Recording =============\n");
        for (int i = 0; i < 16000; i++) {
          printf("%d\n", tmp_data[i]);
        }
        is_printed = true;
      }
    }
#endif
    return kTfLiteOk;
  }
}
int32_t LatestAudioTimestamp() { return spresense_audio_lasttimestamp(); }

View File

@ -0,0 +1,33 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// SPRESENSE_CONFIG_H is defined as a compiler option.
// It refers to "nuttx/config.h" from the Spresense SDK so that the configured
// parameters are visible here.
#include SPRESENSE_CONFIG_H
#include "spresense_command_responder.h"
#include "tensorflow/lite/micro/examples/micro_speech/command_responder.h"
// The default implementation writes out the name of the recognized command
// to the error console. Real applications will want to take some custom
// action instead, and should implement their own versions of this function.
void RespondToCommand(tflite::ErrorReporter* error_reporter,
                      int32_t current_time, const char* found_command,
                      uint8_t score, bool is_new_command) {
  TF_LITE_REPORT_ERROR(error_reporter, "%s Heard %s (%d) @%dms",
                       is_new_command ? "F" : " ", found_command, score,
                       current_time);
}

View File

@ -0,0 +1,20 @@
# Settings for Spresense platform for Person detection example
# This should be read when the EXTERNALS_TENSORFLOW_EXAMPLE_PERSONDETECTION option is selected
# in Spresense configuration.
ifeq ($(TARGET), spresense)
ifeq ($(CONFIG_EXTERNALS_TENSORFLOW_EXAMPLE_PERSONDETECTION),y)
SPRESENSE_PERSON_DETECTION_EXCLUDED_SRCS = \
tensorflow/lite/micro/examples/person_detection/main.cc \
tensorflow/lite/micro/examples/person_detection/image_provider.cc
SPRESENSE_PERSON_DETECTION_SRCS = \
tensorflow/lite/micro/examples/person_detection/spresense/src/spresense_image_provider.cc \
$(filter-out $(SPRESENSE_PERSON_DETECTION_EXCLUDED_SRCS),$(person_detection_SRCS))
# In the Spresense case, these files should be included in libtensorflow-microlite.
THIRD_PARTY_CC_SRCS += $(SPRESENSE_PERSON_DETECTION_SRCS)
endif
endif

View File

@ -0,0 +1,89 @@
# Person detection example for Spresense
This page explains how to build and run the Person detection example on
Spresense. To try it on the Spresense, the following hardware is required: a
Spresense Main board, which is a microcontroller board; a Spresense Extension
board; and a Spresense Camera board, which is for capturing images.
## Table of contents
- [How to build](#how-to-build)
- [How to run](#how-to-run)
## How to build
The tensorflow.git repository is downloaded by the Spresense build system.
### Initial setup
The Spresense SDK build system is required to build this example. The following
instructions will help you set it up on your PC.
[Spresense SDK Getting Started Guide:EN](https://developer.sony.com/develop/spresense/docs/sdk_set_up_en.html)
[Spresense SDK Getting Started Guide:JA](https://developer.sony.com/develop/spresense/docs/sdk_set_up_ja.html)
[Spresense SDK Getting Started Guide:CN](https://developer.sony.com/develop/spresense/docs/sdk_set_up_zh.html)
After setting up the build system, download the
[Spresense repository](https://github.com/sonydevworld/spresense).
```
git clone --recursive https://github.com/sonydevworld/spresense.git
```
### Configure Spresense for this example
The Spresense SDK uses the Kconfig mechanism to configure its software
components, so you first need to configure it for this example. The Spresense
SDK provides several default configurations, including one for this Person
detection example.
1. Go to sdk/ directory in the repository.
```
cd spresense/sdk
```
2. Execute config.py to configure for this example.
```
./tools/config.py examples/tf_example_persondetection
```
This command creates a .config file in the spresense/nuttx directory.
### Build and Flash the binary into Spresense Main board
After configuration, run make and then flash the built image.
1. Execute the "make" command in the same directory where you ran the configuration.
```
make
```
2. Flash the built image into the Spresense Main board. If the build is
successful, a file named nuttx.spk is created in the current directory; flash
it into the Spresense Main board with the command below. Make sure a USB cable
is connected between the board and your PC. The board is recognized as a
USB/serial device such as /dev/ttyUSB0 on your PC. In this explanation, we
assume that the device is recognized as /dev/ttyUSB0.
```
./tools/flash.sh -c /dev/ttyUSB0 nuttx.spk
```
## How to run
To run the example, connect to the device with terminal software such as
"minicom". You should then see an "nsh>" prompt. (If you don't see the prompt,
try pressing Enter.)
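One way to open the serial console, assuming the device is /dev/ttyUSB0 and
the default 115200 baud (adjust to your environment):
```
minicom -D /dev/ttyUSB0 -b 115200
```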
1. Execute the tf_example command at the prompt.
```
nsh> tf_example
```
2. Put a person's face in the camera's view. The detection result, a score
indicating whether a person is present or not, is printed on the terminal.

View File

@ -0,0 +1,32 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// SPRESENSE_CONFIG_H is defined as a compiler option.
// It refers to "nuttx/config.h" from the Spresense SDK so that the configured
// parameters are visible here.
#include SPRESENSE_CONFIG_H
#include "spresense_image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection/image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data) {
  if (spresense_getimage((unsigned char*)image_data) == 0) {
    return kTfLiteOk;
  } else {
    return kTfLiteError;
  }
}

View File

@ -0,0 +1,21 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
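// Minimal stubs, presumably to satisfy link-time references from the
// arm-none-eabi (newlib) toolchain that this build does not otherwise provide:
// _impure_ptr is newlib's reentrancy pointer and __assert_func is its
// assertion-failure handler; both are defined here as do-nothing dummies.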
extern "C" {
char dummy[16];
char* _impure_ptr = &dummy[0];
void __assert_func(const char*, int, const char*, const char*) {}
}

View File

@ -0,0 +1,20 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/debug_log.h"
#include <stdio.h>
extern "C" void DebugLog(const char* s) { printf(s); }

View File

@ -0,0 +1,100 @@
# Settings for Spresense-based platforms
# For Spresense, TensorFlow Lite for Microcontrollers is used as a library.
# This settings makefile accepts 4 optional parameters on the make command line.
# The options below are needed to build an example of TensorFlow Lite for
# Microcontrollers; to build just the library, there is no need to add them.
#
# SPRESENSE_DEFS     : File path to Make.defs, which contains the configuration
#                      parameters of Spresense.
# SPRESENSE_CONFIG_H : File path to config.h, which contains the configuration
#                      parameters used by the source code.
# SPRESENSE_CURDIR   : Directory path of externals/tensorflow in the Spresense
#                      source repository.
# SPRESENSE_APP_TFMAKE : File path to a makefile.inc that adds source code for
#                      using TensorFlow in Spresense.
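#
# An illustrative invocation from the Spresense side (paths are placeholders,
# not part of this change; the actual integration may differ):
#   make -f tensorflow/lite/micro/tools/make/Makefile TARGET=spresense \
#     SPRESENSE_DEFS=<spresense>/nuttx/Make.defs \
#     SPRESENSE_CONFIG_H=<spresense>/nuttx/include/nuttx/config.h \
#     SPRESENSE_CURDIR=<spresense>/externals/tensorflow \
#     SPRESENSE_APP_TFMAKE=<app>/makefile.inc \
#     microlite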
# Save the compiler flags to avoid overriding them when loading the Spresense config
TMP_CXXFLAGS := $(CXXFLAGS)
TMP_CCFLAGS := $(CCFLAGS)
# Define an empty variable for adding Spresense-specific settings
SPRESENSE_PLATFORM_FLAGS :=
ifneq ($(SPRESENSE_DEFS),)
# Load Spresense Config
include $(SPRESENSE_DEFS)
SPRESENSE_PLATFORM_FLAGS := \
-DSPRESENSE_CONFIG_H="\"$(SPRESENSE_CONFIG_H)\"" \
-I$(SPRESENSE_CURDIR)/wrapper_include
# Load application for TensorFlow Lite Micro in Spresense
ifneq ($(SPRESENSE_APP_TFMAKE),)
ifeq ($(CONFIG_EXTERNALS_TENSORFLOW_EXAMPLE_NONE),y)
-include $(SPRESENSE_APP_TFMAKE)
endif
endif
endif
TARGET_ARCH := cortex-m4
TARGET_TOOLCHAIN_PREFIX := arm-none-eabi-
PLATFORM_FLAGS = \
$(SPRESENSE_PLATFORM_FLAGS) \
-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \
-DTF_LITE_STATIC_MEMORY \
-DTF_LITE_MCU_DEBUG_LOG \
-fmessage-length=0 \
-fno-exceptions \
-fno-unwind-tables \
-ffunction-sections \
-fdata-sections \
-funsigned-char \
-MMD \
-mcpu=cortex-m4 \
-mabi=aapcs \
-mthumb \
-mfpu=fpv4-sp-d16 \
-mfloat-abi=hard \
-Wall \
-Wextra \
-Wno-shadow \
-Wno-vla \
-Wno-strict-aliasing \
-Wno-type-limits \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-Wno-write-strings \
-Wno-sign-compare \
-Wunused-function \
-fno-delete-null-pointer-checks \
-fomit-frame-pointer \
-Os
CXXFLAGS := $(TMP_CXXFLAGS) $(PLATFORM_FLAGS) -std=gnu++11 -fno-rtti -fno-use-cxa-atexit
CCFLAGS := $(TMP_CCFLAGS) $(PLATFORM_FLAGS)
BUILD_TYPE := micro
INCLUDES += -isystem$(MAKEFILE_DIR)/downloads/cmsis/CMSIS/Core/Include/
THIRD_PARTY_CC_SRCS := \
$(THIRD_PARTY_CC_SRCS) \
$(MAKEFILE_DIR)/../../spresense/compiler_specific.cc \
# TODO: The Spresense environment does not support tests yet,
# so remove all tests.
MICROLITE_TEST_SRCS :=
MICRO_LITE_EXAMPLE_TESTS :=
# These are microcontroller-specific rules for converting the ELF output
# of the linker into a binary image that can be loaded directly.
OBJCOPY := $(TARGET_TOOLCHAIN_PREFIX)objcopy
$(BINDIR)/%.bin: $(BINDIR)/%
	@mkdir -p $(dir $@)
	$(OBJCOPY) $< $@ -O binary