From d46acc6016666abfc27d28e35936ed70979c822e Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen" Date: Wed, 29 Apr 2020 15:05:06 +0800 Subject: [PATCH 01/11] debug_message --- tensorflow/lite/micro/we_i/debug_log.cc | 33 +++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 tensorflow/lite/micro/we_i/debug_log.cc diff --git a/tensorflow/lite/micro/we_i/debug_log.cc b/tensorflow/lite/micro/we_i/debug_log.cc new file mode 100644 index 00000000000..a115d476aff --- /dev/null +++ b/tensorflow/lite/micro/we_i/debug_log.cc @@ -0,0 +1,33 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Implementation for the DebugLog() function that prints to the UART on the +// SparkFun Edge microcontroller. The same should work for other targets using +// the Ambiq Apollo 3. + +#include "tensorflow/lite/micro/debug_log.h" +#include "xprintf.h" +#include "console_io.h" +#include + +extern "C" void DebugLog(const char* s) { + static bool is_initialized = false; + if (!is_initialized) { + xprintf_setup(); + is_initialized = true; + } + + xprintf("%s", s); +} From 521b7595b7adba5627a2687befc3fb41bc5c2bec Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Thu, 4 Jun 2020 17:19:31 +0800 Subject: [PATCH 02/11] TFLM: add HIMAX WE1 EVB to support TFLM example(hello word and person detection INT8) --- .../lite/micro/examples/hello_world/README.md | 132 +++- .../micro/examples/hello_world/README.md~ | 595 ++++++++++++++++++ .../hello_world/himax_we1_evb/constants.cc | 19 + .../himax_we1_evb/output_handler.cc | 35 ++ .../himax_we1_evb/output_handler.cc~ | 53 ++ .../person_detection_experimental/README.md | 129 +++- .../himax_we1_evb/detection_responder.cc | 34 + .../himax_we1_evb/image_provider.cc | 44 ++ .../himax_we1_evb/image_provider.cc~ | 44 ++ .../himax_we1_evb/main_functions.cc | 127 ++++ .../{we_i => himax_we1_evb}/debug_log.cc | 9 +- .../make/targets/himax_we1_evb_makefile.inc | 91 +++ .../make/targets/himax_we1_evb_makefile.inc~ | 93 +++ .../tools/make/third_party_downloads.inc | 4 + .../tools/make/third_party_downloads.inc~ | 86 +++ 15 files changed, 1476 insertions(+), 19 deletions(-) create mode 100644 tensorflow/lite/micro/examples/hello_world/README.md~ create mode 100644 tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc create mode 100644 tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc create mode 100644 tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~ create mode 100644 tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc create mode 100644 tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc create mode 100644 tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ create mode 100644 
tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc
 rename tensorflow/lite/micro/{we_i => himax_we1_evb}/debug_log.cc (90%)
 create mode 100644 tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc
 create mode 100644 tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~
 create mode 100644 tensorflow/lite/micro/tools/make/third_party_downloads.inc~

diff --git a/tensorflow/lite/micro/examples/hello_world/README.md b/tensorflow/lite/micro/examples/hello_world/README.md
index 3b633890306..9c0a5e2306a 100644
--- a/tensorflow/lite/micro/examples/hello_world/README.md
+++ b/tensorflow/lite/micro/examples/hello_world/README.md
@@ -14,13 +14,34 @@ of the device.
 
 ## Table of contents
 
-- [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp)
-- [Deploy to Arduino](#deploy-to-arduino)
-- [Deploy to ESP32](#deploy-to-esp32)
-- [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge)
-- [Deploy to STM32F746](#deploy-to-STM32F746)
-- [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
-- [Train your own model](#train-your-own-model)
+- [Hello World Example](#hello-world-example)
+  - [Table of contents](#table-of-contents)
+  - [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp)
+    - [Initial Setup](#initial-setup)
+    - [Generate Example Project](#generate-example-project)
+    - [Build and Run Example](#build-and-run-example)
+  - [Deploy to Arduino](#deploy-to-arduino)
+    - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library)
+    - [Load and run the example](#load-and-run-the-example)
+  - [Deploy to ESP32](#deploy-to-esp32)
+    - [Install the ESP IDF](#install-the-esp-idf)
+    - [Generate the examples](#generate-the-examples)
+    - [Building the example](#building-the-example)
+    - [Load and run the example](#load-and-run-the-example-1)
+  - [Deploy to himax WE1 EVB](#deploy-to-himax-we1-evb)
+    - [Initial Setup](#initial-setup-1)
+      - [MetaWare Development Toolkit](#metaware-development-toolkit)
+      - [Make Tool version](#make-tool-version)
+      - [Serial Terminal Emulation Application](#serial-terminal-emulation-application)
+    - [Generate Example Project](#generate-example-project-1)
+    - [Build and Burn Example](#build-and-burn-example)
+  - [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge)
+    - [Compile the binary](#compile-the-binary)
+    - [Sign the binary](#sign-the-binary)
+    - [Flash the binary](#flash-the-binary)
+  - [Deploy to STM32F746](#deploy-to-stm32f746)
+  - [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
+  - [Train your own model](#train-your-own-model)
 
 ## Deploy to ARC EM SDP
 
@@ -191,6 +212,103 @@ The previous two commands can be combined:
 ```
 idf.py --port /dev/ttyUSB0 flash monitor
 ```
 
+## Deploy to himax WE1 EVB
+
+The following instructions will help you build and deploy this example to the
+[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
+board. To understand more about using this board, please check the
+[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide).
+
+### Initial Setup
+
+To use the HIMAX WE1 EVB, please make sure the following software is installed:
+
+#### MetaWare Development Toolkit
+
+See the
+[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit)
+section for instructions on toolchain installation.
+
+#### Make Tool version
+
+A `make` tool is required for deploying TensorFlow Lite Micro applications on
+the HIMAX WE1 EVB. See the
+[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool)
+section for the proper environment.
+
+#### Serial Terminal Emulation Application
+
+The HIMAX WE1 EVB debug UART port serves two main purposes:
+
+- printing application output
+- burning the application to flash by sending the application binary over
+  xmodem
+
+You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)).
+
+
+### Generate Example Project
+
+The example project for the HIMAX WE1 EVB platform can be generated with the
+following commands:
+
+Download the related third party data
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads
+```
+
+Generate the hello world project
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_make_project TARGET=himax_we1_evb TAGS=no_arc_mli
+```
+
+### Build and Burn Example
+
+Follow these steps to run the hello world example on the HIMAX WE1 EVB
+platform.
+
+1. Go to the generated example project directory.
+
+    ```
+    cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/hello_world/make
+    ```
+
+2. Build the example using
+
+    ```
+    make app
+    ```
+
+3. After the build finishes, copy the ELF file and map file to the image
+   generation tool directory, located at
+   `tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/`
+
+    ```
+    cp hello_world.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+    ```
+
+4. Go to the flash image generation tool directory.
+
+    ```
+    cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+    ```
+
+5. Run the image generation tool to generate the flash image file.
+
+    * Before running the image generation tool, run `sudo chmod +x image_gen`
+      and `sudo chmod +x sign_tool` to make sure they are executable.
+
+    ```
+    image_gen -e hello_world.elf -m himax_we1_evb.map -o out.img
+    ```
+
+6. Download the flash image file to the HIMAX WE1 EVB over UART:
+
+    * More detail about downloading the image over UART can be found in
+      [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update)
+
+After these steps, press the reset button on the HIMAX WE1 EVB and you will
+see the application output in the serial terminal.
+
 ## Deploy to SparkFun Edge
 
 The following instructions will help you build and deploy this sample on the
diff --git a/tensorflow/lite/micro/examples/hello_world/README.md~ b/tensorflow/lite/micro/examples/hello_world/README.md~
new file mode 100644
index 00000000000..011711493d5
--- /dev/null
+++ b/tensorflow/lite/micro/examples/hello_world/README.md~
@@ -0,0 +1,595 @@
+# Hello World Example
+
+This example is designed to demonstrate the absolute basics of using [TensorFlow
+Lite for Microcontrollers](https://www.tensorflow.org/lite/microcontrollers).
+It includes the full end-to-end workflow of training a model, converting it for
+use with TensorFlow Lite for Microcontrollers, and running inference on a
+microcontroller.
+
+The model is trained to replicate a `sine` function and generates a pattern of
+data to either blink LEDs or control an animation, depending on the
+capabilities of the device.
+ +![Animation on STM32F746](images/animation_on_STM32F746.gif) + +## Table of contents + +- [Hello World Example](#hello-world-example) + - [Table of contents](#table-of-contents) + - [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp) + - [Initial Setup](#initial-setup) + - [Generate Example Project](#generate-example-project) + - [Build and Run Example](#build-and-run-example) + - [Deploy to Arduino](#deploy-to-arduino) + - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library) + - [Load and run the example](#load-and-run-the-example) + - [Deploy to ESP32](#deploy-to-esp32) + - [Install the ESP IDF](#install-the-esp-idf) + - [Generate the examples](#generate-the-examples) + - [Building the example](#building-the-example) + - [Load and run the example](#load-and-run-the-example-1) + - [Deploy to himax WE1 EVB](#deploy-to-himax-we1-evb) + - [Initial Setup](#initial-setup-1) + - [MetaWare Development Toolkit](#metaware-development-toolkit) + - [Make Tool version](#make-tool-version) + - [Serial Terminal Emulation Application](#serial-terminal-emulation-application) + - [Generate Example Project](#generate-example-project-1) + - [Build and Burn Example](#build-and-burn-example) + - [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge) + - [Compile the binary](#compile-the-binary) + - [Sign the binary](#sign-the-binary) + - [Flash the binary](#flash-the-binary) + - [Deploy to STM32F746](#deploy-to-stm32f746) + - [Run the tests on a development machine](#run-the-tests-on-a-development-machine) + - [Train your own model](#train-your-own-model) + +## Deploy to ARC EM SDP + +The following instructions will help you to build and deploy this example to +[ARC EM SDP](https://www.synopsys.com/dw/ipdir.php?ds=arc-em-software-development-platform) +board. General information and instructions on using the board with TensorFlow +Lite Micro can be found in the common +[ARC targets description](/tensorflow/lite/micro/tools/make/targets/arc/README.md). + +### Initial Setup + +Follow the instructions on the +[ARC EM SDP Initial Setup](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP) +to get and install all required tools for work with ARC EM SDP. + +### Generate Example Project + +The example project for ARC EM SDP platform can be generated with the following +command: + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=arc_emsdp TAGS=no_arc_mli generate_hello_world_make_project +``` + +### Build and Run Example + +For more detailed information on building and running examples see the +appropriate sections of general descriptions of the +[ARC EM SDP usage with TFLM](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP). +In the directory with generated project you can also find a +*README_ARC_EMSDP.md* file with instructions and options on building and +running. Here we only briefly mention main steps which are typically enough to +get it started. + +1. You need to + [connect the board](/tensorflow/lite/micro/tools/make/targets/arc/README.md#connect-the-board) + and open an serial connection. + +2. Go to the generated example project director + + ``` + cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/hello_world/make + ``` + +3. Build the example using + + ``` + make app + ``` + +4. To generate artefacts for self-boot of example from the board use + + ``` + make flash + ``` + +5. 
To run application from the board using microSD card: + + * Copy the content of the created /bin folder into the root of microSD + card. Note that the card must be formatted as FAT32 with default cluster + size (but less than 32 Kbytes) + * Plug in the microSD card into the J11 connector. + * Push the RST button. If a red LED is lit beside RST button, push the CFG + button. + +6. If you have the MetaWare Debugger installed in your environment: + + * To run application from the console using it type `make run`. + * To stop the execution type `Ctrl+C` in the console several times. + +In both cases (step 5 and 6) you will see the application output in the serial +terminal. + +## Deploy to Arduino + +The following instructions will help you build and deploy this sample +to [Arduino](https://www.arduino.cc/) devices. + +![Animation on Arduino MKRZERO](images/animation_on_arduino_mkrzero.gif) + +The sample has been tested with the following devices: + +- [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers) +- [Arduino MKRZERO](https://store.arduino.cc/usa/arduino-mkrzero) + +The sample will use PWM to fade an LED on and off according to the model's +output. In the code, the `LED_BUILTIN` constant is used to specify the board's +built-in LED as the one being controlled. However, on some boards, this built-in +LED is not attached to a pin with PWM capabilities. In this case, the LED will +blink instead of fading. + +### Install the Arduino_TensorFlowLite library + +This example application is included as part of the official TensorFlow Lite +Arduino library. To install it, open the Arduino library manager in +`Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`. + +### Load and run the example + +Once the library has been added, go to `File -> Examples`. You should see an +example near the bottom of the list named `TensorFlowLite:hello_world`. Select +it and click `hello_world` to load the example. + +Use the Arduino IDE to build and upload the example. Once it is running, +you should see the built-in LED on your device flashing. + +The Arduino Desktop IDE includes a plotter that we can use to display the sine +wave graphically. To view it, go to `Tools -> Serial Plotter`. You will see one +datapoint being logged for each inference cycle, expressed as a number between 0 +and 255. + +## Deploy to ESP32 + +The following instructions will help you build and deploy this sample +to [ESP32](https://www.espressif.com/en/products/hardware/esp32/overview) +devices using the [ESP IDF](https://github.com/espressif/esp-idf). + +The sample has been tested on ESP-IDF version 4.0 with the following devices: +- [ESP32-DevKitC](http://esp-idf.readthedocs.io/en/latest/get-started/get-started-devkitc.html) +- [ESP-EYE](https://github.com/espressif/esp-who/blob/master/docs/en/get-started/ESP-EYE_Getting_Started_Guide.md) + +### Install the ESP IDF + +Follow the instructions of the +[ESP-IDF get started guide](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html) +to setup the toolchain and the ESP-IDF itself. + +The next steps assume that the +[IDF environment variables are set](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html#step-4-set-up-the-environment-variables) : + + * The `IDF_PATH` environment variable is set + * `idf.py` and Xtensa-esp32 tools (e.g. 
`xtensa-esp32-elf-gcc`) are in `$PATH` + +### Generate the examples +The example project can be generated with the following command: +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=esp generate_hello_world_esp_project +``` + +### Building the example + +Go the the example project directory +``` +cd tensorflow/lite/micro/tools/make/gen/esp_xtensa-esp32/prj/hello_world/esp-idf +``` + +Then build with `idf.py` +``` +idf.py build +``` + +### Load and run the example + +To flash (replace `/dev/ttyUSB0` with the device serial port): +``` +idf.py --port /dev/ttyUSB0 flash +``` + +Monitor the serial output: +``` +idf.py --port /dev/ttyUSB0 monitor +``` + +Use `Ctrl+]` to exit. + +The previous two commands can be combined: +``` +idf.py --port /dev/ttyUSB0 flash monitor +``` + +## Deploy to himax WE1 EVB + +The following instructions will help you build and deploy this example to +[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief) +board. To undstand more about using this board, please check +[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide). + +### Initial Setup + +To use the HIMAX WE1 EVB, please make sure following software are installed: + +#### MetaWare Development Toolkit + +See +[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit) +section for instructions on toolchain installation. + +#### Make Tool version + +A `'make'` tool is required for deploying Tensorflow Lite Micro +applications on HIMAX WE1 EVB, See +[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool) +section for proper environment. + +#### Serial Terminal Emulation Application + +There are 2 main purposes for HIMAX WE1 EVB Debug UART port + +- print application output +- burn application to flash by using xmodem send application binary + +You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)). + + +### Generate Example Project + +The example project for HIMAX WE1 EVB platform can be generated with the following +command: + +Download related third party data + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads +``` + +Generate hello world project + +``` +make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_make_project TARGET=himax_we1_evb +``` + +### Build and Burn Example + +Following the Steps to run hello world example at HIMAX WE1 EVB platform. + +1. Go to the generated example project directory. + + ``` + cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/hello_world/make + ``` + +2. Build the example using + + ``` + make app + ``` + +3. After example build finish, copy ELF file and map file to image generate tool directory. + image generate tool directory located at `'tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/'` + + ``` + cp hello_world.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ + ``` + +4. Go to flash image generate tool directory. + + ``` + cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ + ``` + +5. run image generate tool, generate flash image file. + + * Before running image generate tool, by typing `sudo chmod +x image_gen` + and `sudo chmod +x sign_tool` to make sure it is executable. 
+ + ``` + image_gen -e hello_world.elf -m himax_we1_evb.map -o out.img + ``` + + +6. Download flash image file to HIMAX WE1 EVB by UART: + + * more detail about download image through UART can be found at [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update) + +After these steps, press reset button on the HIMAX WE1 EVB, you will see application output in the serial +terminal. + +## Deploy to SparkFun Edge + +The following instructions will help you build and deploy this sample on the +[SparkFun Edge development board](https://sparkfun.com/products/15170). + +![Animation on SparkFun Edge](images/animation_on_sparkfun_edge.gif) + +If you're new to using this board, we recommend walking through the +[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) +codelab to get an understanding of the workflow. + +### Compile the binary + +The following command will download the required dependencies and then compile a +binary for the SparkFun Edge: + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge hello_world_bin +``` + +The binary will be created in the following location: + +``` +tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin +``` + +### Sign the binary + +The binary must be signed with cryptographic keys to be deployed to the device. +We'll now run some commands that will sign our binary so it can be flashed to +the SparkFun Edge. The scripts we are using come from the Ambiq SDK, which is +downloaded when the `Makefile` is run. + +Enter the following command to set up some dummy cryptographic keys we can use +for development: + +``` +cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info0.py \ +tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info.py +``` + +Next, run the following command to create a signed binary: + +``` +python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_image_blob.py \ +--bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin \ +--load-address 0xC000 \ +--magic-num 0xCB \ +-o main_nonsecure_ota \ +--version 0x0 +``` + +This will create the file `main_nonsecure_ota.bin`. We'll now run another +command to create a final version of the file that can be used to flash our +device with the bootloader script we will use in the next step: + +``` +python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \ +--load-address 0x20000 \ +--bin main_nonsecure_ota.bin \ +-i 6 \ +-o main_nonsecure_wire \ +--options 0x1 +``` + +You should now have a file called `main_nonsecure_wire.bin` in the directory +where you ran the commands. This is the file we'll be flashing to the device. + +### Flash the binary + +Next, attach the board to your computer via a USB-to-serial adapter. + +**Note:** If you're using the [SparkFun Serial Basic Breakout](https://www.sparkfun.com/products/15096), +you should [install the latest drivers](https://learn.sparkfun.com/tutorials/sparkfun-serial-basic-ch340c-hookup-guide#drivers-if-you-need-them) +before you continue. 
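+
+If you're not sure which device node the adapter was assigned, listing
+`/dev/cu.usbserial-*` (macOS) or `/dev/ttyUSB*` (Linux) with `ls` before and
+after plugging the adapter in is a quick way to spot it (typical names shown
+here; yours may differ).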
+ +Once connected, assign the USB device name to an environment variable: + +``` +export DEVICENAME=put your device name here +``` + +Set another variable with the baud rate: + +``` +export BAUD_RATE=921600 +``` + +Now, hold the button marked `14` on the device. While still holding the button, +hit the button marked `RST`. Continue holding the button marked `14` while +running the following command: + +``` +python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/uart_wired_update.py \ +-b ${BAUD_RATE} ${DEVICENAME} \ +-r 1 \ +-f main_nonsecure_wire.bin \ +-i 6 +``` + +You should see a long stream of output as the binary is flashed to the device. +Once you see the following lines, flashing is complete: + +``` +Sending Reset Command. +Done. +``` + +If you don't see these lines, flashing may have failed. Try running through the +steps in [Flash the binary](#flash-the-binary) again (you can skip over setting +the environment variables). If you continue to run into problems, follow the +[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) +codelab, which includes more comprehensive instructions for the flashing +process. + +The binary should now be deployed to the device. Hit the button marked `RST` to +reboot the board. You should see the device's four LEDs flashing in sequence. + +Debug information is logged by the board while the program is running. To view +it, establish a serial connection to the board using a baud rate of `115200`. +On OSX and Linux, the following command should work: + +``` +screen ${DEVICENAME} 115200 +``` + +You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`, +immediately followed by `Esc`. You can then use the arrow keys to explore the +output, which will contain the results of running inference on various `x` +values: + +``` +x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1 +``` + +To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately +followed by the `K` key, then hit the `Y` key. + + +## Deploy to STM32F746 + +The following instructions will help you build and deploy the sample to the +[STM32F7 discovery kit](https://os.mbed.com/platforms/ST-Discovery-F746NG/) +using [ARM Mbed](https://github.com/ARMmbed/mbed-cli). + +![Animation on STM32F746](images/animation_on_STM32F746.gif) + +Before we begin, you'll need the following: + +- STM32F7 discovery kit board +- Mini-USB cable +- ARM Mbed CLI ([installation instructions](https://os.mbed.com/docs/mbed-os/v5.12/tools/installation-and-setup.html)) +- Python 2.7 and pip + +Since Mbed requires a special folder structure for projects, we'll first run a +command to generate a subfolder containing the required source files in this +structure: + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=mbed TAGS="CMSIS disco_f746ng" generate_hello_world_mbed_project +``` + +This will result in the creation of a new folder: + +``` +tensorflow/lite/micro/tools/make/gen/mbed_cortex-m4/prj/hello_world/mbed +``` + +This folder contains all of the example's dependencies structured in the correct +way for Mbed to be able to build it. + +Change into the directory and run the following commands, making sure you are +using Python 2.7.15. + +First, tell Mbed that the current directory is the root of an Mbed project: + +``` +mbed config root . 
+``` + +Next, tell Mbed to download the dependencies and prepare to build: + +``` +mbed deploy +``` + +By default, Mbed will build the project using C++98. However, TensorFlow Lite +requires C++11. Run the following Python snippet to modify the Mbed +configuration files so that it uses C++11: + +``` +python -c 'import fileinput, glob; +for filename in glob.glob("mbed-os/tools/profiles/*.json"): + for line in fileinput.input(filename, inplace=True): + print line.replace("\"-std=gnu++98\"","\"-std=c++11\", \"-fpermissive\"")' + +``` + +Finally, run the following command to compile: + +``` +mbed compile -m DISCO_F746NG -t GCC_ARM +``` + +This should result in a binary at the following path: + +``` +./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin +``` + +To deploy, plug in your STM board and copy the file to it. On MacOS, you can do +this with the following command: + +``` +cp ./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin /Volumes/DIS_F746NG/ +``` + +Copying the file will initiate the flashing process. Once this is complete, you +should see an animation on the device's screen. + + +``` +screen /dev/tty.usbmodem14403 9600 +``` + +In addition to this animation, debug information is logged by the board while +the program is running. To view it, establish a serial connection to the board +using a baud rate of `9600`. On OSX and Linux, the following command should +work, replacing `/dev/tty.devicename` with the name of your device as it appears +in `/dev`: + +``` +screen /dev/tty.devicename 9600 +``` + +You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`, +immediately followed by `Esc`. You can then use the arrow keys to explore the +output, which will contain the results of running inference on various `x` +values: + +``` +x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1 +``` + +To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately +followed by the `K` key, then hit the `Y` key. + +### Run the tests on a development machine + +To compile and test this example on a desktop Linux or macOS machine, first +clone the TensorFlow repository from GitHub to a convenient place: + +```bash +git clone --depth 1 https://github.com/tensorflow/tensorflow.git +``` + +Next, `cd` into the source directory from a terminal, and then run the following +command: + +```bash +make -f tensorflow/lite/micro/tools/make/Makefile test_hello_world_test +``` + +This will take a few minutes, and downloads frameworks the code uses. Once the +process has finished, you should see a series of files get compiled, followed by +some logging output from a test, which should conclude with +`~~~ALL TESTS PASSED~~~`. + +If you see this, it means that a small program has been built and run that loads +the trained TensorFlow model, runs some example inputs through it, and got the +expected outputs. + +To understand how TensorFlow Lite does this, you can look at the source in +[hello_world_test.cc](hello_world_test.cc). +It's a fairly small amount of code that creates an interpreter, gets a handle to +a model that's been compiled into the program, and then invokes the interpreter +with the model and sample inputs. + +### Train your own model + +So far you have used an existing trained model to run inference on +microcontrollers. If you wish to train your own model, follow the instructions +given in the [train/](train/) directory. 
diff --git a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc
new file mode 100644
index 00000000000..1816a2f3207
--- /dev/null
+++ b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc
@@ -0,0 +1,19 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/examples/hello_world/constants.h"
+
+// This is tuned so that a full cycle takes ~4 seconds on a SparkFun Edge.
+const int kInferencesPerCycle = 1000;
diff --git a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc
new file mode 100644
index 00000000000..8ca028acc55
--- /dev/null
+++ b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc
@@ -0,0 +1,35 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/examples/hello_world/output_handler.h"
+
+
+/*
+This function drives the device's LEDs based on the y value, where
+-1 <= y <= 1.
+
+| y value range | LEDs    |
+| 0 <= y <= 1   | [ 0 1 ] |
+| -1 <= y < 0   | [ 1 0 ] |
+
+*/
+void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value,
+                  float y_value) {
+  // The first time this method runs, set up our LEDs correctly
+
+  // Log the current X and Y values
+  TF_LITE_REPORT_ERROR(error_reporter, "x_value: %f, y_value: %f\n",
+                       static_cast<double>(x_value),
+                       static_cast<double>(y_value));
+}
diff --git a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~ b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~
new file mode 100644
index 00000000000..b59242d0b6f
--- /dev/null
+++ b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~
@@ -0,0 +1,53 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/examples/hello_world/output_handler.h"
+
+
+/*
+This function drives the device's LEDs based on the y value, where
+-1 <= y <= 1.
+
+| y value range | LEDs    |
+| 0 <= y <= 1   | [ 0 1 ] |
+| -1 <= y < 0   | [ 1 0 ] |
+
+*/
+void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value,
+                  float y_value) {
+  // The first time this method runs, set up our LEDs correctly
+/* static bool is_initialized = false;
+  if (!is_initialized) {
+    // TODO: set up the LEDs as outputs
+
+    // end of setup
+    is_initialized = true;
+  }
+
+  // Set the LEDs to represent the sign of the y value
+  if (y_value < 0) {
+    // enable LED1
+
+    // disable LED0
+  } else if (y_value > 0) {
+    // enable LED0
+
+    // disable LED1
+  }
+  */
+  // Log the current X and Y values
+  TF_LITE_REPORT_ERROR(error_reporter, "x_value: %f, y_value: %f\n",
+                       static_cast<double>(x_value),
+                       static_cast<double>(y_value));
+}
diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/README.md b/tensorflow/lite/micro/examples/person_detection_experimental/README.md
index bf99b40d776..4d53e551431 100644
--- a/tensorflow/lite/micro/examples/person_detection_experimental/README.md
+++ b/tensorflow/lite/micro/examples/person_detection_experimental/README.md
@@ -7,13 +7,31 @@ This uses the experimental int8 quantized version of the person detection
 model.
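+
+Because the model is int8 quantized, image data captured as unsigned 8-bit
+values must be shifted into the signed range before inference (see the
+comments in `main_functions.cc` later in this patch). A minimal sketch of that
+conversion; `camera_buf` is a hypothetical input buffer, not a name from this
+patch:
+
+```
+// Shift unsigned 8-bit pixels into the signed range the int8 model expects
+// by subtracting 128 from each value.
+for (int i = 0; i < kNumCols * kNumRows * kNumChannels; ++i) {
+  image_data[i] = static_cast<int8_t>(camera_buf[i] - 128);
+}
+```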
 
 ## Table of contents
 
-- [Getting started](#getting-started)
-- [Running on ARC EM SDP](#running-on-arc-em-sdp)
-- [Running on Arduino](#running-on-arduino)
-- [Running on SparkFun Edge](#running-on-sparkfun-edge)
-- [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
-- [Debugging image capture](#debugging-image-capture)
-- [Training your own model](#training-your-own-model)
+- [Person detection example](#person-detection-example)
+  - [Table of contents](#table-of-contents)
+  - [Running on ARC EM SDP](#running-on-arc-em-sdp)
+    - [Initial setup](#initial-setup)
+    - [Generate Example Project](#generate-example-project)
+    - [Build and Run Example](#build-and-run-example)
+  - [Running on Arduino](#running-on-arduino)
+    - [Hardware](#hardware)
+    - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library)
+    - [Install other libraries](#install-other-libraries)
+    - [Load and run the example](#load-and-run-the-example)
+  - [Running on HIMAX WE1 EVB](#running-on-himax-we1-evb)
+    - [Initial Setup](#initial-setup)
+      - [MetaWare Development Toolkit](#metaware-development-toolkit)
+      - [Make Tool version](#make-tool-version)
+      - [Serial Terminal Emulation Application](#serial-terminal-emulation-application)
+    - [Generate Example Project](#generate-example-project-1)
+    - [Build and Burn Example](#build-and-burn-example)
+  - [Running on SparkFun Edge](#running-on-sparkfun-edge)
+    - [Compile the binary](#compile-the-binary)
+    - [Sign the binary](#sign-the-binary)
+    - [Flash the binary](#flash-the-binary)
+  - [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
+  - [Debugging image capture](#debugging-image-capture)
+  - [Training your own model](#training-your-own-model)
 
 ## Running on ARC EM SDP
 
@@ -260,6 +278,103 @@ From the log, we can see that it took around 170 ms to capture and read the
 image data from the camera module, 180 ms to decode the JPEG and convert it to
 greyscale, and 18.6 seconds to run inference.
 
+## Running on HIMAX WE1 EVB
+
+The following instructions will help you build and deploy this example to the
+[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
+board. To understand more about using this board, please check the
+[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide).
+
+### Initial Setup
+
+To use the HIMAX WE1 EVB, please make sure the following software is installed:
+
+#### MetaWare Development Toolkit
+
+See the
+[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit)
+section for instructions on toolchain installation.
+
+#### Make Tool version
+
+A `make` tool is required for deploying TensorFlow Lite Micro applications on
+the HIMAX WE1 EVB. See the
+[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool)
+section for the proper environment.
+
+#### Serial Terminal Emulation Application
+
+The HIMAX WE1 EVB debug UART port serves two main purposes:
+
+- printing application output
+- burning the application to flash by sending the application binary over
+  xmodem
+
+You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)).
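+
+For example, with minicom you might open the debug UART with
+`minicom -D /dev/ttyUSB0 -b 115200` (the device node and baud rate here are
+assumptions; adjust them to match your setup).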
+
+
+### Generate Example Project
+
+The example project for the HIMAX WE1 EVB platform can be generated with the
+following commands:
+
+Download the related third party data
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads
+```
+
+Generate the person detection project
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile generate_person_detection_int8_make_project TARGET=himax_we1_evb
+```
+
+### Build and Burn Example
+
+Follow these steps to run the person detection example on the HIMAX WE1 EVB
+platform.
+
+1. Go to the generated example project directory.
+
+    ```
+    cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/person_detection_int8/make
+    ```
+
+2. Build the example using
+
+    ```
+    make app
+    ```
+
+3. After the build finishes, copy the ELF file and map file to the image
+   generation tool directory, located at
+   `tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/`
+
+    ```
+    cp person_detection_int8.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+    ```
+
+4. Go to the flash image generation tool directory.
+
+    ```
+    cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+    ```
+
+5. Run the image generation tool to generate the flash image file.
+
+    * Before running the image generation tool, run `sudo chmod +x image_gen`
+      and `sudo chmod +x sign_tool` to make sure they are executable.
+
+    ```
+    image_gen -e person_detection_int8.elf -m himax_we1_evb.map -o out.img
+    ```
+
+6. Download the flash image file to the HIMAX WE1 EVB over UART:
+
+    * More detail about downloading the image over UART can be found in
+      [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update)
+
+After these steps, press the reset button on the HIMAX WE1 EVB and you will
+see the application output in the serial terminal.
+
 ## Running on SparkFun Edge
 
 The following instructions will help you build and deploy this sample on the
diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc
new file mode 100644
index 00000000000..a353dc8a9b8
--- /dev/null
+++ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc
@@ -0,0 +1,34 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
+
+#include "hx_drv_tflm.h"
+
+// This dummy implementation writes person and no person scores to the error
+// console. Real applications will want to take some custom action instead, and
+// should implement their own versions of this function.
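+// Here, as a minimal example, the green LED on the board is switched on
+// whenever the person score beats the no-person score, and off otherwise.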
+void RespondToDetection(tflite::ErrorReporter* error_reporter, + int8_t person_score, int8_t no_person_score) { + + if (person_score > no_person_score) { + hx_drv_led_on(HX_DRV_LED_GREEN); + } else { + hx_drv_led_off(HX_DRV_LED_GREEN); + } + + TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d", + person_score, no_person_score); +} diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc new file mode 100644 index 00000000000..727d93c61d1 --- /dev/null +++ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc @@ -0,0 +1,44 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h" + +#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h" + +#include "hx_drv_tflm.h" + +hx_drv_sensor_image_config_t g_pimg_config; + + +TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width, + int image_height, int channels, int8_t* image_data) { + static bool is_initialized = false; + + if (!is_initialized) { + if(hx_drv_sensor_initial(&g_pimg_config)!= HX_DRV_LIB_PASS) + { + return kTfLiteError; + } + is_initialized = true; + } + + hx_drv_sensor_capture(&g_pimg_config); + + hx_drv_image_rescale((uint8_t*)g_pimg_config.raw_address, g_pimg_config.img_width, g_pimg_config.img_height, + image_data, image_width, image_height); + + + return kTfLiteOk; +} diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ new file mode 100644 index 00000000000..d5b4d136642 --- /dev/null +++ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ @@ -0,0 +1,44 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+
+#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
+
+#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
+
+#include "hx_drv_tflm.h"
+
+hx_drv_sensor_image_config_t g_pimg_config;
+
+
+TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
+                      int image_height, int channels, int8_t* image_data) {
+  static bool is_initialized = false;
+
+  if (!is_initialized) {
+    if (hx_drv_sensor_initial(&g_pimg_config) != HX_DRV_LIB_PASS) {
+      return kTfLiteError;
+    }
+    is_initialized = true;
+  }
+
+  hx_drv_sensor_capture(&g_pimg_config);
+
+  hx_drv_image_rescale((uint8_t*)g_pimg_config.raw_address,
+                       g_pimg_config.img_width, g_pimg_config.img_height,
+                       image_data, image_data, image_height);
+
+
+  return kTfLiteOk;
+}
diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc
new file mode 100644
index 00000000000..552b52c9c51
--- /dev/null
+++ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc
@@ -0,0 +1,127 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h"
+
+#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
+#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
+#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
+#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
+#include "tensorflow/lite/micro/micro_interpreter.h"
+#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+
+// Globals, used for compatibility with Arduino-style sketches.
+namespace {
+tflite::ErrorReporter* error_reporter = nullptr;
+const tflite::Model* model = nullptr;
+tflite::MicroInterpreter* interpreter = nullptr;
+TfLiteTensor* input = nullptr;
+
+// In order to use optimized tensorflow lite kernels, a signed int8 quantized
+// model is preferred over the legacy unsigned model format. This means that
+// throughout this project, input images must be converted from unsigned to
+// signed format. The easiest and quickest way to convert from unsigned to
+// signed 8-bit integers is to subtract 128 from the unsigned value to get a
+// signed value.
+
+// An area of memory to use for input, output, and intermediate arrays.
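+//
+// On this target, the MetaWare '#pragma Bss' directives below place the arena
+// in a dedicated '.tensor_arena' section, so the linker command file shipped
+// with the SDK can map it to a suitable memory region.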
+constexpr int kTensorArenaSize = 125 * 1024;
+#pragma Bss(".tensor_arena")
+static uint8_t tensor_arena[kTensorArenaSize];
+#pragma Bss()
+}  // namespace
+
+// The name of this function is important for Arduino compatibility.
+void setup() {
+  // Set up logging. Google style is to avoid globals or statics because of
+  // lifetime uncertainty, but since this has a trivial destructor it's okay.
+  // NOLINTNEXTLINE(runtime-global-variables)
+  static tflite::MicroErrorReporter micro_error_reporter;
+  error_reporter = &micro_error_reporter;
+
+  // Map the model into a usable data structure. This doesn't involve any
+  // copying or parsing, it's a very lightweight operation.
+  model = tflite::GetModel(g_person_detect_model_data);
+  if (model->version() != TFLITE_SCHEMA_VERSION) {
+    TF_LITE_REPORT_ERROR(error_reporter,
+                         "Model provided is schema version %d not equal "
+                         "to supported version %d.",
+                         model->version(), TFLITE_SCHEMA_VERSION);
+    return;
+  }
+
+  // Pull in only the operation implementations we need.
+  // This relies on a complete list of all the ops needed by this graph.
+  // An easier approach is to just use the AllOpsResolver, but this will
+  // incur some penalty in code space for op implementations that are not
+  // needed by this graph.
+  //
+  // tflite::ops::micro::AllOpsResolver resolver;
+  // NOLINTNEXTLINE(runtime-global-variables)
+  static tflite::MicroOpResolver<12> micro_op_resolver;
+  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
+                               tflite::ops::micro::Register_DEPTHWISE_CONV_2D(),
+                               1, 3);
+  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_CONV_2D,
+                               tflite::ops::micro::Register_CONV_2D(), 1, 3);
+  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                               tflite::ops::micro::Register_AVERAGE_POOL_2D(),
+                               1, 2);
+  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_RESHAPE,
+                               tflite::ops::micro::Register_RESHAPE());
+  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
+                               tflite::ops::micro::Register_SOFTMAX(), 1, 3);
+
+  // Build an interpreter to run the model with.
+  // NOLINTNEXTLINE(runtime-global-variables)
+  static tflite::MicroInterpreter static_interpreter(
+      model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
+  interpreter = &static_interpreter;
+
+  // Allocate memory from the tensor_arena for the model's tensors.
+  TfLiteStatus allocate_status = interpreter->AllocateTensors();
+  if (allocate_status != kTfLiteOk) {
+    TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
+    return;
+  }
+
+  // Get information about the memory area to use for the model's input.
+  input = interpreter->input(0);
+}
+
+// The name of this function is important for Arduino compatibility.
+void loop() {
+  // Get image from provider.
+  if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
+                            input->data.int8)) {
+    TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
+  }
+
+  // Run the model on this input and make sure it succeeds.
+  if (kTfLiteOk != interpreter->Invoke()) {
+    TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
+  }
+
+  TfLiteTensor* output = interpreter->output(0);
+
+  // Process the inference results.
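+  // The two class scores are signed 8-bit values; RespondToDetection()
+  // compares them and drives the board's LED accordingly.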
+ int8_t person_score = output->data.uint8[kPersonIndex]; + int8_t no_person_score = output->data.uint8[kNotAPersonIndex]; + RespondToDetection(error_reporter, person_score, no_person_score); +} diff --git a/tensorflow/lite/micro/we_i/debug_log.cc b/tensorflow/lite/micro/himax_we1_evb/debug_log.cc similarity index 90% rename from tensorflow/lite/micro/we_i/debug_log.cc rename to tensorflow/lite/micro/himax_we1_evb/debug_log.cc index a115d476aff..32af2625630 100644 --- a/tensorflow/lite/micro/we_i/debug_log.cc +++ b/tensorflow/lite/micro/himax_we1_evb/debug_log.cc @@ -18,16 +18,15 @@ limitations under the License. // the Ambiq Apollo 3. #include "tensorflow/lite/micro/debug_log.h" -#include "xprintf.h" -#include "console_io.h" -#include +#include "hx_drv_tflm.h" + extern "C" void DebugLog(const char* s) { static bool is_initialized = false; if (!is_initialized) { - xprintf_setup(); + hx_drv_uart_initial(); is_initialized = true; } - xprintf("%s", s); + hx_drv_uart_print("%s", s); } diff --git a/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc b/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc new file mode 100644 index 00000000000..60fc2e7cca1 --- /dev/null +++ b/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc @@ -0,0 +1,91 @@ +# Settings for himax WE_1 evb. +ifeq ($(TARGET), himax_we1_evb) + + CC_TOOL = ccac + AR_TOOL = arac + CXX_TOOL = ccac + LD_TOOL := ccac + TARGET_ARCH := arc + #ARC_TOOLCHAIN := mwdt + + BUILD_ARC_MLI := false + ARC_MLI_PRE_COMPILED_TARGET := himax_arcem9d_r16 + + include $(MAKEFILE_DIR)/targets/arc/arc_common.inc + + #download SDK & MLI + HIMAX_WE1_SDK_NAME := himax_we1_sdk + $(eval $(call add_third_party_download,$(HIMAX_WE1_SDK_URL),$(HIMAX_WE1_SDK_MD5),$(HIMAX_WE1_SDK_NAME),)) + + #export path of toolchain + #export PATH := $(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/image_gen_linux_v3/:$(PATH) + + TCF_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/arcem9d_wei_r16.tcf + LCF_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/memory.lcf + ARCLIB_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/libembarc.a + LIB_HEADER_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/hx_drv_tflm.h + + + DEFAULT_HEAPSZ := 8192 + DEFAULT_STACKSZ := 8192 + + TCF_FILE_NAME = $(notdir $(TCF_FILE)) + ARC_TARGET_FILES_DIRS = $(dir $(TCF_FILE_NAME)) + MAKE_PROJECT_FILES += $(TCF_FILE_NAME) + + LCF_FILE_NAME = $(notdir $(LCF_FILE)) + ARC_TARGET_FILES_DIRS += $(dir $(LCF_FILE)) + MAKE_PROJECT_FILES += $(LCF_FILE_NAME) + + ARCLIB_FILE_NAME = $(notdir $(ARCLIB_FILE)) + ARC_TARGET_FILES_DIRS += $(dir $(ARCLIB_FILE)) + MAKE_PROJECT_FILES += $(ARCLIB_FILE_NAME) + + LIB_HEADER_FILE_NAME = $(notdir $(LIB_HEADER_FILE)) + ARC_TARGET_FILES_DIRS += $(dir $(LIB_HEADER_FILE)) + MAKE_PROJECT_FILES += $(LIB_HEADER_FILE_NAME) + + + + # Need a pointer to the TCF and lcf file + + PLATFORM_FLAGS = \ + -DNDEBUG \ + -g \ + -DCPU_ARC \ + -Hnosdata \ + -DTF_LITE_STATIC_MEMORY \ + -tcf=$(TCF_FILE_NAME) \ + -Hnocopyr \ + -Hpurge \ + -Hcl \ + -fslp-vectorize-aggressive \ + -ffunction-sections \ + -fdata-sections \ + -tcf_core_config \ + + CXXFLAGS += -fno-rtti -DSCRATCH_MEM_Z_SIZE=0x10000 $(PLATFORM_FLAGS) + CCFLAGS += $(PLATFORM_FLAGS) + + INCLUDES+= \ + -I $(MAKEFILE_DIR)/downloads/$(WEI_SDK_NAME) \ + -I $(MAKEFILE_DIR)/downloads/kissfft + + GENERATED_PROJECT_INCLUDES += \ + -I. 
\ + -I./third_party/kissfft + + LDFLAGS += \ + -Hheap=8192 \ + -tcf=$(TCF_FILE_NAME) \ + -Hnocopyr \ + -m \ + -Hldopt=-Coutput=$(TARGET).map \ + $(LCF_FILE_NAME) \ + -Hldopt=-Bgrouplib $(ARCLIB_FILE_NAME) + + CXXFLAGS := $(filter-out -std=c++11,$(CXXFLAGS)) + CCFLAGS := $(filter-out -std=c11,$(CCFLAGS)) + MICROLITE_LIBS := $(filter-out -lm,$(MICROLITE_LIBS)) + +endif diff --git a/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~ b/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~ new file mode 100644 index 00000000000..733f258fbbb --- /dev/null +++ b/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~ @@ -0,0 +1,93 @@ +# Settings for himax WE_1 evb. +ifeq ($(TARGET), himax_we1_evb) + + CC_TOOL = ccac + AR_TOOL = arac + CXX_TOOL = ccac + LD_TOOL := ccac + TARGET_ARCH := arc + #ARC_TOOLCHAIN := mwdt + + BUILD_ARC_MLI := false + ARC_MLI_PRE_COMPILED_TARGET := himax_arcem9d_r16 + +include $(MAKEFILE_DIR)/targets/arc/arc_common.inc + #download SDK & MLI + HIMAX_WE1_SDK_NAME := himax_we1_sdk + #MLI_LIB_DIR = arc_mli_package + #MLI_LIB_DIR = arc_mli_package + #$(eval $(call add_third_party_download,$(EMBARC_MLI_PRE_COMPILED_URL),$(EMBARC_MLI_PRE_COMPILED_MD5),$(MLI_LIB_DIR),)) + $(eval $(call add_third_party_download,$(HIMAX_WE1_SDK_URL),$(HIMAX_WE1_SDK_MD5),$(HIMAX_WE1_SDK_NAME),)) + + #export path of toolchain + #export PATH := $(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/image_gen_linux_v3/:$(PATH) + + TCF_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/arcem9d_wei_r16.tcf + LCF_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/memory.lcf + ARCLIB_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/libembarc.a + LIB_HEADER_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/hx_drv_tflm.h + + + DEFAULT_HEAPSZ := 8192 + DEFAULT_STACKSZ := 8192 + + TCF_FILE_NAME = $(notdir $(TCF_FILE)) + ARC_TARGET_FILES_DIRS = $(dir $(TCF_FILE_NAME)) + MAKE_PROJECT_FILES += $(TCF_FILE_NAME) + + LCF_FILE_NAME = $(notdir $(LCF_FILE)) + ARC_TARGET_FILES_DIRS += $(dir $(LCF_FILE)) + MAKE_PROJECT_FILES += $(LCF_FILE_NAME) + + ARCLIB_FILE_NAME = $(notdir $(ARCLIB_FILE)) + ARC_TARGET_FILES_DIRS += $(dir $(ARCLIB_FILE)) + MAKE_PROJECT_FILES += $(ARCLIB_FILE_NAME) + + LIB_HEADER_FILE_NAME = $(notdir $(LIB_HEADER_FILE)) + ARC_TARGET_FILES_DIRS += $(dir $(LIB_HEADER_FILE)) + MAKE_PROJECT_FILES += $(LIB_HEADER_FILE_NAME) + + + + # Need a pointer to the TCF and lcf file + + PLATFORM_FLAGS = \ + -DNDEBUG \ + -g \ + -DCPU_ARC \ + -Hnosdata \ + -DTF_LITE_STATIC_MEMORY \ + -tcf=$(TCF_FILE_NAME) \ + -Hnocopyr \ + -Hpurge \ + -Hcl \ + -fslp-vectorize-aggressive \ + -ffunction-sections \ + -fdata-sections \ + -tcf_core_config \ + + CXXFLAGS += -fno-rtti -DSCRATCH_MEM_Z_SIZE=0x10000 $(PLATFORM_FLAGS) + CCFLAGS += $(PLATFORM_FLAGS) + + INCLUDES+= \ + -I $(MAKEFILE_DIR)/downloads/$(WEI_SDK_NAME) \ + -I $(MAKEFILE_DIR)/downloads/kissfft + + GENERATED_PROJECT_INCLUDES += \ + -I. 
\ + -I./third_party/kissfft + + LDFLAGS += \ + -Hheap=8192 \ + -tcf=$(TCF_FILE_NAME) \ + -Hnocopyr \ + -m \ + -Hldopt=-Coutput=$(TARGET).map \ + $(LCF_FILE_NAME) \ + -Hldopt=-Bgrouplib $(ARCLIB_FILE_NAME) + + CXXFLAGS := $(filter-out -std=c++11,$(CXXFLAGS)) + CCFLAGS := $(filter-out -std=c11,$(CCFLAGS)) + MICROLITE_LIBS := $(filter-out -lm,$(MICROLITE_LIBS)) + +endif diff --git a/tensorflow/lite/micro/tools/make/third_party_downloads.inc b/tensorflow/lite/micro/tools/make/third_party_downloads.inc index 806501a004a..75a51e0df10 100644 --- a/tensorflow/lite/micro/tools/make/third_party_downloads.inc +++ b/tensorflow/lite/micro/tools/make/third_party_downloads.inc @@ -80,3 +80,7 @@ EMBARC_MLI_PRE_COMPILED_MD5 := "a95ff9e0370434484f14e7e4114327f6" XTENSA_HIFI4_URL :="https://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_04_07.zip" XTENSA_HIFI4_MD5 :="f234764928f9a42901df33a27e118c8b" +HIMAX_WE1_SDK_URL ="https://www.himax.com.tw/we-i/himax_we1_sdk_v02.zip" +HIMAX_WE1_SDK_MD5 ="9a4b2f29b16052764e437b64bdcba816" + + diff --git a/tensorflow/lite/micro/tools/make/third_party_downloads.inc~ b/tensorflow/lite/micro/tools/make/third_party_downloads.inc~ new file mode 100644 index 00000000000..3c7ee1b64d2 --- /dev/null +++ b/tensorflow/lite/micro/tools/make/third_party_downloads.inc~ @@ -0,0 +1,86 @@ +# Add URLs and MD5 checksums for third-party libraries here. + +GEMMLOWP_URL := "https://github.com/google/gemmlowp/archive/719139ce755a0f31cbf1c37f7f98adcc7fc9f425.zip" +GEMMLOWP_MD5 := "7e8191b24853d75de2af87622ad293ba" + +ifeq ($(HOST_OS),windows) + FLATBUFFERS_URL := "https://github.com/google/flatbuffers/archive/v1.12.0.zip" + FLATBUFFERS_MD5 := "a1afdbf114dec01a861c1b8c917d0fc7" +else + FLATBUFFERS_URL := "https://github.com/google/flatbuffers/archive/v1.12.0.tar.gz" + FLATBUFFERS_MD5 := "c62ffefb3d4548b127cca14ce047f16c" +endif + +ifeq ($(HOST_OS),osx) + GCC_EMBEDDED_URL := "https://developer.arm.com/-/media/Files/downloads/gnu-rm/7-2018q2/gcc-arm-none-eabi-7-2018-q2-update-mac.tar.bz2" + GCC_EMBEDDED_MD5 := "a66be9828cf3c57d7d21178e07cd8904" +else ifeq ($(HOST_OS),windows) + GCC_EMBEDDED_URL := "https://developer.arm.com/-/media/Files/downloads/gnu-rm/7-2018q2/gcc-arm-none-eabi-7-2018-q2-update-win32.zip" + GCC_EMBEDDED_MD5 := "bc8ae26d7c429f30d583a605a4bcf9bc" +else + GCC_EMBEDDED_URL := "https://developer.arm.com/-/media/Files/downloads/gnu-rm/7-2018q2/gcc-arm-none-eabi-7-2018-q2-update-linux.tar.bz2" + GCC_EMBEDDED_MD5 := "299ebd3f1c2c90930d28ab82e5d8d6c0" +endif + +LEON_BCC2_URL := "https://www.gaisler.com/anonftp/bcc2/bin/bcc-2.0.7-gcc-linux64.tar.xz" +LEON_BCC2_MD5 := "cdf78082be4882da2a92c9baa82fe765" + +TSIM_URL := "https://www.gaisler.com/anonftp/tsim/tsim-eval-2.0.63.tar.gz" +TSIM_MD5 := "afa0095d3ed989a949e1467f94e41d2f" + +CMSIS_URL := "https://github.com/ARM-software/CMSIS_5/archive/1150e71e07c79b538efd842aba5b210a31827ae5.zip" +CMSIS_MD5 := "e05f4222ef58825193910b41a0871dcb" + +AM_SDK_URL := "http://s3.asia.ambiqmicro.com/downloads/AmbiqSuite-Rel2.2.0.zip" +AM_SDK_MD5 := "7605fa2d4d97e6bb7a1190c92b66b597" +AM_SDK_DEST := AmbiqSuite-Rel2.2.0 + +SF_BSPS_URL := "https://github.com/sparkfun/SparkFun_Apollo3_AmbiqSuite_BSPs/archive/v0.0.7.zip" +SF_BSPS_MD5 := "34199f7e754735661d1c8a70a40ca7a3" +SF_BSPS_DEST := boards_sfe + +STM32_BARE_LIB_URL := "https://github.com/google/stm32_bare_lib/archive/c07d611fb0af58450c5a3e0ab4d52b47f99bc82d.zip" +STM32_BARE_LIB_MD5 := "282bff40d4d0b92278fd123a3b6e3123" + +ifeq ($(HOST_OS),osx) + RISCV_TOOLCHAIN_URL := 
"https://static.dev.sifive.com/dev-tools/riscv64-unknown-elf-gcc-8.1.0-2019.01.0-x86_64-apple-darwin.tar.gz" + RISCV_TOOLCHAIN_MD5 := "2ac2fa00618b9ab7fa0c7d0ec173de94" +else + RISCV_TOOLCHAIN_URL := "https://static.dev.sifive.com/dev-tools/riscv64-unknown-elf-gcc-20181030-x86_64-linux-ubuntu14.tar.gz" + RISCV_TOOLCHAIN_MD5="2366b7afe36a54dc94fb0ff8a0830934" +endif + +SIFIVE_FE310_LIB_URL := "https://github.com/sifive/freedom-e-sdk/archive/baeeb8fd497a99b3c141d7494309ec2e64f19bdf.zip" +SIFIVE_FE310_LIB_MD5 := "06ee24c4956f8e21670ab3395861fe64" + +KISSFFT_URL="https://github.com/mborgerding/kissfft/archive/v130.zip" +KISSFFT_MD5="438ba1fef5783cc5f5f201395cc477ca" + +RUY_URL="https://github.com/google/ruy/archive/1b313682ef8b8fc8ed08719c610d1c3503b016bf.zip" +RUY_MD5="2d54f058f8f7120dfc1ecee79dbf259e" + +CIFAR10_DATASET_URL="https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz" +CIFAR10_DATASET_MD5="c32a1d4ab5d03f1284b67883e8d87530" + +IMAGE_RECOGNITION_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/models/tflite/cifar_image_recognition_model_2020_4_14.zip" +IMAGE_RECOGNITION_MODEL_MD5 := "2b886156e7ef4d6e53d0f1a4bc800e56" + +PERSON_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_grayscale_2019_11_21.zip" +PERSON_MODEL_MD5 := "fe2934bd0788f1dcc7af3f0a954542ab" + +PERSON_MODEL_INT8_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_01_13.zip" +PERSON_MODEL_INT8_MD5 := "8a7d2c70325f53136faea6dde517b8cc" + +EMBARC_MLI_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/archive/58284867ca52d1f43b25045e8601999d7359d986.zip" +EMBARC_MLI_MD5 := "2bf4982a327fdaa9d475803ce014d1ef" + +EMBARC_MLI_PRE_COMPILED_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/releases/download/Release_1.1_RC2/embARC_MLI_package.zip" +EMBARC_MLI_PRE_COMPILED_MD5 := "a95ff9e0370434484f14e7e4114327f6" + +XTENSA_HIFI4_URL :="https://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_04_07.zip" +XTENSA_HIFI4_MD5 :="f234764928f9a42901df33a27e118c8b" + +HIMAX_WE1_SDK_URL ="https://www.himax.com.tw/we-i/himax_we1_sdk_v02.zip" +HIMAX_WE1_SDK_MD5 ="5063c24d298fbcfe118163f3ccc43079" + + From 96eb311826d68179bf85a228e294ae55f39ef2e4 Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Thu, 4 Jun 2020 18:08:43 +0800 Subject: [PATCH 03/11] TFLM: update example readme --- .../lite/micro/examples/hello_world/README.md | 36 +- .../micro/examples/hello_world/README.md~ | 2 +- .../hello_world/himax_we1_evb/constants.cc | 19 - .../himax_we1_evb/output_handler.cc | 35 -- .../himax_we1_evb/output_handler.cc~ | 53 -- .../person_detection_experimental/README.md | 33 +- .../person_detection_experimental/README.md~ | 568 ++++++++++++++++++ .../tools/make/third_party_downloads.inc~ | 86 --- 8 files changed, 585 insertions(+), 247 deletions(-) delete mode 100644 tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc delete mode 100644 tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc delete mode 100644 tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~ create mode 100644 tensorflow/lite/micro/examples/person_detection_experimental/README.md~ delete mode 100644 tensorflow/lite/micro/tools/make/third_party_downloads.inc~ diff --git a/tensorflow/lite/micro/examples/hello_world/README.md b/tensorflow/lite/micro/examples/hello_world/README.md index 9c0a5e2306a..d3762ada790 
100644 --- a/tensorflow/lite/micro/examples/hello_world/README.md +++ b/tensorflow/lite/micro/examples/hello_world/README.md @@ -14,34 +14,14 @@ of the device. ## Table of contents -- [Hello World Example](#hello-world-example) - - [Table of contents](#table-of-contents) - - [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp) - - [Initial Setup](#initial-setup) - - [Generate Example Project](#generate-example-project) - - [Build and Run Example](#build-and-run-example) - - [Deploy to Arduino](#deploy-to-arduino) - - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library) - - [Load and run the example](#load-and-run-the-example) - - [Deploy to ESP32](#deploy-to-esp32) - - [Install the ESP IDF](#install-the-esp-idf) - - [Generate the examples](#generate-the-examples) - - [Building the example](#building-the-example) - - [Load and run the example](#load-and-run-the-example-1) - - [Deploy to himax WE1 EVB](#deploy-to-himax-we1-evb) - - [Initial Setup](#initial-setup-1) - - [MetaWare Development Toolkit](#metaware-development-toolkit) - - [Make Tool version](#make-tool-version) - - [Serial Terminal Emulation Application](#serial-terminal-emulation-application) - - [Generate Example Project](#generate-example-project-1) - - [Build and Burn Example](#build-and-burn-example) - - [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge) - - [Compile the binary](#compile-the-binary) - - [Sign the binary](#sign-the-binary) - - [Flash the binary](#flash-the-binary) - - [Deploy to STM32F746](#deploy-to-stm32f746) - - [Run the tests on a development machine](#run-the-tests-on-a-development-machine) - - [Train your own model](#train-your-own-model) +- [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp) +- [Deploy to Arduino](#deploy-to-arduino) +- [Deploy to ESP32](#deploy-to-esp32) +- [Deploy to himax WE1 EVB](#deploy-to-himax-we1-evb) +- [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge) +- [Deploy to STM32F746](#deploy-to-STM32F746) +- [Run the tests on a development machine](#run-the-tests-on-a-development-machine) +- [Train your own model](#train-your-own-model) ## Deploy to ARC EM SDP diff --git a/tensorflow/lite/micro/examples/hello_world/README.md~ b/tensorflow/lite/micro/examples/hello_world/README.md~ index 011711493d5..9c0a5e2306a 100644 --- a/tensorflow/lite/micro/examples/hello_world/README.md~ +++ b/tensorflow/lite/micro/examples/hello_world/README.md~ @@ -260,7 +260,7 @@ make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_par Generate hello world project ``` -make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_make_project TARGET=himax_we1_evb +make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_make_project TARGET=himax_we1_evb TAGS=no_arc_mli ``` ### Build and Burn Example diff --git a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc deleted file mode 100644 index 1816a2f3207..00000000000 --- a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/constants.cc +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/examples/hello_world/constants.h" - -// This is tuned so that a full cycle takes ~4 seconds on a SparkFun Edge. -const int kInferencesPerCycle = 1000; diff --git a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc deleted file mode 100644 index 8ca028acc55..00000000000 --- a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/examples/hello_world/output_handler.h" - - -/* -This function trigger different device's LEDthrough y value. -y value range -1 <= y <= 1. -| Range is from -1~1 | LEDs | -| 0 <= y <= 1 | [ 0 1 ] | -| -1 <= y < 0 | [ 1 0 ] | - -*/ -void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value, - float y_value) { - // The first time this method runs, set up our LEDs correctly - - // Log the current X and Y values - TF_LITE_REPORT_ERROR(error_reporter, "x_value: %f, y_value: %f\n", - static_cast(x_value), - static_cast(y_value)); -} diff --git a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~ b/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~ deleted file mode 100644 index b59242d0b6f..00000000000 --- a/tensorflow/lite/micro/examples/hello_world/himax_we1_evb/output_handler.cc~ +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/examples/hello_world/output_handler.h" - - -/* -This function trigger different device's LEDthrough y value. -y value range -1 <= y <= 1. 
-| Range is from -1~1 | LEDs | -| 0 <= y <= 1 | [ 0 1 ] | -| -1 <= y < 0 | [ 1 0 ] | - -*/ -void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value, - float y_value) { - // The first time this method runs, set up our LEDs correctly -/* static bool is_initialized = false; - if (!is_initialized) { - // TODO Setup LED's as outputs - - // end of setup - is_initialized = true; - } - - // Set the LEDs to represent negative values - if (y_value < 0) { - //enable LED1 - - //enable LED0 - } else if (y_value > 0) { - //enable LED0 - - //enable LED1 - } - */ - // Log the current X and Y values - TF_LITE_REPORT_ERROR(error_reporter, "x_value: %f, y_value: %f\n", - static_cast(x_value), - static_cast(y_value)); -} diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/README.md b/tensorflow/lite/micro/examples/person_detection_experimental/README.md index 4d53e551431..06f5640986f 100644 --- a/tensorflow/lite/micro/examples/person_detection_experimental/README.md +++ b/tensorflow/lite/micro/examples/person_detection_experimental/README.md @@ -7,31 +7,14 @@ This uses the experimental int8 quantized version of the person detection model. ## Table of contents -- [Person detection example](#person-detection-example) - - [Table of contents](#table-of-contents) - - [Running on ARC EM SDP](#running-on-arc-em-sdp) - - [Initial setup](#initial-setup) - - [Generate Example Project](#generate-example-project) - - [Build and Run Example](#build-and-run-example) - - [Running on Arduino](#running-on-arduino) - - [Hardware](#hardware) - - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library) - - [Install other libraries](#install-other-libraries) - - [Load and run the example](#load-and-run-the-example) - - [Running on HIMAX WE1 EVB](#running-on-himax-we1-evb) - - [Initial Setup](#initial-setup) - - [MetaWare Development Toolkit](#metaware-development-toolkit) - - [Make Tool version](#make-tool-version) - - [Serial Terminal Emulation Application](#serial-terminal-emulation-application) - - [Generate Example Project](#generate-example-project-1) - - [Build and Burn Example](#build-and-burn-example) - - [Running on SparkFun Edge](#running-on-sparkfun-edge) - - [Compile the binary](#compile-the-binary) - - [Sign the binary](#sign-the-binary) - - [Flash the binary](#flash-the-binary) - - [Run the tests on a development machine](#run-the-tests-on-a-development-machine) - - [Debugging image capture](#debugging-image-capture) - - [Training your own model](#training-your-own-model) +- [Getting started](#getting-started) +- [Running on ARC EM SDP](#running-on-arc-em-sdp) +- [Running on Arduino](#running-on-arduino) +- [Running on HIMAX WE1 EVB](#running-on-himax-we1-evb) +- [Running on SparkFun Edge](#running-on-sparkfun-edge) +- [Run the tests on a development machine](#run-the-tests-on-a-development-machine) +- [Debugging image capture](#debugging-image-capture) +- [Training your own model](#training-your-own-model) ## Running on ARC EM SDP diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/README.md~ b/tensorflow/lite/micro/examples/person_detection_experimental/README.md~ new file mode 100644 index 00000000000..4d53e551431 --- /dev/null +++ b/tensorflow/lite/micro/examples/person_detection_experimental/README.md~ @@ -0,0 +1,568 @@ +# Person detection example + +This example shows how you can use Tensorflow Lite to run a 250 kilobyte neural +network to recognize people in images captured by a camera. 
It is designed to +run on systems with small amounts of memory such as microcontrollers and DSPs. +This uses the experimental int8 quantized version of the person detection model. + +## Table of contents + +- [Person detection example](#person-detection-example) + - [Table of contents](#table-of-contents) + - [Running on ARC EM SDP](#running-on-arc-em-sdp) + - [Initial setup](#initial-setup) + - [Generate Example Project](#generate-example-project) + - [Build and Run Example](#build-and-run-example) + - [Running on Arduino](#running-on-arduino) + - [Hardware](#hardware) + - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library) + - [Install other libraries](#install-other-libraries) + - [Load and run the example](#load-and-run-the-example) + - [Running on HIMAX WE1 EVB](#running-on-himax-we1-evb) + - [Initial Setup](#initial-setup) + - [MetaWare Development Toolkit](#metaware-development-toolkit) + - [Make Tool version](#make-tool-version) + - [Serial Terminal Emulation Application](#serial-terminal-emulation-application) + - [Generate Example Project](#generate-example-project-1) + - [Build and Burn Example](#build-and-burn-example) + - [Running on SparkFun Edge](#running-on-sparkfun-edge) + - [Compile the binary](#compile-the-binary) + - [Sign the binary](#sign-the-binary) + - [Flash the binary](#flash-the-binary) + - [Run the tests on a development machine](#run-the-tests-on-a-development-machine) + - [Debugging image capture](#debugging-image-capture) + - [Training your own model](#training-your-own-model) + +## Running on ARC EM SDP + +The following instructions will help you to build and deploy this example to +[ARC EM SDP](https://www.synopsys.com/dw/ipdir.php?ds=arc-em-software-development-platform) +board. General information and instructions on using the board with TensorFlow +Lite Micro can be found in the common +[ARC targets description](/tensorflow/lite/micro/tools/make/targets/arc/README.md). + +This example uses asymmetric int8 quantization and can therefore leverage +optimized int8 kernels from the embARC MLI library + +The ARC EM SDP board contains a rich set of extension interfaces. You can choose +any compatible camera and modify +[image_provider.cc](/tensorflow/lite/micro/examples/person_detection_experimental/image_provider.cc) +file accordingly to use input from your specific camera. By default, results of +running this example are printed to the console. If you would like to instead +implement some target-specific actions, you need to modify +[detection_responder.cc](/tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.cc) +accordingly. + +The reference implementations of these files are used by default on the EM SDP. + +### Initial setup + +Follow the instructions on the +[ARC EM SDP Initial Setup](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP) +to get and install all required tools for work with ARC EM SDP. + +### Generate Example Project + +The example project for ARC EM SDP platform can be generated with the following +command: + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=arc_emsdp generate_person_detection_int8_make_project +``` + +### Build and Run Example + +For more detailed information on building and running examples see the +appropriate sections of general descriptions of the +[ARC EM SDP usage with TFLM](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP). 
+In the directory with the generated project you can also find a
+*README_ARC_EMSDP.md* file with instructions and options on building and
+running. Here we only briefly mention the main steps, which are typically
+enough to get started.
+
+1. You need to
+   [connect the board](/tensorflow/lite/micro/tools/make/targets/arc/README.md#connect-the-board)
+   and open a serial connection.
+
+2. Go to the generated example project directory
+
+   ```
+   cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/person_detection_int8/make
+   ```
+
+3. Build the example using
+
+   ```
+   make app
+   ```
+
+4. To generate artefacts for self-boot of the example from the board, use
+
+   ```
+   make flash
+   ```
+
+5. To run the application from the board using a microSD card:
+
+   * Copy the content of the created /bin folder into the root of the microSD
+     card. Note that the card must be formatted as FAT32 with the default
+     cluster size (but less than 32 Kbytes).
+   * Plug the microSD card into the J11 connector.
+   * Push the RST button. If a red LED is lit beside the RST button, push the
+     CFG button.
+
+6. If you have the MetaWare Debugger installed in your environment:
+
+   * To run the application from the console, type `make run`.
+   * To stop execution, type `Ctrl+C` in the console several times.
+
+In both cases (steps 5 and 6) you will see the application output in the serial
+terminal.
+
+## Running on Arduino
+
+The following instructions will help you build and deploy this sample
+to [Arduino](https://www.arduino.cc/) devices.
+
+The sample has been tested with the following device:
+
+- [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers)
+
+You will also need the following camera module:
+
+- [Arducam Mini 2MP Plus](https://www.amazon.com/Arducam-Module-Megapixels-Arduino-Mega2560/dp/B012UXNDOY)
+
+### Hardware
+
+Connect the Arducam pins as follows:
+
+|Arducam pin name|Arduino pin name|
+|----------------|----------------|
+|CS|D7 (unlabelled, immediately to the right of D6)|
+|MOSI|D11|
+|MISO|D12|
+|SCK|D13|
+|GND|GND (either pin marked GND is fine)|
+|VCC|3.3 V|
+|SDA|A4|
+|SCL|A5|
+
+### Install the Arduino_TensorFlowLite library
+
+Download the current nightly build of the library:
+[person_detection.zip](https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_01_13.zip)
+
+This example application is included as part of the official TensorFlow Lite
+Arduino library. To install it, open the Arduino library manager in
+`Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`.
+
+### Install other libraries
+
+In addition to the TensorFlow library, you'll also need to install two
+libraries:
+
+* The Arducam library, so our code can interface with the hardware
+* The JPEGDecoder library, so we can decode JPEG-encoded images
+
+The Arducam Arduino library is available from GitHub at
+[https://github.com/ArduCAM/Arduino](https://github.com/ArduCAM/Arduino).
+To install it, download or clone the repository. Next, copy its `ArduCAM`
+subdirectory into your `Arduino/libraries` directory. To find this directory on
+your machine, check the *Sketchbook location* in the Arduino IDE's
+*Preferences* window.
+
+After downloading the library, you'll need to edit one of its files to make sure
+it is configured for the Arducam Mini 2MP Plus. To do so, open the following
+file:
+
+```
+Arduino/libraries/ArduCAM/memorysaver.h
+```
+
+You'll see a bunch of `#define` statements listed.
Make sure that they are all +commented out, except for `#define OV2640_MINI_2MP_PLUS`, as so: + +``` +//Step 1: select the hardware platform, only one at a time +//#define OV2640_MINI_2MP +//#define OV3640_MINI_3MP +//#define OV5642_MINI_5MP +//#define OV5642_MINI_5MP_BIT_ROTATION_FIXED +#define OV2640_MINI_2MP_PLUS +//#define OV5642_MINI_5MP_PLUS +//#define OV5640_MINI_5MP_PLUS +``` + +Once you save the file, we're done configuring the Arducam library. + +Our next step is to install the JPEGDecoder library. We can do this from within +the Arduino IDE. First, go to the *Manage Libraries...* option in the *Tools* +menu and search for `JPEGDecoder`. You should install version _1.8.0_ of the +library. + +Once the library has installed, we'll need to configure it to disable some +optional components that are not compatible with the Arduino Nano 33 BLE Sense. +Open the following file: + +``` +Arduino/libraries/JPEGDecoder/src/User_Config.h +``` + +Make sure that both `#define LOAD_SD_LIBRARY` and `#define LOAD_SDFAT_LIBRARY` +are commented out, as shown in this excerpt from the file: + +```c++ +// Comment out the next #defines if you are not using an SD Card to store the JPEGs +// Commenting out the line is NOT essential but will save some FLASH space if +// SD Card access is not needed. Note: use of SdFat is currently untested! + +//#define LOAD_SD_LIBRARY // Default SD Card library +//#define LOAD_SDFAT_LIBRARY // Use SdFat library instead, so SD Card SPI can be bit bashed +``` + +Once you've saved the file, you are done installing libraries. + +### Load and run the example + +Go to `File -> Examples`. You should see an +example near the bottom of the list named `TensorFlowLite`. Select +it and click `person_detection` to load the example. Connect your device, then +build and upload the example. + +To test the camera, start by pointing the device's camera at something that is +definitely not a person, or just covering it up. The next time the blue LED +flashes, the device will capture a frame from the camera and begin to run +inference. Since the vision model we are using for person detection is +relatively large, it takes a long time to run inference—around 19 seconds at the +time of writing, though it's possible TensorFlow Lite has gotten faster since +then. + +After 19 seconds or so, the inference result will be translated into another LED +being lit. Since you pointed the camera at something that isn't a person, the +red LED should light up. + +Now, try pointing the device's camera at yourself! The next time the blue LED +flashes, the device will capture another image and begin to run inference. After +19 seconds, the green LED should light up! + +Remember, image data is captured as a snapshot before each inference, whenever +the blue LED flashes. Whatever the camera is pointed at during that moment is +what will be fed into the model. It doesn't matter where the camera is pointed +until the next time an image is captured, when the blue LED will flash again. + +If you're getting seemingly incorrect results, make sure you are in an +environment with good lighting. You should also make sure that the camera is +oriented correctly, with the pins pointing downwards, so that the images it +captures are the right way up—the model was not trained to recognize upside-down +people! In addition, it's good to remember that this is a tiny model, which +trades accuracy for small size. It works very well, but it isn't accurate 100% +of the time. 
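+
+Under the hood, the LEDs are driven by the two scores the model produces, and
+the decision lives in the example's detection responder, so that is the file to
+modify if you want different behavior on your board. The following is a minimal
+sketch for the int8 model, not the stock implementation: the LED helpers are
+hypothetical placeholders for your board's driver, and the simple greater-than
+comparison is just one reasonable rule.
+
+```c++
+#include <cstdint>
+
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
+
+// Hypothetical LED helpers; on real hardware these would drive GPIOs.
+static void SetGreenLED(bool on) { /* light the 'person' LED */ (void)on; }
+static void SetOrangeLED(bool on) { /* light the 'no person' LED */ (void)on; }
+
+// With the int8 model, both scores are signed values in [-128, 127], so a
+// direct comparison decides which LED to light.
+void RespondToDetection(tflite::ErrorReporter* error_reporter,
+                        int8_t person_score, int8_t no_person_score) {
+  const bool person_detected = person_score > no_person_score;
+  SetGreenLED(person_detected);
+  SetOrangeLED(!person_detected);
+  TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
+                       person_score, no_person_score);
+}
+```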
+
+We can also see the results of inference via the Arduino Serial Monitor. To do
+this, open the *Serial Monitor* from the *Tools* menu. You'll see a detailed
+log of what is happening while our application runs. It's also interesting to
+check the *Show timestamp* box, so you can see how long each part of the process
+takes:
+
+```
+14:17:50.714 -> Starting capture
+14:17:50.714 -> Image captured
+14:17:50.784 -> Reading 3080 bytes from ArduCAM
+14:17:50.887 -> Finished reading
+14:17:50.887 -> Decoding JPEG and converting to greyscale
+14:17:51.074 -> Image decoded and processed
+14:18:09.710 -> Person score: 246 No person score: 66
+```
+
+From the log, we can see that it took around 170 ms to capture and read the
+image data from the camera module, 180 ms to decode the JPEG and convert it to
+greyscale, and 18.6 seconds to run inference.
+
+## Running on HIMAX WE1 EVB
+
+The following instructions will help you build and deploy this example to the
+[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
+board. To understand more about using this board, please check the
+[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide).
+
+### Initial Setup
+
+To use the HIMAX WE1 EVB, please make sure the following software is installed:
+
+#### MetaWare Development Toolkit
+
+See the
+[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit)
+section for instructions on toolchain installation.
+
+#### Make Tool version
+
+A `make` tool is required for deploying TensorFlow Lite Micro
+applications on the HIMAX WE1 EVB. See the
+[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool)
+section for the proper environment.
+
+#### Serial Terminal Emulation Application
+
+The HIMAX WE1 EVB Debug UART port serves two main purposes:
+
+- printing application output
+- burning the application to flash by sending the application binary over xmodem
+
+You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)).
+
+### Generate Example Project
+
+The example project for the HIMAX WE1 EVB platform can be generated with the
+following commands.
+
+First, download the related third-party data:
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads
+```
+
+Then generate the person detection project:
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile generate_person_detection_int8_make_project TARGET=himax_we1_evb
+```
+
+### Build and Burn Example
+
+Follow these steps to run the person detection example on the HIMAX WE1 EVB
+platform.
+
+1. Go to the generated example project directory.
+
+   ```
+   cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/person_detection_int8/make
+   ```
+
+2. Build the example using
+
+   ```
+   make app
+   ```
+
+3. After the example build finishes, copy the ELF file and map file to the
+   image generation tool directory, located at
+   `tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/`
+
+   ```
+   cp person_detection_int8.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+   ```
+
+4. Go to the flash image generation tool directory.
+
+   ```
+   cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+   ```
+
+5. Run the image generation tool to generate the flash image file.
+
+   * Before running the image generation tool, make it executable by typing
+     `sudo chmod +x image_gen` and `sudo chmod +x sign_tool`.
+
+   ```
+   image_gen -e person_detection_int8.elf -m himax_we1_evb.map -o out.img
+   ```
+
+6. Download the flash image file to the HIMAX WE1 EVB over UART:
+
+   * More detail about downloading the image over UART can be found at
+     [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update)
+
+After these steps, press the reset button on the HIMAX WE1 EVB and you will see
+the application output in the serial terminal.
+
+## Running on SparkFun Edge
+
+The following instructions will help you build and deploy this sample on the
+[SparkFun Edge development board](https://sparkfun.com/products/15170). This
+sample requires the Sparkfun Himax camera for the Sparkfun Edge board. It is
+not available for purchase yet.
+
+If you're new to using this board, we recommend walking through the
+[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow)
+codelab to get an understanding of the workflow.
+
+### Compile the binary
+
+The following command will download the required dependencies and then compile a
+binary for the SparkFun Edge:
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge person_detection_bin
+```
+
+The binary will be created in the following location:
+
+```
+tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/person_detection.bin
+```
+
+### Sign the binary
+
+The binary must be signed with cryptographic keys to be deployed to the device.
+We'll now run some commands that will sign our binary so it can be flashed to
+the SparkFun Edge. The scripts we are using come from the Ambiq SDK, which is
+downloaded when the `Makefile` is run.
+
+Enter the following command to set up some dummy cryptographic keys we can use
+for development:
+
+```
+cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info0.py \
+tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info.py
+```
+
+Next, run the following command to create a signed binary:
+
+```
+python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_image_blob.py \
+--bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/person_detection.bin \
+--load-address 0xC000 \
+--magic-num 0xCB \
+-o main_nonsecure_ota \
+--version 0x0
+```
+
+This will create the file `main_nonsecure_ota.bin`. We'll now run another
+command to create a final version of the file that can be used to flash our
+device with the bootloader script we will use in the next step:
+
+```
+python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \
+--load-address 0x20000 \
+--bin main_nonsecure_ota.bin \
+-i 6 \
+-o main_nonsecure_wire \
+--options 0x1
+```
+
+You should now have a file called `main_nonsecure_wire.bin` in the directory
+where you ran the commands. This is the file we'll be flashing to the device.
+
+### Flash the binary
+
+Next, attach the board to your computer via a USB-to-serial adapter.
+
+**Note:** If you're using the [SparkFun Serial Basic Breakout](https://www.sparkfun.com/products/15096),
+you should [install the latest drivers](https://learn.sparkfun.com/tutorials/sparkfun-serial-basic-ch340c-hookup-guide#drivers-if-you-need-them)
+before you continue.
+
+Once connected, assign the USB device name to an environment variable:
+
+```
+export DEVICENAME=put your device name here
+```
+
+Set another variable with the baud rate:
+
+```
+export BAUD_RATE=921600
+```
+
+Now, hold the button marked `14` on the device. While still holding the button,
+hit the button marked `RST`. Continue holding the button marked `14` while
+running the following command:
+
+```
+python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/uart_wired_update.py \
+-b ${BAUD_RATE} ${DEVICENAME} \
+-r 1 \
+-f main_nonsecure_wire.bin \
+-i 6
+```
+
+You should see a long stream of output as the binary is flashed to the device.
+Once you see the following lines, flashing is complete:
+
+```
+Sending Reset Command.
+Done.
+```
+
+If you don't see these lines, flashing may have failed. Try running through the
+steps in [Flash the binary](#flash-the-binary) again (you can skip over setting
+the environment variables). If you continue to run into problems, follow the
+[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow)
+codelab, which includes more comprehensive instructions for the flashing
+process.
+
+The binary should now be deployed to the device. Hit the button marked `RST` to
+reboot the board. You should see the device's four LEDs flashing in sequence.
+
+Debug information is logged by the board while the program is running. To view
+it, establish a serial connection to the board using a baud rate of `115200`.
+On OSX and Linux, the following command should work:
+
+```
+screen ${DEVICENAME} 115200
+```
+
+To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately
+followed by the `K` key, then hit the `Y` key.
+
+## Run the tests on a development machine
+
+To compile and test this example on a desktop Linux or macOS machine, download
+[the TensorFlow source code](https://github.com/tensorflow/tensorflow), `cd`
+into the source directory from a terminal, and then run the following command:
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile
+```
+
+This will take a few minutes, as it downloads frameworks the code uses, like
+[CMSIS](https://developer.arm.com/embedded/cmsis) and
+[flatbuffers](https://google.github.io/flatbuffers/). Once that process has
+finished, run:
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile test_person_detection_test
+```
+
+You should see a series of files get compiled, followed by some logging output
+from a test, which should conclude with `~~~ALL TESTS PASSED~~~`. If you see
+this, it means that a small program has been built and run that loads a trained
+TensorFlow model, runs some example images through it, and gets the expected
+outputs. This particular test runs images with and without a person in them,
+and checks that the network correctly identifies them.
+
+To understand how TensorFlow Lite does this, you can look at the `TestInvoke()`
+function in
+[person_detection_test.cc](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/person_detection/person_detection_test.cc).
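+
+In outline, the test follows the standard TensorFlow Lite for Microcontrollers
+invocation pattern. Here is a condensed sketch of that flow; the arena size is
+an illustrative guess rather than a tuned value, and `AllOpsResolver` (whose
+header location has moved between TensorFlow versions) stands in for whatever
+resolver the actual test constructs:
+
+```c++
+#include <cstdint>
+#include <cstring>
+
+#include "tensorflow/lite/micro/all_ops_resolver.h"
+#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
+#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
+#include "tensorflow/lite/micro/micro_interpreter.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+// Working memory for the interpreter; the size is a guess that should fit the
+// int8 person detection model with headroom.
+constexpr int kTensorArenaSize = 96 * 1024;
+static uint8_t tensor_arena[kTensorArenaSize];
+
+// Runs one inference over a grayscale image and returns the person score.
+int8_t RunPersonDetection(const int8_t* image_data) {
+  static tflite::MicroErrorReporter micro_error_reporter;
+  const tflite::Model* model = tflite::GetModel(g_person_detect_model_data);
+
+  tflite::AllOpsResolver resolver;
+  tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
+                                       kTensorArenaSize, &micro_error_reporter);
+  interpreter.AllocateTensors();
+
+  // Copy the image into the input tensor; kMaxImageSize comes from
+  // model_settings.h.
+  TfLiteTensor* input = interpreter.input(0);
+  std::memcpy(input->data.int8, image_data, kMaxImageSize);
+
+  interpreter.Invoke();
+
+  // The output tensor holds one int8 score per category.
+  TfLiteTensor* output = interpreter.output(0);
+  return output->data.int8[kPersonIndex];
+}
+```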
+It's a fairly small amount of code, creating an interpreter, getting a handle to +a model that's been compiled into the program, and then invoking the interpreter +with the model and sample inputs. + +## Debugging image capture +When the sample is running, check the LEDs to determine whether the inference is +running correctly. If the red light is stuck on, it means there was an error +communicating with the camera. This is likely due to an incorrectly connected +or broken camera. + +During inference, the blue LED will toggle every time inference is complete. The +orange LED indicates that no person was found, and the green LED indicates a +person was found. The red LED should never turn on, since it indicates an error. + +In order to view the captured image, set the DUMP_IMAGE define in main.cc.  This +causes the board to log raw image info to the console. After the board has been +flashed and reset, dump the log to a text file: + + +``` +screen -L -Logfile ${DEVICENAME} 115200 +``` + +Next, run the raw to bitmap converter to view captured images: + +``` +python3 raw_to_bitmap.py -r GRAY -i +``` + +## Training your own model + +You can train your own model with some easy-to-use scripts. See +[training_a_model.md](training_a_model.md) for instructions. diff --git a/tensorflow/lite/micro/tools/make/third_party_downloads.inc~ b/tensorflow/lite/micro/tools/make/third_party_downloads.inc~ deleted file mode 100644 index 3c7ee1b64d2..00000000000 --- a/tensorflow/lite/micro/tools/make/third_party_downloads.inc~ +++ /dev/null @@ -1,86 +0,0 @@ -# Add URLs and MD5 checksums for third-party libraries here. - -GEMMLOWP_URL := "https://github.com/google/gemmlowp/archive/719139ce755a0f31cbf1c37f7f98adcc7fc9f425.zip" -GEMMLOWP_MD5 := "7e8191b24853d75de2af87622ad293ba" - -ifeq ($(HOST_OS),windows) - FLATBUFFERS_URL := "https://github.com/google/flatbuffers/archive/v1.12.0.zip" - FLATBUFFERS_MD5 := "a1afdbf114dec01a861c1b8c917d0fc7" -else - FLATBUFFERS_URL := "https://github.com/google/flatbuffers/archive/v1.12.0.tar.gz" - FLATBUFFERS_MD5 := "c62ffefb3d4548b127cca14ce047f16c" -endif - -ifeq ($(HOST_OS),osx) - GCC_EMBEDDED_URL := "https://developer.arm.com/-/media/Files/downloads/gnu-rm/7-2018q2/gcc-arm-none-eabi-7-2018-q2-update-mac.tar.bz2" - GCC_EMBEDDED_MD5 := "a66be9828cf3c57d7d21178e07cd8904" -else ifeq ($(HOST_OS),windows) - GCC_EMBEDDED_URL := "https://developer.arm.com/-/media/Files/downloads/gnu-rm/7-2018q2/gcc-arm-none-eabi-7-2018-q2-update-win32.zip" - GCC_EMBEDDED_MD5 := "bc8ae26d7c429f30d583a605a4bcf9bc" -else - GCC_EMBEDDED_URL := "https://developer.arm.com/-/media/Files/downloads/gnu-rm/7-2018q2/gcc-arm-none-eabi-7-2018-q2-update-linux.tar.bz2" - GCC_EMBEDDED_MD5 := "299ebd3f1c2c90930d28ab82e5d8d6c0" -endif - -LEON_BCC2_URL := "https://www.gaisler.com/anonftp/bcc2/bin/bcc-2.0.7-gcc-linux64.tar.xz" -LEON_BCC2_MD5 := "cdf78082be4882da2a92c9baa82fe765" - -TSIM_URL := "https://www.gaisler.com/anonftp/tsim/tsim-eval-2.0.63.tar.gz" -TSIM_MD5 := "afa0095d3ed989a949e1467f94e41d2f" - -CMSIS_URL := "https://github.com/ARM-software/CMSIS_5/archive/1150e71e07c79b538efd842aba5b210a31827ae5.zip" -CMSIS_MD5 := "e05f4222ef58825193910b41a0871dcb" - -AM_SDK_URL := "http://s3.asia.ambiqmicro.com/downloads/AmbiqSuite-Rel2.2.0.zip" -AM_SDK_MD5 := "7605fa2d4d97e6bb7a1190c92b66b597" -AM_SDK_DEST := AmbiqSuite-Rel2.2.0 - -SF_BSPS_URL := "https://github.com/sparkfun/SparkFun_Apollo3_AmbiqSuite_BSPs/archive/v0.0.7.zip" -SF_BSPS_MD5 := "34199f7e754735661d1c8a70a40ca7a3" -SF_BSPS_DEST := boards_sfe - 
-STM32_BARE_LIB_URL := "https://github.com/google/stm32_bare_lib/archive/c07d611fb0af58450c5a3e0ab4d52b47f99bc82d.zip" -STM32_BARE_LIB_MD5 := "282bff40d4d0b92278fd123a3b6e3123" - -ifeq ($(HOST_OS),osx) - RISCV_TOOLCHAIN_URL := "https://static.dev.sifive.com/dev-tools/riscv64-unknown-elf-gcc-8.1.0-2019.01.0-x86_64-apple-darwin.tar.gz" - RISCV_TOOLCHAIN_MD5 := "2ac2fa00618b9ab7fa0c7d0ec173de94" -else - RISCV_TOOLCHAIN_URL := "https://static.dev.sifive.com/dev-tools/riscv64-unknown-elf-gcc-20181030-x86_64-linux-ubuntu14.tar.gz" - RISCV_TOOLCHAIN_MD5="2366b7afe36a54dc94fb0ff8a0830934" -endif - -SIFIVE_FE310_LIB_URL := "https://github.com/sifive/freedom-e-sdk/archive/baeeb8fd497a99b3c141d7494309ec2e64f19bdf.zip" -SIFIVE_FE310_LIB_MD5 := "06ee24c4956f8e21670ab3395861fe64" - -KISSFFT_URL="https://github.com/mborgerding/kissfft/archive/v130.zip" -KISSFFT_MD5="438ba1fef5783cc5f5f201395cc477ca" - -RUY_URL="https://github.com/google/ruy/archive/1b313682ef8b8fc8ed08719c610d1c3503b016bf.zip" -RUY_MD5="2d54f058f8f7120dfc1ecee79dbf259e" - -CIFAR10_DATASET_URL="https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz" -CIFAR10_DATASET_MD5="c32a1d4ab5d03f1284b67883e8d87530" - -IMAGE_RECOGNITION_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/models/tflite/cifar_image_recognition_model_2020_4_14.zip" -IMAGE_RECOGNITION_MODEL_MD5 := "2b886156e7ef4d6e53d0f1a4bc800e56" - -PERSON_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_grayscale_2019_11_21.zip" -PERSON_MODEL_MD5 := "fe2934bd0788f1dcc7af3f0a954542ab" - -PERSON_MODEL_INT8_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_01_13.zip" -PERSON_MODEL_INT8_MD5 := "8a7d2c70325f53136faea6dde517b8cc" - -EMBARC_MLI_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/archive/58284867ca52d1f43b25045e8601999d7359d986.zip" -EMBARC_MLI_MD5 := "2bf4982a327fdaa9d475803ce014d1ef" - -EMBARC_MLI_PRE_COMPILED_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/releases/download/Release_1.1_RC2/embARC_MLI_package.zip" -EMBARC_MLI_PRE_COMPILED_MD5 := "a95ff9e0370434484f14e7e4114327f6" - -XTENSA_HIFI4_URL :="https://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_04_07.zip" -XTENSA_HIFI4_MD5 :="f234764928f9a42901df33a27e118c8b" - -HIMAX_WE1_SDK_URL ="https://www.himax.com.tw/we-i/himax_we1_sdk_v02.zip" -HIMAX_WE1_SDK_MD5 ="5063c24d298fbcfe118163f3ccc43079" - - From 233f1d53f829f157788ed687c07698b12cc8e091 Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Thu, 4 Jun 2020 18:16:32 +0800 Subject: [PATCH 04/11] remove temp readme in example directory --- .../micro/examples/hello_world/README.md~ | 595 ------------------ .../person_detection_experimental/README.md~ | 568 ----------------- 2 files changed, 1163 deletions(-) delete mode 100644 tensorflow/lite/micro/examples/hello_world/README.md~ delete mode 100644 tensorflow/lite/micro/examples/person_detection_experimental/README.md~ diff --git a/tensorflow/lite/micro/examples/hello_world/README.md~ b/tensorflow/lite/micro/examples/hello_world/README.md~ deleted file mode 100644 index 9c0a5e2306a..00000000000 --- a/tensorflow/lite/micro/examples/hello_world/README.md~ +++ /dev/null @@ -1,595 +0,0 @@ -# Hello World Example - -This example is designed to demonstrate the absolute basics of using [TensorFlow -Lite for Microcontrollers](https://www.tensorflow.org/lite/microcontrollers). 
-It includes the full end-to-end workflow of training a model, converting it for -use with TensorFlow Lite for Microcontrollers for running inference on a -microcontroller. - -The model is trained to replicate a `sine` function and generates a pattern of -data to either blink LEDs or control an animation, depending on the capabilities -of the device. - -![Animation on STM32F746](images/animation_on_STM32F746.gif) - -## Table of contents - -- [Hello World Example](#hello-world-example) - - [Table of contents](#table-of-contents) - - [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp) - - [Initial Setup](#initial-setup) - - [Generate Example Project](#generate-example-project) - - [Build and Run Example](#build-and-run-example) - - [Deploy to Arduino](#deploy-to-arduino) - - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library) - - [Load and run the example](#load-and-run-the-example) - - [Deploy to ESP32](#deploy-to-esp32) - - [Install the ESP IDF](#install-the-esp-idf) - - [Generate the examples](#generate-the-examples) - - [Building the example](#building-the-example) - - [Load and run the example](#load-and-run-the-example-1) - - [Deploy to himax WE1 EVB](#deploy-to-himax-we1-evb) - - [Initial Setup](#initial-setup-1) - - [MetaWare Development Toolkit](#metaware-development-toolkit) - - [Make Tool version](#make-tool-version) - - [Serial Terminal Emulation Application](#serial-terminal-emulation-application) - - [Generate Example Project](#generate-example-project-1) - - [Build and Burn Example](#build-and-burn-example) - - [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge) - - [Compile the binary](#compile-the-binary) - - [Sign the binary](#sign-the-binary) - - [Flash the binary](#flash-the-binary) - - [Deploy to STM32F746](#deploy-to-stm32f746) - - [Run the tests on a development machine](#run-the-tests-on-a-development-machine) - - [Train your own model](#train-your-own-model) - -## Deploy to ARC EM SDP - -The following instructions will help you to build and deploy this example to -[ARC EM SDP](https://www.synopsys.com/dw/ipdir.php?ds=arc-em-software-development-platform) -board. General information and instructions on using the board with TensorFlow -Lite Micro can be found in the common -[ARC targets description](/tensorflow/lite/micro/tools/make/targets/arc/README.md). - -### Initial Setup - -Follow the instructions on the -[ARC EM SDP Initial Setup](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP) -to get and install all required tools for work with ARC EM SDP. - -### Generate Example Project - -The example project for ARC EM SDP platform can be generated with the following -command: - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=arc_emsdp TAGS=no_arc_mli generate_hello_world_make_project -``` - -### Build and Run Example - -For more detailed information on building and running examples see the -appropriate sections of general descriptions of the -[ARC EM SDP usage with TFLM](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP). -In the directory with generated project you can also find a -*README_ARC_EMSDP.md* file with instructions and options on building and -running. Here we only briefly mention main steps which are typically enough to -get it started. - -1. You need to - [connect the board](/tensorflow/lite/micro/tools/make/targets/arc/README.md#connect-the-board) - and open an serial connection. - -2. 
Go to the generated example project director - - ``` - cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/hello_world/make - ``` - -3. Build the example using - - ``` - make app - ``` - -4. To generate artefacts for self-boot of example from the board use - - ``` - make flash - ``` - -5. To run application from the board using microSD card: - - * Copy the content of the created /bin folder into the root of microSD - card. Note that the card must be formatted as FAT32 with default cluster - size (but less than 32 Kbytes) - * Plug in the microSD card into the J11 connector. - * Push the RST button. If a red LED is lit beside RST button, push the CFG - button. - -6. If you have the MetaWare Debugger installed in your environment: - - * To run application from the console using it type `make run`. - * To stop the execution type `Ctrl+C` in the console several times. - -In both cases (step 5 and 6) you will see the application output in the serial -terminal. - -## Deploy to Arduino - -The following instructions will help you build and deploy this sample -to [Arduino](https://www.arduino.cc/) devices. - -![Animation on Arduino MKRZERO](images/animation_on_arduino_mkrzero.gif) - -The sample has been tested with the following devices: - -- [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers) -- [Arduino MKRZERO](https://store.arduino.cc/usa/arduino-mkrzero) - -The sample will use PWM to fade an LED on and off according to the model's -output. In the code, the `LED_BUILTIN` constant is used to specify the board's -built-in LED as the one being controlled. However, on some boards, this built-in -LED is not attached to a pin with PWM capabilities. In this case, the LED will -blink instead of fading. - -### Install the Arduino_TensorFlowLite library - -This example application is included as part of the official TensorFlow Lite -Arduino library. To install it, open the Arduino library manager in -`Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`. - -### Load and run the example - -Once the library has been added, go to `File -> Examples`. You should see an -example near the bottom of the list named `TensorFlowLite:hello_world`. Select -it and click `hello_world` to load the example. - -Use the Arduino IDE to build and upload the example. Once it is running, -you should see the built-in LED on your device flashing. - -The Arduino Desktop IDE includes a plotter that we can use to display the sine -wave graphically. To view it, go to `Tools -> Serial Plotter`. You will see one -datapoint being logged for each inference cycle, expressed as a number between 0 -and 255. - -## Deploy to ESP32 - -The following instructions will help you build and deploy this sample -to [ESP32](https://www.espressif.com/en/products/hardware/esp32/overview) -devices using the [ESP IDF](https://github.com/espressif/esp-idf). - -The sample has been tested on ESP-IDF version 4.0 with the following devices: -- [ESP32-DevKitC](http://esp-idf.readthedocs.io/en/latest/get-started/get-started-devkitc.html) -- [ESP-EYE](https://github.com/espressif/esp-who/blob/master/docs/en/get-started/ESP-EYE_Getting_Started_Guide.md) - -### Install the ESP IDF - -Follow the instructions of the -[ESP-IDF get started guide](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html) -to setup the toolchain and the ESP-IDF itself. 
- -The next steps assume that the -[IDF environment variables are set](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html#step-4-set-up-the-environment-variables) : - - * The `IDF_PATH` environment variable is set - * `idf.py` and Xtensa-esp32 tools (e.g. `xtensa-esp32-elf-gcc`) are in `$PATH` - -### Generate the examples -The example project can be generated with the following command: -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=esp generate_hello_world_esp_project -``` - -### Building the example - -Go the the example project directory -``` -cd tensorflow/lite/micro/tools/make/gen/esp_xtensa-esp32/prj/hello_world/esp-idf -``` - -Then build with `idf.py` -``` -idf.py build -``` - -### Load and run the example - -To flash (replace `/dev/ttyUSB0` with the device serial port): -``` -idf.py --port /dev/ttyUSB0 flash -``` - -Monitor the serial output: -``` -idf.py --port /dev/ttyUSB0 monitor -``` - -Use `Ctrl+]` to exit. - -The previous two commands can be combined: -``` -idf.py --port /dev/ttyUSB0 flash monitor -``` - -## Deploy to himax WE1 EVB - -The following instructions will help you build and deploy this example to -[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief) -board. To undstand more about using this board, please check -[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide). - -### Initial Setup - -To use the HIMAX WE1 EVB, please make sure following software are installed: - -#### MetaWare Development Toolkit - -See -[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit) -section for instructions on toolchain installation. - -#### Make Tool version - -A `'make'` tool is required for deploying Tensorflow Lite Micro -applications on HIMAX WE1 EVB, See -[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool) -section for proper environment. - -#### Serial Terminal Emulation Application - -There are 2 main purposes for HIMAX WE1 EVB Debug UART port - -- print application output -- burn application to flash by using xmodem send application binary - -You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)). - - -### Generate Example Project - -The example project for HIMAX WE1 EVB platform can be generated with the following -command: - -Download related third party data - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads -``` - -Generate hello world project - -``` -make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_make_project TARGET=himax_we1_evb TAGS=no_arc_mli -``` - -### Build and Burn Example - -Following the Steps to run hello world example at HIMAX WE1 EVB platform. - -1. Go to the generated example project directory. - - ``` - cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/hello_world/make - ``` - -2. Build the example using - - ``` - make app - ``` - -3. After example build finish, copy ELF file and map file to image generate tool directory. - image generate tool directory located at `'tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/'` - - ``` - cp hello_world.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ - ``` - -4. 
Go to flash image generate tool directory. - - ``` - cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ - ``` - -5. run image generate tool, generate flash image file. - - * Before running image generate tool, by typing `sudo chmod +x image_gen` - and `sudo chmod +x sign_tool` to make sure it is executable. - - ``` - image_gen -e hello_world.elf -m himax_we1_evb.map -o out.img - ``` - - -6. Download flash image file to HIMAX WE1 EVB by UART: - - * more detail about download image through UART can be found at [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update) - -After these steps, press reset button on the HIMAX WE1 EVB, you will see application output in the serial -terminal. - -## Deploy to SparkFun Edge - -The following instructions will help you build and deploy this sample on the -[SparkFun Edge development board](https://sparkfun.com/products/15170). - -![Animation on SparkFun Edge](images/animation_on_sparkfun_edge.gif) - -If you're new to using this board, we recommend walking through the -[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) -codelab to get an understanding of the workflow. - -### Compile the binary - -The following command will download the required dependencies and then compile a -binary for the SparkFun Edge: - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge hello_world_bin -``` - -The binary will be created in the following location: - -``` -tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin -``` - -### Sign the binary - -The binary must be signed with cryptographic keys to be deployed to the device. -We'll now run some commands that will sign our binary so it can be flashed to -the SparkFun Edge. The scripts we are using come from the Ambiq SDK, which is -downloaded when the `Makefile` is run. - -Enter the following command to set up some dummy cryptographic keys we can use -for development: - -``` -cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info0.py \ -tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info.py -``` - -Next, run the following command to create a signed binary: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_image_blob.py \ ---bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin \ ---load-address 0xC000 \ ---magic-num 0xCB \ --o main_nonsecure_ota \ ---version 0x0 -``` - -This will create the file `main_nonsecure_ota.bin`. We'll now run another -command to create a final version of the file that can be used to flash our -device with the bootloader script we will use in the next step: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \ ---load-address 0x20000 \ ---bin main_nonsecure_ota.bin \ --i 6 \ --o main_nonsecure_wire \ ---options 0x1 -``` - -You should now have a file called `main_nonsecure_wire.bin` in the directory -where you ran the commands. This is the file we'll be flashing to the device. - -### Flash the binary - -Next, attach the board to your computer via a USB-to-serial adapter. 
- -**Note:** If you're using the [SparkFun Serial Basic Breakout](https://www.sparkfun.com/products/15096), -you should [install the latest drivers](https://learn.sparkfun.com/tutorials/sparkfun-serial-basic-ch340c-hookup-guide#drivers-if-you-need-them) -before you continue. - -Once connected, assign the USB device name to an environment variable: - -``` -export DEVICENAME=put your device name here -``` - -Set another variable with the baud rate: - -``` -export BAUD_RATE=921600 -``` - -Now, hold the button marked `14` on the device. While still holding the button, -hit the button marked `RST`. Continue holding the button marked `14` while -running the following command: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/uart_wired_update.py \ --b ${BAUD_RATE} ${DEVICENAME} \ --r 1 \ --f main_nonsecure_wire.bin \ --i 6 -``` - -You should see a long stream of output as the binary is flashed to the device. -Once you see the following lines, flashing is complete: - -``` -Sending Reset Command. -Done. -``` - -If you don't see these lines, flashing may have failed. Try running through the -steps in [Flash the binary](#flash-the-binary) again (you can skip over setting -the environment variables). If you continue to run into problems, follow the -[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) -codelab, which includes more comprehensive instructions for the flashing -process. - -The binary should now be deployed to the device. Hit the button marked `RST` to -reboot the board. You should see the device's four LEDs flashing in sequence. - -Debug information is logged by the board while the program is running. To view -it, establish a serial connection to the board using a baud rate of `115200`. -On OSX and Linux, the following command should work: - -``` -screen ${DEVICENAME} 115200 -``` - -You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`, -immediately followed by `Esc`. You can then use the arrow keys to explore the -output, which will contain the results of running inference on various `x` -values: - -``` -x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1 -``` - -To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately -followed by the `K` key, then hit the `Y` key. - - -## Deploy to STM32F746 - -The following instructions will help you build and deploy the sample to the -[STM32F7 discovery kit](https://os.mbed.com/platforms/ST-Discovery-F746NG/) -using [ARM Mbed](https://github.com/ARMmbed/mbed-cli). - -![Animation on STM32F746](images/animation_on_STM32F746.gif) - -Before we begin, you'll need the following: - -- STM32F7 discovery kit board -- Mini-USB cable -- ARM Mbed CLI ([installation instructions](https://os.mbed.com/docs/mbed-os/v5.12/tools/installation-and-setup.html)) -- Python 2.7 and pip - -Since Mbed requires a special folder structure for projects, we'll first run a -command to generate a subfolder containing the required source files in this -structure: - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=mbed TAGS="CMSIS disco_f746ng" generate_hello_world_mbed_project -``` - -This will result in the creation of a new folder: - -``` -tensorflow/lite/micro/tools/make/gen/mbed_cortex-m4/prj/hello_world/mbed -``` - -This folder contains all of the example's dependencies structured in the correct -way for Mbed to be able to build it. 
-
-Change into the directory and run the following commands, making sure you are
-using Python 2.7.15.
-
-First, tell Mbed that the current directory is the root of an Mbed project:
-
-```
-mbed config root .
-```
-
-Next, tell Mbed to download the dependencies and prepare to build:
-
-```
-mbed deploy
-```
-
-By default, Mbed will build the project using C++98. However, TensorFlow Lite
-requires C++11. Run the following Python snippet to modify the Mbed
-configuration files so that it uses C++11:
-
-```
-python -c 'import fileinput, glob;
-for filename in glob.glob("mbed-os/tools/profiles/*.json"):
-  for line in fileinput.input(filename, inplace=True):
-    print line.replace("\"-std=gnu++98\"","\"-std=c++11\", \"-fpermissive\"")'
-
-```
-
-Finally, run the following command to compile:
-
-```
-mbed compile -m DISCO_F746NG -t GCC_ARM
-```
-
-This should result in a binary at the following path:
-
-```
-./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin
-```
-
-To deploy, plug in your STM board and copy the file to it. On macOS, you can do
-this with the following command:
-
-```
-cp ./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin /Volumes/DIS_F746NG/
-```
-
-Copying the file will initiate the flashing process. Once this is complete, you
-should see an animation on the device's screen.
-
-In addition to this animation, debug information is logged by the board while
-the program is running. To view it, establish a serial connection to the board
-using a baud rate of `9600`. On OSX and Linux, the following command should
-work, replacing `/dev/tty.devicename` with the name of your device as it appears
-in `/dev`:
-
-```
-screen /dev/tty.devicename 9600
-```
-
-You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`,
-immediately followed by `Esc`. You can then use the arrow keys to explore the
-output, which will contain the results of running inference on various `x`
-values:
-
-```
-x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1
-```
-
-To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately
-followed by the `K` key, then hit the `Y` key.
-
-### Run the tests on a development machine
-
-To compile and test this example on a desktop Linux or macOS machine, first
-clone the TensorFlow repository from GitHub to a convenient place:
-
-```bash
-git clone --depth 1 https://github.com/tensorflow/tensorflow.git
-```
-
-Next, `cd` into the source directory from a terminal, and then run the following
-command:
-
-```bash
-make -f tensorflow/lite/micro/tools/make/Makefile test_hello_world_test
-```
-
-This will take a few minutes, and downloads frameworks the code uses. Once the
-process has finished, you should see a series of files get compiled, followed by
-some logging output from a test, which should conclude with
-`~~~ALL TESTS PASSED~~~`.
-
-If you see this, it means that a small program has been built and run that loads
-the trained TensorFlow model, runs some example inputs through it, and got the
-expected outputs.
-
-To understand how TensorFlow Lite does this, you can look at the source in
-[hello_world_test.cc](hello_world_test.cc).
-It's a fairly small amount of code that creates an interpreter, gets a handle to
-a model that's been compiled into the program, and then invokes the interpreter
-with the model and sample inputs.
-
-### Train your own model
-
-So far you have used an existing trained model to run inference on
-microcontrollers. 
If you wish to train your own model, follow the instructions -given in the [train/](train/) directory. - diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/README.md~ b/tensorflow/lite/micro/examples/person_detection_experimental/README.md~ deleted file mode 100644 index 4d53e551431..00000000000 --- a/tensorflow/lite/micro/examples/person_detection_experimental/README.md~ +++ /dev/null @@ -1,568 +0,0 @@ -# Person detection example - -This example shows how you can use Tensorflow Lite to run a 250 kilobyte neural -network to recognize people in images captured by a camera. It is designed to -run on systems with small amounts of memory such as microcontrollers and DSPs. -This uses the experimental int8 quantized version of the person detection model. - -## Table of contents - -- [Person detection example](#person-detection-example) - - [Table of contents](#table-of-contents) - - [Running on ARC EM SDP](#running-on-arc-em-sdp) - - [Initial setup](#initial-setup) - - [Generate Example Project](#generate-example-project) - - [Build and Run Example](#build-and-run-example) - - [Running on Arduino](#running-on-arduino) - - [Hardware](#hardware) - - [Install the Arduino_TensorFlowLite library](#install-the-arduinotensorflowlite-library) - - [Install other libraries](#install-other-libraries) - - [Load and run the example](#load-and-run-the-example) - - [Running on HIMAX WE1 EVB](#running-on-himax-we1-evb) - - [Initial Setup](#initial-setup) - - [MetaWare Development Toolkit](#metaware-development-toolkit) - - [Make Tool version](#make-tool-version) - - [Serial Terminal Emulation Application](#serial-terminal-emulation-application) - - [Generate Example Project](#generate-example-project-1) - - [Build and Burn Example](#build-and-burn-example) - - [Running on SparkFun Edge](#running-on-sparkfun-edge) - - [Compile the binary](#compile-the-binary) - - [Sign the binary](#sign-the-binary) - - [Flash the binary](#flash-the-binary) - - [Run the tests on a development machine](#run-the-tests-on-a-development-machine) - - [Debugging image capture](#debugging-image-capture) - - [Training your own model](#training-your-own-model) - -## Running on ARC EM SDP - -The following instructions will help you to build and deploy this example to -[ARC EM SDP](https://www.synopsys.com/dw/ipdir.php?ds=arc-em-software-development-platform) -board. General information and instructions on using the board with TensorFlow -Lite Micro can be found in the common -[ARC targets description](/tensorflow/lite/micro/tools/make/targets/arc/README.md). - -This example uses asymmetric int8 quantization and can therefore leverage -optimized int8 kernels from the embARC MLI library - -The ARC EM SDP board contains a rich set of extension interfaces. You can choose -any compatible camera and modify -[image_provider.cc](/tensorflow/lite/micro/examples/person_detection_experimental/image_provider.cc) -file accordingly to use input from your specific camera. By default, results of -running this example are printed to the console. If you would like to instead -implement some target-specific actions, you need to modify -[detection_responder.cc](/tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.cc) -accordingly. - -The reference implementations of these files are used by default on the EM SDP. 
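For orientation, those two hooks are small and self-contained. The sketch below shows their shape as declared in the example's headers at the time of these patches; `read_camera_frame()` and `set_person_led()` are hypothetical stand-ins for whatever camera driver and GPIO API your board provides, not functions from any real SDK:

```c++
#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

// Hypothetical board-support calls; replace with your platform's camera
// driver and GPIO routines.
extern "C" int read_camera_frame(int8_t* buffer, int buffer_size);
extern "C" void set_person_led(bool on);

// Fills image_data with a width x height x channels int8 grayscale frame.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data) {
  if (read_camera_frame(image_data, image_width * image_height * channels) != 0) {
    TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed");
    return kTfLiteError;
  }
  return kTfLiteOk;
}

// Called after every inference; scores are int8 values, higher means more
// confident.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
                        int8_t person_score, int8_t no_person_score) {
  set_person_led(person_score > no_person_score);
  TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
                       person_score, no_person_score);
}
```

Everything else in the example is platform-independent.
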
-
-### Initial setup
-
-Follow the instructions on the
-[ARC EM SDP Initial Setup](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP)
-to get and install all required tools for working with the ARC EM SDP.
-
-### Generate Example Project
-
-The example project for the ARC EM SDP platform can be generated with the
-following command:
-
-```
-make -f tensorflow/lite/micro/tools/make/Makefile TARGET=arc_emsdp generate_person_detection_int8_make_project
-```
-
-### Build and Run Example
-
-For more detailed information on building and running examples, see the
-appropriate sections of the general descriptions of the
-[ARC EM SDP usage with TFLM](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP).
-In the directory with the generated project you can also find a
-*README_ARC_EMSDP.md* file with instructions and options on building and
-running. Here we only briefly mention the main steps, which are typically
-enough to get started.
-
-1. You need to
-   [connect the board](/tensorflow/lite/micro/tools/make/targets/arc/README.md#connect-the-board)
-   and open a serial connection.
-
-2. Go to the generated example project directory
-
-   ```
-   cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/person_detection_int8/make
-   ```
-
-3. Build the example using
-
-   ```
-   make app
-   ```
-
-4. To generate artefacts for self-boot of the example from the board, use
-
-   ```
-   make flash
-   ```
-
-5. To run the application from the board using a microSD card:
-
-   * Copy the content of the created /bin folder into the root of the microSD
-     card. Note that the card must be formatted as FAT32 with default cluster
-     size (but less than 32 Kbytes)
-   * Plug the microSD card into the J11 connector.
-   * Push the RST button. If a red LED is lit beside the RST button, push the
-     CFG button.
-
-6. If you have the MetaWare Debugger installed in your environment:
-
-   * To run the application from the console, type `make run`.
-   * To stop the execution, type `Ctrl+C` in the console several times.
-
-In both cases (steps 5 and 6) you will see the application output in the serial
-terminal.
-
-## Running on Arduino
-
-The following instructions will help you build and deploy this sample
-to [Arduino](https://www.arduino.cc/) devices.
-
-The sample has been tested with the following device:
-
-- [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers)
-
-You will also need the following camera module:
-
-- [Arducam Mini 2MP Plus](https://www.amazon.com/Arducam-Module-Megapixels-Arduino-Mega2560/dp/B012UXNDOY)
-
-### Hardware
-
-Connect the Arducam pins as follows:
-
-|Arducam pin name|Arduino pin name|
-|----------------|----------------|
-|CS|D7 (unlabelled, immediately to the right of D6)|
-|MOSI|D11|
-|MISO|D12|
-|SCK|D13|
-|GND|GND (either pin marked GND is fine)|
-|VCC|3.3 V|
-|SDA|A4|
-|SCL|A5|
-
-### Install the Arduino_TensorFlowLite library
-
-Download the current nightly build of the library:
-[person_detection.zip](https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_01_13.zip)
-
-This example application is included as part of the official TensorFlow Lite
-Arduino library. To install it, open the Arduino library manager in
-`Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`.
- -### Install other libraries - -In addition to the TensorFlow library, you'll also need to install two -libraries: - -* The Arducam library, so our code can interface with the hardware -* The JPEGDecoder library, so we can decode JPEG-encoded images - -The Arducam Arduino library is available from GitHub at -[https://github.com/ArduCAM/Arduino](https://github.com/ArduCAM/Arduino). -To install it, download or clone the repository. Next, copy its `ArduCAM` -subdirectory into your `Arduino/libraries` directory. To find this directory on -your machine, check the *Sketchbook location* in the Arduino IDE's -*Preferences* window. - -After downloading the library, you'll need to edit one of its files to make sure -it is configured for the Arducam Mini 2MP Plus. To do so, open the following -file: - -``` -Arduino/libraries/ArduCAM/memorysaver.h -``` - -You'll see a bunch of `#define` statements listed. Make sure that they are all -commented out, except for `#define OV2640_MINI_2MP_PLUS`, as so: - -``` -//Step 1: select the hardware platform, only one at a time -//#define OV2640_MINI_2MP -//#define OV3640_MINI_3MP -//#define OV5642_MINI_5MP -//#define OV5642_MINI_5MP_BIT_ROTATION_FIXED -#define OV2640_MINI_2MP_PLUS -//#define OV5642_MINI_5MP_PLUS -//#define OV5640_MINI_5MP_PLUS -``` - -Once you save the file, we're done configuring the Arducam library. - -Our next step is to install the JPEGDecoder library. We can do this from within -the Arduino IDE. First, go to the *Manage Libraries...* option in the *Tools* -menu and search for `JPEGDecoder`. You should install version _1.8.0_ of the -library. - -Once the library has installed, we'll need to configure it to disable some -optional components that are not compatible with the Arduino Nano 33 BLE Sense. -Open the following file: - -``` -Arduino/libraries/JPEGDecoder/src/User_Config.h -``` - -Make sure that both `#define LOAD_SD_LIBRARY` and `#define LOAD_SDFAT_LIBRARY` -are commented out, as shown in this excerpt from the file: - -```c++ -// Comment out the next #defines if you are not using an SD Card to store the JPEGs -// Commenting out the line is NOT essential but will save some FLASH space if -// SD Card access is not needed. Note: use of SdFat is currently untested! - -//#define LOAD_SD_LIBRARY // Default SD Card library -//#define LOAD_SDFAT_LIBRARY // Use SdFat library instead, so SD Card SPI can be bit bashed -``` - -Once you've saved the file, you are done installing libraries. - -### Load and run the example - -Go to `File -> Examples`. You should see an -example near the bottom of the list named `TensorFlowLite`. Select -it and click `person_detection` to load the example. Connect your device, then -build and upload the example. - -To test the camera, start by pointing the device's camera at something that is -definitely not a person, or just covering it up. The next time the blue LED -flashes, the device will capture a frame from the camera and begin to run -inference. Since the vision model we are using for person detection is -relatively large, it takes a long time to run inference—around 19 seconds at the -time of writing, though it's possible TensorFlow Lite has gotten faster since -then. - -After 19 seconds or so, the inference result will be translated into another LED -being lit. Since you pointed the camera at something that isn't a person, the -red LED should light up. - -Now, try pointing the device's camera at yourself! The next time the blue LED -flashes, the device will capture another image and begin to run inference. 
After
-19 seconds, the green LED should light up!
-
-Remember, image data is captured as a snapshot before each inference, whenever
-the blue LED flashes. Whatever the camera is pointed at during that moment is
-what will be fed into the model. It doesn't matter where the camera is pointed
-until the next time an image is captured, when the blue LED will flash again.
-
-If you're getting seemingly incorrect results, make sure you are in an
-environment with good lighting. You should also make sure that the camera is
-oriented correctly, with the pins pointing downwards, so that the images it
-captures are the right way up—the model was not trained to recognize upside-down
-people! In addition, it's good to remember that this is a tiny model, which
-trades accuracy for small size. It works very well, but it isn't accurate 100%
-of the time.
-
-We can also see the results of inference via the Arduino Serial Monitor. To do
-this, open the *Serial Monitor* from the *Tools* menu. You'll see a detailed
-log of what is happening while our application runs. It's also interesting to
-check the *Show timestamp* box, so you can see how long each part of the process
-takes:
-
-```
-14:17:50.714 -> Starting capture
-14:17:50.714 -> Image captured
-14:17:50.784 -> Reading 3080 bytes from ArduCAM
-14:17:50.887 -> Finished reading
-14:17:50.887 -> Decoding JPEG and converting to greyscale
-14:17:51.074 -> Image decoded and processed
-14:18:09.710 -> Person score: 246 No person score: 66
-```
-
-From the log, we can see that it took around 170 ms to capture and read the
-image data from the camera module, 180 ms to decode the JPEG and convert it to
-greyscale, and 18.6 seconds to run inference.
-
-## Running on HIMAX WE1 EVB
-
-The following instructions will help you build and deploy this example to the
-[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
-board. To understand more about using this board, please check the
-[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide).
-
-### Initial Setup
-
-To use the HIMAX WE1 EVB, please make sure the following software is installed:
-
-#### MetaWare Development Toolkit
-
-See the
-[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit)
-section for instructions on toolchain installation.
-
-#### Make Tool version
-
-A `'make'` tool is required for deploying TensorFlow Lite Micro
-applications on the HIMAX WE1 EVB. See the
-[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool)
-section for the proper environment.
-
-#### Serial Terminal Emulation Application
-
-The HIMAX WE1 EVB Debug UART port serves two main purposes:
-
-- printing application output
-- burning the application binary to flash over XMODEM
-
-You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)).
-
-
-### Generate Example Project
-
-The example project for the HIMAX WE1 EVB platform can be generated with the
-following commands.
-
-First, download the related third-party data:
-
-```
-make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads
-```
-
-Then generate the person detection project:
-
-```
-make -f tensorflow/lite/micro/tools/make/Makefile generate_person_detection_int8_make_project TARGET=himax_we1_evb
-```
-
-### Build and Burn Example
-
-Follow these steps to run the person detection example on the HIMAX WE1 EVB
-platform.
-
-1. Go to the generated example project directory.
-
-   ```
-   cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/person_detection_int8/make
-   ```
-
-2. Build the example using
-
-   ```
-   make app
-   ```
-
-3. After the build finishes, copy the ELF file and map file to the image
-   generation tool directory, located at
-   `'tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/'`:
-
-   ```
-   cp person_detection_int8.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
-   ```
-
-4. Go to the flash image generation tool directory.
-
-   ```
-   cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
-   ```
-
-5. Run the image generation tool to generate the flash image file.
-
-   * Before running the image generation tool, type `sudo chmod +x image_gen`
-     and `sudo chmod +x sign_tool` to make sure both tools are executable.
-
-   ```
-   image_gen -e person_detection_int8.elf -m himax_we1_evb.map -o out.img
-   ```
-
-
-6. Download the flash image file to the HIMAX WE1 EVB over UART:
-
-   * More detail about downloading the image through UART can be found at
-     [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update)
-
-After these steps, press the reset button on the HIMAX WE1 EVB and you will see
-the application output in the serial terminal.
-
-## Running on SparkFun Edge
-
-The following instructions will help you build and deploy this sample on the
-[SparkFun Edge development board](https://sparkfun.com/products/15170). This
-sample requires the Sparkfun Himax camera for the Sparkfun Edge board. It is
-not available for purchase yet.
-
-If you're new to using this board, we recommend walking through the
-[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow)
-codelab to get an understanding of the workflow.
-
-### Compile the binary
-
-The following command will download the required dependencies and then compile a
-binary for the SparkFun Edge:
-
-```
-make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge person_detection_bin
-```
-
-The binary will be created in the following location:
-
-```
-tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/person_detection.bin
-```
-
-### Sign the binary
-
-The binary must be signed with cryptographic keys to be deployed to the device.
-We'll now run some commands that will sign our binary so it can be flashed to
-the SparkFun Edge. The scripts we are using come from the Ambiq SDK, which is
-downloaded when the `Makefile` is run.
- -Enter the following command to set up some dummy cryptographic keys we can use -for development: - -``` -cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info0.py \ -tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info.py -``` - -Next, run the following command to create a signed binary: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_image_blob.py \ ---bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/person_detection.bin \ ---load-address 0xC000 \ ---magic-num 0xCB \ --o main_nonsecure_ota \ ---version 0x0 -``` - -This will create the file `main_nonsecure_ota.bin`. We'll now run another -command to create a final version of the file that can be used to flash our -device with the bootloader script we will use in the next step: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \ ---load-address 0x20000 \ ---bin main_nonsecure_ota.bin \ --i 6 \ --o main_nonsecure_wire \ ---options 0x1 -``` - -You should now have a file called `main_nonsecure_wire.bin` in the directory -where you ran the commands. This is the file we'll be flashing to the device. - -### Flash the binary - -Next, attach the board to your computer via a USB-to-serial adapter. - -**Note:** If you're using the [SparkFun Serial Basic Breakout](https://www.sparkfun.com/products/15096), -you should [install the latest drivers](https://learn.sparkfun.com/tutorials/sparkfun-serial-basic-ch340c-hookup-guide#drivers-if-you-need-them) -before you continue. - -Once connected, assign the USB device name to an environment variable: - -``` -export DEVICENAME=put your device name here -``` - -Set another variable with the baud rate: - -``` -export BAUD_RATE=921600 -``` - -Now, hold the button marked `14` on the device. While still holding the button, -hit the button marked `RST`. Continue holding the button marked `14` while -running the following command: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/uart_wired_update.py \ --b ${BAUD_RATE} ${DEVICENAME} \ --r 1 \ --f main_nonsecure_wire.bin \ --i 6 -``` - -You should see a long stream of output as the binary is flashed to the device. -Once you see the following lines, flashing is complete: - -``` -Sending Reset Command. -Done. -``` - -If you don't see these lines, flashing may have failed. Try running through the -steps in [Flash the binary](#flash-the-binary) again (you can skip over setting -the environment variables). If you continue to run into problems, follow the -[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) -codelab, which includes more comprehensive instructions for the flashing -process. - -The binary should now be deployed to the device. Hit the button marked `RST` to -reboot the board. You should see the device's four LEDs flashing in sequence. - -Debug information is logged by the board while the program is running. To view -it, establish a serial connection to the board using a baud rate of `115200`. -On OSX and Linux, the following command should work: - -``` -screen ${DEVICENAME} 115200 -``` - -To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately -followed by the `K` key, then hit the `Y` key. 
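Whichever board the example runs on, the code path through TensorFlow Lite Micro is the same one the desktop test described next exercises. The condensed sketch below illustrates it; it assumes the TFLM API from roughly the time of these patches (header and class names have moved between versions), and the arena size is illustrative rather than the value the example actually uses:

```c++
#include <cstdint>

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"

// Illustrative arena size; see the example's main_functions.cc for the value
// it really uses.
constexpr int kTensorArenaSize = 125 * 1024;
static uint8_t tensor_arena[kTensorArenaSize];

int8_t RunPersonDetection(const int8_t* image, int image_bytes) {
  static tflite::MicroErrorReporter error_reporter;
  const tflite::Model* model = tflite::GetModel(g_person_detect_model_data);
  static tflite::AllOpsResolver resolver;
  static tflite::MicroInterpreter interpreter(
      model, resolver, tensor_arena, kTensorArenaSize, &error_reporter);
  if (interpreter.AllocateTensors() != kTfLiteOk) return -128;

  // Copy a 96x96 grayscale int8 image into the input tensor and run it.
  TfLiteTensor* input = interpreter.input(0);
  for (int i = 0; i < image_bytes; ++i) input->data.int8[i] = image[i];
  interpreter.Invoke();

  // The output holds one score per category; kPersonIndex comes from
  // model_settings.h.
  return interpreter.output(0)->data.int8[kPersonIndex];
}
```
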
-
-## Run the tests on a development machine
-
-To compile and test this example on a desktop Linux or macOS machine, download
-[the TensorFlow source code](https://github.com/tensorflow/tensorflow), `cd`
-into the source directory from a terminal, and then run the following command:
-
-```
-make -f tensorflow/lite/micro/tools/make/Makefile
-```
-
-This will take a few minutes, and downloads frameworks the code uses like
-[CMSIS](https://developer.arm.com/embedded/cmsis) and
-[flatbuffers](https://google.github.io/flatbuffers/). Once that process has
-finished, run:
-
-```
-make -f tensorflow/lite/micro/tools/make/Makefile test_person_detection_test
-```
-
-You should see a series of files get compiled, followed by some logging output
-from a test, which should conclude with `~~~ALL TESTS PASSED~~~`. If you see
-this, it means that a small program has been built and run that loads a trained
-TensorFlow model, runs some example images through it, and got the expected
-outputs. This particular test runs images with and without a person in them,
-and checks that the network correctly identifies them.
-
-To understand how TensorFlow Lite does this, you can look at the `TestInvoke()`
-function in
-[person_detection_test.cc](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/person_detection/person_detection_test.cc).
-It's a fairly small amount of code, creating an interpreter, getting a handle to
-a model that's been compiled into the program, and then invoking the interpreter
-with the model and sample inputs.
-
-## Debugging image capture
-When the sample is running, check the LEDs to determine whether the inference is
-running correctly. If the red light is stuck on, it means there was an error
-communicating with the camera. This is likely due to an incorrectly connected
-or broken camera.
-
-During inference, the blue LED will toggle every time inference is complete. The
-orange LED indicates that no person was found, and the green LED indicates a
-person was found. The red LED should never turn on, since it indicates an error.
-
-In order to view the captured image, set the `DUMP_IMAGE` define in main.cc. This
-causes the board to log raw image info to the console. After the board has been
-flashed and reset, dump the log to a text file:
-
-
-```
-screen -L -Logfile ${DEVICENAME} 115200
-```
-
-Next, run the raw to bitmap converter to view captured images:
-
-```
-python3 raw_to_bitmap.py -r GRAY -i 
-```
-
-## Training your own model
-
-You can train your own model with some easy-to-use scripts. See
-[training_a_model.md](training_a_model.md) for instructions.

From 39e65d52e400f8c343195e2f8ac34f286648a415 Mon Sep 17 00:00:00 2001
From: "902449@58880@bigcat_chen@ASIC"
Date: Thu, 4 Jun 2020 18:19:58 +0800
Subject: [PATCH 05/11] remove temp makefile in target

---
 .../make/targets/himax_we1_evb_makefile.inc~  | 93 -------------------
 1 file changed, 93 deletions(-)
 delete mode 100644 tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~

diff --git a/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~ b/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~
deleted file mode 100644
index 733f258fbbb..00000000000
--- a/tensorflow/lite/micro/tools/make/targets/himax_we1_evb_makefile.inc~
+++ /dev/null
@@ -1,93 +0,0 @@
-# Settings for himax WE_1 evb.
-ifeq ($(TARGET), himax_we1_evb) - - CC_TOOL = ccac - AR_TOOL = arac - CXX_TOOL = ccac - LD_TOOL := ccac - TARGET_ARCH := arc - #ARC_TOOLCHAIN := mwdt - - BUILD_ARC_MLI := false - ARC_MLI_PRE_COMPILED_TARGET := himax_arcem9d_r16 - -include $(MAKEFILE_DIR)/targets/arc/arc_common.inc - #download SDK & MLI - HIMAX_WE1_SDK_NAME := himax_we1_sdk - #MLI_LIB_DIR = arc_mli_package - #MLI_LIB_DIR = arc_mli_package - #$(eval $(call add_third_party_download,$(EMBARC_MLI_PRE_COMPILED_URL),$(EMBARC_MLI_PRE_COMPILED_MD5),$(MLI_LIB_DIR),)) - $(eval $(call add_third_party_download,$(HIMAX_WE1_SDK_URL),$(HIMAX_WE1_SDK_MD5),$(HIMAX_WE1_SDK_NAME),)) - - #export path of toolchain - #export PATH := $(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/image_gen_linux_v3/:$(PATH) - - TCF_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/arcem9d_wei_r16.tcf - LCF_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/memory.lcf - ARCLIB_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/libembarc.a - LIB_HEADER_FILE := $(PWD)/$(MAKEFILE_DIR)/downloads/$(HIMAX_WE1_SDK_NAME)/hx_drv_tflm.h - - - DEFAULT_HEAPSZ := 8192 - DEFAULT_STACKSZ := 8192 - - TCF_FILE_NAME = $(notdir $(TCF_FILE)) - ARC_TARGET_FILES_DIRS = $(dir $(TCF_FILE_NAME)) - MAKE_PROJECT_FILES += $(TCF_FILE_NAME) - - LCF_FILE_NAME = $(notdir $(LCF_FILE)) - ARC_TARGET_FILES_DIRS += $(dir $(LCF_FILE)) - MAKE_PROJECT_FILES += $(LCF_FILE_NAME) - - ARCLIB_FILE_NAME = $(notdir $(ARCLIB_FILE)) - ARC_TARGET_FILES_DIRS += $(dir $(ARCLIB_FILE)) - MAKE_PROJECT_FILES += $(ARCLIB_FILE_NAME) - - LIB_HEADER_FILE_NAME = $(notdir $(LIB_HEADER_FILE)) - ARC_TARGET_FILES_DIRS += $(dir $(LIB_HEADER_FILE)) - MAKE_PROJECT_FILES += $(LIB_HEADER_FILE_NAME) - - - - # Need a pointer to the TCF and lcf file - - PLATFORM_FLAGS = \ - -DNDEBUG \ - -g \ - -DCPU_ARC \ - -Hnosdata \ - -DTF_LITE_STATIC_MEMORY \ - -tcf=$(TCF_FILE_NAME) \ - -Hnocopyr \ - -Hpurge \ - -Hcl \ - -fslp-vectorize-aggressive \ - -ffunction-sections \ - -fdata-sections \ - -tcf_core_config \ - - CXXFLAGS += -fno-rtti -DSCRATCH_MEM_Z_SIZE=0x10000 $(PLATFORM_FLAGS) - CCFLAGS += $(PLATFORM_FLAGS) - - INCLUDES+= \ - -I $(MAKEFILE_DIR)/downloads/$(WEI_SDK_NAME) \ - -I $(MAKEFILE_DIR)/downloads/kissfft - - GENERATED_PROJECT_INCLUDES += \ - -I. \ - -I./third_party/kissfft - - LDFLAGS += \ - -Hheap=8192 \ - -tcf=$(TCF_FILE_NAME) \ - -Hnocopyr \ - -m \ - -Hldopt=-Coutput=$(TARGET).map \ - $(LCF_FILE_NAME) \ - -Hldopt=-Bgrouplib $(ARCLIB_FILE_NAME) - - CXXFLAGS := $(filter-out -std=c++11,$(CXXFLAGS)) - CCFLAGS := $(filter-out -std=c11,$(CCFLAGS)) - MICROLITE_LIBS := $(filter-out -lm,$(MICROLITE_LIBS)) - -endif From ac123654efc63ffa17240479a5b926ca6357c766 Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Fri, 5 Jun 2020 10:03:47 +0800 Subject: [PATCH 06/11] TFLM: update hello world example readme --- .../lite/micro/examples/hello_world/README.md | 4 +- .../micro/examples/hello_world/README.md~ | 575 ++++++++++++++++++ 2 files changed, 577 insertions(+), 2 deletions(-) create mode 100644 tensorflow/lite/micro/examples/hello_world/README.md~ diff --git a/tensorflow/lite/micro/examples/hello_world/README.md b/tensorflow/lite/micro/examples/hello_world/README.md index d3762ada790..26b0f12c83a 100644 --- a/tensorflow/lite/micro/examples/hello_world/README.md +++ b/tensorflow/lite/micro/examples/hello_world/README.md @@ -17,7 +17,7 @@ of the device. 
 - [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp)
 - [Deploy to Arduino](#deploy-to-arduino)
 - [Deploy to ESP32](#deploy-to-esp32)
-- [Deploy to himax WE1 EVB](#deploy-to-himax-we1-evb)
+- [Deploy to Himax WE1 EVB](#deploy-to-himax-we1-evb)
 - [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge)
 - [Deploy to STM32F746](#deploy-to-STM32F746)
 - [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
 - [Train your own model](#train-your-own-model)
@@ -192,7 +192,7 @@ The previous two commands can be combined:
 idf.py --port /dev/ttyUSB0 flash monitor
 ```
 
-## Deploy to himax WE1 EVB
+## Deploy to Himax WE1 EVB
 
 The following instructions will help you build and deploy this example to
 [HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
You need to + [connect the board](/tensorflow/lite/micro/tools/make/targets/arc/README.md#connect-the-board) + and open an serial connection. + +2. Go to the generated example project director + + ``` + cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/hello_world/make + ``` + +3. Build the example using + + ``` + make app + ``` + +4. To generate artefacts for self-boot of example from the board use + + ``` + make flash + ``` + +5. To run application from the board using microSD card: + + * Copy the content of the created /bin folder into the root of microSD + card. Note that the card must be formatted as FAT32 with default cluster + size (but less than 32 Kbytes) + * Plug in the microSD card into the J11 connector. + * Push the RST button. If a red LED is lit beside RST button, push the CFG + button. + +6. If you have the MetaWare Debugger installed in your environment: + + * To run application from the console using it type `make run`. + * To stop the execution type `Ctrl+C` in the console several times. + +In both cases (step 5 and 6) you will see the application output in the serial +terminal. + +## Deploy to Arduino + +The following instructions will help you build and deploy this sample +to [Arduino](https://www.arduino.cc/) devices. + +![Animation on Arduino MKRZERO](images/animation_on_arduino_mkrzero.gif) + +The sample has been tested with the following devices: + +- [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers) +- [Arduino MKRZERO](https://store.arduino.cc/usa/arduino-mkrzero) + +The sample will use PWM to fade an LED on and off according to the model's +output. In the code, the `LED_BUILTIN` constant is used to specify the board's +built-in LED as the one being controlled. However, on some boards, this built-in +LED is not attached to a pin with PWM capabilities. In this case, the LED will +blink instead of fading. + +### Install the Arduino_TensorFlowLite library + +This example application is included as part of the official TensorFlow Lite +Arduino library. To install it, open the Arduino library manager in +`Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`. + +### Load and run the example + +Once the library has been added, go to `File -> Examples`. You should see an +example near the bottom of the list named `TensorFlowLite:hello_world`. Select +it and click `hello_world` to load the example. + +Use the Arduino IDE to build and upload the example. Once it is running, +you should see the built-in LED on your device flashing. + +The Arduino Desktop IDE includes a plotter that we can use to display the sine +wave graphically. To view it, go to `Tools -> Serial Plotter`. You will see one +datapoint being logged for each inference cycle, expressed as a number between 0 +and 255. + +## Deploy to ESP32 + +The following instructions will help you build and deploy this sample +to [ESP32](https://www.espressif.com/en/products/hardware/esp32/overview) +devices using the [ESP IDF](https://github.com/espressif/esp-idf). 
+ +The sample has been tested on ESP-IDF version 4.0 with the following devices: +- [ESP32-DevKitC](http://esp-idf.readthedocs.io/en/latest/get-started/get-started-devkitc.html) +- [ESP-EYE](https://github.com/espressif/esp-who/blob/master/docs/en/get-started/ESP-EYE_Getting_Started_Guide.md) + +### Install the ESP IDF + +Follow the instructions of the +[ESP-IDF get started guide](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html) +to setup the toolchain and the ESP-IDF itself. + +The next steps assume that the +[IDF environment variables are set](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html#step-4-set-up-the-environment-variables) : + + * The `IDF_PATH` environment variable is set + * `idf.py` and Xtensa-esp32 tools (e.g. `xtensa-esp32-elf-gcc`) are in `$PATH` + +### Generate the examples +The example project can be generated with the following command: +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=esp generate_hello_world_esp_project +``` + +### Building the example + +Go the the example project directory +``` +cd tensorflow/lite/micro/tools/make/gen/esp_xtensa-esp32/prj/hello_world/esp-idf +``` + +Then build with `idf.py` +``` +idf.py build +``` + +### Load and run the example + +To flash (replace `/dev/ttyUSB0` with the device serial port): +``` +idf.py --port /dev/ttyUSB0 flash +``` + +Monitor the serial output: +``` +idf.py --port /dev/ttyUSB0 monitor +``` + +Use `Ctrl+]` to exit. + +The previous two commands can be combined: +``` +idf.py --port /dev/ttyUSB0 flash monitor +``` + +## Deploy to himax WE1 EVB + +The following instructions will help you build and deploy this example to +[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief) +board. To undstand more about using this board, please check +[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide). + +### Initial Setup + +To use the HIMAX WE1 EVB, please make sure following software are installed: + +#### MetaWare Development Toolkit + +See +[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit) +section for instructions on toolchain installation. + +#### Make Tool version + +A `'make'` tool is required for deploying Tensorflow Lite Micro +applications on HIMAX WE1 EVB, See +[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool) +section for proper environment. + +#### Serial Terminal Emulation Application + +There are 2 main purposes for HIMAX WE1 EVB Debug UART port + +- print application output +- burn application to flash by using xmodem send application binary + +You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)). + + +### Generate Example Project + +The example project for HIMAX WE1 EVB platform can be generated with the following +command: + +Download related third party data + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads +``` + +Generate hello world project + +``` +make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_make_project TARGET=himax_we1_evb TAGS=no_arc_mli +``` + +### Build and Burn Example + +Following the Steps to run hello world example at HIMAX WE1 EVB platform. + +1. Go to the generated example project directory. 
+ + ``` + cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/hello_world/make + ``` + +2. Build the example using + + ``` + make app + ``` + +3. After example build finish, copy ELF file and map file to image generate tool directory. + image generate tool directory located at `'tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/'` + + ``` + cp hello_world.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ + ``` + +4. Go to flash image generate tool directory. + + ``` + cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ + ``` + +5. run image generate tool, generate flash image file. + + * Before running image generate tool, by typing `sudo chmod +x image_gen` + and `sudo chmod +x sign_tool` to make sure it is executable. + + ``` + image_gen -e hello_world.elf -m himax_we1_evb.map -o out.img + ``` + + +6. Download flash image file to HIMAX WE1 EVB by UART: + + * more detail about download image through UART can be found at [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update) + +After these steps, press reset button on the HIMAX WE1 EVB, you will see application output in the serial +terminal. + +## Deploy to SparkFun Edge + +The following instructions will help you build and deploy this sample on the +[SparkFun Edge development board](https://sparkfun.com/products/15170). + +![Animation on SparkFun Edge](images/animation_on_sparkfun_edge.gif) + +If you're new to using this board, we recommend walking through the +[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) +codelab to get an understanding of the workflow. + +### Compile the binary + +The following command will download the required dependencies and then compile a +binary for the SparkFun Edge: + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge hello_world_bin +``` + +The binary will be created in the following location: + +``` +tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin +``` + +### Sign the binary + +The binary must be signed with cryptographic keys to be deployed to the device. +We'll now run some commands that will sign our binary so it can be flashed to +the SparkFun Edge. The scripts we are using come from the Ambiq SDK, which is +downloaded when the `Makefile` is run. + +Enter the following command to set up some dummy cryptographic keys we can use +for development: + +``` +cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info0.py \ +tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info.py +``` + +Next, run the following command to create a signed binary: + +``` +python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_image_blob.py \ +--bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin \ +--load-address 0xC000 \ +--magic-num 0xCB \ +-o main_nonsecure_ota \ +--version 0x0 +``` + +This will create the file `main_nonsecure_ota.bin`. 
We'll now run another +command to create a final version of the file that can be used to flash our +device with the bootloader script we will use in the next step: + +``` +python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \ +--load-address 0x20000 \ +--bin main_nonsecure_ota.bin \ +-i 6 \ +-o main_nonsecure_wire \ +--options 0x1 +``` + +You should now have a file called `main_nonsecure_wire.bin` in the directory +where you ran the commands. This is the file we'll be flashing to the device. + +### Flash the binary + +Next, attach the board to your computer via a USB-to-serial adapter. + +**Note:** If you're using the [SparkFun Serial Basic Breakout](https://www.sparkfun.com/products/15096), +you should [install the latest drivers](https://learn.sparkfun.com/tutorials/sparkfun-serial-basic-ch340c-hookup-guide#drivers-if-you-need-them) +before you continue. + +Once connected, assign the USB device name to an environment variable: + +``` +export DEVICENAME=put your device name here +``` + +Set another variable with the baud rate: + +``` +export BAUD_RATE=921600 +``` + +Now, hold the button marked `14` on the device. While still holding the button, +hit the button marked `RST`. Continue holding the button marked `14` while +running the following command: + +``` +python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/uart_wired_update.py \ +-b ${BAUD_RATE} ${DEVICENAME} \ +-r 1 \ +-f main_nonsecure_wire.bin \ +-i 6 +``` + +You should see a long stream of output as the binary is flashed to the device. +Once you see the following lines, flashing is complete: + +``` +Sending Reset Command. +Done. +``` + +If you don't see these lines, flashing may have failed. Try running through the +steps in [Flash the binary](#flash-the-binary) again (you can skip over setting +the environment variables). If you continue to run into problems, follow the +[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) +codelab, which includes more comprehensive instructions for the flashing +process. + +The binary should now be deployed to the device. Hit the button marked `RST` to +reboot the board. You should see the device's four LEDs flashing in sequence. + +Debug information is logged by the board while the program is running. To view +it, establish a serial connection to the board using a baud rate of `115200`. +On OSX and Linux, the following command should work: + +``` +screen ${DEVICENAME} 115200 +``` + +You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`, +immediately followed by `Esc`. You can then use the arrow keys to explore the +output, which will contain the results of running inference on various `x` +values: + +``` +x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1 +``` + +To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately +followed by the `K` key, then hit the `Y` key. + + +## Deploy to STM32F746 + +The following instructions will help you build and deploy the sample to the +[STM32F7 discovery kit](https://os.mbed.com/platforms/ST-Discovery-F746NG/) +using [ARM Mbed](https://github.com/ARMmbed/mbed-cli). 
+ +![Animation on STM32F746](images/animation_on_STM32F746.gif) + +Before we begin, you'll need the following: + +- STM32F7 discovery kit board +- Mini-USB cable +- ARM Mbed CLI ([installation instructions](https://os.mbed.com/docs/mbed-os/v5.12/tools/installation-and-setup.html)) +- Python 2.7 and pip + +Since Mbed requires a special folder structure for projects, we'll first run a +command to generate a subfolder containing the required source files in this +structure: + +``` +make -f tensorflow/lite/micro/tools/make/Makefile TARGET=mbed TAGS="CMSIS disco_f746ng" generate_hello_world_mbed_project +``` + +This will result in the creation of a new folder: + +``` +tensorflow/lite/micro/tools/make/gen/mbed_cortex-m4/prj/hello_world/mbed +``` + +This folder contains all of the example's dependencies structured in the correct +way for Mbed to be able to build it. + +Change into the directory and run the following commands, making sure you are +using Python 2.7.15. + +First, tell Mbed that the current directory is the root of an Mbed project: + +``` +mbed config root . +``` + +Next, tell Mbed to download the dependencies and prepare to build: + +``` +mbed deploy +``` + +By default, Mbed will build the project using C++98. However, TensorFlow Lite +requires C++11. Run the following Python snippet to modify the Mbed +configuration files so that it uses C++11: + +``` +python -c 'import fileinput, glob; +for filename in glob.glob("mbed-os/tools/profiles/*.json"): + for line in fileinput.input(filename, inplace=True): + print line.replace("\"-std=gnu++98\"","\"-std=c++11\", \"-fpermissive\"")' + +``` + +Finally, run the following command to compile: + +``` +mbed compile -m DISCO_F746NG -t GCC_ARM +``` + +This should result in a binary at the following path: + +``` +./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin +``` + +To deploy, plug in your STM board and copy the file to it. On MacOS, you can do +this with the following command: + +``` +cp ./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin /Volumes/DIS_F746NG/ +``` + +Copying the file will initiate the flashing process. Once this is complete, you +should see an animation on the device's screen. + + +``` +screen /dev/tty.usbmodem14403 9600 +``` + +In addition to this animation, debug information is logged by the board while +the program is running. To view it, establish a serial connection to the board +using a baud rate of `9600`. On OSX and Linux, the following command should +work, replacing `/dev/tty.devicename` with the name of your device as it appears +in `/dev`: + +``` +screen /dev/tty.devicename 9600 +``` + +You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`, +immediately followed by `Esc`. You can then use the arrow keys to explore the +output, which will contain the results of running inference on various `x` +values: + +``` +x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1 +``` + +To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately +followed by the `K` key, then hit the `Y` key. + +### Run the tests on a development machine + +To compile and test this example on a desktop Linux or macOS machine, first +clone the TensorFlow repository from GitHub to a convenient place: + +```bash +git clone --depth 1 https://github.com/tensorflow/tensorflow.git +``` + +Next, `cd` into the source directory from a terminal, and then run the following +command: + +```bash +make -f tensorflow/lite/micro/tools/make/Makefile test_hello_world_test +``` + +This will take a few minutes, and downloads frameworks the code uses. 
Once the +process has finished, you should see a series of files get compiled, followed by +some logging output from a test, which should conclude with +`~~~ALL TESTS PASSED~~~`. + +If you see this, it means that a small program has been built and run that loads +the trained TensorFlow model, runs some example inputs through it, and got the +expected outputs. + +To understand how TensorFlow Lite does this, you can look at the source in +[hello_world_test.cc](hello_world_test.cc). +It's a fairly small amount of code that creates an interpreter, gets a handle to +a model that's been compiled into the program, and then invokes the interpreter +with the model and sample inputs. + +### Train your own model + +So far you have used an existing trained model to run inference on +microcontrollers. If you wish to train your own model, follow the instructions +given in the [train/](train/) directory. + From 1c26e6abd76fe700ecf87d892ceed1dc5bfa90d3 Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Fri, 5 Jun 2020 10:12:46 +0800 Subject: [PATCH 07/11] TFLM: delete temp readme file in hello world example --- .../micro/examples/hello_world/README.md~ | 575 ------------------ 1 file changed, 575 deletions(-) delete mode 100644 tensorflow/lite/micro/examples/hello_world/README.md~ diff --git a/tensorflow/lite/micro/examples/hello_world/README.md~ b/tensorflow/lite/micro/examples/hello_world/README.md~ deleted file mode 100644 index d3762ada790..00000000000 --- a/tensorflow/lite/micro/examples/hello_world/README.md~ +++ /dev/null @@ -1,575 +0,0 @@ -# Hello World Example - -This example is designed to demonstrate the absolute basics of using [TensorFlow -Lite for Microcontrollers](https://www.tensorflow.org/lite/microcontrollers). -It includes the full end-to-end workflow of training a model, converting it for -use with TensorFlow Lite for Microcontrollers for running inference on a -microcontroller. - -The model is trained to replicate a `sine` function and generates a pattern of -data to either blink LEDs or control an animation, depending on the capabilities -of the device. - -![Animation on STM32F746](images/animation_on_STM32F746.gif) - -## Table of contents - -- [Deploy to ARC EM SDP](#deploy-to-arc-em-sdp) -- [Deploy to Arduino](#deploy-to-arduino) -- [Deploy to ESP32](#deploy-to-esp32) -- [Deploy to himax WE1 EVB](#deploy-to-himax-we1-evb) -- [Deploy to SparkFun Edge](#deploy-to-sparkfun-edge) -- [Deploy to STM32F746](#deploy-to-STM32F746) -- [Run the tests on a development machine](#run-the-tests-on-a-development-machine) -- [Train your own model](#train-your-own-model) - -## Deploy to ARC EM SDP - -The following instructions will help you to build and deploy this example to -[ARC EM SDP](https://www.synopsys.com/dw/ipdir.php?ds=arc-em-software-development-platform) -board. General information and instructions on using the board with TensorFlow -Lite Micro can be found in the common -[ARC targets description](/tensorflow/lite/micro/tools/make/targets/arc/README.md). - -### Initial Setup - -Follow the instructions on the -[ARC EM SDP Initial Setup](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP) -to get and install all required tools for work with ARC EM SDP. 
- -### Generate Example Project - -The example project for ARC EM SDP platform can be generated with the following -command: - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=arc_emsdp TAGS=no_arc_mli generate_hello_world_make_project -``` - -### Build and Run Example - -For more detailed information on building and running examples see the -appropriate sections of general descriptions of the -[ARC EM SDP usage with TFLM](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP). -In the directory with generated project you can also find a -*README_ARC_EMSDP.md* file with instructions and options on building and -running. Here we only briefly mention main steps which are typically enough to -get it started. - -1. You need to - [connect the board](/tensorflow/lite/micro/tools/make/targets/arc/README.md#connect-the-board) - and open an serial connection. - -2. Go to the generated example project director - - ``` - cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/hello_world/make - ``` - -3. Build the example using - - ``` - make app - ``` - -4. To generate artefacts for self-boot of example from the board use - - ``` - make flash - ``` - -5. To run application from the board using microSD card: - - * Copy the content of the created /bin folder into the root of microSD - card. Note that the card must be formatted as FAT32 with default cluster - size (but less than 32 Kbytes) - * Plug in the microSD card into the J11 connector. - * Push the RST button. If a red LED is lit beside RST button, push the CFG - button. - -6. If you have the MetaWare Debugger installed in your environment: - - * To run application from the console using it type `make run`. - * To stop the execution type `Ctrl+C` in the console several times. - -In both cases (step 5 and 6) you will see the application output in the serial -terminal. - -## Deploy to Arduino - -The following instructions will help you build and deploy this sample -to [Arduino](https://www.arduino.cc/) devices. - -![Animation on Arduino MKRZERO](images/animation_on_arduino_mkrzero.gif) - -The sample has been tested with the following devices: - -- [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers) -- [Arduino MKRZERO](https://store.arduino.cc/usa/arduino-mkrzero) - -The sample will use PWM to fade an LED on and off according to the model's -output. In the code, the `LED_BUILTIN` constant is used to specify the board's -built-in LED as the one being controlled. However, on some boards, this built-in -LED is not attached to a pin with PWM capabilities. In this case, the LED will -blink instead of fading. - -### Install the Arduino_TensorFlowLite library - -This example application is included as part of the official TensorFlow Lite -Arduino library. To install it, open the Arduino library manager in -`Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`. - -### Load and run the example - -Once the library has been added, go to `File -> Examples`. You should see an -example near the bottom of the list named `TensorFlowLite:hello_world`. Select -it and click `hello_world` to load the example. - -Use the Arduino IDE to build and upload the example. Once it is running, -you should see the built-in LED on your device flashing. - -The Arduino Desktop IDE includes a plotter that we can use to display the sine -wave graphically. To view it, go to `Tools -> Serial Plotter`. 
You will see one -datapoint being logged for each inference cycle, expressed as a number between 0 -and 255. - -## Deploy to ESP32 - -The following instructions will help you build and deploy this sample -to [ESP32](https://www.espressif.com/en/products/hardware/esp32/overview) -devices using the [ESP IDF](https://github.com/espressif/esp-idf). - -The sample has been tested on ESP-IDF version 4.0 with the following devices: -- [ESP32-DevKitC](http://esp-idf.readthedocs.io/en/latest/get-started/get-started-devkitc.html) -- [ESP-EYE](https://github.com/espressif/esp-who/blob/master/docs/en/get-started/ESP-EYE_Getting_Started_Guide.md) - -### Install the ESP IDF - -Follow the instructions of the -[ESP-IDF get started guide](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html) -to setup the toolchain and the ESP-IDF itself. - -The next steps assume that the -[IDF environment variables are set](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html#step-4-set-up-the-environment-variables) : - - * The `IDF_PATH` environment variable is set - * `idf.py` and Xtensa-esp32 tools (e.g. `xtensa-esp32-elf-gcc`) are in `$PATH` - -### Generate the examples -The example project can be generated with the following command: -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=esp generate_hello_world_esp_project -``` - -### Building the example - -Go the the example project directory -``` -cd tensorflow/lite/micro/tools/make/gen/esp_xtensa-esp32/prj/hello_world/esp-idf -``` - -Then build with `idf.py` -``` -idf.py build -``` - -### Load and run the example - -To flash (replace `/dev/ttyUSB0` with the device serial port): -``` -idf.py --port /dev/ttyUSB0 flash -``` - -Monitor the serial output: -``` -idf.py --port /dev/ttyUSB0 monitor -``` - -Use `Ctrl+]` to exit. - -The previous two commands can be combined: -``` -idf.py --port /dev/ttyUSB0 flash monitor -``` - -## Deploy to himax WE1 EVB - -The following instructions will help you build and deploy this example to -[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief) -board. To undstand more about using this board, please check -[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide). - -### Initial Setup - -To use the HIMAX WE1 EVB, please make sure following software are installed: - -#### MetaWare Development Toolkit - -See -[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit) -section for instructions on toolchain installation. - -#### Make Tool version - -A `'make'` tool is required for deploying Tensorflow Lite Micro -applications on HIMAX WE1 EVB, See -[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool) -section for proper environment. - -#### Serial Terminal Emulation Application - -There are 2 main purposes for HIMAX WE1 EVB Debug UART port - -- print application output -- burn application to flash by using xmodem send application binary - -You can use any terminal emulation program (like [PuTTY](https://www.putty.org/) or [minicom](https://linux.die.net/man/1/minicom)). 
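
For the first of those two purposes, everything the application prints reaches
the Debug UART through TFLM's `DebugLog()` hook, which the board support in
this series implements on top of the `hx_drv_tflm.h` driver. A minimal sketch
of the pattern, mirroring the `himax_we1_evb/debug_log.cc` change that appears
later in this series:

```
// Sketch: route TFLM debug output to the HIMAX WE1 EVB Debug UART.
#include "tensorflow/lite/micro/debug_log.h"

#include "hx_drv_tflm.h"

extern "C" void DebugLog(const char* s) {
  static bool is_initialized = false;
  if (!is_initialized) {
    hx_drv_uart_initial();  // bring the UART up on first use
    is_initialized = true;
  }
  hx_drv_uart_print("%s", s);  // forward the string to the serial terminal
}
```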
- - -### Generate Example Project - -The example project for HIMAX WE1 EVB platform can be generated with the following -command: - -Download related third party data - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads -``` - -Generate hello world project - -``` -make -f tensorflow/lite/micro/tools/make/Makefile generate_hello_world_make_project TARGET=himax_we1_evb TAGS=no_arc_mli -``` - -### Build and Burn Example - -Following the Steps to run hello world example at HIMAX WE1 EVB platform. - -1. Go to the generated example project directory. - - ``` - cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/hello_world/make - ``` - -2. Build the example using - - ``` - make app - ``` - -3. After example build finish, copy ELF file and map file to image generate tool directory. - image generate tool directory located at `'tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/'` - - ``` - cp hello_world.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ - ``` - -4. Go to flash image generate tool directory. - - ``` - cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/ - ``` - -5. run image generate tool, generate flash image file. - - * Before running image generate tool, by typing `sudo chmod +x image_gen` - and `sudo chmod +x sign_tool` to make sure it is executable. - - ``` - image_gen -e hello_world.elf -m himax_we1_evb.map -o out.img - ``` - - -6. Download flash image file to HIMAX WE1 EVB by UART: - - * more detail about download image through UART can be found at [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update) - -After these steps, press reset button on the HIMAX WE1 EVB, you will see application output in the serial -terminal. - -## Deploy to SparkFun Edge - -The following instructions will help you build and deploy this sample on the -[SparkFun Edge development board](https://sparkfun.com/products/15170). - -![Animation on SparkFun Edge](images/animation_on_sparkfun_edge.gif) - -If you're new to using this board, we recommend walking through the -[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) -codelab to get an understanding of the workflow. - -### Compile the binary - -The following command will download the required dependencies and then compile a -binary for the SparkFun Edge: - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge hello_world_bin -``` - -The binary will be created in the following location: - -``` -tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin -``` - -### Sign the binary - -The binary must be signed with cryptographic keys to be deployed to the device. -We'll now run some commands that will sign our binary so it can be flashed to -the SparkFun Edge. The scripts we are using come from the Ambiq SDK, which is -downloaded when the `Makefile` is run. 
- -Enter the following command to set up some dummy cryptographic keys we can use -for development: - -``` -cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info0.py \ -tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info.py -``` - -Next, run the following command to create a signed binary: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_image_blob.py \ ---bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/hello_world.bin \ ---load-address 0xC000 \ ---magic-num 0xCB \ --o main_nonsecure_ota \ ---version 0x0 -``` - -This will create the file `main_nonsecure_ota.bin`. We'll now run another -command to create a final version of the file that can be used to flash our -device with the bootloader script we will use in the next step: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \ ---load-address 0x20000 \ ---bin main_nonsecure_ota.bin \ --i 6 \ --o main_nonsecure_wire \ ---options 0x1 -``` - -You should now have a file called `main_nonsecure_wire.bin` in the directory -where you ran the commands. This is the file we'll be flashing to the device. - -### Flash the binary - -Next, attach the board to your computer via a USB-to-serial adapter. - -**Note:** If you're using the [SparkFun Serial Basic Breakout](https://www.sparkfun.com/products/15096), -you should [install the latest drivers](https://learn.sparkfun.com/tutorials/sparkfun-serial-basic-ch340c-hookup-guide#drivers-if-you-need-them) -before you continue. - -Once connected, assign the USB device name to an environment variable: - -``` -export DEVICENAME=put your device name here -``` - -Set another variable with the baud rate: - -``` -export BAUD_RATE=921600 -``` - -Now, hold the button marked `14` on the device. While still holding the button, -hit the button marked `RST`. Continue holding the button marked `14` while -running the following command: - -``` -python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/uart_wired_update.py \ --b ${BAUD_RATE} ${DEVICENAME} \ --r 1 \ --f main_nonsecure_wire.bin \ --i 6 -``` - -You should see a long stream of output as the binary is flashed to the device. -Once you see the following lines, flashing is complete: - -``` -Sending Reset Command. -Done. -``` - -If you don't see these lines, flashing may have failed. Try running through the -steps in [Flash the binary](#flash-the-binary) again (you can skip over setting -the environment variables). If you continue to run into problems, follow the -[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow) -codelab, which includes more comprehensive instructions for the flashing -process. - -The binary should now be deployed to the device. Hit the button marked `RST` to -reboot the board. You should see the device's four LEDs flashing in sequence. - -Debug information is logged by the board while the program is running. To view -it, establish a serial connection to the board using a baud rate of `115200`. -On OSX and Linux, the following command should work: - -``` -screen ${DEVICENAME} 115200 -``` - -You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`, -immediately followed by `Esc`. 
You can then use the arrow keys to explore the -output, which will contain the results of running inference on various `x` -values: - -``` -x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1 -``` - -To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately -followed by the `K` key, then hit the `Y` key. - - -## Deploy to STM32F746 - -The following instructions will help you build and deploy the sample to the -[STM32F7 discovery kit](https://os.mbed.com/platforms/ST-Discovery-F746NG/) -using [ARM Mbed](https://github.com/ARMmbed/mbed-cli). - -![Animation on STM32F746](images/animation_on_STM32F746.gif) - -Before we begin, you'll need the following: - -- STM32F7 discovery kit board -- Mini-USB cable -- ARM Mbed CLI ([installation instructions](https://os.mbed.com/docs/mbed-os/v5.12/tools/installation-and-setup.html)) -- Python 2.7 and pip - -Since Mbed requires a special folder structure for projects, we'll first run a -command to generate a subfolder containing the required source files in this -structure: - -``` -make -f tensorflow/lite/micro/tools/make/Makefile TARGET=mbed TAGS="CMSIS disco_f746ng" generate_hello_world_mbed_project -``` - -This will result in the creation of a new folder: - -``` -tensorflow/lite/micro/tools/make/gen/mbed_cortex-m4/prj/hello_world/mbed -``` - -This folder contains all of the example's dependencies structured in the correct -way for Mbed to be able to build it. - -Change into the directory and run the following commands, making sure you are -using Python 2.7.15. - -First, tell Mbed that the current directory is the root of an Mbed project: - -``` -mbed config root . -``` - -Next, tell Mbed to download the dependencies and prepare to build: - -``` -mbed deploy -``` - -By default, Mbed will build the project using C++98. However, TensorFlow Lite -requires C++11. Run the following Python snippet to modify the Mbed -configuration files so that it uses C++11: - -``` -python -c 'import fileinput, glob; -for filename in glob.glob("mbed-os/tools/profiles/*.json"): - for line in fileinput.input(filename, inplace=True): - print line.replace("\"-std=gnu++98\"","\"-std=c++11\", \"-fpermissive\"")' - -``` - -Finally, run the following command to compile: - -``` -mbed compile -m DISCO_F746NG -t GCC_ARM -``` - -This should result in a binary at the following path: - -``` -./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin -``` - -To deploy, plug in your STM board and copy the file to it. On MacOS, you can do -this with the following command: - -``` -cp ./BUILD/DISCO_F746NG/GCC_ARM/mbed.bin /Volumes/DIS_F746NG/ -``` - -Copying the file will initiate the flashing process. Once this is complete, you -should see an animation on the device's screen. - - -``` -screen /dev/tty.usbmodem14403 9600 -``` - -In addition to this animation, debug information is logged by the board while -the program is running. To view it, establish a serial connection to the board -using a baud rate of `9600`. On OSX and Linux, the following command should -work, replacing `/dev/tty.devicename` with the name of your device as it appears -in `/dev`: - -``` -screen /dev/tty.devicename 9600 -``` - -You will see a lot of output flying past! To stop the scrolling, hit `Ctrl+A`, -immediately followed by `Esc`. 
You can then use the arrow keys to explore the -output, which will contain the results of running inference on various `x` -values: - -``` -x_value: 1.1843798*2^2, y_value: -1.9542645*2^-1 -``` - -To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately -followed by the `K` key, then hit the `Y` key. - -### Run the tests on a development machine - -To compile and test this example on a desktop Linux or macOS machine, first -clone the TensorFlow repository from GitHub to a convenient place: - -```bash -git clone --depth 1 https://github.com/tensorflow/tensorflow.git -``` - -Next, `cd` into the source directory from a terminal, and then run the following -command: - -```bash -make -f tensorflow/lite/micro/tools/make/Makefile test_hello_world_test -``` - -This will take a few minutes, and downloads frameworks the code uses. Once the -process has finished, you should see a series of files get compiled, followed by -some logging output from a test, which should conclude with -`~~~ALL TESTS PASSED~~~`. - -If you see this, it means that a small program has been built and run that loads -the trained TensorFlow model, runs some example inputs through it, and got the -expected outputs. - -To understand how TensorFlow Lite does this, you can look at the source in -[hello_world_test.cc](hello_world_test.cc). -It's a fairly small amount of code that creates an interpreter, gets a handle to -a model that's been compiled into the program, and then invokes the interpreter -with the model and sample inputs. - -### Train your own model - -So far you have used an existing trained model to run inference on -microcontrollers. If you wish to train your own model, follow the instructions -given in the [train/](train/) directory. - From d6676205f20e6a9476f6e0eca8f5b00367f9c623 Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Fri, 5 Jun 2020 10:16:04 +0800 Subject: [PATCH 08/11] TFLM: remove temp file in person detection example --- .../himax_we1_evb/image_provider.cc~ | 44 ------------------- 1 file changed, 44 deletions(-) delete mode 100644 tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ deleted file mode 100644 index d5b4d136642..00000000000 --- a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc~ +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h" - -#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h" - -#include "hx_drv_tflm.h" - -hx_drv_sensor_image_config_t g_pimg_config; - - -TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width, - int image_height, int channels, int8_t* image_data) { - static bool is_initialized = false; - - if (!is_initialized) { - if(hx_drv_sensor_initial(&g_pimg_config)!= HX_DRV_LIB_PASS) - { - return kTfLiteError; - } - is_initialized = true; - } - - hx_drv_sensor_capture(&g_pimg_config); - - hx_drv_image_rescale((uint8_t*)g_pimg_config.raw_address, g_pimg_config.img_width, g_pimg_config.img_height, - image_data, image_data, image_height); - - - return kTfLiteOk; -} From b9db1ee4174f26a94ac332ff8f60c9e0152403a8 Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Mon, 8 Jun 2020 16:53:21 +0800 Subject: [PATCH 09/11] sync third_party_downloads to avoid conflict --- .../micro/tools/make/third_party_downloads.inc | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tensorflow/lite/micro/tools/make/third_party_downloads.inc b/tensorflow/lite/micro/tools/make/third_party_downloads.inc index 75a51e0df10..85016dc49b6 100644 --- a/tensorflow/lite/micro/tools/make/third_party_downloads.inc +++ b/tensorflow/lite/micro/tools/make/third_party_downloads.inc @@ -62,14 +62,14 @@ RUY_MD5="2d54f058f8f7120dfc1ecee79dbf259e" CIFAR10_DATASET_URL="https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz" CIFAR10_DATASET_MD5="c32a1d4ab5d03f1284b67883e8d87530" -IMAGE_RECOGNITION_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/models/tflite/cifar_image_recognition_model_2020_4_14.zip" -IMAGE_RECOGNITION_MODEL_MD5 := "2b886156e7ef4d6e53d0f1a4bc800e56" +IMAGE_RECOGNITION_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/models/tflite/cifar_image_recognition_model_2020_05_27.zip" +IMAGE_RECOGNITION_MODEL_MD5 := "1f4607b05ac45b8a6146fb883dbc2d7b" -PERSON_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_grayscale_2019_11_21.zip" -PERSON_MODEL_MD5 := "fe2934bd0788f1dcc7af3f0a954542ab" +PERSON_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_grayscale_2020_05_27.zip" +PERSON_MODEL_MD5 := "55b85f76e2995153e660391d4a209ef1" -PERSON_MODEL_INT8_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_01_13.zip" -PERSON_MODEL_INT8_MD5 := "8a7d2c70325f53136faea6dde517b8cc" +PERSON_MODEL_INT8_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_05_27.zip" +PERSON_MODEL_INT8_MD5 := "a0ede2d058aa2a1d413893455dd55352" EMBARC_MLI_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/archive/58284867ca52d1f43b25045e8601999d7359d986.zip" EMBARC_MLI_MD5 := "2bf4982a327fdaa9d475803ce014d1ef" @@ -77,9 +77,15 @@ EMBARC_MLI_MD5 := "2bf4982a327fdaa9d475803ce014d1ef" EMBARC_MLI_PRE_COMPILED_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/releases/download/Release_1.1_RC2/embARC_MLI_package.zip" EMBARC_MLI_PRE_COMPILED_MD5 := "a95ff9e0370434484f14e7e4114327f6" +ZEPHYR_URL := "https://github.com/antmicro/zephyr/archive/55e36b9.zip" +ZEPHYR_MD5 := "755622eb4812fde918a6382b65d50c3b" 
+ XTENSA_HIFI4_URL :="https://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_04_07.zip" XTENSA_HIFI4_MD5 :="f234764928f9a42901df33a27e118c8b" +ETHOSU_URL := "https://git.mlplatform.org/ml/ethos-u/ethos-u-core-driver.git/snapshot/ethos-u-core-driver-bcb5aaa99756f1b5c1295b079ebdd60996bc75a5.tar.gz" +ETHOSU_MD5 := "d2073c8d88fc167fd5c46b5dcda58ea1" + HIMAX_WE1_SDK_URL ="https://www.himax.com.tw/we-i/himax_we1_sdk_v02.zip" HIMAX_WE1_SDK_MD5 ="9a4b2f29b16052764e437b64bdcba816" From 13d3b343498d499f87230f2e596b738be5cf1109 Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Tue, 9 Jun 2020 09:28:23 +0800 Subject: [PATCH 10/11] modify example main API usage --- .../himax_we1_evb/main_functions.cc | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc index 552b52c9c51..f0c7a405974 100644 --- a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc +++ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc @@ -72,21 +72,20 @@ void setup() { // incur some penalty in code space for op implementations that are not // needed by this graph. // - // tflite::ops::micro::AllOpsResolver resolver; + // tflite::AllOpsResolver resolver; // NOLINTNEXTLINE(runtime-global-variables) - static tflite::MicroOpResolver<12> micro_op_resolver; - micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_DEPTHWISE_CONV_2D, - tflite::ops::micro::Register_DEPTHWISE_CONV_2D(), - 1, 3); + static tflite::MicroMutableOpResolver<5> micro_op_resolver; + micro_op_resolver.AddBuiltin( + tflite::BuiltinOperator_DEPTHWISE_CONV_2D, + tflite::ops::micro::Register_DEPTHWISE_CONV_2D()); micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_CONV_2D, - tflite::ops::micro::Register_CONV_2D(), 1, 3); + tflite::ops::micro::Register_CONV_2D()); micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_AVERAGE_POOL_2D, - tflite::ops::micro::Register_AVERAGE_POOL_2D(), - 1, 2); + tflite::ops::micro::Register_AVERAGE_POOL_2D()); micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_RESHAPE, tflite::ops::micro::Register_RESHAPE()); micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX, - tflite::ops::micro::Register_SOFTMAX(), 1, 3); + tflite::ops::micro::Register_SOFTMAX()); // Build an interpreter to run the model with. 
// NOLINTNEXTLINE(runtime-global-variables) From 3b95c2c54df8a7bc3641871197262841b803f8cd Mon Sep 17 00:00:00 2001 From: "902449@58880@bigcat_chen@ASIC" Date: Thu, 11 Jun 2020 13:54:16 +0800 Subject: [PATCH 11/11] Correcting for PR comments --- .../himax_we1_evb/detection_responder.cc | 1 - .../himax_we1_evb/image_provider.cc | 10 +- .../himax_we1_evb/main_functions.cc | 126 ------------------ .../lite/micro/himax_we1_evb/debug_log.cc | 5 +- 4 files changed, 6 insertions(+), 136 deletions(-) delete mode 100644 tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc index a353dc8a9b8..ae5de962fd3 100644 --- a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc +++ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/detection_responder.cc @@ -22,7 +22,6 @@ limitations under the License. // should implement their own versions of this function. void RespondToDetection(tflite::ErrorReporter* error_reporter, int8_t person_score, int8_t no_person_score) { - if (person_score > no_person_score) { hx_drv_led_on(HX_DRV_LED_GREEN); } else { diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc index 727d93c61d1..4a3ab5775be 100644 --- a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc +++ b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/image_provider.cc @@ -21,14 +21,12 @@ limitations under the License. hx_drv_sensor_image_config_t g_pimg_config; - TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width, int image_height, int channels, int8_t* image_data) { static bool is_initialized = false; if (!is_initialized) { - if(hx_drv_sensor_initial(&g_pimg_config)!= HX_DRV_LIB_PASS) - { + if (hx_drv_sensor_initial(&g_pimg_config) != HX_DRV_LIB_PASS) { return kTfLiteError; } is_initialized = true; @@ -36,9 +34,9 @@ TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width, hx_drv_sensor_capture(&g_pimg_config); - hx_drv_image_rescale((uint8_t*)g_pimg_config.raw_address, g_pimg_config.img_width, g_pimg_config.img_height, - image_data, image_width, image_height); - + hx_drv_image_rescale((uint8_t*)g_pimg_config.raw_address, + g_pimg_config.img_width, g_pimg_config.img_height, + image_data, image_width, image_height); return kTfLiteOk; } diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc b/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc deleted file mode 100644 index f0c7a405974..00000000000 --- a/tensorflow/lite/micro/examples/person_detection_experimental/himax_we1_evb/main_functions.cc +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h" - -#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h" -#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h" -#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h" -#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h" -#include "tensorflow/lite/micro/kernels/micro_ops.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" -#include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" - -// Globals, used for compatibility with Arduino-style sketches. -namespace { -tflite::ErrorReporter* error_reporter = nullptr; -const tflite::Model* model = nullptr; -tflite::MicroInterpreter* interpreter = nullptr; -TfLiteTensor* input = nullptr; - -// In order to use optimized tensorflow lite kernels, a signed int8 quantized -// model is preferred over the legacy unsigned model format. This means that -// throughout this project, input images must be converted from unisgned to -// signed format. The easiest and quickest way to convert from unsigned to -// signed 8-bit integers is to subtract 128 from the unsigned value to get a -// signed value. - -// An area of memory to use for input, output, and intermediate arrays. -constexpr int kTensorArenaSize = 125 * 1024; -#pragma Bss(".tensor_arena") -static uint8_t tensor_arena[kTensorArenaSize]; -#pragma Bss() -} // namespace - -// The name of this function is important for Arduino compatibility. -void setup() { - // Set up logging. Google style is to avoid globals or statics because of - // lifetime uncertainty, but since this has a trivial destructor it's okay. - // NOLINTNEXTLINE(runtime-global-variables) - static tflite::MicroErrorReporter micro_error_reporter; - error_reporter = µ_error_reporter; - - // Map the model into a usable data structure. This doesn't involve any - // copying or parsing, it's a very lightweight operation. - model = tflite::GetModel(g_person_detect_model_data); - if (model->version() != TFLITE_SCHEMA_VERSION) { - TF_LITE_REPORT_ERROR(error_reporter, - "Model provided is schema version %d not equal " - "to supported version %d.", - model->version(), TFLITE_SCHEMA_VERSION); - return; - } - - // Pull in only the operation implementations we need. - // This relies on a complete list of all the ops needed by this graph. - // An easier approach is to just use the AllOpsResolver, but this will - // incur some penalty in code space for op implementations that are not - // needed by this graph. 
- // - // tflite::AllOpsResolver resolver; - // NOLINTNEXTLINE(runtime-global-variables) - static tflite::MicroMutableOpResolver<5> micro_op_resolver; - micro_op_resolver.AddBuiltin( - tflite::BuiltinOperator_DEPTHWISE_CONV_2D, - tflite::ops::micro::Register_DEPTHWISE_CONV_2D()); - micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_CONV_2D, - tflite::ops::micro::Register_CONV_2D()); - micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_AVERAGE_POOL_2D, - tflite::ops::micro::Register_AVERAGE_POOL_2D()); - micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_RESHAPE, - tflite::ops::micro::Register_RESHAPE()); - micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX, - tflite::ops::micro::Register_SOFTMAX()); - - // Build an interpreter to run the model with. - // NOLINTNEXTLINE(runtime-global-variables) - static tflite::MicroInterpreter static_interpreter( - model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter); - interpreter = &static_interpreter; - - // Allocate memory from the tensor_arena for the model's tensors. - TfLiteStatus allocate_status = interpreter->AllocateTensors(); - if (allocate_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed"); - return; - } - - // Get information about the memory area to use for the model's input. - input = interpreter->input(0); -} - -// The name of this function is important for Arduino compatibility. -void loop() { - // Get image from provider. - if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels, - input->data.int8)) { - TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed."); - } - - // Run the model on this input and make sure it succeeds. - if (kTfLiteOk != interpreter->Invoke()) { - TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed."); - } - - TfLiteTensor* output = interpreter->output(0); - - // Process the inference results. - int8_t person_score = output->data.uint8[kPersonIndex]; - int8_t no_person_score = output->data.uint8[kNotAPersonIndex]; - RespondToDetection(error_reporter, person_score, no_person_score); -} diff --git a/tensorflow/lite/micro/himax_we1_evb/debug_log.cc b/tensorflow/lite/micro/himax_we1_evb/debug_log.cc index 32af2625630..36ac3f3fa03 100644 --- a/tensorflow/lite/micro/himax_we1_evb/debug_log.cc +++ b/tensorflow/lite/micro/himax_we1_evb/debug_log.cc @@ -20,12 +20,11 @@ limitations under the License. #include "tensorflow/lite/micro/debug_log.h" #include "hx_drv_tflm.h" - extern "C" void DebugLog(const char* s) { static bool is_initialized = false; if (!is_initialized) { - hx_drv_uart_initial(); - is_initialized = true; + hx_drv_uart_initial(); + is_initialized = true; } hx_drv_uart_print("%s", s);