Move person_detection_experimental to person_detection and scrap uint8 version.

Addresses https://github.com/tensorflow/tensorflow/issues/44912

PiperOrigin-RevId: 345335944
Change-Id: If46bdf590faf66d4158596030b2f41a50c9eb388
commit b0092ab899 (parent bf2209a095)
@@ -62,21 +62,3 @@ cc_binary(
         "//tensorflow/lite/schema:schema_fbs",
     ],
 )
-
-cc_binary(
-    name = "person_detection_experimental_benchmark",
-    srcs = ["person_detection_experimental_benchmark.cc"],
-    deps = [
-        ":micro_benchmark",
-        "//tensorflow/lite:version",
-        "//tensorflow/lite/c:common",
-        "//tensorflow/lite/micro:micro_error_reporter",
-        "//tensorflow/lite/micro:micro_framework",
-        "//tensorflow/lite/micro:micro_utils",
-        "//tensorflow/lite/micro:op_resolvers",
-        "//tensorflow/lite/micro/examples/person_detection_experimental:model_settings",
-        "//tensorflow/lite/micro/examples/person_detection_experimental:person_detect_model_data",
-        "//tensorflow/lite/micro/examples/person_detection_experimental:simple_images_test_data",
-        "//tensorflow/lite/schema:schema_fbs",
-    ],
-)
@@ -7,21 +7,12 @@ tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h
 
 PERSON_DETECTION_BENCHMARK_SRCS := \
 tensorflow/lite/micro/benchmarks/person_detection_benchmark.cc \
-$(MAKEFILE_DIR)/downloads/person_model_grayscale/no_person_image_data.cc \
-$(MAKEFILE_DIR)/downloads/person_model_grayscale/person_detect_model_data.cc \
-$(MAKEFILE_DIR)/downloads/person_model_grayscale/person_image_data.cc
-
-PERSON_DETECTION_BENCHMARK_HDRS := \
-tensorflow/lite/micro/examples/person_detection/person_detect_model_data.h
-
-PERSON_DETECTION_EXPERIMENTAL_BENCHMARK_SRCS := \
-tensorflow/lite/micro/benchmarks/person_detection_experimental_benchmark.cc \
 $(MAKEFILE_DIR)/downloads/person_model_int8/no_person_image_data.cc \
 $(MAKEFILE_DIR)/downloads/person_model_int8/person_detect_model_data.cc \
 $(MAKEFILE_DIR)/downloads/person_model_int8/person_image_data.cc
 
-PERSON_DETECTION_EXPERIMENTAL_BENCHMARK_HDRS := \
-tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h
+PERSON_DETECTION_BENCHMARK_HDRS := \
+tensorflow/lite/micro/examples/person_detection/person_detect_model_data.h
 
 # Builds a standalone binary.
 $(eval $(call microlite_test,keyword_benchmark,\
@@ -30,6 +21,3 @@ $(KEYWORD_BENCHMARK_SRCS),$(KEYWORD_BENCHMARK_HDRS)))
 $(eval $(call microlite_test,person_detection_benchmark,\
 $(PERSON_DETECTION_BENCHMARK_SRCS),$(PERSON_DETECTION_BENCHMARK_HDRS)))
 
-$(eval $(call microlite_test,person_detection_experimental_benchmark,\
-$(PERSON_DETECTION_EXPERIMENTAL_BENCHMARK_SRCS),$(PERSON_DETECTION_EXPERIMENTAL_BENCHMARK_HDRS)))
-
@@ -35,13 +35,11 @@ limitations under the License.
 namespace {
 
 using PersonDetectionOpResolver = tflite::AllOpsResolver;
-using PersonDetectionBenchmarkRunner = MicroBenchmarkRunner<uint8_t>;
+using PersonDetectionBenchmarkRunner = MicroBenchmarkRunner<int8_t>;
 
-constexpr int kRandomSeed = 42;
-
 // Create an area of memory to use for input, output, and intermediate arrays.
 // Align arena to 16 bytes to avoid alignment warnings on certain platforms.
-constexpr int kTensorArenaSize = 95 * 1024;
+constexpr int kTensorArenaSize = 135 * 1024;
 alignas(16) uint8_t tensor_arena[kTensorArenaSize];
 
 uint8_t op_resolver_buffer[sizeof(PersonDetectionOpResolver)];
@@ -52,9 +50,9 @@ PersonDetectionBenchmarkRunner* benchmark_runner = nullptr;
 // issues on Sparkfun. Use new since static variables within a method
 // are automatically surrounded by locking, which breaks bluepill and stm32f4.
 void CreateBenchmarkRunner() {
-  // We allocate PersonDetectionOpResolver from a global buffer because the
-  // object's lifetime must exceed that of the PersonDetectionBenchmarkRunner
-  // object.
+  // We allocate PersonDetectionOpResolver from a global buffer
+  // because the object's lifetime must exceed that of the
+  // PersonDetectionBenchmarkRunner object.
   benchmark_runner = new (benchmark_runner_buffer)
       PersonDetectionBenchmarkRunner(g_person_detect_model_data,
                                      new (op_resolver_buffer)
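The comment in this hunk explains why the runner is built with placement new into a static byte buffer instead of an ordinary static object. A minimal, self-contained sketch of that pattern, with an illustrative stand-in type rather than the real TFLM classes:

```
#include <cstdint>
#include <new>

// Illustrative stand-in for the benchmark runner; not the TFLM API.
struct FakeRunner {
  explicit FakeRunner(int input_size) : input_size_(input_size) {}
  int input_size_;
};

// Raw storage with static lifetime: no constructor runs at startup (avoiding
// global init order problems), and because no function-local static object is
// involved, the compiler emits no guard-variable locking -- the bluepill and
// stm32f4 issue mentioned in the comment above.
alignas(FakeRunner) static uint8_t runner_buffer[sizeof(FakeRunner)];
static FakeRunner* runner = nullptr;

void CreateRunner() { runner = new (runner_buffer) FakeRunner(96 * 96); }
```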
@@ -62,24 +60,20 @@ void CreateBenchmarkRunner() {
                                      tensor_arena, kTensorArenaSize);
 }
 
-void PersonDetectionTenIterationsWithRandomInput() {
-  benchmark_runner->SetRandomInput(kRandomSeed);
-  for (int i = 0; i < 10; i++) {
-    benchmark_runner->RunSingleIteration();
-  }
+void InitializeBenchmarkRunner() {
+  CreateBenchmarkRunner();
+  benchmark_runner->SetInput(reinterpret_cast<const int8_t*>(g_person_data));
 }
 
 void PersonDetectionTenIerationsWithPerson() {
-  // TODO(b/152644476): Add a way to run more than a single deterministic input.
-  benchmark_runner->SetInput(g_person_data);
+  benchmark_runner->SetInput(reinterpret_cast<const int8_t*>(g_person_data));
   for (int i = 0; i < 10; i++) {
     benchmark_runner->RunSingleIteration();
   }
 }
 
 void PersonDetectionTenIerationsWithoutPerson() {
-  // TODO(b/152644476): Add a way to run more than a single deterministic input.
-  benchmark_runner->SetInput(g_no_person_data);
+  benchmark_runner->SetInput(reinterpret_cast<const int8_t*>(g_no_person_data));
   for (int i = 0; i < 10; i++) {
     benchmark_runner->RunSingleIteration();
   }

@@ -89,8 +83,8 @@ void PersonDetectionTenIerationsWithoutPerson() {
 
 TF_LITE_MICRO_BENCHMARKS_BEGIN
 
-TF_LITE_MICRO_BENCHMARK(CreateBenchmarkRunner());
-TF_LITE_MICRO_BENCHMARK(PersonDetectionTenIterationsWithRandomInput());
+TF_LITE_MICRO_BENCHMARK(InitializeBenchmarkRunner());
+TF_LITE_MICRO_BENCHMARK(benchmark_runner->RunSingleIteration());
 TF_LITE_MICRO_BENCHMARK(PersonDetectionTenIerationsWithPerson());
 TF_LITE_MICRO_BENCHMARK(PersonDetectionTenIerationsWithoutPerson());
 
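The int8 benchmark feeds `g_person_data` through a `reinterpret_cast<const int8_t*>`. A hedged sketch of why the cast alone suffices, assuming the generated image arrays are exported as `uint8_t` byte dumps (as `person_image_data.h` declares) whose bytes already hold the signed int8 test data:

```
#include <cstdint>

// The cast only reinterprets the pointer type; no byte values change. This is
// valid here only because the downloaded int8 test images are assumed to be
// stored already offset into the signed range by the data-preparation step.
extern const uint8_t g_person_data[];

inline const int8_t* AsInt8(const uint8_t* bytes) {
  return reinterpret_cast<const int8_t*>(bytes);
}
```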
@@ -1,92 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/all_ops_resolver.h"
-#include "tensorflow/lite/micro/benchmarks/micro_benchmark.h"
-#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
-#include "tensorflow/lite/micro/examples/person_detection_experimental/no_person_image_data.h"
-#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
-#include "tensorflow/lite/micro/examples/person_detection_experimental/person_image_data.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-#include "tensorflow/lite/micro/micro_interpreter.h"
-#include "tensorflow/lite/micro/micro_utils.h"
-#include "tensorflow/lite/schema/schema_generated.h"
-#include "tensorflow/lite/version.h"
-
-/*
- * Person Detection benchmark. Evaluates runtime performance of the visual
- * wakewords person detection model. This is the same model found in
- * exmaples/person_detection.
- */
-
-namespace {
-
-using PersonDetectionExperimentalOpResolver = tflite::AllOpsResolver;
-using PersonDetectionExperimentalBenchmarkRunner = MicroBenchmarkRunner<int8_t>;
-
-// Create an area of memory to use for input, output, and intermediate arrays.
-// Align arena to 16 bytes to avoid alignment warnings on certain platforms.
-constexpr int kTensorArenaSize = 135 * 1024;
-alignas(16) uint8_t tensor_arena[kTensorArenaSize];
-
-uint8_t op_resolver_buffer[sizeof(PersonDetectionExperimentalOpResolver)];
-uint8_t
-    benchmark_runner_buffer[sizeof(PersonDetectionExperimentalBenchmarkRunner)];
-PersonDetectionExperimentalBenchmarkRunner* benchmark_runner = nullptr;
-
-// Initialize benchmark runner instance explicitly to avoid global init order
-// issues on Sparkfun. Use new since static variables within a method
-// are automatically surrounded by locking, which breaks bluepill and stm32f4.
-void CreateBenchmarkRunner() {
-  // We allocate PersonDetectionExperimentalOpResolver from a global buffer
-  // because the object's lifetime must exceed that of the
-  // PersonDetectionBenchmarkRunner object.
-  benchmark_runner =
-      new (benchmark_runner_buffer) PersonDetectionExperimentalBenchmarkRunner(
-          g_person_detect_model_data,
-          new (op_resolver_buffer) PersonDetectionExperimentalOpResolver(),
-          tensor_arena, kTensorArenaSize);
-}
-
-void InitializeBenchmarkRunner() {
-  CreateBenchmarkRunner();
-  benchmark_runner->SetInput(reinterpret_cast<const int8_t*>(g_person_data));
-}
-
-void PersonDetectionTenIerationsWithPerson() {
-  benchmark_runner->SetInput(reinterpret_cast<const int8_t*>(g_person_data));
-  for (int i = 0; i < 10; i++) {
-    benchmark_runner->RunSingleIteration();
-  }
-}
-
-void PersonDetectionTenIerationsWithoutPerson() {
-  benchmark_runner->SetInput(reinterpret_cast<const int8_t*>(g_no_person_data));
-  for (int i = 0; i < 10; i++) {
-    benchmark_runner->RunSingleIteration();
-  }
-}
-
-}  // namespace
-
-TF_LITE_MICRO_BENCHMARKS_BEGIN
-
-TF_LITE_MICRO_BENCHMARK(InitializeBenchmarkRunner());
-TF_LITE_MICRO_BENCHMARK(benchmark_runner->RunSingleIteration());
-TF_LITE_MICRO_BENCHMARK(PersonDetectionTenIerationsWithPerson());
-TF_LITE_MICRO_BENCHMARK(PersonDetectionTenIerationsWithoutPerson());
-
-TF_LITE_MICRO_BENCHMARKS_END
@@ -135,9 +135,3 @@ cc_binary(
         "//tensorflow/lite/schema:schema_fbs",
     ],
 )
-
-sh_test(
-    name = "person_detection_binary_test",
-    srcs = ["person_detection_binary_test.sh"],
-    data = [":person_detection"],
-)
@@ -1,6 +1,8 @@
+$(eval $(call add_third_party_download,$(PERSON_MODEL_INT8_URL),$(PERSON_MODEL_INT8_MD5),person_model_int8,))
+
 person_detection_MODEL_SRCS := \
 tensorflow/lite/micro/examples/person_detection/model_settings.cc \
-$(MAKEFILE_DIR)/downloads/person_model_grayscale/person_detect_model_data.cc
+$(MAKEFILE_DIR)/downloads/person_model_int8/person_detect_model_data.cc
 
 person_detection_MODEL_HDRS := \
 tensorflow/lite/micro/examples/person_detection/model_settings.h \

@@ -8,8 +10,8 @@ tensorflow/lite/micro/examples/person_detection/person_detect_model_data.h
 
 person_detection_TEST_SRCS := \
 tensorflow/lite/micro/examples/person_detection/person_detection_test.cc \
-$(MAKEFILE_DIR)/downloads/person_model_grayscale/no_person_image_data.cc \
-$(MAKEFILE_DIR)/downloads/person_model_grayscale/person_image_data.cc \
+$(MAKEFILE_DIR)/downloads/person_model_int8/no_person_image_data.cc \
+$(MAKEFILE_DIR)/downloads/person_model_int8/person_image_data.cc \
 $(person_detection_MODEL_SRCS)
 
 person_detection_TEST_HDRS := \
@@ -50,7 +52,7 @@ $(person_detection_MODEL_HDRS)
 include $(wildcard tensorflow/lite/micro/examples/person_detection/*/Makefile.inc)
 
 # Tests loading and running a vision model.
-$(eval $(call microlite_test,person_detection_test,\
+$(eval $(call microlite_test,person_detection_test_int8,\
 $(person_detection_TEST_SRCS),$(person_detection_TEST_HDRS)))
 
 # Three conflicting issues here:

@@ -68,14 +70,14 @@ $(person_detection_TEST_SRCS),$(person_detection_TEST_HDRS)))
 # basically equivalent).
 ifneq ($(TARGET),sparkfun_edge)
 # Tests the image provider module.
-$(eval $(call microlite_test,image_provider_test,\
+$(eval $(call microlite_test,image_provider_test_int8,\
 $(IMAGE_PROVIDER_TEST_SRCS),$(IMAGE_PROVIDER_TEST_HDRS)))
 endif
 
 # Tests the detection responder module.
-$(eval $(call microlite_test,detection_responder_test,\
+$(eval $(call microlite_test,detection_responder_test_int8,\
 $(DETECTION_RESPONDER_TEST_SRCS),$(DETECTION_RESPONDER_TEST_HDRS)))
 
 # Builds a standalone object recognition binary.
-$(eval $(call microlite_test,person_detection,\
+$(eval $(call microlite_test,person_detection_int8,\
 $(person_detection_SRCS),$(person_detection_HDRS)))
@@ -1,10 +1,9 @@
-<!-- mdformat off(b/169948621#comment2) -->
-
 # Person detection example
 
 This example shows how you can use Tensorflow Lite to run a 250 kilobyte neural
 network to recognize people in images captured by a camera. It is designed to
 run on systems with small amounts of memory such as microcontrollers and DSPs.
+This uses the experimental int8 quantized version of the person detection model.
 
 ## Table of contents
 
||||||
@ -12,6 +11,7 @@ run on systems with small amounts of memory such as microcontrollers and DSPs.
|
|||||||
- [Running on ARC EM SDP](#running-on-arc-em-sdp)
|
- [Running on ARC EM SDP](#running-on-arc-em-sdp)
|
||||||
- [Running on Arduino](#running-on-arduino)
|
- [Running on Arduino](#running-on-arduino)
|
||||||
- [Running on ESP32](#running-on-esp32)
|
- [Running on ESP32](#running-on-esp32)
|
||||||
|
- [Running on HIMAX WE1 EVB](#running-on-himax-we1-evb)
|
||||||
- [Running on SparkFun Edge](#running-on-sparkfun-edge)
|
- [Running on SparkFun Edge](#running-on-sparkfun-edge)
|
||||||
- [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
|
- [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
|
||||||
- [Debugging image capture](#debugging-image-capture)
|
- [Debugging image capture](#debugging-image-capture)
|
||||||
@@ -25,10 +25,8 @@ board. General information and instructions on using the board with TensorFlow
 Lite Micro can be found in the common
 [ARC targets description](/tensorflow/lite/micro/tools/make/targets/arc/README.md).
 
-This example is quantized with symmetric uint8 scheme. As noted in
-[kernels/arc_mli/README.md](/tensorflow/lite/micro/kernels/arc_mli/README.md),
-embARC MLI supports optimized kernels for int8 quantization only. Therefore,
-this example will only use TFLM reference kernels.
+This example uses asymmetric int8 quantization and can therefore leverage
+optimized int8 kernels from the embARC MLI library
 
 The ARC EM SDP board contains a rich set of extension interfaces. You can choose
 any compatible camera and modify
@@ -53,9 +51,14 @@ The example project for ARC EM SDP platform can be generated with the following
 command:
 
 ```
-make -f tensorflow/lite/micro/tools/make/Makefile TARGET=arc_emsdp TAGS=no_arc_mli generate_person_detection_make_project
+make -f tensorflow/lite/micro/tools/make/Makefile \
+TARGET=arc_emsdp TAGS=reduce_codesize \
+generate_person_detection_int8_make_project
 ```
 
+Note that `TAGS=reduce_codesize` applies example specific changes of code to
+reduce total size of application. It can be omitted.
+
 ### Build and Run Example
 
 For more detailed information on building and running examples see the
@@ -73,7 +76,7 @@ get it started.
 2. Go to the generated example project director
 
 ```
-cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/person_detection/make
+cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/person_detection_int8/make
 ```
 
 3. Build the example using
@@ -141,7 +144,7 @@ Connect the Arducam pins as follows:
 ### Install the Arduino_TensorFlowLite library
 
 Download the current nightly build of the library:
-[person_detection.zip](https://storage.googleapis.com/tensorflow-nightly/github/tensorflow/tensorflow/lite/micro/tools/make/gen/arduino_x86_64/prj/person_detection/tensorflow_lite.zip)
+[person_detection.zip](https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_01_13.zip)
 
 This example application is included as part of the official TensorFlow Lite
 Arduino library. To install it, open the Arduino library manager in
@@ -344,6 +347,112 @@ The previous two commands can be combined:
 idf.py --port /dev/ttyUSB0 flash monitor
 ```
 
+## Running on HIMAX WE1 EVB
+
+The following instructions will help you build and deploy this example to
+[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
+board. To understand more about using this board, please check
+[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide).
+
+### Initial Setup
+
+To use the HIMAX WE1 EVB, please make sure following software are installed:
+
+#### MetaWare Development Toolkit
+
+See
+[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit)
+section for instructions on toolchain installation.
+
+#### Make Tool version
+
+A `'make'` tool is required for deploying Tensorflow Lite Micro applications on
+HIMAX WE1 EVB, See
+[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool)
+section for proper environment.
+
+#### Serial Terminal Emulation Application
+
+There are 2 main purposes for HIMAX WE1 EVB Debug UART port
+
+- print application output
+- burn application to flash by using xmodem send application binary
+
+You can use any terminal emulation program (like [PuTTY](https://www.putty.org/)
+or [minicom](https://linux.die.net/man/1/minicom)).
+
+### Generate Example Project
+
+The example project for HIMAX WE1 EVB platform can be generated with the
+following command:
+
+Download related third party data
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads
+```
+
+Generate person detection project
+
+```
+make -f tensorflow/lite/micro/tools/make/Makefile generate_person_detection_int8_make_project TARGET=himax_we1_evb
+```
+
+### Build and Burn Example
+
+Following the Steps to run person detection example at HIMAX WE1 EVB platform.
+
+1. Go to the generated example project directory.
+
+    ```
+    cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/person_detection_int8/make
+    ```
+
+2. Build the example using
+
+    ```
+    make app
+    ```
+
+3. After example build finish, copy ELF file and map file to image generate
+    tool directory. \
+    image generate tool directory located at
+    `'tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/'`
+
+    ```
+    cp person_detection_int8.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+    ```
+
+4. Go to flash image generate tool directory.
+
+    ```
+    cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
+    ```
+
+    make sure this tool directory is in $PATH. You can permanently set it to
+    PATH by
+
+    ```
+    export PATH=$PATH:$(pwd)
+    ```
+
+5. run image generate tool, generate flash image file.
+
+    * Before running image generate tool, by typing `sudo chmod +x image_gen`
+      and `sudo chmod +x sign_tool` to make sure it is executable.
+
+    ```
+    image_gen -e person_detection_int8.elf -m himax_we1_evb.map -o out.img
+    ```
+
+6. Download flash image file to HIMAX WE1 EVB by UART:
+
+    * more detail about download image through UART can be found at
+      [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update)
+
+After these steps, press reset button on the HIMAX WE1 EVB, you will see
+application output in the serial terminal.
+
 ## Running on SparkFun Edge
 
 The following instructions will help you build and deploy this sample on the
@@ -381,14 +490,14 @@ Enter the following command to set up some dummy cryptographic keys we can use
 for development:
 
 ```
-cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info0.py \
-tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/keys_info.py
+cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info0.py \
+tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info.py
 ```
 
 Next, run the following command to create a signed binary:
 
 ```
-python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_image_blob.py \
+python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_image_blob.py \
 --bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/person_detection.bin \
 --load-address 0xC000 \
 --magic-num 0xCB \
@@ -401,7 +510,7 @@ command to create a final version of the file that can be used to flash our
 device with the bootloader script we will use in the next step:
 
 ```
-python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \
+python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \
 --load-address 0x20000 \
 --bin main_nonsecure_ota.bin \
 -i 6 \
@@ -437,7 +546,7 @@ hit the button marked `RST`. Continue holding the button marked `14` while
 running the following command:
 
 ```
-python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.2.0/tools/apollo3_scripts/uart_wired_update.py \
+python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/uart_wired_update.py \
 -b ${BAUD_RATE} ${DEVICENAME} \
 -r 1 \
 -f main_nonsecure_wire.bin \
@@ -147,12 +147,10 @@ TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
 
   hm01b0_power_up(&s_HM01B0Cfg);
 
-  // TODO(njeff): check the delay time to just fit the spec.
   am_util_delay_ms(1);
 
   hm01b0_mclk_enable(&s_HM01B0Cfg);
 
-  // TODO(njeff): check the delay time to just fit the spec.
   am_util_delay_ms(1);
 
   hm01b0_init_if(&s_HM01B0Cfg);
@@ -1,24 +1,43 @@
 ifeq ($(TARGET), arc_emsdp)
 
-# Patch of arc make project to adjust it specifically
-# for person detection example. In particular:
+#Patch of arc make project to adjust it specifically
+#for experimental person detection example.In particular:
 # - Use Linker command file with better usage of fast memory
-# - In case project was generated with MLI usage, reduce scratch buffers.
+#- Stripout TFLM reference code by default.
+#- Optional : replace mli switchers with specialized kernels
+#for smaller code size
 
 person_detection_HDRS += \
-person_detection_patch.txt
+person_detection_int8_patch.txt
 
 person_detection_TEST_HDRS += \
-person_detection_patch.txt
+person_detection_int8_patch.txt
 
-%/person_detection_patch.txt: %/emsdp.lcf %/Makefile
-	@cp tensorflow/lite/micro/tools/make/targets/arc/emsdp/emsdp_v2.lcf $<
-	@echo emsdp.lcf > $@
-	@sed -E -i 's#MLI_ONLY *\?= *false#MLI_ONLY \?= false\n\
-	CXXFLAGS += -DSCRATCH_MEM_X_SIZE=0 -DSCRATCH_MEM_Y_SIZE=0 -DSCRATCH_MEM_Z_SIZE=0\
-	CCFLAGS += -DSCRATCH_MEM_X_SIZE=0 -DSCRATCH_MEM_Y_SIZE=0 -DSCRATCH_MEM_Z_SIZE=0#'\
-	$(word 2, $^)
-	@echo Makefile >> $@
+ARC_MLI_BACKEND_PATH = /tensorflow/lite/micro/kernels/arc_mli
+
+#Apply changes in generated project files.
+#See related comment echoed(@echo <comment>) after each change
+#to get understanding on it's purpose.
+%/person_detection_int8_patch.txt: %/emsdp.lcf %/Makefile %$(ARC_MLI_BACKEND_PATH)/conv.cc %$(ARC_MLI_BACKEND_PATH)/depthwise_conv.cc %$(ARC_MLI_BACKEND_PATH)/pooling.cc
+	@cp tensorflow/lite/micro/examples/person_detection/arc_emsdp/emsdp.lcf $<
+	@echo emsdp.lcf: Replace with example specific memory map > $@
+	@sed -E -i 's#MLI_ONLY *\?= *false#MLI_ONLY \?= true#' $(word 2, $^)
+	@echo Makefile: No Reference fallback for MLI supported functions >> $@
+
+ifneq ($(filter $(ALL_TAGS), reduce_codesize),)
+#In case 'reduce_codesize' tag is present, we replace common MLI functions with
+#specializations appropriate for this particular graph.But such changes of code
+#with high probability may not be acceptable for other graphs and will need
+#to be adjusted by the user
+	@sed -E -i 's#mli_krn_conv2d_nhwc_sa8_sa8_sa32#mli_krn_conv2d_nhwc_sa8_sa8_sa32_k1x1_nopad#' $(word 3, $^)
+	@sed -E -i 's#mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32#mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32_k3x3_krnpad#' $(word 4, $^)
+	@sed -E -i 's#mli_krn_avepool_hwc_sa8#mli_krn_avepool_hwc_sa8_k3x3_nopad#' $(word 5, $^)
+	@sed -E -i 's#mli_krn_maxpool_hwc_sa8\(in_ptr, \&cfg, out_ptr\);#return kTfLiteError;#' $(word 5, $^)
+	@echo $(word 3, $^): Use specialization >> $@
+	@echo $(word 4, $^): Use specialization >> $@
+	@echo $(word 5, $^): Use specialization and remove max pooling >> $@
+endif
 
 endif
@@ -25,7 +25,7 @@ limitations under the License.
 
 // Flash the blue LED after each inference
 void RespondToDetection(tflite::ErrorReporter* error_reporter,
-                        uint8_t person_score, uint8_t no_person_score) {
+                        int8_t person_score, int8_t no_person_score) {
   static bool is_initialized = false;
   if (!is_initialized) {
     // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
@@ -150,7 +150,7 @@ TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
 // Decode the JPEG image, crop it, and convert it to greyscale
 TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
                                    int image_width, int image_height,
-                                   uint8_t* image_data) {
+                                   int8_t* image_data) {
   TF_LITE_REPORT_ERROR(error_reporter,
                        "Decoding JPEG and converting to greyscale");
   // Parse the JPEG headers. The image will be decoded as a sequence of Minimum
@@ -221,11 +221,14 @@ TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
       // See https://en.wikipedia.org/wiki/Grayscale for magic numbers
       float gray_value = (0.2126 * r) + (0.7152 * g) + (0.0722 * b);
 
+      // Convert to signed 8-bit integer by subtracting 128.
+      gray_value -= 128;
+
       // The x coordinate of this pixel in the output image
       int current_x = x_origin + mcu_col;
       // The index of this pixel in our flat output buffer
       int index = (current_y * image_width) + current_x;
-      image_data[index] = static_cast<uint8_t>(gray_value);
+      image_data[index] = static_cast<int8_t>(gray_value);
     }
   }
 }
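A self-contained sketch of the per-pixel conversion this hunk introduces: standard luminance weights produce a gray value in [0, 255], and the 128 offset re-centers it into the `int8_t` range the int8 model expects (the helper name is illustrative):

```
#include <cstdint>

// Mirrors the hunk above: weighted RGB -> gray in [0, 255], then shift into
// int8_t's [-128, 127] for the signed quantized model.
int8_t RgbToSignedGray(uint8_t r, uint8_t g, uint8_t b) {
  float gray_value = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
  gray_value -= 128.0f;
  return static_cast<int8_t>(gray_value);
}
```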
@@ -235,7 +238,7 @@ TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
 
 // Get an image from the camera module
 TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
-                      int image_height, int channels, uint8_t* image_data) {
+                      int image_height, int channels, int8_t* image_data) {
   static bool g_is_camera_initialized = false;
   if (!g_is_camera_initialized) {
     TfLiteStatus init_status = InitCamera(error_reporter);
@@ -19,7 +19,7 @@ limitations under the License.
 // console. Real applications will want to take some custom action instead, and
 // should implement their own versions of this function.
 void RespondToDetection(tflite::ErrorReporter* error_reporter,
-                        uint8_t person_score, uint8_t no_person_score) {
+                        int8_t person_score, int8_t no_person_score) {
   TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
                        person_score, no_person_score);
 }
@@ -29,6 +29,6 @@ limitations under the License.
 // image is considered to contain a person. This threshold may be adjusted for
 // particular applications.
 void RespondToDetection(tflite::ErrorReporter* error_reporter,
-                        uint8_t person_score, uint8_t no_person_score);
+                        int8_t person_score, int8_t no_person_score);
 
 #endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_DETECTION_RESPONDER_H_
@@ -25,8 +25,8 @@ TF_LITE_MICRO_TEST(TestCallability) {
   // This will have external side-effects (like printing to the debug console
   // or lighting an LED) that are hard to observe, so the most we can do is
   // make sure the call doesn't crash.
-  RespondToDetection(&micro_error_reporter, 100, 200);
-  RespondToDetection(&micro_error_reporter, 200, 100);
+  RespondToDetection(&micro_error_reporter, -100, 100);
+  RespondToDetection(&micro_error_reporter, 100, 50);
 }
 
 TF_LITE_MICRO_TESTS_END
@@ -45,8 +45,9 @@ static const int kStrideShift = 1;
 //! @return Error code.
 //
 //*****************************************************************************
-uint32_t hm01b0_blocking_read_oneframe_scaled(
-    hm01b0_cfg_t* psCfg, uint8_t* buffer, int w, int h, int channels) {
+uint32_t hm01b0_blocking_read_oneframe_scaled(hm01b0_cfg_t* psCfg,
+                                              int8_t* buffer, int w, int h,
+                                              int channels) {
   hm01b0_single_frame_capture(psCfg);
 
   // Calculate the number of pixels to crop to get a centered image.
@@ -76,7 +77,9 @@ uint32_t hm01b0_blocking_read_oneframe_scaled(
       if (output_x < w && output_y < h) {
         const int output_idx = (output_y * w + output_x) * channels;
         for (int i=0; i<channels; i++) {
-          buffer[output_idx + i] = value;
+          // See the top of main_functions.cc for an explanation of and
+          // rationale for our unsigned to signed input conversion.
+          buffer[output_idx + i] = value - 128;
         }
       }
@ -40,7 +40,7 @@ extern "C" {
|
|||||||
//
|
//
|
||||||
//*****************************************************************************
|
//*****************************************************************************
|
||||||
uint32_t hm01b0_blocking_read_oneframe_scaled(hm01b0_cfg_t* psCfg,
|
uint32_t hm01b0_blocking_read_oneframe_scaled(hm01b0_cfg_t* psCfg,
|
||||||
uint8_t* buffer, int w, int h,
|
int8_t* buffer, int w, int h,
|
||||||
int channels);
|
int channels);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
|
@@ -13,9 +13,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
+#if defined(ARDUINO)
+#define ARDUINO_EXCLUDE_CODE
+#endif  // defined(ARDUINO)
+
+#ifndef ARDUINO_EXCLUDE_CODE
+
+#include "tensorflow/lite/micro/examples/person_detection/detection_responder.h"
 
-#include "hx_drv_tflm.h"
+#include "hx_drv_tflm.h"  // NOLINT
 
 // This dummy implementation writes person and no person scores to the error
 // console. Real applications will want to take some custom action instead, and

@@ -31,3 +37,5 @@ void RespondToDetection(tflite::ErrorReporter* error_reporter,
   TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
                        person_score, no_person_score);
 }
+
+#endif  // ARDUINO_EXCLUDE_CODE
@@ -13,10 +13,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
+#if defined(ARDUINO)
+#define ARDUINO_EXCLUDE_CODE
+#endif  // defined(ARDUINO)
+
+#ifndef ARDUINO_EXCLUDE_CODE
+
+#include "tensorflow/lite/micro/examples/person_detection/image_provider.h"
 
-#include "hx_drv_tflm.h"
-#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
+#include "hx_drv_tflm.h"  // NOLINT
+#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
 
 hx_drv_sensor_image_config_t g_pimg_config;
 

@@ -39,3 +45,5 @@ TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
 
   return kTfLiteOk;
 }
+
+#endif  // ARDUINO_EXCLUDE_CODE
@@ -18,7 +18,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
 
 TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
-                      int image_height, int channels, uint8_t* image_data) {
+                      int image_height, int channels, int8_t* image_data) {
   for (int i = 0; i < image_width * image_height * channels; ++i) {
     image_data[i] = 0;
   }
@@ -34,6 +34,6 @@ limitations under the License.
 // it just returns a static image. For real applications, you should
 // ensure there's a specialized implementation that accesses hardware APIs.
 TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
-                      int image_height, int channels, uint8_t* image_data);
+                      int image_height, int channels, int8_t* image_data);
 
 #endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_IMAGE_PROVIDER_H_
@@ -27,7 +27,7 @@ TF_LITE_MICRO_TESTS_BEGIN
 TF_LITE_MICRO_TEST(TestImageProvider) {
   tflite::MicroErrorReporter micro_error_reporter;
 
-  uint8_t image_data[kMaxImageSize];
+  int8_t image_data[kMaxImageSize];
   TfLiteStatus get_status = GetImage(&micro_error_reporter, kNumCols, kNumRows,
                                      kNumChannels, image_data);
   TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
@@ -32,8 +32,15 @@ const tflite::Model* model = nullptr;
 tflite::MicroInterpreter* interpreter = nullptr;
 TfLiteTensor* input = nullptr;
 
+// In order to use optimized tensorflow lite kernels, a signed int8_t quantized
+// model is preferred over the legacy unsigned model format. This means that
+// throughout this project, input images must be converted from unsigned to
+// signed format. The easiest and quickest way to convert from unsigned to
+// signed 8-bit integers is to subtract 128 from the unsigned value to get a
+// signed value.
+
 // An area of memory to use for input, output, and intermediate arrays.
-constexpr int kTensorArenaSize = 93 * 1024;
+constexpr int kTensorArenaSize = 136 * 1024;
 static uint8_t tensor_arena[kTensorArenaSize];
 }  // namespace
 
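The new comment block describes the subtract-128 rule. On two's-complement targets this is bit-identical to flipping the top bit, which some ports use as a cheap in-place conversion; a minimal demonstration (function names are illustrative):

```
#include <cstdint>

// Arithmetic form: e.g. 0 -> -128, 128 -> 0, 255 -> 127.
int8_t UnsignedToSigned(uint8_t v) {
  return static_cast<int8_t>(v - 128);
}

// Bit-twiddling equivalent on two's-complement hardware: XOR with 0x80
// flips the top bit and yields the same mapping.
int8_t UnsignedToSignedXor(uint8_t v) {
  return static_cast<int8_t>(v ^ 0x80);
}
```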
@@ -64,12 +71,15 @@ void setup() {
   //
   // tflite::AllOpsResolver resolver;
   // NOLINTNEXTLINE(runtime-global-variables)
-  static tflite::MicroMutableOpResolver<3> micro_op_resolver;
+  static tflite::MicroMutableOpResolver<5> micro_op_resolver;
   micro_op_resolver.AddAveragePool2D();
   micro_op_resolver.AddConv2D();
   micro_op_resolver.AddDepthwiseConv2D();
+  micro_op_resolver.AddReshape();
+  micro_op_resolver.AddSoftmax();
 
   // Build an interpreter to run the model with.
+  // NOLINTNEXTLINE(runtime-global-variables)
   static tflite::MicroInterpreter static_interpreter(
       model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
   interpreter = &static_interpreter;
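A sketch of the rule behind the `<3>` to `<5>` change, assuming the `MicroMutableOpResolver` behavior that each `Add...()` consumes one capacity slot and returns a `TfLiteStatus` (a call past capacity fails rather than registering):

```
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// The template parameter is the registration capacity. The int8 graph needs
// five ops (the three original ones plus Reshape and Softmax), hence <5>.
static tflite::MicroMutableOpResolver<5> micro_op_resolver;

TfLiteStatus RegisterOps() {
  if (micro_op_resolver.AddAveragePool2D() != kTfLiteOk) return kTfLiteError;
  if (micro_op_resolver.AddConv2D() != kTfLiteOk) return kTfLiteError;
  if (micro_op_resolver.AddDepthwiseConv2D() != kTfLiteOk) return kTfLiteError;
  if (micro_op_resolver.AddReshape() != kTfLiteOk) return kTfLiteError;
  if (micro_op_resolver.AddSoftmax() != kTfLiteOk) return kTfLiteError;
  return kTfLiteOk;  // a sixth Add...() call would exceed capacity and fail
}
```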
@@ -89,7 +99,7 @@ void setup() {
 void loop() {
   // Get image from provider.
   if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
-                            input->data.uint8)) {
+                            input->data.int8)) {
     TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
   }
 
@@ -101,7 +111,7 @@ void loop() {
   TfLiteTensor* output = interpreter->output(0);
 
   // Process the inference results.
-  uint8_t person_score = output->data.uint8[kPersonIndex];
-  uint8_t no_person_score = output->data.uint8[kNotAPersonIndex];
+  int8_t person_score = output->data.int8[kPersonIndex];
+  int8_t no_person_score = output->data.int8[kNotAPersonIndex];
   RespondToDetection(error_reporter, person_score, no_person_score);
 }
@@ -16,7 +16,6 @@ limitations under the License.
 #include "tensorflow/lite/micro/examples/person_detection/model_settings.h"
 
 const char* kCategoryLabels[kCategoryCount] = {
-    "unused",
-    "person",
     "notperson",
+    "person",
 };
@@ -27,9 +27,9 @@ constexpr int kNumChannels = 1;
 
 constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;
 
-constexpr int kCategoryCount = 3;
+constexpr int kCategoryCount = 2;
 constexpr int kPersonIndex = 1;
-constexpr int kNotAPersonIndex = 2;
+constexpr int kNotAPersonIndex = 0;
 extern const char* kCategoryLabels[kCategoryCount];
 
 #endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_MODEL_SETTINGS_H_
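With `kCategoryCount` reduced to 2, the output vector now indexes as {0: "notperson", 1: "person"}; the old 3-entry layout reserved index 0 for an unused class. An illustrative lookup using the constants above (the helper function is hypothetical):

```
#include <cstdint>

#include "tensorflow/lite/micro/examples/person_detection/model_settings.h"

// Picks the label with the higher raw int8 score under the new 2-class layout.
const char* TopLabel(int8_t person_score, int8_t no_person_score) {
  return (person_score > no_person_score) ? kCategoryLabels[kPersonIndex]
                                          : kCategoryLabels[kNotAPersonIndex];
}
```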
@@ -26,8 +26,7 @@ limitations under the License.
 #include "tensorflow/lite/version.h"
 
 // Create an area of memory to use for input, output, and intermediate arrays.
-constexpr int tensor_arena_size = 93 * 1024;
-__attribute__((section(".bss.NoInit"), aligned(16)))
+constexpr int tensor_arena_size = 136 * 1024;
 uint8_t tensor_arena[tensor_arena_size];
 
 TF_LITE_MICRO_TESTS_BEGIN
|
|||||||
// An easier approach is to just use the AllOpsResolver, but this will
|
// An easier approach is to just use the AllOpsResolver, but this will
|
||||||
// incur some penalty in code space for op implementations that are not
|
// incur some penalty in code space for op implementations that are not
|
||||||
// needed by this graph.
|
// needed by this graph.
|
||||||
//
|
tflite::MicroMutableOpResolver<5> micro_op_resolver;
|
||||||
// tflite::AllOpsResolver resolver;
|
|
||||||
tflite::MicroMutableOpResolver<3> micro_op_resolver;
|
|
||||||
micro_op_resolver.AddAveragePool2D();
|
micro_op_resolver.AddAveragePool2D();
|
||||||
micro_op_resolver.AddConv2D();
|
micro_op_resolver.AddConv2D();
|
||||||
micro_op_resolver.AddDepthwiseConv2D();
|
micro_op_resolver.AddDepthwiseConv2D();
|
||||||
|
micro_op_resolver.AddReshape();
|
||||||
|
micro_op_resolver.AddSoftmax();
|
||||||
|
|
||||||
// Build an interpreter to run the model with.
|
// Build an interpreter to run the model with.
|
||||||
tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
|
tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
|
||||||
@ -74,13 +73,11 @@ TF_LITE_MICRO_TEST(TestInvoke) {
|
|||||||
TF_LITE_MICRO_EXPECT_EQ(kNumRows, input->dims->data[1]);
|
TF_LITE_MICRO_EXPECT_EQ(kNumRows, input->dims->data[1]);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(kNumCols, input->dims->data[2]);
|
TF_LITE_MICRO_EXPECT_EQ(kNumCols, input->dims->data[2]);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(kNumChannels, input->dims->data[3]);
|
TF_LITE_MICRO_EXPECT_EQ(kNumChannels, input->dims->data[3]);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, input->type);
|
TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type);
|
||||||
|
|
||||||
// Copy an image with a person into the memory area used for the input.
|
// Copy an image with a person into the memory area used for the input.
|
||||||
const uint8_t* person_data = g_person_data;
|
TFLITE_DCHECK_EQ(input->bytes, static_cast<size_t>(g_person_data_size));
|
||||||
for (size_t i = 0; i < input->bytes; ++i) {
|
memcpy(input->data.int8, g_person_data, input->bytes);
|
||||||
input->data.uint8[i] = person_data[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the model on this input and make sure it succeeds.
|
// Run the model on this input and make sure it succeeds.
|
||||||
TfLiteStatus invoke_status = interpreter.Invoke();
|
TfLiteStatus invoke_status = interpreter.Invoke();
|
||||||
@ -92,26 +89,21 @@ TF_LITE_MICRO_TEST(TestInvoke) {
|
|||||||
// Get the output from the model, and make sure it's the expected size and
|
// Get the output from the model, and make sure it's the expected size and
|
||||||
// type.
|
// type.
|
||||||
TfLiteTensor* output = interpreter.output(0);
|
TfLiteTensor* output = interpreter.output(0);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(4, output->dims->size);
|
TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
|
TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[1]);
|
TF_LITE_MICRO_EXPECT_EQ(kCategoryCount, output->dims->data[1]);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[2]);
|
TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);
|
||||||
TF_LITE_MICRO_EXPECT_EQ(kCategoryCount, output->dims->data[3]);
|
|
||||||
TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, output->type);
|
|
||||||
|
|
||||||
// Make sure that the expected "Person" score is higher than the other class.
|
// Make sure that the expected "Person" score is higher than the other class.
|
||||||
uint8_t person_score = output->data.uint8[kPersonIndex];
|
int8_t person_score = output->data.int8[kPersonIndex];
|
||||||
uint8_t no_person_score = output->data.uint8[kNotAPersonIndex];
|
int8_t no_person_score = output->data.int8[kNotAPersonIndex];
|
||||||
TF_LITE_REPORT_ERROR(µ_error_reporter,
|
TF_LITE_REPORT_ERROR(µ_error_reporter,
|
||||||
"person data. person score: %d, no person score: %d\n",
|
"person data. person score: %d, no person score: %d\n",
|
||||||
person_score, no_person_score);
|
person_score, no_person_score);
|
||||||
TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score);
|
TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score);
|
||||||
|
|
||||||
// Now test with a different input, from an image without a person.
|
// TODO(b/161461076): Update model to make this work on real negative inputs.
|
||||||
const uint8_t* no_person_data = g_no_person_data;
|
memset(input->data.int8, 0, input->bytes);
|
||||||
for (size_t i = 0; i < input->bytes; ++i) {
|
|
||||||
input->data.uint8[i] = no_person_data[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the model on this "No Person" input.
|
// Run the model on this "No Person" input.
|
||||||
invoke_status = interpreter.Invoke();
|
invoke_status = interpreter.Invoke();
|
||||||
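A hedged sketch of how the raw int8 scores compared in this test map back to real values: apply the output tensor's quantization parameters, which live in `TfLiteTensor::params` (`scale`, `zero_point`); the helper name below is illustrative:

```
#include <cstdint>

#include "tensorflow/lite/c/common.h"

// Standard affine dequantization: real = (q - zero_point) * scale. For a
// softmax output this recovers an approximate probability in [0, 1].
float DequantizeScore(const TfLiteTensor* output, int index) {
  const int8_t q = output->data.int8[index];
  return (q - output->params.zero_point) * output->params.scale;
}
```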

@@ -123,16 +115,14 @@ TF_LITE_MICRO_TEST(TestInvoke) {
  // Get the output from the model, and make sure it's the expected size and
  // type.
  output = interpreter.output(0);
-  TF_LITE_MICRO_EXPECT_EQ(4, output->dims->size);
+  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[1]);
-  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[2]);
-  TF_LITE_MICRO_EXPECT_EQ(kCategoryCount, output->dims->data[3]);
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, output->type);
+  TF_LITE_MICRO_EXPECT_EQ(kCategoryCount, output->dims->data[1]);
+  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);

  // Make sure that the expected "No Person" score is higher.
-  person_score = output->data.uint8[kPersonIndex];
-  no_person_score = output->data.uint8[kNotAPersonIndex];
+  person_score = output->data.int8[kPersonIndex];
+  no_person_score = output->data.int8[kNotAPersonIndex];
  TF_LITE_REPORT_ERROR(
      &micro_error_reporter,
      "no person data. person score: %d, no person score: %d\n", person_score,
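
The `person_score`/`no_person_score` values compared above are raw int8 quantized outputs. A minimal sketch of mapping them back to real-valued scores, assuming example scale and zero-point values (on device these come from the output tensor's quantization parameters):

```python
# Minimal dequantization sketch. The scale and zero point below are assumed
# example values; on device they are read from output->params.scale and
# output->params.zero_point.
OUTPUT_SCALE = 1.0 / 256.0
OUTPUT_ZERO_POINT = -128

def dequantize(q):
    # real_value = scale * (quantized_value - zero_point)
    return OUTPUT_SCALE * (q - OUTPUT_ZERO_POINT)

print(dequantize(72))    # person score -> 0.78125
print(dequantize(-88))   # no-person score -> 0.15625
```

Because dequantization is monotonic, comparing the raw int8 values directly, as the test does, gives the same ordering as comparing the dequantized scores.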

@@ -26,7 +26,7 @@ limitations under the License.
// This implementation will light up LEDs on the board in response to the
// inference results.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
-                        uint8_t person_score, uint8_t no_person_score) {
+                        int8_t person_score, int8_t no_person_score) {
  static bool is_initialized = false;
  if (!is_initialized) {
    // Setup LED's as outputs. Leave red LED alone since that's an error

@@ -190,7 +190,7 @@ TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
// Capture single frame. Frame pointer passed in to reduce memory usage. This
// allows the input tensor to be used instead of requiring an extra copy.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int frame_width,
-                      int frame_height, int channels, uint8_t* frame) {
+                      int frame_height, int channels, int8_t* frame) {
  if (!g_is_camera_initialized) {
    TfLiteStatus init_status = InitCamera(error_reporter);
    if (init_status != kTfLiteOk) {

@@ -372,6 +372,9 @@ tf.lite.TFLiteConverter.from_frozen_graph('vww_96_grayscale_frozen.pb',
                                          ['input'], ['MobilenetV1/Predictions/Reshape_1'])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
+converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+converter.inference_input_type = tf.int8
+converter.inference_output_type = tf.int8

tflite_quant_model = converter.convert()
open("vww_96_grayscale_quantized.tflite", "wb").write(tflite_quant_model)
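
The hunk above relies on a `representative_dataset_gen` defined earlier in the training instructions. A minimal sketch of one, using random stand-in data (real calibration should iterate over actual preprocessed 96x96 grayscale training images):

```python
import numpy as np

def representative_dataset_gen():
    # Yield ~100 sample inputs so the converter can calibrate int8 ranges.
    # Random data is a stand-in here; use real preprocessed images instead.
    for _ in range(100):
        image = np.random.rand(1, 96, 96, 1).astype(np.float32)
        yield [image]
```

The converter runs these samples through the float graph to choose per-tensor quantization ranges before emitting the fully int8 model.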

@@ -1,138 +0,0 @@
# Description:
#   TensorFlow Lite for Microcontrollers Vision Example.

load(
    "//tensorflow/lite/micro/testing:micro_test.bzl",
    "tflite_micro_cc_test",
)

package(default_visibility = ["//visibility:public"])

licenses(["notice"])  # Apache 2.0

cc_library(
    name = "model_settings",
    srcs = [
        "model_settings.cc",
    ],
    hdrs = [
        "model_settings.h",
    ],
)

cc_library(
    name = "person_detect_model_data",
    srcs = [
        "person_detect_model_data.cc",
    ],
    hdrs = [
        "person_detect_model_data.h",
    ],
)

cc_library(
    name = "simple_images_test_data",
    srcs = [
        "no_person_image_data.cc",
        "person_image_data.cc",
    ],
    hdrs = [
        "no_person_image_data.h",
        "person_image_data.h",
    ],
    deps = [
        ":model_settings",
    ],
)

tflite_micro_cc_test(
    name = "person_detection_test",
    srcs = ["person_detection_test.cc"],
    deps = [
        ":model_settings",
        ":person_detect_model_data",
        ":simple_images_test_data",
        "//tensorflow/lite:schema_fbs_version",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/micro:micro_error_reporter",
        "//tensorflow/lite/micro:micro_framework",
        "//tensorflow/lite/micro:op_resolvers",
        "//tensorflow/lite/micro/testing:micro_test",
        "//tensorflow/lite/schema:schema_fbs",
    ],
)

cc_library(
    name = "image_provider",
    srcs = [
        "image_provider.cc",
    ],
    hdrs = [
        "image_provider.h",
    ],
    deps = [
        ":model_settings",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/micro:micro_error_reporter",
    ],
)

tflite_micro_cc_test(
    name = "image_provider_test",
    srcs = [
        "image_provider_test.cc",
    ],
    deps = [
        ":image_provider",
        ":model_settings",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/micro:micro_error_reporter",
        "//tensorflow/lite/micro:micro_framework",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)

cc_library(
    name = "detection_responder",
    srcs = [
        "detection_responder.cc",
    ],
    hdrs = [
        "detection_responder.h",
    ],
    deps = [
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/micro:micro_error_reporter",
    ],
)

tflite_micro_cc_test(
    name = "detection_responder_test",
    srcs = [
        "detection_responder_test.cc",
    ],
    deps = [
        ":detection_responder",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)

cc_binary(
    name = "person_detection",
    srcs = [
        "main.cc",
        "main_functions.cc",
        "main_functions.h",
    ],
    deps = [
        ":detection_responder",
        ":image_provider",
        ":model_settings",
        ":person_detect_model_data",
        "//tensorflow/lite:schema_fbs_version",
        "//tensorflow/lite/micro:micro_error_reporter",
        "//tensorflow/lite/micro:micro_framework",
        "//tensorflow/lite/micro:op_resolvers",
        "//tensorflow/lite/schema:schema_fbs",
    ],
)

@@ -1,83 +0,0 @@
$(eval $(call add_third_party_download,$(PERSON_MODEL_INT8_URL),$(PERSON_MODEL_INT8_MD5),person_model_int8,))

person_detection_MODEL_SRCS := \
tensorflow/lite/micro/examples/person_detection_experimental/model_settings.cc \
$(MAKEFILE_DIR)/downloads/person_model_int8/person_detect_model_data.cc

person_detection_MODEL_HDRS := \
tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h \
tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h

person_detection_TEST_SRCS := \
tensorflow/lite/micro/examples/person_detection_experimental/person_detection_test.cc \
$(MAKEFILE_DIR)/downloads/person_model_int8/no_person_image_data.cc \
$(MAKEFILE_DIR)/downloads/person_model_int8/person_image_data.cc \
$(person_detection_MODEL_SRCS)

person_detection_TEST_HDRS := \
tensorflow/lite/micro/examples/person_detection_experimental/no_person_image_data.h \
tensorflow/lite/micro/examples/person_detection_experimental/person_image_data.h \
$(person_detection_MODEL_HDRS)

IMAGE_PROVIDER_TEST_SRCS := \
tensorflow/lite/micro/examples/person_detection_experimental/image_provider.cc \
tensorflow/lite/micro/examples/person_detection_experimental/image_provider_test.cc \
tensorflow/lite/micro/examples/person_detection_experimental/model_settings.cc

IMAGE_PROVIDER_TEST_HDRS := \
tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h \
tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h

DETECTION_RESPONDER_TEST_SRCS := \
tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.cc \
tensorflow/lite/micro/examples/person_detection_experimental/detection_responder_test.cc

DETECTION_RESPONDER_TEST_HDRS := \
tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h

person_detection_SRCS := \
tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.cc \
tensorflow/lite/micro/examples/person_detection_experimental/image_provider.cc \
tensorflow/lite/micro/examples/person_detection_experimental/main.cc \
tensorflow/lite/micro/examples/person_detection_experimental/main_functions.cc \
$(person_detection_MODEL_SRCS)

person_detection_HDRS := \
tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h \
tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h \
tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h \
$(person_detection_MODEL_HDRS)

# Find any platform-specific rules for this example.
include $(wildcard tensorflow/lite/micro/examples/person_detection_experimental/*/Makefile.inc)

# Tests loading and running a vision model.
$(eval $(call microlite_test,person_detection_test_int8,\
$(person_detection_TEST_SRCS),$(person_detection_TEST_HDRS)))

# Three conflicting issues here:
# 1. The image_provider_test fails on Sparkfun Edge and we do not have a way to
#    filter out individual tests within an example.
# 2. We do not want to completely remove person_detection from the sparkfun_edge
#    build.
# 3. We do want to keep as many targets as possible as part of the sparkfun_edge
#    CI build to avoid getting into similar situations where some parts of the
#    code are supported on a platform while other parts are not.
#
# The current nasty workaround is to explicitly exclude the offending test for
# the sparkfun_edge target. Note that we are not excluding it for
# TARGET=apollo3evb because that is not part of our CI builds (and the two are
# basically equivalent).
ifneq ($(TARGET),sparkfun_edge)
# Tests the image provider module.
$(eval $(call microlite_test,image_provider_test_int8,\
$(IMAGE_PROVIDER_TEST_SRCS),$(IMAGE_PROVIDER_TEST_HDRS)))
endif

# Tests the detection responder module.
$(eval $(call microlite_test,detection_responder_test_int8,\
$(DETECTION_RESPONDER_TEST_SRCS),$(DETECTION_RESPONDER_TEST_HDRS)))

# Builds a standalone object recognition binary.
$(eval $(call microlite_test,person_detection_int8,\
$(person_detection_SRCS),$(person_detection_HDRS)))

@@ -1,570 +0,0 @@
# Person detection example

This example shows how you can use TensorFlow Lite to run a 250 kilobyte neural
network to recognize people in images captured by a camera. It is designed to
run on systems with small amounts of memory such as microcontrollers and DSPs.
This uses the experimental int8 quantized version of the person detection model.

## Table of contents

- [Getting started](#getting-started)
- [Running on ARC EM SDP](#running-on-arc-em-sdp)
- [Running on Arduino](#running-on-arduino)
- [Running on HIMAX WE1 EVB](#running-on-himax-we1-evb)
- [Running on SparkFun Edge](#running-on-sparkfun-edge)
- [Run the tests on a development machine](#run-the-tests-on-a-development-machine)
- [Debugging image capture](#debugging-image-capture)
- [Training your own model](#training-your-own-model)

## Running on ARC EM SDP

The following instructions will help you to build and deploy this example to the
[ARC EM SDP](https://www.synopsys.com/dw/ipdir.php?ds=arc-em-software-development-platform)
board. General information and instructions on using the board with TensorFlow
Lite Micro can be found in the common
[ARC targets description](/tensorflow/lite/micro/tools/make/targets/arc/README.md).

This example uses asymmetric int8 quantization and can therefore leverage
optimized int8 kernels from the embARC MLI library.
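
For reference, asymmetric quantization represents each real value $r$ as

$$r = S \, (q - Z), \qquad q \in [-128, 127],$$

where the scale $S$ and zero point $Z$ are per-tensor parameters chosen during calibration; allowing a nonzero $Z$ is what makes the scheme asymmetric and is the case the MLI `sa8` (signed asymmetric 8-bit) kernels are built for.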

The ARC EM SDP board contains a rich set of extension interfaces. You can choose
any compatible camera and modify the
[image_provider.cc](/tensorflow/lite/micro/examples/person_detection_experimental/image_provider.cc)
file accordingly to use input from your specific camera. By default, the results
of running this example are printed to the console. If you would like to
implement some target-specific actions instead, you need to modify
[detection_responder.cc](/tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.cc)
accordingly.

The reference implementations of these files are used by default on the EM SDP.

### Initial setup

Follow the instructions in the
[ARC EM SDP Initial Setup](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP)
to get and install all the tools required to work with the ARC EM SDP.

### Generate Example Project

The example project for the ARC EM SDP platform can be generated with the
following command:

```
make -f tensorflow/lite/micro/tools/make/Makefile \
TARGET=arc_emsdp TAGS=reduce_codesize \
generate_person_detection_int8_make_project
```

Note that `TAGS=reduce_codesize` applies example-specific code changes to reduce
the total size of the application. It can be omitted.

### Build and Run Example

For more detailed information on building and running examples, see the
appropriate sections of the general description of
[ARC EM SDP usage with TFLM](/tensorflow/lite/micro/tools/make/targets/arc/README.md#ARC-EM-Software-Development-Platform-ARC-EM-SDP).
In the directory with the generated project you can also find a
*README_ARC_EMSDP.md* file with instructions and options on building and
running. Here we only briefly mention the main steps, which are typically
enough to get started.

1.  You need to
    [connect the board](/tensorflow/lite/micro/tools/make/targets/arc/README.md#connect-the-board)
    and open a serial connection.

2.  Go to the generated example project directory.

    ```
    cd tensorflow/lite/micro/tools/make/gen/arc_emsdp_arc/prj/person_detection_int8/make
    ```

3.  Build the example using

    ```
    make app
    ```

4.  To generate artifacts for self-booting the example from the board, use

    ```
    make flash
    ```

5.  To run the application from the board using a microSD card:

    *   Copy the contents of the created /bin folder into the root of the
        microSD card. Note that the card must be formatted as FAT32 with the
        default cluster size (but less than 32 Kbytes).
    *   Plug the microSD card into the J11 connector.
    *   Push the RST button. If a red LED is lit beside the RST button, push
        the CFG button.
    *   Type or copy the following commands one after another into the serial
        terminal: `setenv loadaddr 0x10800000 setenv bootfile app.elf setenv
        bootdelay 1 setenv bootcmd fatload mmc 0 \$\{loadaddr\} \$\{bootfile\}
        \&\& bootelf saveenv`
    *   Push the RST button.

6.  If you have the MetaWare Debugger installed in your environment:

    *   To run the application from the console, type `make run`.
    *   To stop the execution, type `Ctrl+C` in the console several times.

In both cases (steps 5 and 6) you will see the application output in the serial
terminal.

## Running on Arduino

The following instructions will help you build and deploy this sample
to [Arduino](https://www.arduino.cc/) devices.

The sample has been tested with the following device:

- [Arduino Nano 33 BLE Sense](https://store.arduino.cc/usa/nano-33-ble-sense-with-headers)

You will also need the following camera module:

- [Arducam Mini 2MP Plus](https://www.amazon.com/Arducam-Module-Megapixels-Arduino-Mega2560/dp/B012UXNDOY)

### Hardware

Connect the Arducam pins as follows:

|Arducam pin name|Arduino pin name|
|----------------|----------------|
|CS|D7 (unlabelled, immediately to the right of D6)|
|MOSI|D11|
|MISO|D12|
|SCK|D13|
|GND|GND (either pin marked GND is fine)|
|VCC|3.3 V|
|SDA|A4|
|SCL|A5|

### Install the Arduino_TensorFlowLite library

Download the current nightly build of the library:
[person_detection.zip](https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_01_13.zip)

This example application is included as part of the official TensorFlow Lite
Arduino library. To install it, open the Arduino library manager in
`Tools -> Manage Libraries...` and search for `Arduino_TensorFlowLite`.

### Install other libraries

In addition to the TensorFlow library, you'll also need to install two
libraries:

*   The Arducam library, so our code can interface with the hardware
*   The JPEGDecoder library, so we can decode JPEG-encoded images

The Arducam Arduino library is available from GitHub at
[https://github.com/ArduCAM/Arduino](https://github.com/ArduCAM/Arduino).
To install it, download or clone the repository. Next, copy its `ArduCAM`
subdirectory into your `Arduino/libraries` directory. To find this directory on
your machine, check the *Sketchbook location* in the Arduino IDE's
*Preferences* window.

After downloading the library, you'll need to edit one of its files to make sure
it is configured for the Arducam Mini 2MP Plus. To do so, open the following
file:

```
Arduino/libraries/ArduCAM/memorysaver.h
```

You'll see a bunch of `#define` statements listed. Make sure that they are all
commented out, except for `#define OV2640_MINI_2MP_PLUS`, as so:

```
//Step 1: select the hardware platform, only one at a time
//#define OV2640_MINI_2MP
//#define OV3640_MINI_3MP
//#define OV5642_MINI_5MP
//#define OV5642_MINI_5MP_BIT_ROTATION_FIXED
#define OV2640_MINI_2MP_PLUS
//#define OV5642_MINI_5MP_PLUS
//#define OV5640_MINI_5MP_PLUS
```

Once you save the file, we're done configuring the Arducam library.

Our next step is to install the JPEGDecoder library. We can do this from within
the Arduino IDE. First, go to the *Manage Libraries...* option in the *Tools*
menu and search for `JPEGDecoder`. You should install version _1.8.0_ of the
library.

Once the library has installed, we'll need to configure it to disable some
optional components that are not compatible with the Arduino Nano 33 BLE Sense.
Open the following file:

```
Arduino/libraries/JPEGDecoder/src/User_Config.h
```

Make sure that both `#define LOAD_SD_LIBRARY` and `#define LOAD_SDFAT_LIBRARY`
are commented out, as shown in this excerpt from the file:

```c++
// Comment out the next #defines if you are not using an SD Card to store the JPEGs
// Commenting out the line is NOT essential but will save some FLASH space if
// SD Card access is not needed. Note: use of SdFat is currently untested!

//#define LOAD_SD_LIBRARY // Default SD Card library
//#define LOAD_SDFAT_LIBRARY // Use SdFat library instead, so SD Card SPI can be bit bashed
```

Once you've saved the file, you are done installing libraries.

### Load and run the example

Go to `File -> Examples`. You should see an example near the bottom of the list
named `TensorFlowLite`. Select it and click `person_detection` to load the
example. Connect your device, then build and upload the example.

To test the camera, start by pointing the device's camera at something that is
definitely not a person, or just covering it up. The next time the blue LED
flashes, the device will capture a frame from the camera and begin to run
inference. Since the vision model we are using for person detection is
relatively large, it takes a long time to run inference: around 19 seconds at
the time of writing, though it's possible TensorFlow Lite has gotten faster
since then.

After 19 seconds or so, the inference result will be translated into another
LED being lit. Since you pointed the camera at something that isn't a person,
the red LED should light up.

Now, try pointing the device's camera at yourself! The next time the blue LED
flashes, the device will capture another image and begin to run inference.
After 19 seconds, the green LED should light up!

Remember, image data is captured as a snapshot before each inference, whenever
the blue LED flashes. Whatever the camera is pointed at during that moment is
what will be fed into the model. It doesn't matter where the camera is pointed
until the next time an image is captured, when the blue LED will flash again.

If you're getting seemingly incorrect results, make sure you are in an
environment with good lighting. You should also make sure that the camera is
oriented correctly, with the pins pointing downwards, so that the images it
captures are the right way up; the model was not trained to recognize
upside-down people! In addition, it's good to remember that this is a tiny
model, which trades accuracy for small size. It works very well, but it isn't
accurate 100% of the time.

We can also see the results of inference via the Arduino Serial Monitor. To do
this, open the *Serial Monitor* from the *Tools* menu. You'll see a detailed
log of what is happening while our application runs. It's also interesting to
check the *Show timestamp* box, so you can see how long each part of the
process takes:

```
14:17:50.714 -> Starting capture
14:17:50.714 -> Image captured
14:17:50.784 -> Reading 3080 bytes from ArduCAM
14:17:50.887 -> Finished reading
14:17:50.887 -> Decoding JPEG and converting to greyscale
14:17:51.074 -> Image decoded and processed
14:18:09.710 -> Person score: 246 No person score: 66
```

From the log, we can see that it took around 170 ms to capture and read the
image data from the camera module, 180 ms to decode the JPEG and convert it to
greyscale, and 18.6 seconds to run inference.

## Running on HIMAX WE1 EVB

The following instructions will help you build and deploy this example to the
[HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
board. To understand more about using this board, please check the
[HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide).

### Initial Setup

To use the HIMAX WE1 EVB, please make sure the following software is installed:

#### MetaWare Development Toolkit

See the
[Install the Synopsys DesignWare ARC MetaWare Development Toolkit](/tensorflow/lite/micro/tools/make/targets/arc/README.md#install-the-synopsys-designware-arc-metaware-development-toolkit)
section for instructions on toolchain installation.

#### Make Tool version

A `make` tool is required for deploying TensorFlow Lite Micro applications on
the HIMAX WE1 EVB. See the
[Check make tool version](/tensorflow/lite/micro/tools/make/targets/arc/README.md#make-tool)
section for the proper environment.

#### Serial Terminal Emulation Application

There are two main purposes for the HIMAX WE1 EVB debug UART port:

- printing application output
- burning the application to flash by sending the application binary over xmodem

You can use any terminal emulation program (like [PuTTY](https://www.putty.org/)
or [minicom](https://linux.die.net/man/1/minicom)).

### Generate Example Project

The example project for the HIMAX WE1 EVB platform can be generated with the
following commands.

Download the related third-party data:

```
make -f tensorflow/lite/micro/tools/make/Makefile TARGET=himax_we1_evb third_party_downloads
```

Generate the person detection project:

```
make -f tensorflow/lite/micro/tools/make/Makefile generate_person_detection_int8_make_project TARGET=himax_we1_evb
```

### Build and Burn Example

Follow these steps to run the person detection example on the HIMAX WE1 EVB
platform.

1.  Go to the generated example project directory.

    ```
    cd tensorflow/lite/micro/tools/make/gen/himax_we1_evb_arc/prj/person_detection_int8/make
    ```

2.  Build the example using

    ```
    make app
    ```

3.  After the build finishes, copy the ELF file and map file to the image
    generation tool directory, located at
    `tensorflow/lite/micro/tools/make/downloads/himax_we1_sdk/image_gen_linux_v3/`:

    ```
    cp person_detection_int8.elf himax_we1_evb.map ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
    ```

4.  Go to the flash image generation tool directory.

    ```
    cd ../../../../../downloads/himax_we1_sdk/image_gen_linux_v3/
    ```

    Make sure this tool directory is in `$PATH`. You can add it permanently
    with

    ```
    export PATH=$PATH:$(pwd)
    ```

5.  Run the image generation tool to generate the flash image file.

    *   Before running the image generation tool, type `sudo chmod +x image_gen`
        and `sudo chmod +x sign_tool` to make sure they are executable.

    ```
    image_gen -e person_detection_int8.elf -m himax_we1_evb.map -o out.img
    ```

6.  Download the flash image file to the HIMAX WE1 EVB by UART:

    *   More detail about downloading the image through UART can be found at
        [HIMAX WE1 EVB update Flash image](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide#flash-image-update)

After these steps, press the reset button on the HIMAX WE1 EVB and you will see
the application output in the serial terminal.

## Running on SparkFun Edge

The following instructions will help you build and deploy this sample on the
[SparkFun Edge development board](https://sparkfun.com/products/15170). This
sample requires the SparkFun Himax camera for the SparkFun Edge board. It is
not available for purchase yet.

If you're new to using this board, we recommend walking through the
[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow)
codelab to get an understanding of the workflow.

### Compile the binary

The following command will download the required dependencies and then compile
a binary for the SparkFun Edge:

```
make -f tensorflow/lite/micro/tools/make/Makefile TARGET=sparkfun_edge person_detection_bin
```

The binary will be created in the following location:

```
tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/person_detection.bin
```

### Sign the binary

The binary must be signed with cryptographic keys to be deployed to the device.
We'll now run some commands that will sign our binary so it can be flashed to
the SparkFun Edge. The scripts we are using come from the Ambiq SDK, which is
downloaded when the `Makefile` is run.

Enter the following command to set up some dummy cryptographic keys we can use
for development:

```
cp tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info0.py \
tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/keys_info.py
```

Next, run the following command to create a signed binary:

```
python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_image_blob.py \
--bin tensorflow/lite/micro/tools/make/gen/sparkfun_edge_cortex-m4/bin/person_detection.bin \
--load-address 0xC000 \
--magic-num 0xCB \
-o main_nonsecure_ota \
--version 0x0
```

This will create the file `main_nonsecure_ota.bin`. We'll now run another
command to create a final version of the file that can be used to flash our
device with the bootloader script we will use in the next step:

```
python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/create_cust_wireupdate_blob.py \
--load-address 0x20000 \
--bin main_nonsecure_ota.bin \
-i 6 \
-o main_nonsecure_wire \
--options 0x1
```

You should now have a file called `main_nonsecure_wire.bin` in the directory
where you ran the commands. This is the file we'll be flashing to the device.

### Flash the binary

Next, attach the board to your computer via a USB-to-serial adapter.

**Note:** If you're using the [SparkFun Serial Basic Breakout](https://www.sparkfun.com/products/15096),
you should [install the latest drivers](https://learn.sparkfun.com/tutorials/sparkfun-serial-basic-ch340c-hookup-guide#drivers-if-you-need-them)
before you continue.

Once connected, assign the USB device name to an environment variable:

```
export DEVICENAME=put your device name here
```

Set another variable with the baud rate:

```
export BAUD_RATE=921600
```

Now, hold the button marked `14` on the device. While still holding the button,
hit the button marked `RST`. Continue holding the button marked `14` while
running the following command:

```
python3 tensorflow/lite/micro/tools/make/downloads/AmbiqSuite-Rel2.0.0/tools/apollo3_scripts/uart_wired_update.py \
-b ${BAUD_RATE} ${DEVICENAME} \
-r 1 \
-f main_nonsecure_wire.bin \
-i 6
```

You should see a long stream of output as the binary is flashed to the device.
Once you see the following lines, flashing is complete:

```
Sending Reset Command.
Done.
```

If you don't see these lines, flashing may have failed. Try running through the
steps in [Flash the binary](#flash-the-binary) again (you can skip over setting
the environment variables). If you continue to run into problems, follow the
[AI on a microcontroller with TensorFlow Lite and SparkFun Edge](https://codelabs.developers.google.com/codelabs/sparkfun-tensorflow)
codelab, which includes more comprehensive instructions for the flashing
process.

The binary should now be deployed to the device. Hit the button marked `RST` to
reboot the board. You should see the device's four LEDs flashing in sequence.

Debug information is logged by the board while the program is running. To view
it, establish a serial connection to the board using a baud rate of `115200`.
On OSX and Linux, the following command should work:

```
screen ${DEVICENAME} 115200
```

To stop viewing the debug output with `screen`, hit `Ctrl+A`, immediately
followed by the `K` key, then hit the `Y` key.

## Run the tests on a development machine

To compile and test this example on a desktop Linux or MacOS machine, download
[the TensorFlow source code](https://github.com/tensorflow/tensorflow), `cd`
into the source directory from a terminal, and then run the following command:

```
make -f tensorflow/lite/micro/tools/make/Makefile
```

This will take a few minutes, and downloads frameworks the code uses like
[CMSIS](https://developer.arm.com/embedded/cmsis) and
[flatbuffers](https://google.github.io/flatbuffers/). Once that process has
finished, run:

```
make -f tensorflow/lite/micro/tools/make/Makefile test_person_detection_test
```

You should see a series of files get compiled, followed by some logging output
from a test, which should conclude with `~~~ALL TESTS PASSED~~~`. If you see
this, it means that a small program has been built and run that loads a trained
TensorFlow model, runs some example images through it, and checks that it
produces the expected outputs. This particular test runs images with and
without a person in them, and checks that the network correctly identifies
them.

To understand how TensorFlow Lite does this, you can look at the `TestInvoke()`
function in
[person_detection_test.cc](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/person_detection/person_detection_test.cc).
It's a fairly small amount of code, creating an interpreter, getting a handle
to a model that's been compiled into the program, and then invoking the
interpreter with the model and sample inputs.
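
For a quick sanity check outside the C++ test harness, a rough Python analogue of what `TestInvoke()` does is sketched below; the model path and the zero-filled input are placeholder assumptions (a real check would feed one of the sample images):

```python
import numpy as np
import tensorflow as tf

# Rough analogue of TestInvoke(): load the model, feed one input, read scores.
interpreter = tf.lite.Interpreter(model_path="person_detect.tflite")  # assumed path
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]

# Stand-in for a real 96x96 grayscale test image.
image = np.zeros(input_details["shape"], dtype=np.int8)
interpreter.set_tensor(input_details["index"], image)
interpreter.invoke()

scores = interpreter.get_tensor(output_details["index"])[0]
print("raw int8 scores:", scores)  # index order follows the model's categories
```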

## Debugging image capture

When the sample is running, check the LEDs to determine whether the inference
is running correctly. If the red light is stuck on, it means there was an error
communicating with the camera. This is likely due to an incorrectly connected
or broken camera.

During inference, the blue LED will toggle every time inference is complete.
The orange LED indicates that no person was found, and the green LED indicates
a person was found. The red LED should never turn on, since it indicates an
error.

In order to view the captured image, set the `DUMP_IMAGE` define in main.cc.
This causes the board to log raw image info to the console. After the board
has been flashed and reset, dump the log to a text file:

```
screen -L -Logfile <dump file> ${DEVICENAME} 115200
```

Next, run the raw-to-bitmap converter to view captured images:

```
python3 raw_to_bitmap.py -r GRAY -i <dump file>
```
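
If `raw_to_bitmap.py` is not at hand, the core conversion can be sketched as follows; the dump format assumed here (one frame of whitespace-separated hex bytes) is a simplification of what the real script parses:

```python
import numpy as np
from PIL import Image

WIDTH, HEIGHT = 96, 96  # model input size

# Assumes the dump file contains one frame of whitespace-separated hex bytes.
with open("dump.txt") as f:
    pixels = [int(tok, 16) for tok in f.read().split()]

frame = np.array(pixels[:WIDTH * HEIGHT], dtype=np.uint8).reshape(HEIGHT, WIDTH)
Image.fromarray(frame).save("frame.png")  # 8-bit grayscale bitmap
```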

## Training your own model

You can train your own model with some easy-to-use scripts. See
[training_a_model.md](training_a_model.md) for instructions.

@@ -1,198 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"

#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_RAW8_QVGA_8bits_lsb_5fps.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_debug.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_optimized.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/platform_Sparkfun_Edge.h"

// These are headers from Ambiq's Apollo3 SDK.
#include "am_bsp.h"         // NOLINT
#include "am_mcu_apollo.h"  // NOLINT
#include "am_util.h"        // NOLINT

// #define DEMO_HM01B0_FRAMEBUFFER_DUMP_ENABLE

// Enabling logging increases power consumption by preventing low power mode
// from being enabled.
#define ENABLE_LOGGING

namespace {

//*****************************************************************************
//
// HM01B0 Configuration
//
//*****************************************************************************
static hm01b0_cfg_t s_HM01B0Cfg = {
    // i2c settings
    ui16SlvAddr : HM01B0_DEFAULT_ADDRESS,
    eIOMMode : HM01B0_IOM_MODE,
    ui32IOMModule : HM01B0_IOM_MODULE,
    sIOMCfg : {
        eInterfaceMode : HM01B0_IOM_MODE,
        ui32ClockFreq : HM01B0_I2C_CLOCK_FREQ,
    },
    pIOMHandle : NULL,

    // MCLK settings
    ui32CTimerModule : HM01B0_MCLK_GENERATOR_MOD,
    ui32CTimerSegment : HM01B0_MCLK_GENERATOR_SEG,
    ui32CTimerOutputPin : HM01B0_PIN_MCLK,

    // data interface
    ui8PinSCL : HM01B0_PIN_SCL,
    ui8PinSDA : HM01B0_PIN_SDA,
    ui8PinD0 : HM01B0_PIN_D0,
    ui8PinD1 : HM01B0_PIN_D1,
    ui8PinD2 : HM01B0_PIN_D2,
    ui8PinD3 : HM01B0_PIN_D3,
    ui8PinD4 : HM01B0_PIN_D4,
    ui8PinD5 : HM01B0_PIN_D5,
    ui8PinD6 : HM01B0_PIN_D6,
    ui8PinD7 : HM01B0_PIN_D7,
    ui8PinVSYNC : HM01B0_PIN_VSYNC,
    ui8PinHSYNC : HM01B0_PIN_HSYNC,
    ui8PinPCLK : HM01B0_PIN_PCLK,

    ui8PinTrig : HM01B0_PIN_TRIG,
    ui8PinInt : HM01B0_PIN_INT,
    pfnGpioIsr : NULL,
};

static constexpr int kFramesToInitialize = 4;

bool g_is_camera_initialized = false;

void boost_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
  am_hal_burst_avail_e eBurstModeAvailable;
  am_hal_burst_mode_e eBurstMode;

  // Check that the Burst Feature is available.
  if (AM_HAL_STATUS_SUCCESS ==
      am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
    if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
      TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
    } else {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Apollo3 Burst Mode is Not Available\n");
      return;
    }
  } else {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Failed to Initialize for Burst Mode operation\n");
  }

  // Make sure we are in "Normal" mode.
  if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_disable(&eBurstMode)) {
    if (AM_HAL_NORMAL_MODE == eBurstMode) {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Apollo3 operating in Normal Mode (48MHz)\n");
    }
  } else {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Failed to Disable Burst Mode operation\n");
  }

  // Put the MCU into "Burst" mode.
  if (bEnable) {
    if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
      if (AM_HAL_BURST_MODE == eBurstMode) {
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Apollo3 operating in Burst Mode (96MHz)\n");
      }
    } else {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Failed to Enable Burst Mode operation\n");
    }
  }
}

}  // namespace

TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
  TF_LITE_REPORT_ERROR(error_reporter, "Initializing HM01B0...\n");

  am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);

  // Set the default cache configuration
  am_hal_cachectrl_config(&am_hal_cachectrl_defaults);
  am_hal_cachectrl_enable();

  // Configure the board for low power operation. This breaks logging by
  // turning off the itm and uart interfaces.
#ifndef ENABLE_LOGGING
  am_bsp_low_power_init();
#endif

  // Enable interrupts so we can receive messages from the boot host.
  am_hal_interrupt_master_enable();

  boost_mode_enable(error_reporter, true);

  hm01b0_power_up(&s_HM01B0Cfg);

  am_util_delay_ms(1);

  hm01b0_mclk_enable(&s_HM01B0Cfg);

  am_util_delay_ms(1);

  hm01b0_init_if(&s_HM01B0Cfg);

  hm01b0_init_system(&s_HM01B0Cfg, (hm_script_t*)sHM01B0InitScript,
                     sizeof(sHM01B0InitScript) / sizeof(hm_script_t));

  // Put camera into streaming mode - this makes it so that the camera
  // constantly captures images. It is still OK to read an image since the
  // camera uses a double-buffered input. This means there is always one valid
  // image to read while the other buffer fills. Streaming mode allows the
  // camera to perform auto exposure constantly.
  hm01b0_set_mode(&s_HM01B0Cfg, HM01B0_REG_MODE_SELECT_STREAMING, 0);

  return kTfLiteOk;
}

// Capture single frame. Frame pointer passed in to reduce memory usage. This
// allows the input tensor to be used instead of requiring an extra copy.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int frame_width,
                      int frame_height, int channels, uint8_t* frame) {
  if (!g_is_camera_initialized) {
    TfLiteStatus init_status = InitCamera(error_reporter);
    if (init_status != kTfLiteOk) {
      return init_status;
    }
    // Drop a few frames until auto exposure is calibrated.
    for (int i = 0; i < kFramesToInitialize; ++i) {
      hm01b0_blocking_read_oneframe_scaled(frame, frame_width, frame_height,
                                           channels);
    }
    g_is_camera_initialized = true;
  }

  hm01b0_blocking_read_oneframe_scaled(frame, frame_width, frame_height,
                                       channels);

#ifdef DEMO_HM01B0_FRAMEBUFFER_DUMP_ENABLE
  // Allow some time to see result of previous inference before dumping image.
  am_util_delay_ms(2000);
  hm01b0_framebuffer_dump(frame, frame_width * frame_height * channels);
#endif

  return kTfLiteOk;
}

@@ -1,43 +0,0 @@
ifeq ($(TARGET), arc_emsdp)

# Patch of arc make project to adjust it specifically
# for the experimental person detection example. In particular:
# - Use a linker command file with better usage of fast memory
# - Strip out TFLM reference code by default
# - Optional: replace mli switchers with specialized kernels
#   for smaller code size

person_detection_HDRS += \
person_detection_int8_patch.txt

person_detection_TEST_HDRS += \
person_detection_int8_patch.txt

ARC_MLI_BACKEND_PATH = /tensorflow/lite/micro/kernels/arc_mli

# Apply changes in generated project files.
# See the related comment echoed (@echo <comment>) after each change
# to understand its purpose.
%/person_detection_int8_patch.txt: %/emsdp.lcf %/Makefile %$(ARC_MLI_BACKEND_PATH)/conv.cc %$(ARC_MLI_BACKEND_PATH)/depthwise_conv.cc %$(ARC_MLI_BACKEND_PATH)/pooling.cc
	@cp tensorflow/lite/micro/examples/person_detection_experimental/arc_emsdp/emsdp.lcf $<
	@echo emsdp.lcf: Replace with example specific memory map > $@

	@sed -E -i 's#MLI_ONLY *\?= *false#MLI_ONLY \?= true#' $(word 2, $^)
	@echo Makefile: No Reference fallback for MLI supported functions >> $@

ifneq ($(filter $(ALL_TAGS), reduce_codesize),)
# In case the 'reduce_codesize' tag is present, we replace common MLI functions
# with specializations appropriate for this particular graph. Such changes will
# most likely not be acceptable for other graphs and would need to be adjusted
# by the user.

	@sed -E -i 's#mli_krn_conv2d_nhwc_sa8_sa8_sa32#mli_krn_conv2d_nhwc_sa8_sa8_sa32_k1x1_nopad#' $(word 3, $^)
	@sed -E -i 's#mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32#mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32_k3x3_krnpad#' $(word 4, $^)
	@sed -E -i 's#mli_krn_avepool_hwc_sa8#mli_krn_avepool_hwc_sa8_k3x3_nopad#' $(word 5, $^)
	@sed -E -i 's#mli_krn_maxpool_hwc_sa8\(in_ptr, \&cfg, out_ptr\);#return kTfLiteError;#' $(word 5, $^)
	@echo $(word 3, $^): Use specialization >> $@
	@echo $(word 4, $^): Use specialization >> $@
	@echo $(word 5, $^): Use specialization and remove max pooling >> $@
endif

endif

@@ -1,56 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"

#include "Arduino.h"

// Flash the blue LED after each inference
void RespondToDetection(tflite::ErrorReporter* error_reporter,
                        int8_t person_score, int8_t no_person_score) {
  static bool is_initialized = false;
  if (!is_initialized) {
    // Pins for the built-in RGB LEDs on the Arduino Nano 33 BLE Sense
    pinMode(LEDR, OUTPUT);
    pinMode(LEDG, OUTPUT);
    pinMode(LEDB, OUTPUT);
    is_initialized = true;
  }

  // Note: The RGB LEDs on the Arduino Nano 33 BLE
  // Sense are on when the pin is LOW, off when HIGH.

  // Switch the person/not person LEDs off
  digitalWrite(LEDG, HIGH);
  digitalWrite(LEDR, HIGH);

  // Flash the blue LED after every inference.
  digitalWrite(LEDB, LOW);
  delay(100);
  digitalWrite(LEDB, HIGH);

  // Switch on the green LED when a person is detected,
  // the red when no person is detected
  if (person_score > no_person_score) {
    digitalWrite(LEDG, LOW);
    digitalWrite(LEDR, HIGH);
  } else {
    digitalWrite(LEDG, HIGH);
    digitalWrite(LEDR, LOW);
  }

  TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
                       person_score, no_person_score);
}
@@ -1,266 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"

/*
 * The sample requires the following third-party libraries to be installed and
 * configured:
 *
 * Arducam
 * -------
 * 1. Download https://github.com/ArduCAM/Arduino and copy its `ArduCAM`
 *    subdirectory into `Arduino/libraries`. Commit #e216049 has been tested
 *    with this code.
 * 2. Edit `Arduino/libraries/ArduCAM/memorysaver.h` and ensure that
 *    "#define OV2640_MINI_2MP_PLUS" is not commented out. Ensure all other
 *    defines in the same section are commented out.
 *
 * JPEGDecoder
 * -----------
 * 1. Install "JPEGDecoder" 1.8.0 from the Arduino library manager.
 * 2. Edit "Arduino/Libraries/JPEGDecoder/src/User_Config.h" and comment out
 *    "#define LOAD_SD_LIBRARY" and "#define LOAD_SDFAT_LIBRARY".
 */

// Required by Arducam library
#include <SPI.h>
#include <Wire.h>
#include <memorysaver.h>
// Arducam library
#include <ArduCAM.h>
// JPEGDecoder library
#include <JPEGDecoder.h>

// Checks that the Arducam library has been correctly configured
#if !(defined OV2640_MINI_2MP_PLUS)
#error Please select the hardware platform and camera module in the Arduino/libraries/ArduCAM/memorysaver.h
#endif

// The size of our temporary buffer for holding
// JPEG data received from the Arducam module
#define MAX_JPEG_BYTES 4096
// The pin connected to the Arducam Chip Select
#define CS 7

// Camera library instance
ArduCAM myCAM(OV2640, CS);
// Temporary buffer for holding JPEG data from camera
uint8_t jpeg_buffer[MAX_JPEG_BYTES] = {0};
// Length of the JPEG data currently in the buffer
uint32_t jpeg_length = 0;

// Get the camera module ready
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
  TF_LITE_REPORT_ERROR(error_reporter, "Attempting to start Arducam");
  // Enable the Wire library
  Wire.begin();
  // Configure the CS pin
  pinMode(CS, OUTPUT);
  digitalWrite(CS, HIGH);
  // initialize SPI
  SPI.begin();
  // Reset the CPLD
  myCAM.write_reg(0x07, 0x80);
  delay(100);
  myCAM.write_reg(0x07, 0x00);
  delay(100);
  // Test whether we can communicate with Arducam via SPI
  myCAM.write_reg(ARDUCHIP_TEST1, 0x55);
  uint8_t test;
  test = myCAM.read_reg(ARDUCHIP_TEST1);
  if (test != 0x55) {
    TF_LITE_REPORT_ERROR(error_reporter, "Can't communicate with Arducam");
    delay(1000);
    return kTfLiteError;
  }
  // Use JPEG capture mode, since it allows us to specify
  // a resolution smaller than the full sensor frame
  myCAM.set_format(JPEG);
  myCAM.InitCAM();
  // Specify the smallest possible resolution
  myCAM.OV2640_set_JPEG_size(OV2640_160x120);
  delay(100);
  return kTfLiteOk;
}

// Begin the capture and wait for it to finish
TfLiteStatus PerformCapture(tflite::ErrorReporter* error_reporter) {
  TF_LITE_REPORT_ERROR(error_reporter, "Starting capture");
  // Make sure the buffer is emptied before each capture
  myCAM.flush_fifo();
  myCAM.clear_fifo_flag();
  // Start capture
  myCAM.start_capture();
  // Wait for indication that it is done
  while (!myCAM.get_bit(ARDUCHIP_TRIG, CAP_DONE_MASK)) {
  }
  TF_LITE_REPORT_ERROR(error_reporter, "Image captured");
  delay(50);
  // Clear the capture done flag
  myCAM.clear_fifo_flag();
  return kTfLiteOk;
}

// Read data from the camera module into a local buffer
TfLiteStatus ReadData(tflite::ErrorReporter* error_reporter) {
  // This represents the total length of the JPEG data
  jpeg_length = myCAM.read_fifo_length();
  TF_LITE_REPORT_ERROR(error_reporter, "Reading %d bytes from Arducam",
                       jpeg_length);
  // Ensure there's not too much data for our buffer
  if (jpeg_length > MAX_JPEG_BYTES) {
    TF_LITE_REPORT_ERROR(error_reporter, "Too many bytes in FIFO buffer (%d)",
                         MAX_JPEG_BYTES);
    return kTfLiteError;
  }
  if (jpeg_length == 0) {
    TF_LITE_REPORT_ERROR(error_reporter, "No data in Arducam FIFO buffer");
    return kTfLiteError;
  }
  myCAM.CS_LOW();
  myCAM.set_fifo_burst();
  for (int index = 0; index < jpeg_length; index++) {
    jpeg_buffer[index] = SPI.transfer(0x00);
  }
  delayMicroseconds(15);
  TF_LITE_REPORT_ERROR(error_reporter, "Finished reading");
  myCAM.CS_HIGH();
  return kTfLiteOk;
}

// Decode the JPEG image, crop it, and convert it to greyscale
TfLiteStatus DecodeAndProcessImage(tflite::ErrorReporter* error_reporter,
                                   int image_width, int image_height,
                                   int8_t* image_data) {
  TF_LITE_REPORT_ERROR(error_reporter,
                       "Decoding JPEG and converting to greyscale");
  // Parse the JPEG headers. The image will be decoded as a sequence of Minimum
  // Coded Units (MCUs), which are 16x8 blocks of pixels.
  JpegDec.decodeArray(jpeg_buffer, jpeg_length);

  // Crop the image by keeping a certain number of MCUs in each dimension
  const int keep_x_mcus = image_width / JpegDec.MCUWidth;
  const int keep_y_mcus = image_height / JpegDec.MCUHeight;

  // Calculate how many MCUs we will throw away on the x axis
  const int skip_x_mcus = JpegDec.MCUSPerRow - keep_x_mcus;
  // Roughly center the crop by skipping half the throwaway MCUs at the
  // beginning of each row
  const int skip_start_x_mcus = skip_x_mcus / 2;
  // Index where we will start throwing away MCUs after the data
  const int skip_end_x_mcu_index = skip_start_x_mcus + keep_x_mcus;
  // Same approach for the columns
  const int skip_y_mcus = JpegDec.MCUSPerCol - keep_y_mcus;
  const int skip_start_y_mcus = skip_y_mcus / 2;
  const int skip_end_y_mcu_index = skip_start_y_mcus + keep_y_mcus;

  // Pointer to the current pixel
  uint16_t* pImg;
  // Color of the current pixel
  uint16_t color;

  // Loop over the MCUs
  while (JpegDec.read()) {
    // Skip over the initial set of rows
    if (JpegDec.MCUy < skip_start_y_mcus) {
      continue;
    }
    // Skip if we're on a column that we don't want
    if (JpegDec.MCUx < skip_start_x_mcus ||
        JpegDec.MCUx >= skip_end_x_mcu_index) {
      continue;
    }
    // Skip if we've got all the rows we want
    if (JpegDec.MCUy >= skip_end_y_mcu_index) {
      continue;
    }
    // Pointer to the current pixel
    pImg = JpegDec.pImage;

    // The x and y indexes of the current MCU, ignoring the MCUs we skip
    int relative_mcu_x = JpegDec.MCUx - skip_start_x_mcus;
    int relative_mcu_y = JpegDec.MCUy - skip_start_y_mcus;

    // The coordinates of the top left of this MCU when applied to the output
    // image
    int x_origin = relative_mcu_x * JpegDec.MCUWidth;
    int y_origin = relative_mcu_y * JpegDec.MCUHeight;

    // Loop through the MCU's rows and columns
    for (int mcu_row = 0; mcu_row < JpegDec.MCUHeight; mcu_row++) {
      // The y coordinate of this pixel in the output index
      int current_y = y_origin + mcu_row;
      for (int mcu_col = 0; mcu_col < JpegDec.MCUWidth; mcu_col++) {
        // Read the color of the pixel as 16-bit integer
        color = *pImg++;
        // Extract the color values (5 red bits, 6 green, 5 blue)
        uint8_t r, g, b;
        r = ((color & 0xF800) >> 11) * 8;
        g = ((color & 0x07E0) >> 5) * 4;
        b = ((color & 0x001F) >> 0) * 8;
        // Convert to grayscale by calculating luminance
        // See https://en.wikipedia.org/wiki/Grayscale for magic numbers
        float gray_value = (0.2126 * r) + (0.7152 * g) + (0.0722 * b);

        // Convert to signed 8-bit integer by subtracting 128.
        gray_value -= 128;

        // The x coordinate of this pixel in the output image
        int current_x = x_origin + mcu_col;
        // The index of this pixel in our flat output buffer
        int index = (current_y * image_width) + current_x;
        image_data[index] = static_cast<int8_t>(gray_value);
      }
    }
  }
  TF_LITE_REPORT_ERROR(error_reporter, "Image decoded and processed");
  return kTfLiteOk;
}

// Get an image from the camera module
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data) {
  static bool g_is_camera_initialized = false;
  if (!g_is_camera_initialized) {
    TfLiteStatus init_status = InitCamera(error_reporter);
    if (init_status != kTfLiteOk) {
      TF_LITE_REPORT_ERROR(error_reporter, "InitCamera failed");
      return init_status;
    }
    g_is_camera_initialized = true;
  }

  TfLiteStatus capture_status = PerformCapture(error_reporter);
  if (capture_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "PerformCapture failed");
    return capture_status;
  }

  TfLiteStatus read_data_status = ReadData(error_reporter);
  if (read_data_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "ReadData failed");
    return read_data_status;
  }

  TfLiteStatus decode_status = DecodeAndProcessImage(
      error_reporter, image_width, image_height, image_data);
  if (decode_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "DecodeAndProcessImage failed");
    return decode_status;
  }

  return kTfLiteOk;
}
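The decode loop above performs the model-facing pixel conversion inline. Below is a minimal standalone sketch of the same arithmetic, useful for checking the math on a host machine; the function name and test values are illustrative, not part of the example code.

#include <cstdint>
#include <cstdio>

// Expand the RGB565 channels to 8 bits, take the BT.709 luminance used by
// the example, then shift [0, 255] down to the int8 range [-128, 127].
int8_t Rgb565ToInt8Gray(uint16_t color) {
  uint8_t r = ((color & 0xF800) >> 11) * 8;  // 5 red bits
  uint8_t g = ((color & 0x07E0) >> 5) * 4;   // 6 green bits
  uint8_t b = ((color & 0x001F) >> 0) * 8;   // 5 blue bits
  float gray = (0.2126f * r) + (0.7152f * g) + (0.0722f * b);
  return static_cast<int8_t>(gray - 128);
}

int main() {
  printf("white -> %d\n", Rgb565ToInt8Gray(0xFFFF));  // near +127
  printf("black -> %d\n", Rgb565ToInt8Gray(0x0000));  // -128
  return 0;
}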
@@ -1,20 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h"

// Arduino automatically calls the setup() and loop() functions in a sketch, so
// where other systems need their own main routine in this file, it can be left
// empty.
@@ -1,25 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"

// This dummy implementation writes person and no person scores to the error
// console. Real applications will want to take some custom action instead, and
// should implement their own versions of this function.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
                        int8_t person_score, int8_t no_person_score) {
  TF_LITE_REPORT_ERROR(error_reporter, "person score:%d no person score %d",
                       person_score, no_person_score);
}
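As the comment above says, real applications should supply their own RespondToDetection. A hedged sketch of one possible override that drives a GPIO instead of logging; kAlarmPin and SetPinLevel are hypothetical stand-ins for whatever pin facility the target platform provides, not part of the example code.

constexpr int kAlarmPin = 5;                  // hypothetical output pin
extern void SetPinLevel(int pin, bool high);  // platform-specific, assumed

void RespondToDetection(tflite::ErrorReporter* error_reporter,
                        int8_t person_score, int8_t no_person_score) {
  // Raise the alarm pin whenever the person score wins the comparison.
  SetPinLevel(kAlarmPin, person_score > no_person_score);
}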
@@ -1,34 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Provides an interface to take an action based on the output from the person
// detection model.

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

// Called every time the results of a person detection run are available. The
// `person_score` has the numerical confidence that the captured image contains
// a person, and `no_person_score` has the numerical confidence that the image
// does not contain a person. Typically, if person_score > no_person_score, the
// image is considered to contain a person. This threshold may be adjusted for
// particular applications.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
                        int8_t person_score, int8_t no_person_score);

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_DETECTION_RESPONDER_H_
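The header notes that the detection threshold can be adjusted per application. A minimal sketch of one way to do that, requiring the person score to beat the no-person score by a margin before reporting a detection; kDetectionMargin is an illustrative tuning knob, not part of the example code.

#include <cstdint>

constexpr int kDetectionMargin = 20;  // illustrative, tune per application

bool IsPersonDetected(int8_t person_score, int8_t no_person_score) {
  // Subtract in int so widely separated int8 scores cannot wrap around.
  return (static_cast<int>(person_score) - no_person_score) > kDetectionMargin;
}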
@@ -1,32 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"

#include "tensorflow/lite/micro/testing/micro_test.h"

TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(TestCallability) {
  tflite::MicroErrorReporter micro_error_reporter;

  // This will have external side-effects (like printing to the debug console
  // or lighting an LED) that are hard to observe, so the most we can do is
  // make sure the call doesn't crash.
  RespondToDetection(&micro_error_reporter, -100, 100);
  RespondToDetection(&micro_error_reporter, 100, 50);
}

TF_LITE_MICRO_TESTS_END
@@ -1,719 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "HM01B0.h"

#include "HM01B0_Walking1s_01.h"
#include "am_bsp.h"
#include "am_mcu_apollo.h"
#include "am_util.h"
#include "platform_Sparkfun_Edge.h"

//#define ENABLE_ASYNC

const am_hal_gpio_pincfg_t g_HM01B0_pin_vsync = {
    .uFuncSel = 3,
    .eGPOutcfg = AM_HAL_GPIO_PIN_OUTCFG_DISABLE,
#ifdef ENABLE_ASYNC
    .eIntDir = AM_HAL_GPIO_PIN_INTDIR_BOTH,
#endif
    .eGPInput = AM_HAL_GPIO_PIN_INPUT_ENABLE,
    .eGPRdZero = AM_HAL_GPIO_PIN_RDZERO_READPIN};

const am_hal_gpio_pincfg_t g_HM01B0_pin_int = {
    .uFuncSel = 3,
    .eGPOutcfg = AM_HAL_GPIO_PIN_OUTCFG_DISABLE,
    .eIntDir = AM_HAL_GPIO_PIN_INTDIR_LO2HI,
    .eGPInput = AM_HAL_GPIO_PIN_INPUT_ENABLE,
    .eGPRdZero = AM_HAL_GPIO_PIN_RDZERO_READPIN};

#ifdef ENABLE_ASYNC
static bool s_bVsyncAsserted = false;

//*****************************************************************************
//
// GPIO ISR
//
//*****************************************************************************
static void hm01b0_gpio_isr(void) {
  //
  // Clear the GPIO Interrupt (write to clear).
  //
  am_hal_gpio_interrupt_clear(1 << HM01B0_PIN_VSYNC);

  if (read_vsync()) {
    s_bVsyncAsserted = true;
  } else {
    s_bVsyncAsserted = false;
  }
}
#endif

//*****************************************************************************
//
//! @brief Write HM01B0 registers
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param ui16Reg         - Register address.
//! @param pui8Value       - Pointer to the data to be written.
//! @param ui32NumBytes    - Length of the data in bytes to be written.
//!
//! This function writes value to HM01B0 registers.
//!
//! @return Error code.
//
//*****************************************************************************
static uint32_t hm01b0_write_reg(hm01b0_cfg_t* psCfg, uint16_t ui16Reg,
                                 uint8_t* pui8Value, uint32_t ui32NumBytes) {
  am_hal_iom_transfer_t Transaction;

  //
  // Create the transaction.
  //
  Transaction.ui32InstrLen = sizeof(uint16_t);
  Transaction.ui32Instr = (ui16Reg & 0x0000FFFF);
  Transaction.eDirection = AM_HAL_IOM_TX;
  Transaction.ui32NumBytes = ui32NumBytes;
  Transaction.pui32TxBuffer = (uint32_t*)pui8Value;
  Transaction.uPeerInfo.ui32I2CDevAddr = (uint32_t)psCfg->ui16SlvAddr;
  Transaction.bContinue = false;
  Transaction.ui8RepeatCount = 0;
  Transaction.ui32PauseCondition = 0;
  Transaction.ui32StatusSetClr = 0;

  //
  // Execute the transaction over IOM.
  //
  if (am_hal_iom_blocking_transfer(psCfg->pIOMHandle, &Transaction)) {
    return HM01B0_ERR_I2C;
  }

  return HM01B0_ERR_OK;
}

//*****************************************************************************
//
//! @brief Read HM01B0 registers
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param ui16Reg         - Register address.
//! @param pui8Value       - Pointer to the buffer for read data to be put
//!                          into.
//! @param ui32NumBytes    - Length of the data to be read.
//!
//! This function reads value from HM01B0 registers.
//!
//! @return Error code.
//
//*****************************************************************************
static uint32_t hm01b0_read_reg(hm01b0_cfg_t* psCfg, uint16_t ui16Reg,
                                uint8_t* pui8Value, uint32_t ui32NumBytes) {
  am_hal_iom_transfer_t Transaction;

  //
  // Create the transaction.
  //
  Transaction.ui32InstrLen = sizeof(uint16_t);
  Transaction.ui32Instr = (ui16Reg & 0x0000FFFF);
  Transaction.eDirection = AM_HAL_IOM_RX;
  Transaction.ui32NumBytes = ui32NumBytes;
  Transaction.pui32RxBuffer = (uint32_t*)pui8Value;
  Transaction.uPeerInfo.ui32I2CDevAddr = (uint32_t)psCfg->ui16SlvAddr;
  Transaction.bContinue = false;
  Transaction.ui8RepeatCount = 0;
  Transaction.ui32PauseCondition = 0;
  Transaction.ui32StatusSetClr = 0;

  //
  // Execute the transaction over IOM.
  //
  if (am_hal_iom_blocking_transfer(psCfg->pIOMHandle, &Transaction)) {
    return HM01B0_ERR_I2C;
  }

  return HM01B0_ERR_OK;
}

//*****************************************************************************
//
//! @brief Load a given script to HM01B0
//!
//! @param psCfg              - Pointer to HM01B0 configuration structure.
//! @param psScript           - Pointer to the script to be loaded.
//! @param ui32ScriptCmdNum   - Number of entries in a given script.
//!
//! This function loads a given script to HM01B0.
//!
//! @return Error code.
//
//*****************************************************************************
static uint32_t hm01b0_load_script(hm01b0_cfg_t* psCfg, hm_script_t* psScript,
                                   uint32_t ui32ScriptCmdNum) {
  uint32_t ui32Err = HM01B0_ERR_OK;
  for (uint32_t idx = 0; idx < ui32ScriptCmdNum; idx++) {
    ui32Err = hm01b0_write_reg(psCfg, (psScript + idx)->ui16Reg,
                               &((psScript + idx)->ui8Val), sizeof(uint8_t));
    if (ui32Err != HM01B0_ERR_OK) {
      break;
    }
  }

  return ui32Err;
}

//*****************************************************************************
//
//! @brief Power up HM01B0
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function powers up HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_power_up(hm01b0_cfg_t* psCfg) {
  // place holder
}

//*****************************************************************************
//
//! @brief Power down HM01B0
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function powers down HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_power_down(hm01b0_cfg_t* psCfg) {
  // place holder
}

//*****************************************************************************
//
//! @brief Enable MCLK
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function utilizes CTimer to generate MCLK for HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_mclk_enable(hm01b0_cfg_t* psCfg) {
#define MCLK_UI64PATTERN 0x55555555
#define MCLK_UI64PATTERNLEN 31

  am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);

  //
  // Set up timer.
  //
  am_hal_ctimer_clear(psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment);

  am_hal_ctimer_config_single(
      psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment,
      (AM_HAL_CTIMER_FN_PTN_REPEAT | AM_HAL_CTIMER_HFRC_12MHZ));

  //
  // Set the pattern in the CMPR registers.
  //
  am_hal_ctimer_compare_set(psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment,
                            0, (uint32_t)(MCLK_UI64PATTERN & 0xFFFF));
  am_hal_ctimer_compare_set(psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment,
                            1, (uint32_t)((MCLK_UI64PATTERN >> 16) & 0xFFFF));

  //
  // Set the timer trigger and pattern length.
  //
  am_hal_ctimer_config_trigger(
      psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment,
      ((MCLK_UI64PATTERNLEN << CTIMER_AUX0_TMRA0LMT_Pos) |
       (CTIMER_AUX0_TMRB0TRIG_DIS << CTIMER_AUX0_TMRA0TRIG_Pos)));

  //
  // Configure timer output pin.
  //
  am_hal_ctimer_output_config(psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment,
                              psCfg->ui32CTimerOutputPin,
                              AM_HAL_CTIMER_OUTPUT_NORMAL,
                              AM_HAL_GPIO_PIN_DRIVESTRENGTH_12MA);

  //
  // Start the timer.
  //
  am_hal_ctimer_start(psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment);
}

//*****************************************************************************
//
//! @brief Disable MCLK
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function disables the CTimer to stop MCLK for HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_mclk_disable(hm01b0_cfg_t* psCfg) {
  //
  // Stop the timer.
  //
  am_hal_ctimer_stop(psCfg->ui32CTimerModule, psCfg->ui32CTimerSegment);
  am_hal_gpio_pinconfig(psCfg->ui32CTimerOutputPin, g_AM_HAL_GPIO_DISABLE);
}

//*****************************************************************************
//
//! @brief Initialize interfaces
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function initializes interfaces.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_init_if(hm01b0_cfg_t* psCfg) {
  void* pIOMHandle = NULL;

  if (psCfg->ui32IOMModule > AM_REG_IOM_NUM_MODULES) {
    return HM01B0_ERR_I2C;
  }

  //
  // Enable fault detection.
  //
#if AM_APOLLO3_MCUCTRL
  am_hal_mcuctrl_control(AM_HAL_MCUCTRL_CONTROL_FAULT_CAPTURE_ENABLE, 0);
#else   // AM_APOLLO3_MCUCTRL
  am_hal_mcuctrl_fault_capture_enable();
#endif  // AM_APOLLO3_MCUCTRL

  //
  // Initialize the IOM instance.
  // Enable power to the IOM instance.
  // Configure the IOM for Serial operation during initialization.
  // Enable the IOM.
  //
  if (am_hal_iom_initialize(psCfg->ui32IOMModule, &pIOMHandle) ||
      am_hal_iom_power_ctrl(pIOMHandle, AM_HAL_SYSCTRL_WAKE, false) ||
      am_hal_iom_configure(pIOMHandle, &(psCfg->sIOMCfg)) ||
      am_hal_iom_enable(pIOMHandle)) {
    return HM01B0_ERR_I2C;
  } else {
    //
    // Configure the IOM pins.
    //
    am_bsp_iom_pins_enable(psCfg->ui32IOMModule, psCfg->eIOMMode);

    psCfg->pIOMHandle = pIOMHandle;
  }

  // initialize pins for camera parallel interface.
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD0);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD1);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD2);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD3);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD4);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD5);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD6);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD7);

  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD0);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD1);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD2);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD3);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD4);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD5);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD6);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD7);

  am_hal_gpio_fast_pinconfig(
      (uint64_t)0x1 << psCfg->ui8PinD0 | (uint64_t)0x1 << psCfg->ui8PinD1 |
          (uint64_t)0x1 << psCfg->ui8PinD2 | (uint64_t)0x1 << psCfg->ui8PinD3 |
          (uint64_t)0x1 << psCfg->ui8PinD4 | (uint64_t)0x1 << psCfg->ui8PinD5 |
          (uint64_t)0x1 << psCfg->ui8PinD6 | (uint64_t)0x1 << psCfg->ui8PinD7,
      g_AM_HAL_GPIO_INPUT, 0);

  am_hal_gpio_pinconfig(psCfg->ui8PinVSYNC, g_HM01B0_pin_vsync);
#ifdef ENABLE_ASYNC
  psCfg->pfnGpioIsr = hm01b0_gpio_isr;
  am_hal_gpio_interrupt_clear(AM_HAL_GPIO_BIT(psCfg->ui8PinVSYNC));
  am_hal_gpio_interrupt_enable(AM_HAL_GPIO_BIT(psCfg->ui8PinVSYNC));
  NVIC_EnableIRQ(GPIO_IRQn);
#endif
  am_hal_gpio_pinconfig(psCfg->ui8PinHSYNC, g_AM_HAL_GPIO_INPUT);
  am_hal_gpio_pinconfig(psCfg->ui8PinPCLK, g_AM_HAL_GPIO_INPUT);

  am_hal_gpio_pinconfig(psCfg->ui8PinTrig, g_AM_HAL_GPIO_OUTPUT);

  am_hal_gpio_pinconfig(psCfg->ui8PinInt, g_AM_HAL_GPIO_DISABLE);
  // am_hal_gpio_pinconfig(psCfg->ui8PinInt, g_HM01B0_pin_int);
  // am_hal_gpio_interrupt_clear(AM_HAL_GPIO_BIT(psCfg->ui8PinInt));
  // am_hal_gpio_interrupt_enable(AM_HAL_GPIO_BIT(psCfg->ui8PinInt));
  // NVIC_EnableIRQ(GPIO_IRQn);

  return HM01B0_ERR_OK;
}

//*****************************************************************************
//
//! @brief Deinitialize interfaces
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function deinitializes interfaces.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_deinit_if(hm01b0_cfg_t* psCfg) {
  am_hal_iom_disable(psCfg->pIOMHandle);
  am_hal_iom_uninitialize(psCfg->pIOMHandle);

  am_hal_gpio_pinconfig(psCfg->ui8PinSCL, g_AM_HAL_GPIO_DISABLE);
  am_hal_gpio_pinconfig(psCfg->ui8PinSDA, g_AM_HAL_GPIO_DISABLE);

  // initialize pins for camera parallel interface.
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD0);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD1);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD2);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD3);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD4);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD5);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD6);
  am_hal_gpio_fastgpio_disable(psCfg->ui8PinD7);

  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD0);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD1);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD2);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD3);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD4);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD5);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD6);
  am_hal_gpio_fastgpio_clr(psCfg->ui8PinD7);

  am_hal_gpio_pinconfig(psCfg->ui8PinVSYNC, g_AM_HAL_GPIO_DISABLE);
#ifdef ENABLE_ASYNC
  NVIC_DisableIRQ(GPIO_IRQn);
  am_hal_gpio_interrupt_disable(AM_HAL_GPIO_BIT(psCfg->ui8PinVSYNC));
  am_hal_gpio_interrupt_clear(AM_HAL_GPIO_BIT(psCfg->ui8PinVSYNC));
  psCfg->pfnGpioIsr = NULL;
#endif
  am_hal_gpio_pinconfig(psCfg->ui8PinHSYNC, g_AM_HAL_GPIO_DISABLE);
  am_hal_gpio_pinconfig(psCfg->ui8PinPCLK, g_AM_HAL_GPIO_DISABLE);

  am_hal_gpio_pinconfig(psCfg->ui8PinTrig, g_AM_HAL_GPIO_DISABLE);
  am_hal_gpio_pinconfig(psCfg->ui8PinInt, g_AM_HAL_GPIO_DISABLE);

  return HM01B0_ERR_OK;
}

//*****************************************************************************
//
//! @brief Get HM01B0 Model ID
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param pui16MID        - Pointer to buffer for the read back model ID.
//!
//! This function reads back HM01B0 model ID.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_get_modelid(hm01b0_cfg_t* psCfg, uint16_t* pui16MID) {
  uint8_t ui8Data[1];
  uint32_t ui32Err;

  *pui16MID = 0x0000;

  ui32Err =
      hm01b0_read_reg(psCfg, HM01B0_REG_MODEL_ID_H, ui8Data, sizeof(ui8Data));
  if (ui32Err == HM01B0_ERR_OK) {
    *pui16MID |= (ui8Data[0] << 8);
  }

  ui32Err =
      hm01b0_read_reg(psCfg, HM01B0_REG_MODEL_ID_L, ui8Data, sizeof(ui8Data));
  if (ui32Err == HM01B0_ERR_OK) {
    *pui16MID |= ui8Data[0];
  }

  return ui32Err;
}

//*****************************************************************************
//
//! @brief Initialize HM01B0
//!
//! @param psCfg              - Pointer to HM01B0 configuration structure.
//! @param psScript           - Pointer to HM01B0 initialization script.
//! @param ui32ScriptCmdNum   - No. of commands in HM01B0 initialization
//!                             script.
//!
//! This function initializes HM01B0 with a given script.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_init_system(hm01b0_cfg_t* psCfg, hm_script_t* psScript,
                            uint32_t ui32ScriptCmdNum) {
  return hm01b0_load_script(psCfg, psScript, ui32ScriptCmdNum);
}

//*****************************************************************************
//
//! @brief Set HM01B0 in the walking 1s test mode
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function sets HM01B0 in the walking 1s test mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_test_walking1s(hm01b0_cfg_t* psCfg) {
  uint32_t ui32ScriptCmdNum =
      sizeof(sHM01b0TestModeScript_Walking1s) / sizeof(hm_script_t);
  hm_script_t* psScript = (hm_script_t*)sHM01b0TestModeScript_Walking1s;

  return hm01b0_load_script(psCfg, psScript, ui32ScriptCmdNum);
}

//*****************************************************************************
//
//! @brief Software reset HM01B0
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function resets HM01B0 by issuing a reset command.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_reset_sw(hm01b0_cfg_t* psCfg) {
  uint8_t ui8Data[1] = {0x00};
  return hm01b0_write_reg(psCfg, HM01B0_REG_SW_RESET, ui8Data, sizeof(ui8Data));
}

//*****************************************************************************
//
//! @brief Get current HM01B0 operation mode.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param pui8Mode        - Pointer to buffer for the read-back operation
//!                          mode.
//!
//! This function gets the HM01B0 operation mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_get_mode(hm01b0_cfg_t* psCfg, uint8_t* pui8Mode) {
  uint8_t ui8Data[1] = {0x01};
  uint32_t ui32Err;

  ui32Err =
      hm01b0_read_reg(psCfg, HM01B0_REG_MODE_SELECT, ui8Data, sizeof(ui8Data));

  *pui8Mode = ui8Data[0];

  return ui32Err;
}

//*****************************************************************************
//
//! @brief Set HM01B0 operation mode.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param ui8Mode         - Operation mode. One of:
//!                          HM01B0_REG_MODE_SELECT_STANDBY
//!                          HM01B0_REG_MODE_SELECT_STREAMING
//!                          HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES
//!                          HM01B0_REG_MODE_SELECT_STREAMING_HW_TRIGGER
//! @param ui8FrameCnt     - Frame count for
//!                          HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES.
//!                          Discarded in other modes.
//!
//! This function sets the HM01B0 operation mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_set_mode(hm01b0_cfg_t* psCfg, uint8_t ui8Mode,
                         uint8_t ui8FrameCnt) {
  uint32_t ui32Err = HM01B0_ERR_OK;

  if (ui8Mode == HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES) {
    ui32Err = hm01b0_write_reg(psCfg, HM01B0_REG_PMU_PROGRAMMABLE_FRAMECNT,
                               &ui8FrameCnt, sizeof(ui8FrameCnt));
  }

  if (ui32Err == HM01B0_ERR_OK) {
    ui32Err = hm01b0_write_reg(psCfg, HM01B0_REG_MODE_SELECT, &ui8Mode,
                               sizeof(ui8Mode));
  }

  return ui32Err;
}

//*****************************************************************************
//
//! @brief Hardware trigger HM01B0 to stream.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param bTrigger        - True to start streaming
//!                        - False to stop streaming
//!
//! This function triggers HM01B0 to stream by toggling the TRIG pin.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_hardware_trigger_streaming(hm01b0_cfg_t* psCfg, bool bTrigger) {
  uint32_t ui32Err = HM01B0_ERR_OK;
  uint8_t ui8Mode;

  ui32Err = hm01b0_get_mode(psCfg, &ui8Mode);

  if (ui32Err != HM01B0_ERR_OK) goto end;

  if (ui8Mode != HM01B0_REG_MODE_SELECT_STREAMING_HW_TRIGGER) {
    ui32Err = HM01B0_ERR_MODE;
    goto end;
  }

  if (bTrigger) {
    am_hal_gpio_output_set(psCfg->ui8PinTrig);
  } else {
    am_hal_gpio_output_clear(psCfg->ui8PinTrig);
  }

end:
  return ui32Err;
}

//*****************************************************************************
//
//! @brief Set HM01B0 mirror mode.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param bHmirror        - Horizontal mirror
//! @param bVmirror        - Vertical mirror
//!
//! This function sets the HM01B0 mirror mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_set_mirror(hm01b0_cfg_t* psCfg, bool bHmirror, bool bVmirror) {
  uint8_t ui8Data = 0x00;
  uint32_t ui32Err = HM01B0_ERR_OK;

  if (bHmirror) {
    ui8Data |= HM01B0_REG_IMAGE_ORIENTATION_HMIRROR;
  }

  if (bVmirror) {
    ui8Data |= HM01B0_REG_IMAGE_ORIENTATION_VMIRROR;
  }

  ui32Err = hm01b0_write_reg(psCfg, HM01B0_REG_IMAGE_ORIENTATION, &ui8Data,
                             sizeof(ui8Data));

  if (ui32Err == HM01B0_ERR_OK) {
    ui8Data = HM01B0_REG_GRP_PARAM_HOLD_HOLD;
    ui32Err = hm01b0_write_reg(psCfg, HM01B0_REG_GRP_PARAM_HOLD, &ui8Data,
                               sizeof(ui8Data));
  }

  return ui32Err;
}

//*****************************************************************************
//
//! @brief Read data of one frame from HM01B0.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param pui8Buffer      - Pointer to the frame buffer.
//! @param ui32BufferLen   - Framebuffer size.
//!
//! This function reads data of one frame from HM01B0.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_blocking_read_oneframe(hm01b0_cfg_t* psCfg, uint8_t* pui8Buffer,
                                       uint32_t ui32BufferLen) {
  uint32_t ui32Err = HM01B0_ERR_OK;
  uint32_t ui32Idx = 0x00;

  am_util_stdio_printf("[%s] +\n", __func__);
#ifdef ENABLE_ASYNC
  while (!s_bVsyncAsserted);

  while (s_bVsyncAsserted) {
    // We don't check HSYNC here, on the assumption that HM01B0 is in gated
    // PCLK mode, in which PCLK toggles only while HSYNC is asserted; this
    // also minimizes the overhead of polling.

    if (read_pclk()) {
      *(pui8Buffer + ui32Idx++) = read_byte();

      if (ui32Idx == ui32BufferLen) {
        goto end;
      }

      while (read_pclk());
    }
  }
#else
  uint32_t ui32HsyncCnt = 0x00;

  while ((ui32HsyncCnt < HM01B0_PIXEL_Y_NUM)) {
    while (0x00 == read_hsync());

    // read one row
    while (read_hsync()) {
      while (0x00 == read_pclk());

      *(pui8Buffer + ui32Idx++) = read_byte();

      if (ui32Idx == ui32BufferLen) {
        goto end;
      }

      while (read_pclk());
    }

    ui32HsyncCnt++;
  }
#endif
end:
  am_util_stdio_printf("[%s] - Byte Counts %d\n", __func__, ui32Idx);
  return ui32Err;
}

uint32_t hm01b0_single_frame_capture(hm01b0_cfg_t* psCfg) {
  // Stage the register values in locals so that valid pointers are passed to
  // hm01b0_write_reg (the original passed integer constants where a uint8_t*
  // is expected), and return a status as the signature requires.
  uint8_t ui8FrameCnt = 0x01;
  uint8_t ui8Mode = HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES;
  uint8_t ui8Hold = HM01B0_REG_GRP_PARAM_HOLD_HOLD;
  hm01b0_write_reg(psCfg, HM01B0_REG_PMU_PROGRAMMABLE_FRAMECNT, &ui8FrameCnt,
                   sizeof(ui8FrameCnt));
  hm01b0_write_reg(psCfg, HM01B0_REG_MODE_SELECT, &ui8Mode, sizeof(ui8Mode));
  hm01b0_write_reg(psCfg, HM01B0_REG_GRP_PARAM_HOLD, &ui8Hold,
                   sizeof(ui8Hold));
  return HM01B0_ERR_OK;
}
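Most of the driver's camera setup funnels through hm01b0_load_script, which replays a table of register/value pairs over I2C. A minimal sketch of what such a table looks like and how it is replayed; the two entries below are illustrative values only, not a real initialization sequence:

static hm_script_t sExampleScript[] = {
    {HM01B0_REG_MODE_SELECT, HM01B0_REG_MODE_SELECT_STANDBY},
    {HM01B0_REG_IMAGE_ORIENTATION, HM01B0_REG_IMAGE_ORIENTATION_DEFAULT},
};

// Replay the table against a configured camera instance `cfg`:
// uint32_t err = hm01b0_load_script(
//     &cfg, sExampleScript, sizeof(sExampleScript) / sizeof(hm_script_t));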
@@ -1,402 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_H_

#ifdef __cplusplus
extern "C" {
#endif
#include "am_bsp.h"         // NOLINT
#include "am_mcu_apollo.h"  // NOLINT
#include "am_util.h"        // NOLINT

#define HM01B0_DRV_VERSION (0)
#define HM01B0_DRV_SUBVERSION (3)

#define HM01B0_DEFAULT_ADDRESS (0x24)

#define HM01B0_PIXEL_X_NUM (324)
#define HM01B0_PIXEL_Y_NUM (244)

#define HM01B0_REG_MODEL_ID_H (0x0000)
#define HM01B0_REG_MODEL_ID_L (0x0001)
#define HM01B0_REG_SILICON_REV (0x0002)
#define HM01B0_REG_FRAME_COUNT (0x0005)
#define HM01B0_REG_PIXEL_ORDER (0x0006)

#define HM01B0_REG_MODE_SELECT (0x0100)
#define HM01B0_REG_IMAGE_ORIENTATION (0x0101)
#define HM01B0_REG_SW_RESET (0x0103)
#define HM01B0_REG_GRP_PARAM_HOLD (0x0104)

#define HM01B0_REG_I2C_ID_SEL (0x3400)
#define HM01B0_REG_I2C_ID_REG (0x3401)

#define HM01B0_REG_PMU_PROGRAMMABLE_FRAMECNT (0x3020)

// #define HM01B0_REG_MODE_SELECT (0x0100)
#define HM01B0_REG_MODE_SELECT_STANDBY (0x00)
#define HM01B0_REG_MODE_SELECT_STREAMING (0x01)
#define HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES (0x03)
#define HM01B0_REG_MODE_SELECT_STREAMING_HW_TRIGGER (0x05)

// #define HM01B0_REG_IMAGE_ORIENTATION (0x0101)
#define HM01B0_REG_IMAGE_ORIENTATION_DEFAULT (0x00)
#define HM01B0_REG_IMAGE_ORIENTATION_HMIRROR (0x01)
#define HM01B0_REG_IMAGE_ORIENTATION_VMIRROR (0x02)
#define HM01B0_REG_IMAGE_ORIENTATION_HVMIRROR \
  (HM01B0_REG_IMAGE_ORIENTATION_HMIRROR | HM01B0_REG_IMAGE_ORIENTATION_VMIRROR)

// #define HM01B0_REG_GRP_PARAM_HOLD (0x0104)
#define HM01B0_REG_GRP_PARAM_HOLD_CONSUME (0x00)
#define HM01B0_REG_GRP_PARAM_HOLD_HOLD (0x01)

// Helpers for reading raw values from the camera.
#define read_vsync() \
  (AM_REGVAL(AM_REGADDR(GPIO, RDA)) & (1 << HM01B0_PIN_VSYNC))
#define read_hsync() \
  (AM_REGVAL(AM_REGADDR(GPIO, RDA)) & (1 << HM01B0_PIN_HSYNC))
#define read_pclk() (AM_REGVAL(AM_REGADDR(GPIO, RDA)) & (1 << HM01B0_PIN_PCLK))
#define read_byte() (APBDMA->BBINPUT)

enum {
  HM01B0_ERR_OK = 0x00,
  HM01B0_ERR_I2C,
  HM01B0_ERR_MODE,
};

typedef struct {
  uint16_t ui16Reg;
  uint8_t ui8Val;
} hm_script_t;

typedef struct {
  uint16_t ui16SlvAddr;
  am_hal_iom_mode_e eIOMMode;
  uint32_t ui32IOMModule;
  am_hal_iom_config_t sIOMCfg;
  void *pIOMHandle;

  uint32_t ui32CTimerModule;
  uint32_t ui32CTimerSegment;
  uint32_t ui32CTimerOutputPin;

  uint8_t ui8PinSCL;
  uint8_t ui8PinSDA;
  uint8_t ui8PinD0;
  uint8_t ui8PinD1;
  uint8_t ui8PinD2;
  uint8_t ui8PinD3;
  uint8_t ui8PinD4;
  uint8_t ui8PinD5;
  uint8_t ui8PinD6;
  uint8_t ui8PinD7;
  uint8_t ui8PinVSYNC;
  uint8_t ui8PinHSYNC;
  uint8_t ui8PinPCLK;

  uint8_t ui8PinTrig;
  uint8_t ui8PinInt;
  void (*pfnGpioIsr)(void);
} hm01b0_cfg_t;

//*****************************************************************************
//
//! @brief Write HM01B0 registers
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param ui16Reg         - Register address.
//! @param pui8Value       - Pointer to the data to be written.
//! @param ui32NumBytes    - Length of the data in bytes to be written.
//!
//! This function writes value to HM01B0 registers.
//!
//! @return Error code.
//
//*****************************************************************************
static uint32_t hm01b0_write_reg(hm01b0_cfg_t *psCfg, uint16_t ui16Reg,
                                 uint8_t *pui8Value, uint32_t ui32NumBytes);

//*****************************************************************************
//
//! @brief Read HM01B0 registers
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param ui16Reg         - Register address.
//! @param pui8Value       - Pointer to the buffer for read data to be put
//!                          into.
//! @param ui32NumBytes    - Length of the data to be read.
//!
//! This function reads value from HM01B0 registers.
//!
//! @return Error code.
//
//*****************************************************************************
static uint32_t hm01b0_read_reg(hm01b0_cfg_t *psCfg, uint16_t ui16Reg,
                                uint8_t *pui8Value, uint32_t ui32NumBytes);

//*****************************************************************************
//
//! @brief Load a given script to HM01B0
//!
//! @param psCfg              - Pointer to HM01B0 configuration structure.
//! @param psScript           - Pointer to the script to be loaded.
//! @param ui32ScriptCmdNum   - Number of entries in a given script.
//!
//! This function loads a given script to HM01B0.
//!
//! @return Error code.
//
//*****************************************************************************
static uint32_t hm01b0_load_script(hm01b0_cfg_t *psCfg, hm_script_t *psScript,
                                   uint32_t ui32ScriptCmdNum);

//*****************************************************************************
//
//! @brief Power up HM01B0
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function powers up HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_power_up(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Power down HM01B0
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function powers down HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_power_down(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Enable MCLK
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function utilizes CTimer to generate MCLK for HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_mclk_enable(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Disable MCLK
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function disables the CTimer to stop MCLK for HM01B0.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_mclk_disable(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Initialize interfaces
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function initializes interfaces.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_init_if(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Deinitialize interfaces
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function deinitializes interfaces.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_deinit_if(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Get HM01B0 Model ID
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param pui16MID        - Pointer to buffer for the read back model ID.
//!
//! This function reads back HM01B0 model ID.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_get_modelid(hm01b0_cfg_t *psCfg, uint16_t *pui16MID);

//*****************************************************************************
//
//! @brief Initialize HM01B0
//!
//! @param psCfg              - Pointer to HM01B0 configuration structure.
//! @param psScript           - Pointer to HM01B0 initialization script.
//! @param ui32ScriptCmdNum   - No. of commands in HM01B0 initialization
//!                             script.
//!
//! This function initializes HM01B0 with a given script.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_init_system(hm01b0_cfg_t *psCfg, hm_script_t *psScript,
                            uint32_t ui32ScriptCmdNum);

//*****************************************************************************
//
//! @brief Set HM01B0 in the walking 1s test mode
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function sets HM01B0 in the walking 1s test mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_test_walking1s(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Software reset HM01B0
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//!
//! This function resets HM01B0 by issuing a reset command.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_reset_sw(hm01b0_cfg_t *psCfg);

//*****************************************************************************
//
//! @brief Get current HM01B0 operation mode.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param pui8Mode        - Pointer to buffer for the read-back operation
//!                          mode.
//!
//! This function gets the HM01B0 operation mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_get_mode(hm01b0_cfg_t *psCfg, uint8_t *pui8Mode);

//*****************************************************************************
//
//! @brief Set HM01B0 operation mode.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param ui8Mode         - Operation mode. One of:
//!                          HM01B0_REG_MODE_SELECT_STANDBY
//!                          HM01B0_REG_MODE_SELECT_STREAMING
//!                          HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES
//!                          HM01B0_REG_MODE_SELECT_STREAMING_HW_TRIGGER
//! @param framecnt        - Frame count for
//!                          HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES.
//!                          Discarded in other modes.
//!
//! This function sets the HM01B0 operation mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_set_mode(hm01b0_cfg_t *psCfg, uint8_t ui8Mode,
                         uint8_t framecnt);

//*****************************************************************************
//
//! @brief Hardware trigger HM01B0 to stream.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param bTrigger        - True to start streaming
//!                        - False to stop streaming
//!
//! This function triggers HM01B0 to stream by toggling the TRIG pin.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_hardware_trigger_streaming(hm01b0_cfg_t *psCfg, bool bTrigger);

//*****************************************************************************
//
//! @brief Set HM01B0 mirror mode.
//!
//! @param psCfg           - Pointer to HM01B0 configuration structure.
//! @param bHmirror        - Horizontal mirror
//! @param bVmirror        - Vertical mirror
//!
//! This function sets the HM01B0 mirror mode.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_set_mirror(hm01b0_cfg_t *psCfg, bool bHmirror, bool bVmirror);

//*****************************************************************************
|
|
||||||
//
|
|
||||||
//! @brief Read data of one frame from HM01B0.
|
|
||||||
//!
|
|
||||||
//! @param psCfg - Pointer to HM01B0 configuration structure.
|
|
||||||
//! @param pui8Buffer - Pointer to the frame buffer.
|
|
||||||
//! @param ui32BufferLen - Framebuffer size.
|
|
||||||
//!
|
|
||||||
//! This function read data of one frame from HM01B0.
|
|
||||||
//!
|
|
||||||
//! @return Error code.
|
|
||||||
//
|
|
||||||
//*****************************************************************************
|
|
||||||
uint32_t hm01b0_blocking_read_oneframe(hm01b0_cfg_t *psCfg, uint8_t *pui8Buffer,
|
|
||||||
uint32_t ui32BufferLen);
|
|
||||||
|
|
||||||
//*****************************************************************************
|
|
||||||
//
|
|
||||||
//! @brief Read data of one frame from HM01B0.
|
|
||||||
//!
|
|
||||||
//! @param psCfg - Pointer to HM01B0 configuration structure.
|
|
||||||
//!
|
|
||||||
//! This function wakes up the camera and captures a single frame.
|
|
||||||
//!
|
|
||||||
//! @return Error code.
|
|
||||||
//
|
|
||||||
//*****************************************************************************
|
|
||||||
uint32_t hm01b0_single_frame_capture(hm01b0_cfg_t *psCfg);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_H_
|
|
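The declarations above compose into a simple capture pipeline. The sketch below is a minimal, hypothetical usage flow, assuming a hm01b0_cfg_t filled in with the platform pin and IOM settings (as in platform_Sparkfun_Edge.h) and the sHM01B0InitScript table from HM01B0_RAW8_QVGA_8bits_lsb_5fps.h; it is not the example's actual boot code.

// Hypothetical helper: bring the camera up and grab one QVGA frame.
uint32_t capture_one_frame(hm01b0_cfg_t* cfg, uint8_t* buf, uint32_t len) {
  uint32_t err = hm01b0_init_if(cfg);  // Configure I2C/GPIO interfaces.
  if (err != HM01B0_ERR_OK) return err;

  // Walk the register script to configure RAW8 QVGA output at 5 FPS.
  // hm01b0_init_system takes a non-const hm_script_t*, hence the const_cast.
  err = hm01b0_init_system(cfg, const_cast<hm_script_t*>(sHM01B0InitScript),
                           sizeof(sHM01B0InitScript) / sizeof(hm_script_t));
  if (err != HM01B0_ERR_OK) return err;

  // Stream exactly one frame, then fall back to standby.
  err = hm01b0_set_mode(cfg, HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES, 1);
  if (err != HM01B0_ERR_OK) return err;

  return hm01b0_blocking_read_oneframe(cfg, buf, len);
}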
@ -1,510 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_RAW8_QVGA_8BITS_LSB_5FPS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_RAW8_QVGA_8BITS_LSB_5FPS_H_

#include "HM01B0.h"

const hm_script_t sHM01B0InitScript[] = {
    // ;*************************************************************************
    // ; Sensor: HM01B0
    // ; I2C ID: 24
    // ; Resolution: 324x244
    // ; Lens:
    // ; Flicker:
    // ; Frequency:
    // ; Description: AE control enable
    // ;              8-bit mode, LSB first
    // ;
    // ; Note:
    // ;
    // ; $Revision: 1338 $
    // ; $Date:: 2017-04-11 15:43:45 +0800#$
    // ;*************************************************************************
    //
    // // ---------------------------------------------------
    // // HUB system initial
    // // ---------------------------------------------------
    // W 20 8A04 01 2 1
    // W 20 8A00 22 2 1
    // W 20 8A01 00 2 1
    // W 20 8A02 01 2 1
    // W 20 0035 93 2 1 ; [3]&[1] hub616 20bits in, [5:4]=1 mclk=48/2=24mhz
    // W 20 0036 00 2 1
    // W 20 0011 09 2 1
    // W 20 0012 B6 2 1
    // W 20 0014 08 2 1
    // W 20 0015 98 2 1
    // ;W 20 0130 16 2 1 ; 3m soc, signal buffer control
    // ;W 20 0100 44 2 1 ; [6] hub616 20bits in
    // W 20 0100 04 2 1 ; [6] hub616 20bits in
    // W 20 0121 01 2 1 ; [0] Q1 Intf enable, [1]: 4bit mode, [2] msb first,
    //                    [3] serial mode
    // W 20 0150 00 2 1 ;
    // W 20 0150 04 2 1 ;
    //
    // //---------------------------------------------------
    // // Initial
    // //---------------------------------------------------
    {0x0103, 0x00},  // W 24 0103 00 2 1 ; software reset -> was 0x22
    {0x0100, 0x00},  // W 24 0100 00 2 1 ; power up
    //
    // //---------------------------------------------------
    // // Analog
    // //---------------------------------------------------
    // L HM01B0_analog_setting.txt
    {0x1003, 0x08},
    {0x1007, 0x08},
    {0x3044, 0x0A},
    {0x3045, 0x00},
    {0x3047, 0x0A},
    {0x3050, 0xC0},
    {0x3051, 0x42},
    {0x3052, 0x50},
    {0x3053, 0x00},
    {0x3054, 0x03},
    {0x3055, 0xF7},
    {0x3056, 0xF8},
    {0x3057, 0x29},
    {0x3058, 0x1F},
    {0x3059, 0x1E},
    {0x3064, 0x00},
    {0x3065, 0x04},
    //
    // //---------------------------------------------------
    // // Digital function
    // //---------------------------------------------------
    //
    // // BLC
    {0x1000, 0x43},  // W 24 1000 43 2 1 ; BLC_on, IIR
    {0x1001, 0x40},  // W 24 1001 40 2 1 ; [6] : BLC dithering en
    {0x1002, 0x32},  // W 24 1002 32 2 1 ; blc_darkpixel_thd
    //
    // // Dgain
    {0x0350, 0x7F},  // W 24 0350 7F 2 1 ; Dgain Control
    //
    // // BLI
    {0x1006, 0x01},  // W 24 1006 01 2 1 ; [0] : bli enable
    //
    // // DPC
    {0x1008, 0x00},  // W 24 1008 00 2 1 ; [2:0] : DPC option
                     //   0 : DPC off, 1 : mono, 3 : bayer1, 5 : bayer2
    {0x1009, 0xA0},  // W 24 1009 A0 2 1 ; cluster hot pixel th
    {0x100A, 0x60},  // W 24 100A 60 2 1 ; cluster cold pixel th
    {0x100B, 0x90},  // W 24 100B 90 2 1 ; single hot pixel th
    {0x100C, 0x40},  // W 24 100C 40 2 1 ; single cold pixel th
    //
    {0x3022, 0x01},  // advance VSYNC by 1 row
    {0x1012, 0x01},  // W 24 1012 00 2 1 ; Sync. enable VSYNC shift
    //
    // // ROI Statistic
    {0x2000, 0x07},  // W 24 2000 07 2 1 ; [0] : AE stat en, [1] : MD LROI
                     //   stat en, [2] : MD GROI stat en, [3] : RGB stat ratio
                     //   en, [4] : IIR selection (1 -> 16, 0 -> 8)
    {0x2003, 0x00},  // W 24 2003 00 2 1 ; MD GROI 0 y start HB
    {0x2004, 0x1C},  // W 24 2004 1C 2 1 ; MD GROI 0 y start LB
    {0x2007, 0x00},  // W 24 2007 00 2 1 ; MD GROI 1 y start HB
    {0x2008, 0x58},  // W 24 2008 58 2 1 ; MD GROI 1 y start LB
    {0x200B, 0x00},  // W 24 200B 00 2 1 ; MD GROI 2 y start HB
    {0x200C, 0x7A},  // W 24 200C 7A 2 1 ; MD GROI 2 y start LB
    {0x200F, 0x00},  // W 24 200F 00 2 1 ; MD GROI 3 y start HB
    {0x2010, 0xB8},  // W 24 2010 B8 2 1 ; MD GROI 3 y start LB
    //
    {0x2013, 0x00},  // W 24 2013 00 2 1 ; MD LROI y start HB
    {0x2014, 0x58},  // W 24 2014 58 2 1 ; MD LROI y start LB
    {0x2017, 0x00},  // W 24 2017 00 2 1 ; MD LROI y end HB
    {0x2018, 0x9B},  // W 24 2018 9B 2 1 ; MD LROI y end LB
    //
    // // AE
    {0x2100, 0x01},  // W 24 2100 01 2 1 ; [0]: AE control enable
    {0x2101, 0x5F},  // W 24 2101 07 2 1 ; AE target mean
    {0x2102, 0x0A},  // W 24 2102 0A 2 1 ; AE min mean
    {0x2103, 0x03},  // W 24 2104 03 2 1 ; AE Threshold
    {0x2104, 0x05},  // W 24 2104 05 2 1 ; AE Threshold
    {0x2105, 0x02},  // W 24 2105 01 2 1 ; max INTG Hb
    {0x2106, 0x14},  // W 24 2106 54 2 1 ; max INTG Lb
    {0x2107, 0x02},  // W 24 2108 02 2 1 ; max AGain in full
    {0x2108, 0x03},  // W 24 2108 03 2 1 ; max AGain in full
    {0x2109, 0x03},  // W 24 2109 04 2 1 ; max AGain in bin2
    {0x210A, 0x00},  // W 24 210A 00 2 1 ; min AGAIN
    {0x210B, 0x80},  // W 24 210B C0 2 1 ; max DGain
    {0x210C, 0x40},  // W 24 210C 40 2 1 ; min DGain
    {0x210D, 0x20},  // W 24 210D 20 2 1 ; damping factor
    {0x210E, 0x03},  // W 24 210E 03 2 1 ; FS ctrl
    {0x210F, 0x00},  // W 24 210F 00 2 1 ; FS 60Hz Hb
    {0x2110, 0x85},  // W 24 2110 85 2 1 ; FS 60Hz Lb
    {0x2111, 0x00},  // W 24 2111 00 2 1 ; FS 50Hz Hb
    {0x2112, 0xA0},  // W 24 2112 A0 2 1 ; FS 50Hz Lb
    //
    // // MD
    {0x2150, 0x03},  // W 24 2150 03 2 1 ; [0] : MD LROI en, [1] : MD GROI en
    //
    // //---------------------------------------------------
    // // frame rate : 5 FPS
    // //---------------------------------------------------
    {0x0340, 0x0C},  // W 24 0340 0C 2 1 ; smia frame length Hb
    {0x0341, 0x7A},  // W 24 0341 7A 2 1 ; smia frame length Lb 3192
    //
    {0x0342, 0x01},  // W 24 0342 01 2 1 ; smia line length Hb
    {0x0343, 0x77},  // W 24 0343 77 2 1 ; smia line length Lb 375
    //
    // //---------------------------------------------------
    // // Resolution : QVGA 324x244
    // //---------------------------------------------------
    {0x3010, 0x01},  // W 24 3010 01 2 1 ; [0] : window mode
                     //   0 : full frame 324x324, 1 : QVGA
    //
    {0x0383, 0x01},  // W 24 0383 01 2 1 ;
    {0x0387, 0x01},  // W 24 0387 01 2 1 ;
    {0x0390, 0x00},  // W 24 0390 00 2 1 ;
    //
    // //---------------------------------------------------
    // // bit width Selection
    // //---------------------------------------------------
    {0x3011, 0x70},  // W 24 3011 70 2 1 ; [0] : 6 bit mode enable
    //
    {0x3059, 0x02},  // W 24 3059 02 2 1 ; [7]: Self OSC En, [6]: 4bit mode,
                     //   [5]: serial mode, [4:0]: keep value as 0x02
    {0x3060, 0x20},  // W 24 3060 01 2 1 ; [5]: gated_clock, [4]: msb first,
                     //   [3:2]: vt_reg_div -> div by 4/8/1/2,
                     //   [1:0]: vt_sys_div -> div by 8/4/2/1
    //
    {0x0101, 0x01},
    // //---------------------------------------------------
    // // CMU update
    // //---------------------------------------------------
    {0x0104, 0x01},  // W 24 0104 01 2 1 ; was 0100
    //
    // //---------------------------------------------------
    // // Turn on rolling shutter
    // //---------------------------------------------------
    {0x0100, 0x01},  // W 24 0100 01 2 1 ; was 0005 ; mode_select
                     //   00 : standby - wait for I2C SW trigger
                     //   01 : streaming
                     //   03 : output "N" frames, then enter standby
                     //   04 : standby - wait for HW trigger (level), then
                     //        continuous video out til HW TRIG goes off
                     //   06 : standby - wait for HW trigger (edge), then
                     //        output "N" frames, then enter standby
    //
    // ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
};

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_RAW8_QVGA_8BITS_LSB_5FPS_H_
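hm01b0_init_system (declared in HM01B0.h) consumes a table like the one above, presumably writing each {register, value} pair to the sensor over I2C. A minimal sketch of that loop, assuming a hypothetical hm01b0_write_reg(cfg, reg, value) helper and assuming the hm_script_t fields are named ui16Reg/ui8Val (neither is confirmed by this commit):

// Sketch only: walk an hm_script_t table and stop on the first error.
static uint32_t run_script(hm01b0_cfg_t* cfg, const hm_script_t* script,
                           uint32_t num_cmds) {
  for (uint32_t i = 0; i < num_cmds; ++i) {
    // Each entry is a 16-bit register address and an 8-bit value; e.g.
    // {0x0103, 0x00} issues the software reset that starts the script.
    uint32_t err = hm01b0_write_reg(cfg, script[i].ui16Reg, script[i].ui8Val);
    if (err != HM01B0_ERR_OK) return err;
  }
  return HM01B0_ERR_OK;
}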
@ -1,56 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_WALKING1S_01_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_WALKING1S_01_H_

#include "HM01B0.h"

const hm_script_t sHM01b0TestModeScript_Walking1s[] = {
    {0x2100, 0x00},  // W 24 2100 00 2 1 ; AE
    {0x1000, 0x00},  // W 24 1000 00 2 1 ; BLC
    {0x1008, 0x00},  // W 24 1008 00 2 1 ; DPC
    {0x0205, 0x00},  // W 24 0205 00 2 1 ; AGain
    {0x020E, 0x01},  // W 24 020E 01 2 1 ; DGain
    {0x020F, 0x00},  // W 24 020F 00 2 1 ; DGain
    {0x0601, 0x11},  // W 24 0601 11 2 1 ; Test pattern
    {0x0104, 0x01},  // W 24 0104 01 2 1 ;
};

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_WALKING1S_01_H_
@ -1,8 +0,0 @@
W 24 2100 00 2 1 ; AE
W 24 1000 00 2 1 ; BLC
W 24 1008 00 2 1 ; DPC
W 24 0205 00 2 1 ; AGain
W 24 020E 01 2 1 ; DGain
W 24 020F 00 2 1 ; DGain
W 24 0601 11 2 1 ; Test pattern
W 24 0104 01 2 1 ;
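A note on the raw script format above, since it is the source for both C arrays in this commit: each line maps onto one hm_script_t entry, e.g. "W 24 2100 00 2 1 ; AE" becomes {0x2100, 0x00} in HM01B0_Walking1s_01.h. "W" marks a register write, "24" is the sensor's I2C ID (per the init script header), "2100" the 16-bit register address, and "00" the 8-bit value; reading the trailing "2 1" as the address and data byte counts is an inference from this pairing, not something the files themselves document.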
@ -1,35 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "HM01B0_debug.h"
#include "am_util.h"  // NOLINT

void hm01b0_framebuffer_dump(uint8_t* frame, uint32_t length) {
  am_util_stdio_printf("+++ frame +++");

  for (uint32_t i = 0; i < length; i++) {
    if ((i & 0xF) == 0x00) {
      am_util_stdio_printf("\n0x%08LX ", i);
      // This delay is to let the ITM have time to flush out data.
      am_util_delay_ms(1);
    }

    am_util_stdio_printf("%02X ", frame[i]);
  }

  am_util_stdio_printf("\n--- frame ---\n");
  am_util_delay_ms(1);
}
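A quick, hypothetical pairing of the dump routine with the blocking read from HM01B0.h; it assumes an hm01b0_cfg_t already initialized via hm01b0_init_if() and hm01b0_init_system(), and a full 324x244 RAW8 frame (the resolution noted in the init script):

static uint8_t s_frame[324 * 244];  // One full-resolution RAW8 frame.

// Sketch: grab one frame and hex-dump it over the debug console.
void dump_one_frame(hm01b0_cfg_t* cfg) {
  hm01b0_set_mode(cfg, HM01B0_REG_MODE_SELECT_STREAMING_NFRAMES, 1);
  hm01b0_blocking_read_oneframe(cfg, s_frame, sizeof(s_frame));
  hm01b0_framebuffer_dump(s_frame, sizeof(s_frame));
}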
@ -1,49 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_DEBUG_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_DEBUG_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "HM01B0.h"

//*****************************************************************************
//
//! @brief Dump one frame of data over the debug console.
//!
//! @param frame - Pointer to the frame buffer.
//! @param len - Number of bytes in the frame buffer.
//!
//! This function prints the frame buffer contents as hex bytes, sixteen per
//! line, over the debug console.
//!
//! @return none.
//
//*****************************************************************************
void hm01b0_framebuffer_dump(uint8_t* frame, uint32_t len);

#ifdef __cplusplus
}
#endif

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_DEBUG_H_
@ -1,87 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "HM01B0.h"
#include "am_bsp.h"         // NOLINT
#include "am_mcu_apollo.h"  // NOLINT
#include "platform_Sparkfun_Edge.h"

// Image is down-sampled by applying a stride of 2 pixels in both the x and y
// directions.
static const int kStrideShift = 1;

//*****************************************************************************
//
//! @brief Read one frame of data from HM01B0 scaled to 96x96 RGB.
//!
//! @param buffer - Pointer to the frame buffer.
//! @param w - Image width.
//! @param h - Image height.
//! @param channels - Number of channels per pixel.
//!
//! This function reads data of one frame from HM01B0. It trims the image to an
//! even power of two multiple of the requested width and height. It down
//! samples the original image and duplicates the greyscale value for each color
//! channel.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_blocking_read_oneframe_scaled(hm01b0_cfg_t* psCfg,
                                              int8_t* buffer, int w, int h,
                                              int channels) {
  hm01b0_single_frame_capture(psCfg);

  // Calculate the number of pixels to crop to get a centered image.
  const int offset_x = (HM01B0_PIXEL_X_NUM - (w * (1 << kStrideShift))) / 2;
  const int offset_y = (HM01B0_PIXEL_Y_NUM - (h * (1 << kStrideShift))) / 2;

  uint32_t hsync_count = 0;

  while (hsync_count < HM01B0_PIXEL_Y_NUM) {
    // Wait for horizontal sync.
    while (!read_hsync());

    // Get resulting image position. When hsync_count < offset_y, this will
    // underflow resulting in an index out of bounds which we check later,
    // avoiding an unnecessary conditional.
    const uint32_t output_y = (hsync_count - offset_y) >> kStrideShift;
    uint32_t rowidx = 0;

    // Read one row. Hsync is held high for the duration of a row read.
    while (read_hsync()) {
      // Wait for pixel value to be ready.
      while (!read_pclk());

      // Read 8-bit value from camera.
      const uint8_t value = read_byte();
      const uint32_t output_x = (rowidx++ - offset_x) >> kStrideShift;
      if (output_x < w && output_y < h) {
        const int output_idx = (output_y * w + output_x) * channels;
        for (int i = 0; i < channels; i++) {
          // See the top of main_functions.cc for an explanation of and
          // rationale for our unsigned to signed input conversion.
          buffer[output_idx + i] = value - 128;
        }
      }

      // Wait for next pixel clock.
      while (read_pclk());
    }

    hsync_count++;
  }
  return HM01B0_ERR_OK;
}
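To make the cropping arithmetic above concrete: assuming HM01B0_PIXEL_X_NUM is 324 and HM01B0_PIXEL_Y_NUM is 244 (matching the 324x244 resolution in the init script), a 96x96 request with kStrideShift = 1 keeps a centered 192x192 window, so offset_x = (324 - 192) / 2 = 66 and offset_y = (244 - 192) / 2 = 26. The unsigned-underflow trick then folds the lower bound into the single output_y < h comparison: for a row above the window, hsync_count - offset_y wraps around to a huge uint32_t and the bounds check fails with no extra conditional.

// Worked example of the crop math, under the 324x244 assumption above.
static_assert((324 - (96 << 1)) / 2 == 66, "offset_x for a 96-wide output");
static_assert((244 - (96 << 1)) / 2 == 26, "offset_y for a 96-tall output");
// Underflow trick: for hsync_count = 5 (above the window), 5u - 26u wraps to
// 4294967275, so the resulting output_y is far larger than h and the pixel
// is skipped.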
@ -1,50 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_OPTIMIZED_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_OPTIMIZED_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "HM01B0.h"

//*****************************************************************************
//
//! @brief Read one frame of data from HM01B0 scaled to 96x96 RGB.
//!
//! @param buffer - Pointer to the frame buffer.
//! @param w - Image width.
//! @param h - Image height.
//! @param channels - Number of channels per pixel.
//!
//! This function reads data of one frame from HM01B0. It trims the image to an
//! even power of two multiple of the requested width and height. It down
//! samples the original image and duplicates the greyscale value for each color
//! channel.
//!
//! @return Error code.
//
//*****************************************************************************
uint32_t hm01b0_blocking_read_oneframe_scaled(hm01b0_cfg_t* psCfg,
                                              int8_t* buffer, int w, int h,
                                              int channels);

#ifdef __cplusplus
}
#endif

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_HM01B0_OPTIMIZED_H_
@ -1,13 +0,0 @@
ifeq ($(TARGET),$(filter $(TARGET),apollo3evb sparkfun_edge))
  person_detection_SRCS += \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0.c \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_debug.c \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_optimized.c

  person_detection_HDRS += \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0.h \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_debug.h \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_optimized.h \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_RAW8_QVGA_8bits_lsb_5fps.h \
  tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_Walking1s_01.h
endif
@ -1,54 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_PLATFORM_SPARKFUN_EDGE_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_PLATFORM_SPARKFUN_EDGE_H_

#ifdef __cplusplus
extern "C" {
#endif

#define HM01B0_PIN_D0 24
#define HM01B0_PIN_D1 25
#define HM01B0_PIN_D2 26
#define HM01B0_PIN_D3 27
#define HM01B0_PIN_D4 28
#define HM01B0_PIN_D5 5
#define HM01B0_PIN_D6 6
#define HM01B0_PIN_D7 7
#define HM01B0_PIN_VSYNC 15
#define HM01B0_PIN_HSYNC 22
#define HM01B0_PIN_PCLK 23
#define HM01B0_PIN_TRIG 12
#define HM01B0_PIN_INT 4
#define HM01B0_PIN_SCL 8
#define HM01B0_PIN_SDA 9
#define HM01B0_PIN_DVDD_EN 10

// Define AP3B's CTIMER and output pin for HM01B0 MCLK generation.
#define HM01B0_MCLK_GENERATOR_MOD 0
#define HM01B0_MCLK_GENERATOR_SEG AM_HAL_CTIMER_TIMERB
#define HM01B0_PIN_MCLK 13

// Define the I2C controller; SCL (pin 8) and SDA (pin 9) are configured
// automatically.
#define HM01B0_IOM_MODE AM_HAL_IOM_I2C_MODE
#define HM01B0_IOM_MODULE 1
#define HM01B0_I2C_CLOCK_FREQ AM_HAL_IOM_100KHZ

#ifdef __cplusplus
}
#endif

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_HIMAX_DRIVER_PLATFORM_SPARKFUN_EDGE_H_
@ -1,26 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"

#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"

TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data) {
  for (int i = 0; i < image_width * image_height * channels; ++i) {
    image_data[i] = 0;
  }
  return kTfLiteOk;
}
@ -1,39 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

// This is an abstraction around an image source like a camera, and is
// expected to return 8-bit sample data. The assumption is that this will be
// called in a low duty-cycle fashion in a low-power application. In these
// cases, the imaging sensor need not be run in a streaming mode, but rather
// can be idled in a relatively low-power mode between calls to GetImage().
// The assumption is that the overhead and time of bringing the low-power
// sensor out of this standby mode is commensurate with the expected duty
// cycle of the application. The underlying sensor may actually be put into a
// streaming configuration, but the image buffer provided to GetImage should
// not be overwritten by the driver code until the next call to GetImage().
//
// The reference implementation can have no platform-specific dependencies, so
// it just returns a static image. For real applications, you should
// ensure there's a specialized implementation that accesses hardware APIs.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data);

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_IMAGE_PROVIDER_H_
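On camera-equipped targets the example specializes this function in a target-specific subfolder. A minimal sketch of such a specialization in terms of the Himax driver above, assuming an hm01b0_cfg_t named s_cfg that has been configured and initialized elsewhere (the real Sparkfun Edge implementation handles power-up and errors more carefully):

#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"

#include "HM01B0_optimized.h"

extern hm01b0_cfg_t s_cfg;  // Assumed to be configured elsewhere.

TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, int8_t* image_data) {
  // Capture one frame, cropped and downsampled to the requested size and
  // already converted to signed int8 by subtracting 128 from each pixel.
  uint32_t err = hm01b0_blocking_read_oneframe_scaled(
      &s_cfg, image_data, image_width, image_height, channels);
  if (err != HM01B0_ERR_OK) {
    TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
    return kTfLiteError;
  }
  return kTfLiteOk;
}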
@ -1,43 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"

#include <limits>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/testing/micro_test.h"

TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(TestImageProvider) {
  tflite::MicroErrorReporter micro_error_reporter;

  int8_t image_data[kMaxImageSize];
  TfLiteStatus get_status = GetImage(&micro_error_reporter, kNumCols, kNumRows,
                                     kNumChannels, image_data);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, get_status);
  TF_LITE_MICRO_EXPECT_NE(image_data, nullptr);

  // Make sure we can read all of the returned memory locations.
  uint32_t total = 0;
  for (int i = 0; i < kMaxImageSize; ++i) {
    total += image_data[i];
  }
}

TF_LITE_MICRO_TESTS_END
@ -1,27 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h"

// This is the default main used on systems that have the standard C entry
// point. Other devices (for example FreeRTOS or ESP32) that have different
// requirements for entry code (like an app_main function) should specialize
// this main.cc file in a target-specific subfolder.
int main(int argc, char* argv[]) {
  setup();
  while (true) {
    loop();
  }
}
@ -1,117 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/main_functions.h"

#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;

// In order to use optimized tensorflow lite kernels, a signed int8_t quantized
// model is preferred over the legacy unsigned model format. This means that
// throughout this project, input images must be converted from unsigned to
// signed format. The easiest and quickest way to convert from unsigned to
// signed 8-bit integers is to subtract 128 from the unsigned value to get a
// signed value.

// An area of memory to use for input, output, and intermediate arrays.
constexpr int kTensorArenaSize = 136 * 1024;
static uint8_t tensor_arena[kTensorArenaSize];
}  // namespace

// The name of this function is important for Arduino compatibility.
void setup() {
  // Set up logging. Google style is to avoid globals or statics because of
  // lifetime uncertainty, but since this has a trivial destructor it's okay.
  // NOLINTNEXTLINE(runtime-global-variables)
  static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing, it's a very lightweight operation.
  model = tflite::GetModel(g_person_detect_model_data);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.",
                         model->version(), TFLITE_SCHEMA_VERSION);
    return;
  }

  // Pull in only the operation implementations we need.
  // This relies on a complete list of all the ops needed by this graph.
  // An easier approach is to just use the AllOpsResolver, but this will
  // incur some penalty in code space for op implementations that are not
  // needed by this graph.
  //
  // tflite::AllOpsResolver resolver;
  // NOLINTNEXTLINE(runtime-global-variables)
  static tflite::MicroMutableOpResolver<5> micro_op_resolver;
  micro_op_resolver.AddAveragePool2D();
  micro_op_resolver.AddConv2D();
  micro_op_resolver.AddDepthwiseConv2D();
  micro_op_resolver.AddReshape();
  micro_op_resolver.AddSoftmax();

  // Build an interpreter to run the model with.
  // NOLINTNEXTLINE(runtime-global-variables)
  static tflite::MicroInterpreter static_interpreter(
      model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Allocate memory from the tensor_arena for the model's tensors.
  TfLiteStatus allocate_status = interpreter->AllocateTensors();
  if (allocate_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
    return;
  }

  // Get information about the memory area to use for the model's input.
  input = interpreter->input(0);
}

// The name of this function is important for Arduino compatibility.
void loop() {
  // Get image from provider.
  if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
                            input->data.int8)) {
    TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
  }

  // Run the model on this input and make sure it succeeds.
  if (kTfLiteOk != interpreter->Invoke()) {
    TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
  }

  TfLiteTensor* output = interpreter->output(0);

  // Process the inference results. The output tensor is int8-quantized, so
  // read the scores through the int8 accessor.
  int8_t person_score = output->data.int8[kPersonIndex];
  int8_t no_person_score = output->data.int8[kNotAPersonIndex];
  RespondToDetection(error_reporter, person_score, no_person_score);
}
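The unsigned-to-signed conversion described at the top of this file is a single subtraction; a worked illustration:

// Mapping a camera byte in [0, 255] to the int8 input range [-128, 127].
uint8_t camera_value = 200;  // Raw sensor pixel.
int8_t model_value = static_cast<int8_t>(camera_value - 128);  // == 72.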
@ -1,28 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_

// Initializes all data needed for the example. The name is important, and
// needs to be setup() for Arduino compatibility.
void setup();

// Runs one iteration of data gathering and inference. This should be called
// repeatedly from the application code. The name needs to be loop() for
// Arduino compatibility.
void loop();

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MAIN_FUNCTIONS_H_
@ -1,21 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"

const char* kCategoryLabels[kCategoryCount] = {
    "notperson",
    "person",
};
@ -1,35 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_

// Keeping these as constant expressions allows us to allocate fixed-sized
// arrays on the stack for our working memory.

// All of these values are derived from the values used during model training;
// if you change your model you'll need to update these constants.
constexpr int kNumCols = 96;
constexpr int kNumRows = 96;
constexpr int kNumChannels = 1;

constexpr int kMaxImageSize = kNumCols * kNumRows * kNumChannels;

constexpr int kCategoryCount = 2;
constexpr int kPersonIndex = 1;
constexpr int kNotAPersonIndex = 0;
extern const char* kCategoryLabels[kCategoryCount];

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_MODEL_SETTINGS_H_
@ -1,30 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This data was created from a sample image without a person in it.
// Convert the original image to a simpler format:
// convert -resize 96x96\! noperson.PNG noperson.bmp3
// Skip the 54-byte bmp3 header and add the rest of the bytes to a C array:
// xxd -s 54 -i /tmp/noperson.bmp3 > /tmp/noperson.cc

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_NO_PERSON_IMAGE_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_NO_PERSON_IMAGE_DATA_H_

#include <cstdint>

extern const int g_no_person_data_size;
extern const uint8_t g_no_person_data[];

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_NO_PERSON_IMAGE_DATA_H_
@ -1,27 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This is a standard TensorFlow Lite model file that has been converted into a
// C data array, so it can be easily compiled into a binary for devices that
// don't have a file system. It was created using the command:
// xxd -i person_detect.tflite > person_detect_model_data.cc

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_

extern const unsigned char g_person_detect_model_data[];
extern const int g_person_detect_model_data_len;

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_DETECT_MODEL_DATA_H_
@ -1,135 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/model_settings.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/no_person_image_data.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/person_detect_model_data.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/person_image_data.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

// Create an area of memory to use for input, output, and intermediate arrays.
constexpr int tensor_arena_size = 136 * 1024;
uint8_t tensor_arena[tensor_arena_size];

TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(TestInvoke) {
  // Set up logging.
  tflite::MicroErrorReporter micro_error_reporter;

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing, it's a very lightweight operation.
  const tflite::Model* model = ::tflite::GetModel(g_person_detect_model_data);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(&micro_error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.\n",
                         model->version(), TFLITE_SCHEMA_VERSION);
  }

  // Pull in only the operation implementations we need.
  // This relies on a complete list of all the ops needed by this graph.
  // An easier approach is to just use the AllOpsResolver, but this will
  // incur some penalty in code space for op implementations that are not
  // needed by this graph.
  tflite::MicroMutableOpResolver<5> micro_op_resolver;
  micro_op_resolver.AddAveragePool2D();
  micro_op_resolver.AddConv2D();
  micro_op_resolver.AddDepthwiseConv2D();
  micro_op_resolver.AddReshape();
  micro_op_resolver.AddSoftmax();

  // Build an interpreter to run the model with.
  tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
                                       tensor_arena_size,
                                       &micro_error_reporter);
  interpreter.AllocateTensors();

  // Get information about the memory area to use for the model's input.
  TfLiteTensor* input = interpreter.input(0);

  // Make sure the input has the properties we expect.
  TF_LITE_MICRO_EXPECT_NE(nullptr, input);
  TF_LITE_MICRO_EXPECT_EQ(4, input->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(kNumRows, input->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(kNumCols, input->dims->data[2]);
  TF_LITE_MICRO_EXPECT_EQ(kNumChannels, input->dims->data[3]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type);

  // Copy an image with a person into the memory area used for the input.
  TFLITE_DCHECK_EQ(input->bytes, static_cast<size_t>(g_person_data_size));
  memcpy(input->data.int8, g_person_data, input->bytes);

  // Run the model on this input and make sure it succeeds.
  TfLiteStatus invoke_status = interpreter.Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(&micro_error_reporter, "Invoke failed\n");
  }
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);

  // Get the output from the model, and make sure it's the expected size and
  // type.
  TfLiteTensor* output = interpreter.output(0);
  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(kCategoryCount, output->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);

  // Make sure that the expected "Person" score is higher than the other class.
  int8_t person_score = output->data.int8[kPersonIndex];
  int8_t no_person_score = output->data.int8[kNotAPersonIndex];
  TF_LITE_REPORT_ERROR(&micro_error_reporter,
                       "person data. person score: %d, no person score: %d\n",
                       person_score, no_person_score);
  TF_LITE_MICRO_EXPECT_GT(person_score, no_person_score);

  // TODO(b/161461076): Update model to make this work on real negative inputs.
  memset(input->data.int8, 0, input->bytes);

  // Run the model on this "No Person" input.
  invoke_status = interpreter.Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(&micro_error_reporter, "Invoke failed\n");
  }
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);

  // Get the output from the model, and make sure it's the expected size and
  // type.
  output = interpreter.output(0);
  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(kCategoryCount, output->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);

  // Make sure that the expected "No Person" score is higher.
  person_score = output->data.int8[kPersonIndex];
  no_person_score = output->data.int8[kNotAPersonIndex];
  TF_LITE_REPORT_ERROR(
      &micro_error_reporter,
      "no person data. person score: %d, no person score: %d\n", person_score,
      no_person_score);
  TF_LITE_MICRO_EXPECT_GT(no_person_score, person_score);

  TF_LITE_REPORT_ERROR(&micro_error_reporter, "Ran successfully\n");
}

TF_LITE_MICRO_TESTS_END
@ -1,30 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This data was created from a sample image with a person in it.
// Convert original image to simpler format:
// convert -resize 96x96\! person.PNG person.bmp3
// Skip the 54 byte bmp3 header and add the rest of the bytes to a C array:
// xxd -s 54 -i /tmp/person.bmp3 > /tmp/person.cc

#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_IMAGE_DATA_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_IMAGE_DATA_H_

#include <cstdint>

extern const int g_person_data_size;
extern const uint8_t g_person_data[];

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_EXPERIMENTAL_PERSON_IMAGE_DATA_H_
@ -1,54 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/detection_responder.h"

#include "am_bsp.h"  // NOLINT

// This implementation will light up LEDs on the board in response to the
// inference results.
void RespondToDetection(tflite::ErrorReporter* error_reporter,
                        int8_t person_score, int8_t no_person_score) {
  static bool is_initialized = false;
  if (!is_initialized) {
    // Set up LEDs as outputs. Leave the red LED alone since that's an error
    // indicator for sparkfun_edge in image_provider.
    am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_BLUE, g_AM_HAL_GPIO_OUTPUT_12);
    am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_GREEN, g_AM_HAL_GPIO_OUTPUT_12);
    am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_YELLOW, g_AM_HAL_GPIO_OUTPUT_12);
    is_initialized = true;
  }

  // Toggle the blue LED every time an inference is performed.
  static int count = 0;
  if (++count & 1) {
    am_hal_gpio_output_set(AM_BSP_GPIO_LED_BLUE);
  } else {
    am_hal_gpio_output_clear(AM_BSP_GPIO_LED_BLUE);
  }

  // Turn on the green LED if a person was detected. Turn on the yellow LED
  // otherwise.
  am_hal_gpio_output_clear(AM_BSP_GPIO_LED_YELLOW);
  am_hal_gpio_output_clear(AM_BSP_GPIO_LED_GREEN);
  if (person_score > no_person_score) {
    am_hal_gpio_output_set(AM_BSP_GPIO_LED_GREEN);
  } else {
    am_hal_gpio_output_set(AM_BSP_GPIO_LED_YELLOW);
  }

  TF_LITE_REPORT_ERROR(error_reporter, "Person score: %d No person score: %d",
                       person_score, no_person_score);
}
@ -1,203 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/examples/person_detection_experimental/image_provider.h"

#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_RAW8_QVGA_8bits_lsb_5fps.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_debug.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/HM01B0_optimized.h"
#include "tensorflow/lite/micro/examples/person_detection_experimental/himax_driver/platform_Sparkfun_Edge.h"

// These are headers from Ambiq's Apollo3 SDK.
#include "am_bsp.h"         // NOLINT
#include "am_mcu_apollo.h"  // NOLINT
#include "am_util.h"        // NOLINT

// #define DEMO_HM01B0_FRAMEBUFFER_DUMP_ENABLE

// Enabling logging increases power consumption by preventing low power mode
// from being enabled.
#define ENABLE_LOGGING

namespace {

//*****************************************************************************
//
// HM01B0 Configuration
//
//*****************************************************************************
static hm01b0_cfg_t s_HM01B0Cfg = {
    // i2c settings
    ui16SlvAddr : HM01B0_DEFAULT_ADDRESS,
    eIOMMode : HM01B0_IOM_MODE,
    ui32IOMModule : HM01B0_IOM_MODULE,
    sIOMCfg : {
      eInterfaceMode : HM01B0_IOM_MODE,
      ui32ClockFreq : HM01B0_I2C_CLOCK_FREQ,
    },
    pIOMHandle : NULL,

    // MCLK settings
    ui32CTimerModule : HM01B0_MCLK_GENERATOR_MOD,
    ui32CTimerSegment : HM01B0_MCLK_GENERATOR_SEG,
    ui32CTimerOutputPin : HM01B0_PIN_MCLK,

    // data interface
    ui8PinSCL : HM01B0_PIN_SCL,
    ui8PinSDA : HM01B0_PIN_SDA,
    ui8PinD0 : HM01B0_PIN_D0,
    ui8PinD1 : HM01B0_PIN_D1,
    ui8PinD2 : HM01B0_PIN_D2,
    ui8PinD3 : HM01B0_PIN_D3,
    ui8PinD4 : HM01B0_PIN_D4,
    ui8PinD5 : HM01B0_PIN_D5,
    ui8PinD6 : HM01B0_PIN_D6,
    ui8PinD7 : HM01B0_PIN_D7,
    ui8PinVSYNC : HM01B0_PIN_VSYNC,
    ui8PinHSYNC : HM01B0_PIN_HSYNC,
    ui8PinPCLK : HM01B0_PIN_PCLK,

    ui8PinTrig : HM01B0_PIN_TRIG,
    ui8PinInt : HM01B0_PIN_INT,
    pfnGpioIsr : NULL,
};

static constexpr int kFramesToInitialize = 4;

bool g_is_camera_initialized = false;

void burst_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
  am_hal_burst_avail_e eBurstModeAvailable;
  am_hal_burst_mode_e eBurstMode;

  // Check that the Burst Feature is available.
  if (AM_HAL_STATUS_SUCCESS ==
      am_hal_burst_mode_initialize(&eBurstModeAvailable)) {
    if (AM_HAL_BURST_AVAIL == eBurstModeAvailable) {
      TF_LITE_REPORT_ERROR(error_reporter, "Apollo3 Burst Mode is Available\n");
    } else {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Apollo3 Burst Mode is Not Available\n");
      return;
    }
  } else {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Failed to Initialize for Burst Mode operation\n");
  }

  // Make sure we are in "Normal" mode.
  if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_disable(&eBurstMode)) {
    if (AM_HAL_NORMAL_MODE == eBurstMode) {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Apollo3 operating in Normal Mode (48MHz)\n");
    }
  } else {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Failed to Disable Burst Mode operation\n");
  }

  // Put the MCU into "Burst" mode.
  if (bEnable) {
    if (AM_HAL_STATUS_SUCCESS == am_hal_burst_mode_enable(&eBurstMode)) {
      if (AM_HAL_BURST_MODE == eBurstMode) {
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Apollo3 operating in Burst Mode (96MHz)\n");
      }
    } else {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Failed to Enable Burst Mode operation\n");
    }
  }
}

}  // namespace

TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
  TF_LITE_REPORT_ERROR(error_reporter, "Initializing HM01B0...\n");

  am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);

  // Set the default cache configuration
  am_hal_cachectrl_config(&am_hal_cachectrl_defaults);
  am_hal_cachectrl_enable();

  // Configure the board for low power operation. This breaks logging by
  // turning off the itm and uart interfaces.
#ifndef ENABLE_LOGGING
  am_bsp_low_power_init();
#endif

  // Enable interrupts so we can receive messages from the boot host.
  am_hal_interrupt_master_enable();

  burst_mode_enable(error_reporter, true);

  // Turn on the 1.8V regulator for DVDD on the camera.
  am_hal_gpio_pinconfig(HM01B0_PIN_DVDD_EN, g_AM_HAL_GPIO_OUTPUT_12);
  am_hal_gpio_output_set(HM01B0_PIN_DVDD_EN);

  // Configure Red LED for debugging.
  am_hal_gpio_pinconfig(AM_BSP_GPIO_LED_RED, g_AM_HAL_GPIO_OUTPUT_12);
  am_hal_gpio_output_clear(AM_BSP_GPIO_LED_RED);

  hm01b0_power_up(&s_HM01B0Cfg);

  am_util_delay_ms(1);

  hm01b0_mclk_enable(&s_HM01B0Cfg);

  am_util_delay_ms(1);

  if (HM01B0_ERR_OK != hm01b0_init_if(&s_HM01B0Cfg)) {
    return kTfLiteError;
  }

  if (HM01B0_ERR_OK !=
      hm01b0_init_system(&s_HM01B0Cfg, (hm_script_t*)sHM01B0InitScript,
                         sizeof(sHM01B0InitScript) / sizeof(hm_script_t))) {
    return kTfLiteError;
  }

  return kTfLiteOk;
}

// Capture single frame. Frame pointer passed in to reduce memory usage. This
// allows the input tensor to be used instead of requiring an extra copy.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int frame_width,
                      int frame_height, int channels, int8_t* frame) {
  if (!g_is_camera_initialized) {
    TfLiteStatus init_status = InitCamera(error_reporter);
    if (init_status != kTfLiteOk) {
      am_hal_gpio_output_set(AM_BSP_GPIO_LED_RED);
      return init_status;
    }
    // Drop a few frames until auto exposure is calibrated.
    for (int i = 0; i < kFramesToInitialize; ++i) {
      hm01b0_blocking_read_oneframe_scaled(&s_HM01B0Cfg, frame, frame_width,
                                           frame_height, channels);
    }
    g_is_camera_initialized = true;
  }

  hm01b0_blocking_read_oneframe_scaled(&s_HM01B0Cfg, frame, frame_width,
                                       frame_height, channels);

#ifdef DEMO_HM01B0_FRAMEBUFFER_DUMP_ENABLE
  hm01b0_framebuffer_dump(frame, frame_width * frame_height * channels);
#endif

  return kTfLiteOk;
}
@ -1,455 +0,0 @@
## Training a model

The following document will walk you through the process of training your own
250 KB embedded vision model using scripts that are easy to run. You can use
either the [Visual Wake Words dataset](https://arxiv.org/abs/1906.05721) for
person detection, or choose one of the [80
categories from the MSCOCO dataset](http://cocodataset.org/#explore).

This model will take several days to train on a powerful machine with GPUs. We
recommend using a [Google Cloud Deep
Learning VM](https://cloud.google.com/deep-learning-vm/).

### Training framework choice

Keras is the recommended interface for building models in TensorFlow, but when
the person detector model was being created it didn't yet support all the
features we needed. For that reason, we'll be showing you how to train a model
using tf.slim, an older interface. It is still widely used but deprecated, so
future versions of TensorFlow may not support this approach. We hope to publish
Keras instructions in the future.

The model definitions for Slim are part of the
[TensorFlow models repository](https://github.com/tensorflow/models), so to get
started you'll need to download it from GitHub using a command like this:

```
! cd ~
! git clone https://github.com/tensorflow/models.git
```

The following guide is going to assume that you've done this from your home
directory, so the model repository code is at ~/models, and that all commands
are run from the home directory too unless otherwise noted. You can place the
repository somewhere else, but you'll need to update all references to it.

To use Slim, you'll need to make sure its modules can be found by Python, and
install one dependency. Here's how to do this in an IPython notebook:

```
! pip install contextlib2
import os
new_python_path = (os.environ.get("PYTHONPATH") or '') + ":models/research/slim"
%env PYTHONPATH=$new_python_path
```

Updating `PYTHONPATH` this way only works for the current Jupyter session, so
if you're using bash directly, you should add it to a persistent startup
script, running something like this:

```
echo 'export PYTHONPATH=$PYTHONPATH:models/research/slim' >> ~/.bashrc
source ~/.bashrc
```

If you see import errors running the slim scripts, you should make sure the
`PYTHONPATH` is set up correctly, and that contextlib2 has been installed. You
can find more general information on tf.slim in the
[repository's
README](https://github.com/tensorflow/models/tree/master/research/slim).

### Building the dataset

In order to train a person detector model, we need a large collection of images
that are labeled depending on whether or not they have people in them. The
ImageNet one-thousand class data that's widely used for training image
classifiers doesn't include labels for people, but luckily the
[COCO dataset](http://cocodataset.org/#home) does. You can download this data
without manually registering, and Slim provides a convenient script to grab it
automatically:

```
! chmod +x models/research/slim/datasets/download_mscoco.sh
! bash models/research/slim/datasets/download_mscoco.sh coco
```

This is a large download, about 40GB, so it will take a while and you'll need
to make sure you have at least 100GB free on your drive to allow space for
unpacking and further processing. The argument to the script is the path that
the data will be downloaded to. If you change this, you'll also need to update
the commands below that use it.

The dataset is designed to be used for training models for localization, so the
images aren't labeled with the "contains a person", "doesn't contain a person"
categories that we want to train for. Instead each image comes with a list of
bounding boxes for all of the objects it contains. "Person" is one of these
object categories, so to get to the classification labels we want, we have to
look for images with bounding boxes for people. To make sure that they aren't
too tiny to be recognizable, we also need to exclude very small bounding boxes.
Slim contains a script to convert these bounding boxes into labels:

```
! python models/research/slim/datasets/build_visualwakewords_data.py \
--logtostderr \
--train_image_dir=coco/raw-data/train2014 \
--val_image_dir=coco/raw-data/val2014 \
--train_annotations_file=coco/raw-data/annotations/instances_train2014.json \
--val_annotations_file=coco/raw-data/annotations/instances_val2014.json \
--output_dir=coco/processed \
--small_object_area_threshold=0.005 \
--foreground_class_of_interest='person'
```

Don't be surprised if this takes up to twenty minutes to complete. When it's
done, you'll have a set of TFRecords in `coco/processed` holding the labeled
image information. This data was created by Aakanksha Chowdhery and is known as
the [Visual Wake Words dataset](https://arxiv.org/abs/1906.05721). It's designed
to be useful for benchmarking and testing embedded computer vision, since it
represents a very common task that we need to accomplish with tight resource
constraints. We're hoping to see it drive even better models for this and
similar tasks.

### Training the model

One of the nice things about using tf.slim to handle the training is that the
parameters you commonly need to modify are available as command line arguments,
so we can just call the standard `train_image_classifier.py` script to train
our model. You can use this command to build the model we use in the example:

```
! python models/research/slim/train_image_classifier.py \
--train_dir=vww_96_grayscale \
--dataset_name=visualwakewords \
--dataset_split_name=train \
--dataset_dir=coco/processed \
--model_name=mobilenet_v1_025 \
--preprocessing_name=mobilenet_v1 \
--train_image_size=96 \
--input_grayscale=True \
--save_summaries_secs=300 \
--learning_rate=0.045 \
--label_smoothing=0.1 \
--learning_rate_decay_factor=0.98 \
--num_epochs_per_decay=2.5 \
--moving_average_decay=0.9999 \
--batch_size=96 \
--max_number_of_steps=1000000
```

This will take a couple of days on a single-GPU v100 instance to complete all
one-million steps, but you should be able to get a fairly accurate model after
a few hours if you want to experiment early.

- The checkpoints and summaries will be saved in the folder given in the
  `--train_dir` argument, so that's where you'll have to look for the results.
- The `--dataset_dir` parameter should match the one where you saved the
  TFRecords from the Visual Wake Words build script.
- The architecture we'll be using is defined by the `--model_name` argument.
  The 'mobilenet_v1' prefix tells the script to use the first version of
  MobileNet. We did experiment with later versions, but these used more RAM
  for their intermediate activation buffers, so for now we stuck with the
  original. The '025' is the depth multiplier to use, which mostly affects the
  number of weight parameters; this low setting ensures the model fits within
  250KB of Flash.
- `--preprocessing_name` controls how input images are modified before they're
  fed into the model. The 'mobilenet_v1' version shrinks the width and height
  of the images to the size given in `--train_image_size` (in our case 96
  pixels, since we want to reduce the compute requirements). It also scales the
  pixel values from 0 to 255 integers into -1.0 to +1.0 floating point numbers
  (though we'll be quantizing those after training).
- The
  [HM01B0](https://himax.com.tw/products/cmos-image-sensor/image-sensors/hm01b0/)
  camera we're using on the SparkFun Edge board is monochrome, so to get the
  best results we have to train our model on black and white images too, which
  is why we pass in the `--input_grayscale` flag to enable that preprocessing.
- The `--learning_rate`, `--label_smoothing`, `--learning_rate_decay_factor`,
  `--num_epochs_per_decay`, `--moving_average_decay` and `--batch_size`
  parameters all control how weights are updated during the training process.
  Training deep networks is still a bit of a dark art, so we found these exact
  values through experimentation for this particular model. You can try
  tweaking them to speed up training or gain a small boost in accuracy, but we
  can't give much guidance for how to make those changes, and it's easy to get
  combinations where the training accuracy never converges.
- The `--max_number_of_steps` argument defines how long the training should
  continue. There's no good way to figure out this threshold in advance; you
  have to experiment to tell when the accuracy of the model is no longer
  improving, and cut it off there. In our case we default to a million steps,
  since with this particular model we know that's a good point to stop.

Once you start the script, you should see output that looks something like this:

```
INFO:tensorflow:global step 4670: loss = 0.7112 (0.251 sec/step)
I0928 00:16:21.774756 140518023943616 learning.py:507] global step 4670: loss =
0.7112 (0.251 sec/step)
INFO:tensorflow:global step 4680: loss = 0.6596 (0.227 sec/step)
I0928 00:16:24.365901 140518023943616 learning.py:507] global step 4680: loss =
0.6596 (0.227 sec/step)
```

Don't worry about the line duplication; this is just a side effect of the way
TensorFlow log printing interacts with Python. Each line has two key bits of
information about the training process. The global step is a count of how far
through the training we are. Since we've set the limit as a million steps, in
this case we're nearly five percent complete. The steps per second estimate is
also useful, since you can use it to estimate a rough duration for the whole
training process. In this case, we're completing about four steps a second, so
a million steps will take about 70 hours, or three days. The other crucial
piece of information is the loss. This is a measure of how close the
partially-trained model's predictions are to the correct values, and lower
values are better. This will show a lot of variation but should on average
decrease during training if the model is learning. Because it's so noisy, the
amounts will bounce around a lot over short time periods, but if things are
working well you should see a noticeable drop if you wait an hour or so and
check back. This kind of variation is a lot easier to see in a graph, which is
one of the main reasons to try TensorBoard.
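
If you want to sanity-check that duration estimate yourself, the arithmetic is
just total steps times seconds per step. A quick sketch, with numbers taken
from the illustrative log above:

```
sec_per_step = 0.25   # roughly what the "(0.251 sec/step)" log lines report
total_steps = 1000000
print(total_steps * sec_per_step / 3600.0, "hours")  # about 69 hours, or three days
```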

### TensorBoard

TensorBoard is a web application that lets you view data visualizations from
TensorFlow training sessions, and it's included by default in most cloud
instances. If you're using Google Cloud's AI Platform, you can start up a new
TensorBoard session by opening the command palette from the left tabs on the
notebook interface, and scrolling down to select "Create a new tensorboard".
You'll be prompted for the location of the summary logs; enter the path you
used for `--train_dir` in the training script, in our example
'vww_96_grayscale'. One common error to watch out for is adding a slash to the
end of the path, which will cause TensorBoard to fail to find the directory. If
you're starting TensorBoard from the command line in a different environment,
you'll have to pass this path as the `--logdir` argument to the `tensorboard`
command line tool, and point your browser to http://localhost:6006 (or the
address of the machine you're running it on).
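
For example, from the home directory where we've been running everything, that
would look something like this:

```
tensorboard --logdir vww_96_grayscale
```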

It may take a little while for the graphs to have anything useful in them, since
the script only saves summaries every five minutes. The most important graph is
called 'clone_loss', and this shows the progression of the same loss value
that's displayed on the logging output. It fluctuates a lot, but the
overall trend is downwards over time. If you don't see this sort of progression
after a few hours of training, it's a good sign that your model isn't
converging to a good solution, and you may need to debug what's going wrong
either with your dataset or the training parameters.

TensorBoard defaults to the 'Scalars' tab when it opens, but the other section
that can be useful during training is 'Images'. This shows a
random selection of the pictures the model is currently being trained on,
including any distortions and other preprocessing. This information isn't as
essential as the loss graphs, but it can be useful to ensure the dataset is what
you expect, and it is interesting to see the examples updating as training
progresses.

### Evaluating the model

The loss function correlates with how well your model is training, but it isn't
a direct, understandable metric. What we really care about is how many people
our model detects correctly, but to calculate this we need to run a
separate script. You don't need to wait until the model is fully trained; you
can check the accuracy of any checkpoints in the `--train_dir` folder.

```
! python models/research/slim/eval_image_classifier.py \
--alsologtostderr \
--checkpoint_path=vww_96_grayscale/model.ckpt-698580 \
--dataset_dir=coco/processed/ \
--dataset_name=visualwakewords \
--dataset_split_name=val \
--model_name=mobilenet_v1_025 \
--preprocessing_name=mobilenet_v1 \
--input_grayscale=True \
--train_image_size=96
```

You'll need to make sure that `--checkpoint_path` is pointing to a valid set of
checkpoint data. Checkpoints are stored in three separate files, so the value
should be their common prefix. For example if you have a checkpoint file called
'model.ckpt-5179.data-00000-of-00001', the prefix would be 'model.ckpt-5179'.
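
As an illustration of that layout, listing the training directory for a
hypothetical step-5179 checkpoint would show three files sharing the prefix,
plus TensorFlow's bookkeeping `checkpoint` file (the listing below is
illustrative):

```
! ls vww_96_grayscale
checkpoint
model.ckpt-5179.data-00000-of-00001
model.ckpt-5179.index
model.ckpt-5179.meta
```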

The script should produce output that looks something like this:

```
INFO:tensorflow:Evaluation [406/406]
I0929 22:52:59.936022 140225887045056 evaluation.py:167] Evaluation [406/406]
eval/Accuracy[0.717438412]eval/Recall_5[1]
```

The important number here is the accuracy. It shows the proportion of the
images that were classified correctly, which is 72% in this case, after
converting to a percentage. If you follow the example script, you should expect
a fully-trained model to achieve an accuracy of around 84% after one million
steps, and show a loss of around 0.4.

### Exporting the model to TensorFlow Lite

When the model has trained to an accuracy you're happy with, you'll need to
convert the results from the TensorFlow training environment into a form you
can run on an embedded device. As we've seen in previous chapters, this can be
a complex process, and tf.slim adds a few of its own wrinkles too.

#### Exporting to a GraphDef protobuf file

Slim generates the architecture from the model_name every time one of its
scripts is run, so for a model to be used outside of Slim it needs to be saved
in a common format. We're going to use the GraphDef protobuf serialization
format, since that's understood by both Slim and the rest of TensorFlow.

```
! python models/research/slim/export_inference_graph.py \
--alsologtostderr \
--dataset_name=visualwakewords \
--model_name=mobilenet_v1_025 \
--image_size=96 \
--input_grayscale=True \
--output_file=vww_96_grayscale_graph.pb
```

If this succeeds, you should have a new 'vww_96_grayscale_graph.pb' file in
your home folder. This contains the layout of the operations in the model, but
doesn't yet have any of the weight data.

#### Freezing the weights

The process of storing the trained weights together with the operation graph is
known as freezing. This converts all of the variables in the graph to
constants, after loading their values from a checkpoint file. The command below
uses a checkpoint from the millionth training step, but you can supply any
valid checkpoint path. The graph freezing script is stored inside the main
tensorflow repository, so we have to download this from GitHub before running
this command.

```
! git clone https://github.com/tensorflow/tensorflow
! python tensorflow/tensorflow/python/tools/freeze_graph.py \
--input_graph=vww_96_grayscale_graph.pb \
--input_checkpoint=vww_96_grayscale/model.ckpt-1000000 \
--input_binary=true --output_graph=vww_96_grayscale_frozen.pb \
--output_node_names=MobilenetV1/Predictions/Reshape_1
```

After this, you should see a file called 'vww_96_grayscale_frozen.pb'.

#### Quantizing and converting to TensorFlow Lite

Quantization is a tricky and involved process, and it's still very much an
active area of research, so taking the float graph that we've trained so far
and converting it down to eight bit takes quite a bit of code. You can find
more of an explanation of what quantization is and how it works in the chapter
on latency optimization, but here we'll show you how to use it with the model
we've trained. The majority of the code is preparing example images to feed
into the trained network, so that the ranges of the activation layers in
typical use can be measured. We rely on the TFLiteConverter class to handle the
quantization and conversion into the TensorFlow Lite flatbuffer file that we
need for the inference engine.

```
import tensorflow as tf
import io
import PIL
import numpy as np

def representative_dataset_gen():

  record_iterator = tf.python_io.tf_record_iterator(
      path='coco/processed/val.record-00000-of-00010')

  count = 0
  for string_record in record_iterator:
    example = tf.train.Example()
    example.ParseFromString(string_record)
    image_stream = io.BytesIO(
        example.features.feature['image/encoded'].bytes_list.value[0])
    image = PIL.Image.open(image_stream)
    image = image.resize((96, 96))
    image = image.convert('L')
    array = np.array(image)
    array = np.expand_dims(array, axis=2)
    array = np.expand_dims(array, axis=0)
    array = ((array / 127.5) - 1.0).astype(np.float32)
    yield([array])
    count += 1
    if count > 300:
      break

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    'vww_96_grayscale_frozen.pb',
    ['input'], ['MobilenetV1/Predictions/Reshape_1'])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8

tflite_quant_model = converter.convert()
open("vww_96_grayscale_quantized.tflite", "wb").write(tflite_quant_model)
```
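
Before moving the file to a device, it's worth confirming the conversion really
produced int8 inputs and outputs. A minimal sketch, assuming the conversion
above succeeded and TensorFlow is still imported:

```
# Load the converted flatbuffer and inspect its tensor types.
interpreter = tf.lite.Interpreter(model_path="vww_96_grayscale_quantized.tflite")
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['dtype'])   # expect numpy.int8
print(interpreter.get_output_details()[0]['dtype'])  # expect numpy.int8
```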

#### Converting into a C source file

The converter writes out a file, but most embedded devices don't have a file
system. To access the serialized data from our program, we have to compile it
into the executable and store it in Flash. The easiest way to do that is to
convert the file into a C data array.

```
# Install xxd if it is not available
!apt-get -qq install xxd
# Save the file as a C source file
!xxd -i vww_96_grayscale_quantized.tflite > person_detect_model_data.cc
```

You can now replace the existing person_detect_model_data.cc file with the
version you've trained, and be able to run your own model on embedded devices.
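
For reference, `xxd -i` names the array after the input file (replacing the
dots with underscores) and appends an `unsigned int
vww_96_grayscale_quantized_tflite_len` length variable at the end. Peeking at
the top of the generated file would show something like this (the bytes here
are illustrative):

```
! head -n 2 person_detect_model_data.cc
unsigned char vww_96_grayscale_quantized_tflite[] = {
  0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x14, 0x00, 0x20, 0x00,
```

Because of that naming, you'll likely need to rename the array and its length
variable to match the `g_person_detect_model_data` and
`g_person_detect_model_data_len` declarations that the example's header
expects.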

### Training for other categories

There are over 60 different object types in the MS-COCO dataset, so an easy way
to customize your model would be to choose one of those instead of 'person'
when you build the training dataset. Here's an example that looks for cars:

```
! python models/research/slim/datasets/build_visualwakewords_data.py \
--logtostderr \
--train_image_dir=coco/raw-data/train2014 \
--val_image_dir=coco/raw-data/val2014 \
--train_annotations_file=coco/raw-data/annotations/instances_train2014.json \
--val_annotations_file=coco/raw-data/annotations/instances_val2014.json \
--output_dir=coco/processed_cars \
--small_object_area_threshold=0.005 \
--foreground_class_of_interest='car'
```

You should be able to follow the same steps you did for the person detector,
but substitute the new 'coco/processed_cars' path wherever 'coco/processed'
used to be.

If the kind of object you're interested in isn't present in MS-COCO, you may be
able to use transfer learning to help you train on a custom dataset you've
gathered, even if it's much smaller. We don't have an example of this
yet, but we hope to share one soon.

### Understanding the architecture

[MobileNets](https://arxiv.org/abs/1704.04861) are a family of architectures
designed to provide good accuracy for as few weight parameters and arithmetic
operations as possible. There are now multiple versions, but in our case we're
using the original v1, since it required the smallest amount of RAM at runtime.
The core concept behind the architecture is depthwise separable convolution.
This is a variant of classical two-dimensional convolutions that works in a
much more efficient way, without sacrificing very much accuracy. Regular
convolution calculates an output value based on applying a filter of a
particular size across all channels of the input. This means the number of
calculations involved in each output is the width of the filter multiplied by
the height, multiplied by the number of input channels. Depthwise convolution
breaks this large calculation into separate parts. First each input channel is
filtered by one or more rectangular filters to produce intermediate values.
These values are then combined using pointwise convolutions. This dramatically
reduces the number of calculations needed, and in practice produces similar
results to regular convolution.
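
To get a feel for the savings, you can count multiplies for a single layer: a
regular convolution needs filter-width x filter-height x input-channels
multiplies per output value for every output channel, while the
depthwise-plus-pointwise pair turns that product into a sum. A quick sketch
with illustrative layer dimensions (not taken from this model):

```
# Multiply counts for one 3x3 convolution layer (illustrative sizes).
k = 3                 # filter width and height
h, w = 48, 48         # output feature-map size
c_in, c_out = 32, 64  # input and output channels

standard = h * w * (k * k * c_in) * c_out          # regular convolution
separable = h * w * (k * k * c_in + c_in * c_out)  # depthwise + 1x1 pointwise

print(standard, separable, round(standard / separable, 1))  # ~7.9x fewer multiplies
```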

MobileNet v1 is a stack of 14 of these depthwise separable convolution layers
with an average pool, then a fully-connected layer followed by a softmax at the
end. We've specified a 'width multiplier' of 0.25, which has the effect of
reducing the number of computations down to around 60 million per inference, by
shrinking the number of channels in each activation layer by 75% compared to
the standard model. In essence it's very similar to a normal convolutional
neural network in operation, with each layer learning patterns in the input.
Earlier layers act more like edge recognition filters, spotting low-level
structure in the image, and later layers synthesize that information into more
abstract patterns that help with the final object classification.
@ -65,7 +65,7 @@ quantization only, `dilation_ratio==1` 3. Average Pooling 4. Max Pooling 5.
 Fully Connected
 
 Currently only
-[/tensorflow/lite/micro/examples/person_detection_experimental](/tensorflow/lite/micro/examples/person_detection_experimental)
+[/tensorflow/lite/micro/examples/person_detection](/tensorflow/lite/micro/examples/person_detection)
 is quantized using this specification. Other examples can be executed on
 ARC-based targets, but will only use reference kernels.
@ -42,7 +42,7 @@ def move_person_data(library_dir):
   """Moves the downloaded person model into the examples folder."""
   old_person_data_path = os.path.join(
       library_dir, 'src/tensorflow/lite/micro/tools/make/downloads/' +
-      'person_model_grayscale/person_detect_model_data.cpp')
+      'person_model_int8/person_detect_model_data.cpp')
   new_person_data_path = os.path.join(
       library_dir, 'examples/person_detection/person_detect_model_data.cpp')
   if os.path.exists(old_person_data_path):
@ -58,28 +58,6 @@ def move_person_data(library_dir):
       source_file.write(file_contents)
 
 
-def move_person_data_experimental(library_dir):
-  """Moves the downloaded person model into the examples folder."""
-  old_person_data_path = os.path.join(
-      library_dir, 'src/tensorflow/lite/micro/tools/make/downloads/' +
-      'person_model_int8/person_detect_model_data.cpp')
-  new_person_data_path = os.path.join(
-      library_dir,
-      'examples/person_detection_experimental/person_detect_model_data.cpp')
-  if os.path.exists(old_person_data_path):
-    os.rename(old_person_data_path, new_person_data_path)
-    # Update include.
-    with open(new_person_data_path, 'r') as source_file:
-      file_contents = source_file.read()
-    file_contents = file_contents.replace(
-        six.ensure_str(
-            '#include "tensorflow/lite/micro/examples/' +
-            'person_detection_experimental/person_detect_model_data.h"'),
-        '#include "person_detect_model_data.h"')
-    with open(new_person_data_path, 'w') as source_file:
-      source_file.write(file_contents)
-
-
 def move_image_data_experimental(library_dir):
   """Moves the downloaded image detection model into the examples folder."""
   old_image_data_path = os.path.join(
@ -117,7 +95,6 @@ def main(unparsed_args):
   rename_example_subfolder_files(library_dir)
   rename_example_main_inos(library_dir)
   move_person_data(library_dir)
-  move_person_data_experimental(library_dir)
   move_image_data_experimental(library_dir)
 
 
@ -30,7 +30,7 @@ mkdir -p `dirname ${EXAMPLES_SUBDIR_HEADER}`
 touch ${EXAMPLES_SUBDIR_HEADER}
 
 TENSORFLOW_SRC_DIR=${LIBRARY_DIR}/src/
-PERSON_DATA_FILE=${TENSORFLOW_SRC_DIR}tensorflow/lite/micro/tools/make/downloads/person_model_grayscale/person_detect_model_data.cpp
+PERSON_DATA_FILE=${TENSORFLOW_SRC_DIR}tensorflow/lite/micro/tools/make/downloads/person_model_int8/person_detect_model_data.cpp
 mkdir -p `dirname ${PERSON_DATA_FILE}`
 echo '#include "tensorflow/lite/micro/examples/person_detection/person_detect_model_data.h"' > ${PERSON_DATA_FILE}
 mkdir -p ${LIBRARY_DIR}/examples/person_detection
@ -74,7 +74,7 @@ MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
 EXCLUDED_EXAMPLE_TESTS := \
   tensorflow/lite/micro/examples/magic_wand/Makefile.inc \
   tensorflow/lite/micro/examples/micro_speech/Makefile.inc \
-  tensorflow/lite/micro/examples/person_detection_experimental/Makefile.inc \
+  tensorflow/lite/micro/examples/person_detection/Makefile.inc \
   tensorflow/lite/micro/examples/image_recognition_experimental/Makefile.inc
 MICRO_LITE_EXAMPLE_TESTS := $(filter-out $(EXCLUDED_EXAMPLE_TESTS), $(MICRO_LITE_EXAMPLE_TESTS))
@ -56,7 +56,6 @@ EXCLUDED_EXAMPLE_TESTS := \
   tensorflow/lite/micro/examples/magic_wand/Makefile.inc \
   tensorflow/lite/micro/examples/micro_speech/Makefile.inc \
   tensorflow/lite/micro/examples/network_tester/Makefile.inc \
-  tensorflow/lite/micro/examples/person_detection/Makefile.inc \
-  tensorflow/lite/micro/examples/person_detection_experimental/Makefile.inc
+  tensorflow/lite/micro/examples/person_detection/Makefile.inc
 MICRO_LITE_EXAMPLE_TESTS := $(filter-out $(EXCLUDED_EXAMPLE_TESTS), $(MICRO_LITE_EXAMPLE_TESTS))
@ -65,8 +65,8 @@ IMAGE_RECOGNITION_MODEL_MD5 := "1f4607b05ac45b8a6146fb883dbc2d7b"
 PERSON_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_grayscale_2020_05_27.zip"
 PERSON_MODEL_MD5 := "55b85f76e2995153e660391d4a209ef1"
 
-PERSON_MODEL_INT8_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_06_23.zip"
-PERSON_MODEL_INT8_MD5 := "9b5b6d4677dd0a91b1bb992d1c4c0417"
+PERSON_MODEL_INT8_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_int8_grayscale_2020_12_1.zip"
+PERSON_MODEL_INT8_MD5 := "e765cc76889db8640cfe876a37e4ec00"
 
 EMBARC_MLI_URL := "https://github.com/foss-for-synopsys-dwc-arc-processors/embarc_mli/archive/ef7dd3c4e37d74a908f30713a7d0121387d3c678.zip"
 EMBARC_MLI_MD5 := "65c4ff3f4a2963e90fd014f97c69f451"