Merge pull request from jenselofsson:micro_recognition

PiperOrigin-RevId: 308109446
Change-Id: Iee03e8573ad8161d5186c98face0a1eb3a130907
This commit is contained in:
TensorFlower Gardener 2020-04-23 12:50:03 -07:00
commit 8c607acc89
17 changed files with 744 additions and 1 deletions

View File

@ -0,0 +1 @@
first_10_cifar_images.h

View File

@ -0,0 +1,38 @@
# Fetch the pre-trained image recognition model, and the CIFAR10 test data.
# The cifar10 download also runs the patch_cifar10_dataset action, which
# generates the first_10_cifar_images.h header used by the test below.
$(eval $(call add_third_party_download,$(IMAGE_RECOGNITION_MODEL_URL),$(IMAGE_RECOGNITION_MODEL_MD5),image_recognition_model,))
$(eval $(call add_third_party_download,$(CIFAR10_DATASET_URL),$(CIFAR10_DATASET_MD5),cifar10,patch_cifar10_dataset))

# Headers for the on-device application build.
IMAGE_RECOGNITION_HDRS := \
tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_model.h \
tensorflow/lite/micro/examples/image_recognition_experimental/image_provider.h \
tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/image_util.h \
tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/display_util.h \
tensorflow/lite/micro/examples/image_recognition_experimental/util.h

# Sources for the on-device application (model data comes from the download).
IMAGE_RECOGNITION_SRCS := \
$(MAKEFILE_DIR)/downloads/image_recognition_model/image_recognition_model.cc \
tensorflow/lite/micro/examples/image_recognition_experimental/main.cc \
tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/image_provider.cc \
tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/image_util.cc \
tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/display_util.cc

# Sources/headers for the host-side test (no display/camera code).
IMAGE_RECOGNITION_TEST_SRCS := \
tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_test.cc \
$(MAKEFILE_DIR)/downloads/image_recognition_model/image_recognition_model.cc

IMAGE_RECOGNITION_TEST_HDRS := \
tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_model.h \
tensorflow/lite/micro/examples/image_recognition_experimental/util.h

# Pick up any platform-specific sub-makefiles for this example.
include $(wildcard tensorflow/lite/micro/examples/image_recognition_experimental/*/Makefile.inc)

# When targeting the STM32F746G-Discovery board, add the mbed BSP and LCD
# library references to the generated mbed project.
ifneq ($(filter disco_f746ng,$(ALL_TAGS)),)
MBED_PROJECT_FILES += \
BSP_DISCO_F746NG.lib \
LCD_DISCO_F746NG.lib
endif

# Builds the standalone application binary.
$(eval $(call microlite_test,image_recognition,\
$(IMAGE_RECOGNITION_SRCS),$(IMAGE_RECOGNITION_HDRS)))

# Builds and registers the unit test binary.
$(eval $(call microlite_test,image_recognition_test,\
$(IMAGE_RECOGNITION_TEST_SRCS),$(IMAGE_RECOGNITION_TEST_HDRS)))

View File

@ -0,0 +1,90 @@
# Image Recognition Example
## Table of Contents
- [Introduction](#introduction)
- [Hardware](#hardware)
- [Building](#building)
- [Building the test case](#building-the-test-case)
- [Building the image recognition application](#building-the-image-recognition-application)
- [Prerequisites](#prerequisites)
- [Compiling and flashing](#compiling-and-flashing)
## Introduction
This example shows how you can use TensorFlow Lite Micro to perform image
recognition on a
[STM32F746 discovery kit](https://www.st.com/en/evaluation-tools/32f746gdiscovery.html)
with a STM32F4DIS-CAM camera module attached. It classifies the captured image
into 1 of 10 different classes, and those classes are "Plane", "Car", "Bird",
"Cat", "Deer", "Dog", "Frog", "Horse", "Ship", "Truck".
## Hardware
[STM32F746G-DISCO board (Cortex-M7)](https://www.st.com/en/evaluation-tools/32f746gdiscovery.html)
\
[STM32F4DIS-CAM Camera module](https://www.element14.com/community/docs/DOC-67585?ICID=knode-STM32F4-cameramore)
## Building
These instructions have been tested on Ubuntu 16.04.
### Building the test case
```
$ make -f tensorflow/lite/micro/tools/make/Makefile image_recognition_test
```
This will build and run the test case. As input, the test case uses the first 10
images of the test batch included in the
[CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. Details
surrounding the dataset can be found in
[this paper](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf).
### Building the image recognition application
#### Prerequisites
Install mbed-cli: `$ pip install mbed-cli`
Install the arm-none-eabi-toolchain.
For Ubuntu, this can be done by installing the package `gcc-arm-none-eabi`. In
Ubuntu 16.04, the version included in the repository is 4.9.3 while the
recommended version is 6 and up. Later versions can be downloaded from
[here](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm/downloads)
for Windows, Mac OS X and Linux.
#### Compiling and flashing
In order to generate the mbed project, run the following command: `$ make -f
tensorflow/lite/micro/tools/make/Makefile TAGS=disco_f746ng
generate_image_recognition_mbed_project` This will copy all of the necessary
files needed to build and flash the application.
Navigate to the output folder: `$ cd
tensorflow/lite/micro/tools/make/gen/linux_x86_64/prj/image_recognition/mbed/`
The following instructions for compiling and flashing can also be found in the
file README_MBED.md in the output folder.
To load the required dependencies, run the following two commands: `$ mbed config root .` followed by `$ mbed deploy`
In order to compile, run: `mbed compile -m auto -t GCC_ARM --profile release`
`-m auto`: Automatically detects the correct target if the Discovery board is
connected to the computer. If the board is not connected, replace `auto` with
`DISCO_F746NG`. \
`-t GCC_ARM`: Specifies the toolchain used to compile. `GCC_ARM` indicates that
the arm-none-eabi-toolchain will be used. \
`--profile release`: Build the `release` profile. The different profiles can be
found under mbed-os/tools/profiles/.
This will produce a file named `mbed.bin` in
`BUILD/DISCO_F746NG/GCC_ARM-RELEASE/`. To flash it to the board, simply copy the
file to the volume mounted as a USB drive. Alternatively, the `-f` option can be
appended to flash automatically after compilation.
On Ubuntu 16.04 (and possibly other Linux distributions) there may be an error
message when running `mbed compile` saying that the Python module `pywin32`
failed to install. This message can be ignored.

View File

@ -0,0 +1,41 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_IMAGE_PROVIDER_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_IMAGE_PROVIDER_H_

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

// Initializes the image source (e.g. camera hardware). Implementations are
// platform specific; returns kTfLiteOk on success, kTfLiteError otherwise.
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter);

// This is an abstraction around an image source like a camera, and is
// expected to return 8-bit sample data. The assumption is that this will be
// called in a low duty-cycle fashion in a low-power application. In these
// cases, the imaging sensor need not be run in a streaming mode, but rather can
// be idled in a relatively low-power mode between calls to GetImage(). The
// assumption is that the overhead and time of bringing the low-power sensor out
// of this standby mode is commensurate with the expected duty cycle of the
// application. The underlying sensor may actually be put into a streaming
// configuration, but the image buffer provided to GetImage should not be
// overwritten by the driver code until the next call to GetImage();
//
// The reference implementation can have no platform-specific dependencies, so
// it just returns a static image. For real applications, you should
// ensure there's a specialized implementation that accesses hardware APIs.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int image_width,
                      int image_height, int channels, uint8_t* image_data);

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_IMAGE_PROVIDER_H_

View File

@ -0,0 +1,27 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This is a standard TensorFlow Lite model file that has been converted into a
// C data array, so it can be easily compiled into a binary for devices that
// don't have a file system. It can be created using the command:
// xxd -i image_recognition_model.tflite > image_recognition_model.cc
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_IMAGE_RECOGNITION_MODEL_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_IMAGE_RECOGNITION_MODEL_H_

// Serialized flatbuffer model data and its length in bytes; both are defined
// in the downloaded image_recognition_model.cc.
extern const unsigned char image_recognition_model_data[];
extern const unsigned int image_recognition_model_data_len;

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_IMAGE_RECOGNITION_MODEL_H_

View File

@ -0,0 +1,105 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/image_recognition_experimental/first_10_cifar_images.h"
#include "tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_model.h"
#include "tensorflow/lite/micro/examples/image_recognition_experimental/util.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
// A CIFAR10 test-batch entry is a 1-byte class label followed by a
// 3072-byte (32x32x3, channel-planar) image.
#define IMAGE_BYTES 3072
#define LABEL_BYTES 1
#define ENTRY_BYTES (IMAGE_BYTES + LABEL_BYTES)

TF_LITE_MICRO_TESTS_BEGIN

// Runs the model over the first 10 CIFAR10 test images (embedded via
// first_10_cifar_images.h) and checks the expected number of correct
// classifications.
TF_LITE_MICRO_TEST(TestImageRecognitionInvoke) {
  tflite::MicroErrorReporter micro_error_reporter;
  tflite::ErrorReporter* error_reporter = &micro_error_reporter;

  const tflite::Model* model = ::tflite::GetModel(image_recognition_model_data);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.\n",
                         model->version(), TFLITE_SCHEMA_VERSION);
  }

  // Register only the four ops this model uses.
  tflite::MicroOpResolver<4> micro_op_resolver;
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_CONV_2D,
                               tflite::ops::micro::Register_CONV_2D());
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_MAX_POOL_2D,
                               tflite::ops::micro::Register_MAX_POOL_2D());
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_FULLY_CONNECTED,
                               tflite::ops::micro::Register_FULLY_CONNECTED());
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
                               tflite::ops::micro::Register_SOFTMAX());

  const int tensor_arena_size = 45 * 1024;
  uint8_t tensor_arena[tensor_arena_size];
  tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
                                       tensor_arena_size, error_reporter);
  interpreter.AllocateTensors();

  // The model expects a 1x32x32x3 uint8 input tensor.
  TfLiteTensor* input = interpreter.input(0);
  TF_LITE_MICRO_EXPECT_NE(nullptr, input);
  TF_LITE_MICRO_EXPECT_EQ(4, input->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(32, input->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(32, input->dims->data[2]);
  TF_LITE_MICRO_EXPECT_EQ(3, input->dims->data[3]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, input->type);

  int num_correct = 0;
  int num_images = 10;
  for (int image_num = 0; image_num < num_images; image_num++) {
    memset(input->data.uint8, 0, input->bytes);
    // Label byte is at the start of each entry; the image data follows it.
    uint8_t correct_label = 0;
    correct_label =
        tensorflow_lite_micro_tools_make_downloads_cifar10_test_batch_bin
            [image_num * ENTRY_BYTES];
    memcpy(input->data.uint8,
           &tensorflow_lite_micro_tools_make_downloads_cifar10_test_batch_bin
               [image_num * ENTRY_BYTES + LABEL_BYTES],
           IMAGE_BYTES);
    // Convert from CIFAR10's planar RRR...GGG...BBB layout to the
    // interleaved RGB layout the model expects.
    reshape_cifar_image(input->data.uint8, IMAGE_BYTES);

    TfLiteStatus invoke_status = interpreter.Invoke();
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
    if (invoke_status != kTfLiteOk) {
      TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
    }

    TfLiteTensor* output = interpreter.output(0);
    int guess = get_top_prediction(output->data.uint8, 10);
    if (correct_label == guess) {
      num_correct++;
    }
  }
  // The reference model classifies 6 of the first 10 test images correctly.
  TF_LITE_MICRO_EXPECT_EQ(6, num_correct);
}

TF_LITE_MICRO_TESTS_END

View File

@ -0,0 +1,108 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// NOLINTNEXTLINE
#include "mbed.h"
#include "tensorflow/lite/micro/examples/image_recognition_experimental/image_provider.h"
#include "tensorflow/lite/micro/examples/image_recognition_experimental/image_recognition_model.h"
#include "tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/display_util.h"
#include "tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/image_util.h"
#include "tensorflow/lite/micro/examples/image_recognition_experimental/util.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
// Output channels fed to the model (RGB888) and the model's square input size.
#define NUM_OUT_CH 3
#define CNN_IMG_SIZE 32

// Raw camera frame (RGB565, NUM_IN_CH == 2 bytes per pixel; see image_util.h).
// 4-byte aligned, presumably for the camera DMA engine -- TODO confirm.
uint8_t camera_buffer[NUM_IN_CH * IN_IMG_WIDTH * IN_IMG_HEIGHT]
    __attribute__((aligned(4)));

// CIFAR10 class names, indexed by the model's output category.
static const char* labels[] = {"Plane", "Car", "Bird", "Cat", "Deer",
                               "Dog", "Frog", "Horse", "Ship", "Truck"};

// Captures frames from the camera in a loop, runs the image recognition
// model on each frame, and shows the frame plus the top prediction on the
// on-board LCD. Returns 1 on initialization failure.
int main(int argc, char** argv) {
  init_lcd();
  wait_ms(100);

  tflite::MicroErrorReporter micro_error_reporter;
  tflite::ErrorReporter* error_reporter = &micro_error_reporter;

  if (InitCamera(error_reporter) != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "Failed to init camera.");
    return 1;
  }

  const tflite::Model* model = ::tflite::GetModel(image_recognition_model_data);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.",
                         model->version(), TFLITE_SCHEMA_VERSION);
    return 1;
  }

  // Register only the four ops this model uses to keep the binary small.
  tflite::MicroOpResolver<4> micro_op_resolver;
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_CONV_2D,
                               tflite::ops::micro::Register_CONV_2D());
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_MAX_POOL_2D,
                               tflite::ops::micro::Register_MAX_POOL_2D());
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_FULLY_CONNECTED,
                               tflite::ops::micro::Register_FULLY_CONNECTED());
  micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
                               tflite::ops::micro::Register_SOFTMAX());

  constexpr int tensor_arena_size = 45 * 1024;
  uint8_t tensor_arena[tensor_arena_size];
  // Bug fix: this previously passed an undeclared identifier `resolver`;
  // the op resolver declared above is `micro_op_resolver`.
  tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,
                                       tensor_arena_size, error_reporter);
  interpreter.AllocateTensors();

  while (true) {
    TfLiteTensor* input = interpreter.input(0);

    // Capture a frame, then center-crop/downscale/convert it into the
    // model's 32x32 RGB888 input tensor.
    GetImage(error_reporter, IN_IMG_WIDTH, IN_IMG_HEIGHT, NUM_OUT_CH,
             camera_buffer);
    ResizeConvertImage(error_reporter, IN_IMG_WIDTH, IN_IMG_HEIGHT, NUM_IN_CH,
                       CNN_IMG_SIZE, CNN_IMG_SIZE, NUM_OUT_CH, camera_buffer,
                       input->data.uint8);

    if (input->type != kTfLiteUInt8) {
      TF_LITE_REPORT_ERROR(error_reporter, "Wrong input type.");
    }

    TfLiteStatus invoke_status = interpreter.Invoke();
    if (invoke_status != kTfLiteOk) {
      TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed.");
      break;
    }

    // Show the raw capture and the model input side by side on the LCD.
    display_image_rgb565(IN_IMG_WIDTH, IN_IMG_HEIGHT, camera_buffer, 40, 40);
    display_image_rgb888(CNN_IMG_SIZE, CNN_IMG_SIZE, input->data.uint8, 300,
                         100);

    // Report the highest-scoring class and its (uint8-scaled) confidence.
    TfLiteTensor* output = interpreter.output(0);
    int top_ind = get_top_prediction(output->data.uint8, 10);
    print_prediction(labels[top_ind]);
    print_confidence(output->data.uint8[top_ind]);
  }
  return 0;
}

View File

@ -0,0 +1,79 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/display_util.h"

#include <stdint.h>
#include <stdio.h>

#include "LCD_DISCO_F746NG/LCD_DISCO_F746NG.h"
// Global LCD driver instance for the 32F746G-Discovery board.
LCD_DISCO_F746NG lcd;

extern "C" {
// defined in stm32746g_discovery_camera.c
extern DCMI_HandleTypeDef hDcmiHandler;

// Camera (DCMI) and DMA interrupt handlers, forwarded to the HAL so that
// camera captures can complete.
void DCMI_IRQHandler(void) { HAL_DCMI_IRQHandler(&hDcmiHandler); }
void DMA2_Stream1_IRQHandler(void) {
  HAL_DMA_IRQHandler(hDcmiHandler.DMA_Handle);
}
}

// Scratch buffer for text rendered to the LCD by the print_* helpers below.
static char lcd_output_string[50];

// Clears the display to white.
void init_lcd() { lcd.Clear(LCD_COLOR_WHITE); }
// Draws an interleaved RGB888 image (3 bytes per pixel, row-major) on the
// LCD, with its top-left corner at (x_loc, y_loc).
void display_image_rgb888(int x_dim, int y_dim, const uint8_t* image_data,
                          int x_loc, int y_loc) {
  const uint8_t* pix = image_data;
  for (int row = 0; row < y_dim; ++row) {
    for (int col = 0; col < x_dim; ++col) {
      // Pack into the LCD's ARGB8888 pixel format with full alpha.
      uint8_t alpha = 0xFF;
      int argb = alpha << 24 | pix[0] << 16 | pix[1] << 8 | pix[2];
      lcd.DrawPixel(x_loc + col, y_loc + row, argb);
      pix += 3;
    }
  }
}
// Draws an RGB565 image (2 bytes per pixel, row-major) on the LCD in the
// rectangle whose top-left corner is (x_loc, y_loc). The source image is
// inverted, so it is drawn mirrored on both axes to appear upright.
void display_image_rgb565(int x_dim, int y_dim, const uint8_t* image_data,
                          int x_loc, int y_loc) {
  for (int y = 0; y < y_dim; ++y) {
    for (int x = 0; x < x_dim; ++x, image_data += 2) {
      uint8_t a = 0xFF;
      uint8_t pix_lo = image_data[0];
      uint8_t pix_hi = image_data[1];
      // RGB565 -> RGB888: high bits carry the color, low bits are zero-filled.
      uint8_t r = (0xF8 & pix_hi);
      uint8_t g = ((0x07 & pix_hi) << 5) | ((0xE0 & pix_lo) >> 3);
      uint8_t b = (0x1F & pix_lo) << 3;
      int pixel = a << 24 | r << 16 | g << 8 | b;
      // Inverted image, so draw from bottom-right to top-left.
      // Bug fix: was (x_dim - x)/(y_dim - y), an off-by-one that drew one
      // pixel outside the target rectangle and left row/column 0 empty
      // (ResizeConvertImage applies the same inversion with the -1 form).
      lcd.DrawPixel(x_loc + (x_dim - 1 - x), y_loc + (y_dim - 1 - y), pixel);
    }
  }
}
// Renders the predicted class label on LCD text line 8.
void print_prediction(const char* prediction) {
  // snprintf (was sprintf) so an over-long label cannot overflow the
  // 50-byte lcd_output_string buffer.
  snprintf(lcd_output_string, sizeof(lcd_output_string), " Prediction: %s ",
           prediction);
  lcd.DisplayStringAt(0, LINE(8), (uint8_t*)lcd_output_string, LEFT_MODE);
}
// Renders the top score as a percentage (uint8 score scaled by 100/255) on
// LCD text line 9.
void print_confidence(uint8_t max_score) {
  // snprintf (was sprintf) bounds the write to the shared 50-byte buffer.
  snprintf(lcd_output_string, sizeof(lcd_output_string),
           " Confidence: %.1f%% ", (max_score / 255.0) * 100.0);
  lcd.DisplayStringAt(0, LINE(9), (uint8_t*)lcd_output_string, LEFT_MODE);
}

View File

@ -0,0 +1,33 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_STM32F746_DISCOVERY_DISPLAY_UTIL_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_STM32F746_DISCOVERY_DISPLAY_UTIL_H_

#include <stdint.h>

// Initializes the on-board LCD (clears it to a blank background).
void init_lcd();

// Draws an interleaved RGB888 image (3 bytes/pixel) with its top-left
// corner at (x_loc, y_loc).
void display_image_rgb888(int x_dim, int y_dim, const uint8_t* image_data,
                          int x_loc, int y_loc);

// Draws an RGB565 image (2 bytes/pixel); the image is assumed inverted and
// is mirrored on both axes when drawn.
void display_image_rgb565(int x_dim, int y_dim, const uint8_t* image_data,
                          int x_loc, int y_loc);

// Shows the predicted label / its confidence score on the LCD text lines.
void print_prediction(const char* prediction);
void print_confidence(uint8_t max_score);

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_STM32F746_DISCOVERY_DISPLAY_UTIL_H_

View File

@ -0,0 +1,39 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/image_recognition_experimental/image_provider.h"
#include "BSP_DISCO_F746NG/Drivers/BSP/STM32746G-Discovery/stm32746g_discovery_camera.h"
// Initializes the STM32F4DIS-CAM camera via the Discovery BSP at 160x120
// resolution. Reports and returns kTfLiteError if the BSP init fails.
TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
  if (BSP_CAMERA_Init(RESOLUTION_R160x120) != CAMERA_OK) {
    TF_LITE_REPORT_ERROR(error_reporter, "Failed to init camera.\n");
    return kTfLiteError;
  }
  return kTfLiteOk;
}
// Starts a snapshot capture from the camera into `frame`. The dimension and
// channel arguments are ignored here: the BSP capture always produces the
// configured 160x120 RGB565 frame.
// NOTE(review): BSP_CAMERA_SnapshotStart looks like it starts an
// asynchronous (DMA-driven) capture -- confirm the frame is complete before
// the caller reads it.
TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int frame_width,
                      int frame_height, int channels, uint8_t* frame) {
  // For consistency, the signature of this function is the
  // same as the GetImage-function in micro_vision.
  (void)error_reporter;
  (void)frame_width;
  (void)frame_height;
  (void)channels;
  BSP_CAMERA_SnapshotStart(frame);
  return kTfLiteOk;
}

View File

@ -0,0 +1,49 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/examples/image_recognition_experimental/stm32f746_discovery/image_util.h"
// Center-crops, nearest-neighbor downscales, and converts an RGB565 input
// frame into an interleaved RGB888 output image, while also mirroring both
// axes to correct the inverted capture. Assumes in_frame_width >=
// in_frame_height and input dims >= output dims -- TODO confirm.
void ResizeConvertImage(tflite::ErrorReporter* error_reporter,
                        int in_frame_width, int in_frame_height,
                        int num_in_channels, int out_frame_width,
                        int out_frame_height, int channels,
                        const uint8_t* in_image, uint8_t* out_image) {
  // offset so that only the center part of rectangular image is selected for
  // resizing
  int width_offset = ((in_frame_width - in_frame_height) / 2) * num_in_channels;

  // Subsampling strides in *bytes*, not pixels: the pixel ratio is scaled by
  // the input channel count. The smaller of the two is used for both axes so
  // the sampled region stays square.
  int yresize_ratio = (in_frame_height / out_frame_height) * num_in_channels;
  int xresize_ratio = (in_frame_width / out_frame_width) * num_in_channels;
  int resize_ratio =
      (xresize_ratio < yresize_ratio) ? xresize_ratio : yresize_ratio;

  for (int y = 0; y < out_frame_height; y++) {
    for (int x = 0; x < out_frame_width; x++) {
      // Byte offset of the sampled RGB565 pixel in the input frame.
      int orig_img_loc =
          y * in_frame_width * resize_ratio + x * resize_ratio + width_offset;
      // correcting the image inversion here
      int out_img_loc = ((out_frame_height - 1 - y) * out_frame_width +
                         (out_frame_width - 1 - x)) *
                        channels;
      uint8_t pix_lo = in_image[orig_img_loc];
      uint8_t pix_hi = in_image[orig_img_loc + 1];
      // convert RGB565 to RGB888
      out_image[out_img_loc] = (0xF8 & pix_hi);
      out_image[out_img_loc + 1] =
          ((0x07 & pix_hi) << 5) | ((0xE0 & pix_lo) >> 3);
      out_image[out_img_loc + 2] = (0x1F & pix_lo) << 3;
    }
  }
}

View File

@ -0,0 +1,32 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_STM32F746_DISCOVERY_IMAGE_UTIL_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_STM32F746_DISCOVERY_IMAGE_UTIL_H_

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"

// Camera capture format: 160x120 pixels, 2 bytes per pixel (RGB565).
#define NUM_IN_CH 2
#define IN_IMG_WIDTH 160
#define IN_IMG_HEIGHT 120

// Center-crops, downscales, and converts an RGB565 input frame into an
// interleaved RGB888 output image (see image_util.cc).
void ResizeConvertImage(tflite::ErrorReporter* error_reporter,
                        int in_frame_width, int in_frame_height,
                        int num_in_channels, int out_frame_width,
                        int out_frame_height, int channels,
                        const uint8_t* in_frame, uint8_t* out_frame);

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_STM32F746_DISCOVERY_IMAGE_UTIL_H_

View File

@ -0,0 +1,64 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_UTIL_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_UTIL_H_
#include <stdint.h>
#include <string.h>
// CIFAR10 image layout: 3072 bytes total, stored as three 1024-byte planes
// (all red bytes, then all green, then all blue).
#define IMAGE_SIZE 3072
#define CHANNEL_SIZE 1024
#define R_CHANNEL_OFFSET 0
#define G_CHANNEL_OFFSET CHANNEL_SIZE
#define B_CHANNEL_OFFSET (CHANNEL_SIZE * 2)

// Returns the index of the highest-scoring category. Ties are resolved in
// favor of the lowest index (strict greater-than comparison).
int get_top_prediction(const uint8_t* predictions, int num_categories) {
  int best = 0;
  for (int i = 1; i < num_categories; ++i) {
    if (predictions[i] > predictions[best]) {
      best = i;
    }
  }
  return best;
}

// Converts a CIFAR10 image in place from planar RRR...GGG...BBB layout to
// interleaved RGBRGB... layout. num_bytes is the number of bytes copied into
// the scratch buffer; assumed to be IMAGE_SIZE for a full image -- the loop
// always rewrites all IMAGE_SIZE bytes.
void reshape_cifar_image(uint8_t* image_data, int num_bytes) {
  uint8_t planar[IMAGE_SIZE];
  memcpy(planar, image_data, num_bytes);
  for (int i = 0; i < CHANNEL_SIZE; ++i) {
    image_data[3 * i] = planar[R_CHANNEL_OFFSET + i];
    image_data[3 * i + 1] = planar[G_CHANNEL_OFFSET + i];
    image_data[3 * i + 2] = planar[B_CHANNEL_OFFSET + i];
  }
}
#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_IMAGE_RECOGNITION_EXPERIMENTAL_UTIL_H_

View File

@ -82,6 +82,13 @@ patch_kissfft() {
echo "Finished patching kissfft"
}
# Create a header file containing an array with the first 10 images from the
# CIFAR10 test dataset.
# $1: directory the CIFAR10 archive was extracted into.
# 30730 bytes = 10 entries x (1 label byte + 3072 image bytes).
# NOTE(review): `sed -i` with no suffix is GNU sed syntax and fails on
# BSD/macOS sed -- confirm this script is Linux-only.
patch_cifar10_dataset() {
  xxd -l 30730 -i ${1}/test_batch.bin ${1}/../../../../examples/image_recognition_experimental/first_10_cifar_images.h
  # xxd emits a mutable array; make it const so it can live in flash/rodata.
  sed -i "s/unsigned char/const unsigned char/g" ${1}/../../../../examples/image_recognition_experimental/first_10_cifar_images.h
}
# Build the embARC MLI library.
# $1: embarc_mli source directory, $2: TCF (tool configuration) file.
build_embarc_mli() {
  gmake -j 4 -C ${1}/lib/make TCF_FILE=${2}
}
@ -160,6 +167,8 @@ download_and_extract() {
patch_am_sdk ${dir}
elif [[ ${action} == "patch_kissfft" ]]; then
patch_kissfft ${dir}
elif [[ ${action} == "patch_cifar10_dataset" ]]; then
patch_cifar10_dataset ${dir}
elif [[ ${action} == "build_embarc_mli" ]]; then
build_embarc_mli ${dir} ${action_param1}
elif [[ ${action} ]]; then

View File

@ -80,6 +80,27 @@ def move_person_data_experimental(library_dir):
source_file.write(file_contents)
def move_image_data_experimental(library_dir):
  """Moves the downloaded image recognition model into the examples folder.

  Relocates the generated image_recognition_model.cpp from the downloads
  tree into the Arduino example folder and rewrites its model-header include
  to a path relative to that folder.

  Args:
    library_dir: Root of the generated Arduino library.
  """
  old_image_data_path = os.path.join(
      library_dir, 'src/tensorflow/lite/micro/tools/make/downloads/' +
      'image_recognition_model/image_recognition_model.cpp')
  new_image_data_path = os.path.join(
      library_dir,
      'examples/image_recognition_experimental/image_recognition_model.cpp')
  if os.path.exists(old_image_data_path):
    os.rename(old_image_data_path, new_image_data_path)
    # Update include.
    # Bug fix: the search string previously said 'image_recognition_example',
    # which never matches the 'image_recognition_experimental' path used
    # everywhere else, so the include was silently left un-rewritten.
    with open(new_image_data_path, 'r') as source_file:
      file_contents = source_file.read()
    file_contents = file_contents.replace(
        six.ensure_str(
            '#include "tensorflow/lite/micro/examples/' +
            'image_recognition_experimental/image_recognition_model.h"'),
        '#include "image_recognition_model.h"')
    with open(new_image_data_path, 'w') as source_file:
      source_file.write(file_contents)
def rename_example_main_inos(library_dir):
"""Makes sure the .ino sketch files match the example name."""
search_path = os.path.join(library_dir, 'examples/*', 'main.ino')
@ -97,6 +118,7 @@ def main(unparsed_args):
rename_example_main_inos(library_dir)
move_person_data(library_dir)
move_person_data_experimental(library_dir)
move_image_data_experimental(library_dir)
def parse_args():

View File

@ -59,6 +59,12 @@ KISSFFT_MD5="438ba1fef5783cc5f5f201395cc477ca"
RUY_URL="https://github.com/google/ruy/archive/9f53ba413e6fc879236dcaa3e008915973d67a4f.zip"
RUY_MD5="ce2c2444cced9dcf6ca6bc908061faa8"
# CIFAR10 binary test dataset; the first 10 test images are extracted into a
# header by the patch_cifar10_dataset action at download time.
CIFAR10_DATASET_URL="https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz"
CIFAR10_DATASET_MD5="c32a1d4ab5d03f1284b67883e8d87530"

# Pre-trained CIFAR10 model for the image_recognition_experimental example.
IMAGE_RECOGNITION_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/models/tflite/cifar_image_recognition_model_2020_4_14.zip"
IMAGE_RECOGNITION_MODEL_MD5 := "2b886156e7ef4d6e53d0f1a4bc800e56"
PERSON_MODEL_URL := "https://storage.googleapis.com/download.tensorflow.org/data/tf_lite_micro_person_data_grayscale_2019_11_21.zip"
PERSON_MODEL_MD5 := "fe2934bd0788f1dcc7af3f0a954542ab"

View File

@ -5,5 +5,5 @@ FROM python:3.5-stretch
LABEL maintainer="Pete Warden <petewarden@google.com>"
RUN apt-get update && apt-get install -y zip
RUN apt-get update && apt-get install -y zip xxd
RUN pip install six