Clean up the way logging is configured and improve error checking. Make failure conditions in micro_interpreter clearer. Reduce tensor arena size in micro_vision so that it builds for Sparkfun Edge.
PiperOrigin-RevId: 258473403
parent 41730b8b0d
commit 65db2281c3
@@ -36,11 +36,16 @@ limitations under the License.
 #include "tensorflow/lite/experimental/micro/debug_log.h"
 
 #include <cstdio>
 
 // These are headers from Ambiq's Apollo3 SDK.
 #include "am_bsp.h"         // NOLINT
 #include "am_mcu_apollo.h"  // NOLINT
 #include "am_util.h"        // NOLINT
 
-extern "C" void DebugLog(const char* s) { am_util_stdio_printf("%s", s); }
+extern "C" void DebugLog(const char* s) {
+  static bool is_initialized = false;
+  if (!is_initialized) {
+    am_bsp_itm_printf_enable();
+    is_initialized = true;
+  }
+
+  am_util_stdio_printf("%s", s);
+}
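The rewritten `DebugLog()` enables the ITM printf interface on first use, so callers no longer need a separate board setup step before logging. A minimal caller-side sketch, assuming the `MicroErrorReporter` from this tree routes its `Report()` output through `DebugLog()`:

```cpp
#include "tensorflow/lite/experimental/micro/micro_error_reporter.h"

int main() {
  tflite::MicroErrorReporter micro_error_reporter;
  tflite::ErrorReporter* error_reporter = &micro_error_reporter;
  // Even the very first message prints: DebugLog() lazily enables ITM printf.
  error_reporter->Report("Boot message %d", 1);
  return 0;
}
```

The function-local static guard runs the enable call at most once per boot; with a single logging core and no reentrancy, this needs no locking.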
@@ -120,9 +120,6 @@ void boost_mode_enable(tflite::ErrorReporter* error_reporter, bool bEnable) {
 }  // namespace
 
 TfLiteStatus InitCamera(tflite::ErrorReporter* error_reporter) {
-  // Enable the ITM print interface.
-  am_bsp_itm_printf_enable();
-
   error_reporter->Report("Initializing HM01B0...\n");
 
   am_hal_clkgen_control(AM_HAL_CLKGEN_CONTROL_SYSCLK_MAX, 0);
@@ -187,8 +184,9 @@ TfLiteStatus GetImage(tflite::ErrorReporter* error_reporter, int frame_width,
   hm01b0_blocking_read_oneframe_scaled(frame, frame_width, frame_height,
                                        channels);
 
-  am_util_delay_ms(2000);
 #ifdef DEMO_HM01B0_FRAMEBUFFER_DUMP_ENABLE
+  // Allow some time to see result of previous inference before dumping image.
+  am_util_delay_ms(2000);
   hm01b0_framebuffer_dump(frame, frame_width * frame_height * channels);
 #endif
 
@@ -1,10 +1,13 @@
 ifeq ($(TARGET),$(filter $(TARGET),apollo3evb sparkfun_edge))
   MICRO_VISION_SRCS += \
   tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0.c \
-  tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_debug.c
+  tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_debug.c \
+  tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_optimized.c
 
   MICRO_VISION_HDRS += \
   tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0.h \
   tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_debug.h \
+  tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_optimized.h \
   tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_RAW8_QVGA_8bits_lsb_5fps.h \
-  tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_Walking1s_01.h
+  tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/HM01B0_Walking1s_01.h \
+  tensorflow/lite/experimental/micro/examples/micro_vision/himax_driver/platform_Sparkfun_Edge.h
@@ -31,7 +31,7 @@ limitations under the License.
 #ifdef ARDUINO
 constexpr int tensor_arena_size = 10 * 1024;
 #else  // ARDUINO
-constexpr int tensor_arena_size = 291 * 1024;
+constexpr int tensor_arena_size = 270 * 1024;
 #endif  // ARDUINO
 uint8_t tensor_arena[tensor_arena_size];
 
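The arena shrinks from 291 KB to 270 KB so the static buffer links on the Sparkfun Edge; its Apollo3 Blue MCU has 384 KB of SRAM that the arena must share with the stack, globals, and the camera frame buffer. A hypothetical compile-time guard, not part of this commit and with `kApollo3SramBytes` an assumed constant, that turns the overflow into a readable error:

```cpp
#include <cstdint>

constexpr int kApollo3SramBytes = 384 * 1024;  // assumed total SRAM on target
constexpr int tensor_arena_size = 270 * 1024;
// Hypothetical guard: fail at compile time, not at link time, when the arena
// alone cannot fit, leaving headroom for stack, globals, and the frame buffer.
static_assert(tensor_arena_size < kApollo3SramBytes,
              "tensor arena exceeds available SRAM");
uint8_t tensor_arena[tensor_arena_size];
```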
@@ -46,7 +46,7 @@ int main(int argc, char* argv[]) {
   if (model->version() != TFLITE_SCHEMA_VERSION) {
     error_reporter->Report(
         "Model provided is schema version %d not equal "
-        "to supported version %d.\n",
+        "to supported version %d.",
         model->version(), TFLITE_SCHEMA_VERSION);
   }
 
@@ -63,13 +63,14 @@ int main(int argc, char* argv[]) {
 
   while (true) {
     // Get image from provider.
-    GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
-             input->data.uint8);
+    if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
+                              input->data.uint8)) {
+      error_reporter->Report("Image capture failed.");
+    }
 
     // Run the model on this input and make sure it succeeds.
-    TfLiteStatus invoke_status = interpreter.Invoke();
-    if (invoke_status != kTfLiteOk) {
-      error_reporter->Report("Invoke failed\n");
+    if (kTfLiteOk != interpreter.Invoke()) {
+      error_reporter->Report("Invoke failed.");
     }
 
     TfLiteTensor* output = interpreter.output(0);
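Both new checks use the constant-first `kTfLiteOk != ...` comparison and deliberately log-and-continue: a failed capture still falls through to `Invoke()` on whatever the input tensor currently holds. A hypothetical stricter variant, not what this commit does, would skip inference for that frame:

```cpp
// Hypothetical variant of the loop body above: bail out to the next
// iteration on capture failure instead of invoking on stale input data.
if (kTfLiteOk != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels,
                          input->data.uint8)) {
  error_reporter->Report("Image capture failed.");
  continue;
}
```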
@@ -69,7 +69,8 @@ MicroInterpreter::MicroInterpreter(const Model* model,
       error_reporter_(error_reporter),
       context_(),
       allocator_(&context_, model_, tensor_arena, tensor_arena_size,
-                 error_reporter_) {
+                 error_reporter_),
+      tensors_allocated_(false) {
   auto* subgraphs = model->subgraphs();
   if (subgraphs->size() != 1) {
     error_reporter->Report("Only 1 subgraph is currently supported.\n");
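`tensors_allocated_(false)` joins the member initializer list. C++ constructs non-static members in declaration order, not initializer-list order, so the header change below declares the flag after `allocator_` to keep the two orders in agreement. A small self-contained illustration with a hypothetical class:

```cpp
#include <cstdio>

// Hypothetical class illustrating the rule: members are constructed in the
// order they are declared, so the initializer list is written to match.
class LazyBuffer {
 public:
  LazyBuffer() : capacity_(64), initialized_(false) {}
  void Describe() const {
    std::printf("capacity=%d initialized=%d\n", capacity_, initialized_);
  }

 private:
  int capacity_;      // declared first, constructed first
  bool initialized_;  // declared second, constructed second
};
```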
@@ -92,7 +93,10 @@ TfLiteStatus MicroInterpreter::RegisterPreallocatedInput(uint8_t* buffer,
 }
 
 TfLiteStatus MicroInterpreter::AllocateTensors() {
-  return allocator_.AllocateTensors();
+  TfLiteStatus status = allocator_.AllocateTensors();
+  TF_LITE_ENSURE_OK(&context_, status);
+  tensors_allocated_ = true;
+  return kTfLiteOk;
 }
 
 TfLiteStatus MicroInterpreter::Invoke() {
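`TF_LITE_ENSURE_OK` returns the allocator's status from `AllocateTensors()` on failure, so `tensors_allocated_` is only set once allocation has actually succeeded. To the best of my knowledge the macro expands to roughly the following; treat the exact definition as an assumption and see the TensorFlow Lite C headers for the authoritative form:

```cpp
// Approximate expansion of TF_LITE_ENSURE_OK (assumption), for readability:
#define TF_LITE_ENSURE_OK(context, status) \
  do {                                     \
    if ((status) != kTfLiteOk) {           \
      return status;                       \
    }                                      \
  } while (0)
```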
@@ -100,6 +104,12 @@ TfLiteStatus MicroInterpreter::Invoke() {
     error_reporter_->Report("Invoke() called after initialization failed\n");
     return kTfLiteError;
   }
+
+  // Ensure tensors are allocated before the interpreter is invoked to avoid
+  // difficult to debug segfaults.
+  if (!tensors_allocated_) {
+    AllocateTensors();
+  }
   TfLiteStatus status = kTfLiteOk;
   auto opcodes = model_->operator_codes();
   for (int i = 0; i < operators_->size(); ++i) {
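With the guard in place, callers may omit `AllocateTensors()` entirely; the first `Invoke()` performs the allocation instead of reading unallocated tensor memory. A sketch of the simplified call site, with variable names borrowed from the micro_vision example and to be treated as assumptions:

```cpp
tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                     tensor_arena_size, error_reporter);
// No explicit interpreter.AllocateTensors() call needed any more; the
// first Invoke() allocates lazily before running the graph.
if (kTfLiteOk != interpreter.Invoke()) {
  error_reporter->Report("Invoke failed.");
}
```

Note that the lazy path discards the status returned by `AllocateTensors()`, so callers that need to distinguish allocation failure from inference failure should still call it explicitly.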
@@ -76,6 +76,7 @@ class MicroInterpreter {
   ErrorReporter* error_reporter_;
   TfLiteContext context_;
   MicroAllocator allocator_;
+  bool tensors_allocated_;
 
   TfLiteStatus initialization_status_;
   const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;