Update network_tester.

New features:
- Multiple inputs
- Multiple outputs
- Output in JSON format
- Invoke() can be called more than once

Updated README
Jens Elofsson 2020-04-07 11:20:02 +02:00
parent 7404928316
commit cc08b5dff7
6 changed files with 104 additions and 33 deletions

tensorflow/lite/micro/examples/network_tester/Makefile.inc

@@ -33,6 +33,10 @@ ifeq ($(COMPARE_OUTPUT_DATA),no)
 CXXFLAGS += -DNO_COMPARE_OUTPUT_DATA
 endif
+ifdef NUM_INFERENCES
+CXXFLAGS += -DNUM_INFERENCES=$(NUM_INFERENCES)
+endif
 # Builds a standalone object recognition binary.
 $(eval $(call microlite_test,network_tester_test,\
 $(NETWORK_TESTER_TEST_SRCS),$(NETWORK_TESTER_TEST_HDRS)))
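With this hook in place, the new variable can be set from the make command line like the other options documented in the README below. A hypothetical invocation (target and Makefile path taken from the README):

```
make -f tensorflow/lite/micro/tools/make/Makefile network_tester_test NUM_INFERENCES=2
```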

tensorflow/lite/micro/examples/network_tester/README.md

@@ -34,8 +34,40 @@ make -f tensorflow/lite/micro/tools/make/Makefile network_tester_test \
`ARENA_SIZE`: The size of the memory to be allocated (in bytes) by the
interpreter. \
`NUM_BYTES_TO_PRINT`: The maximum number of bytes of the output data to print. \
Defaults to 0 if not specified. \
If set to 0, all bytes of the output are printed. \
`COMPARE_OUTPUT_DATA`: If set to "no", the output data is not compared to the
expected output data. This can be useful, e.g., if the execution time needs to
be minimized or there is no expected output data. If omitted, the output data
is compared to the expected output. \
`NUM_INFERENCES`: Defines how many inferences are made. Defaults to 1. \
The output is printed in JSON format using `printf`:
```
num_of_outputs: 1
output_begin
[
{
"dims": [4,1,2,2,1],
"data_address": "0x000000",
"data":"0x06,0x08,0x0e,0x10"
}]
output_end
```
The first element of `dims` is the number of dimensions, followed by the size
of each dimension. If there are multiple output tensors, the output will look
like this:
```
num_of_outputs: 2
output_begin
[
{
"dims": [4,1,2,2,1],
"data_address": "0x000000",
"data":"0x06,0x08,0x0e,0x10"
},
{
"dims": [4,1,2,2,1],
"data_address": "0x111111",
"data":"0x06,0x08,0x0e,0x10"
}]
output_end
```
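Since the JSON is emitted with plain `printf`, host-side tooling has to parse it out of the captured log. A minimal sketch of decoding the `data` string back into raw bytes, using a hypothetical helper named `DecodeDataString` (not part of this commit):

```
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>

// Hypothetical host-side helper: turns the "data" string from the JSON
// output (e.g. "0x06,0x08,0x0e,0x10") back into raw bytes.
std::vector<unsigned char> DecodeDataString(const char* s) {
  std::vector<unsigned char> bytes;
  for (const char* p = s; *p != '\0';) {
    // strtoul with base 16 accepts the "0x" prefix and stops at the comma.
    bytes.push_back(static_cast<unsigned char>(std::strtoul(p, nullptr, 16)));
    const char* comma = std::strchr(p, ',');
    if (comma == nullptr) break;  // last byte consumed
    p = comma + 1;
  }
  return bytes;
}

int main() {
  const std::vector<unsigned char> bytes =
      DecodeDataString("0x06,0x08,0x0e,0x10");
  for (unsigned char b : bytes) std::printf("%u ", b);  // prints: 6 8 14 16
  std::printf("\n");
}
```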

tensorflow/lite/micro/examples/network_tester/expected_output_data.h

@@ -17,6 +17,6 @@ limitations under the License.
 #define TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_EXPECTED_OUTPUT_DATA_H_
 static unsigned int expected_output_data_len = 4;
-static unsigned char expected_output_data[] = {6, 8, 14, 16};
+static unsigned char expected_output_data[1][4] = {6, 8, 14, 16};
 #endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_EXPECTED_OUTPUT_DATA_H_
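The new first array dimension is one row per output tensor. As a hypothetical illustration (values chosen to mirror the default data), a model with two 4-byte outputs would look like:

```
static unsigned int expected_output_data_len = 4;
static unsigned char expected_output_data[2][4] = {{6, 8, 14, 16},
                                                   {6, 8, 14, 16}};
```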

tensorflow/lite/micro/examples/network_tester/input_data.h

@@ -17,7 +17,7 @@ limitations under the License.
 #define TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_INPUT_DATA_H_
 static const int input_data_len = 16;
-static const unsigned char input_data[] = {1, 2, 3, 4, 5, 6, 7, 8,
-                                           9, 10, 11, 12, 13, 14, 15, 16};
+static const unsigned char input_data[1][16] = {{1, 2, 3, 4, 5, 6, 7, 8,
+                                                 9, 10, 11, 12, 13, 14, 15, 16}};
 #endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_INPUT_DATA_H_
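Likewise for inputs: the updated test below copies row `i` of `input_data` into input tensor `i` via `memcpy`, so a hypothetical two-input model would carry one row per input:

```
static const int input_data_len = 16;
static const unsigned char input_data[2][16] = {
    {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
    {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}};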

tensorflow/lite/micro/examples/network_tester/network_model.h

@@ -1,8 +1,11 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -64,4 +67,4 @@ const unsigned char network_model[] = {
     0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11};
 const unsigned int network_model_len = 576;
-#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_NETWORK_TESTER_NETWORK_MODEL_H_
+#endif

tensorflow/lite/micro/examples/network_tester/network_tester_test.cc

@@ -1,8 +1,11 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -10,44 +13,54 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
+#include "tensorflow/lite/micro/examples/network_tester/expected_output_data.h"
+#include "tensorflow/lite/micro/examples/network_tester/input_data.h"
+#include "tensorflow/lite/micro/examples/network_tester/network_model.h"
 #include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
 #include "tensorflow/lite/micro/micro_error_reporter.h"
 #include "tensorflow/lite/micro/micro_interpreter.h"
+#include "tensorflow/lite/micro/testing/micro_test.h"
+#include "tensorflow/lite/micro/testing/test_utils.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 #include "tensorflow/lite/version.h"
-#include "tensorflow/lite/micro/examples/network_tester/expected_output_data.h"
-#include "tensorflow/lite/micro/examples/network_tester/input_data.h"
-#include "tensorflow/lite/micro/examples/network_tester/network_model.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-#include "tensorflow/lite/micro/testing/test_utils.h"
 #ifndef TENSOR_ARENA_SIZE
 #define TENSOR_ARENA_SIZE (1024)
 #endif
+#ifndef NUM_INFERENCES
+#define NUM_INFERENCES 1
+#endif
 uint8_t tensor_arena[TENSOR_ARENA_SIZE];
 #ifdef NUM_BYTES_TO_PRINT
 inline void print_output_data(TfLiteTensor* output) {
   int num_bytes_to_print =
-      (output->bytes < NUM_BYTES_TO_PRINT) ? output->bytes : NUM_BYTES_TO_PRINT;
+      ((output->bytes < NUM_BYTES_TO_PRINT) || NUM_BYTES_TO_PRINT == 0)
+          ? output->bytes
+          : NUM_BYTES_TO_PRINT;
   int dims_size = output->dims->size;
-  printf("dims: {%d,", dims_size);
+  printf("{\n");
+  printf("\"dims\": [%d,", dims_size);
   for (int i = 0; i < output->dims->size - 1; ++i) {
     printf("%d,", output->dims->data[i]);
   }
-  printf("%d}\n", output->dims->data[dims_size - 1]);
+  printf("%d],\n", output->dims->data[dims_size - 1]);
-  printf("data_address: %p\n", output->data.raw);
-  printf("data:\n{");
+  printf("\"data_address\": \"%p\",\n", output->data.raw);
+  printf("\"data\":\"");
   for (int i = 0; i < num_bytes_to_print - 1; ++i) {
-    if (i % 16 == 0) {
+    if (i % 16 == 0 && i != 0) {
       printf("\n");
     }
     printf("0x%02x,", output->data.uint8[i]);
   }
-  printf("0x%02x\n}\n", output->data.uint8[num_bytes_to_print - 1]);
+  printf("0x%02x\"\n", output->data.uint8[num_bytes_to_print - 1]);
+  printf("}");
 }
 #endif
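The reworked ternary gives `NUM_BYTES_TO_PRINT=0` the meaning "print all output bytes". A standalone sketch of that clamp logic, with `BytesToPrint` as a hypothetical stand-in for the expression above:

```
#include <cstdio>

// Sketch of the updated clamp: a limit of 0 requests everything.
static int BytesToPrint(int output_bytes, int num_bytes_to_print) {
  return (output_bytes < num_bytes_to_print || num_bytes_to_print == 0)
             ? output_bytes
             : num_bytes_to_print;
}

int main() {
  std::printf("%d\n", BytesToPrint(16, 0));   // 16: 0 requests all bytes
  std::printf("%d\n", BytesToPrint(16, 4));   // 4: capped by the limit
  std::printf("%d\n", BytesToPrint(16, 32));  // 16: capped by output size
}
```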
@@ -63,7 +76,7 @@ TF_LITE_MICRO_TEST(TestInvoke) {
                            "Model provided is schema version %d not equal "
                            "to supported version %d.\n",
                            model->version(), TFLITE_SCHEMA_VERSION);
-    return 1;
+    return kTfLiteError;
   }
   tflite::ops::micro::AllOpsResolver resolver;
@@ -74,29 +87,48 @@ TF_LITE_MICRO_TEST(TestInvoke) {
   TfLiteStatus allocate_status = interpreter.AllocateTensors();
   if (allocate_status != kTfLiteOk) {
     TF_LITE_REPORT_ERROR(error_reporter, "Tensor allocation failed\n");
+    return kTfLiteError;
   }
   TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, allocate_status);
-  TfLiteTensor* input = interpreter.input(0);
-  memcpy(input->data.uint8, input_data, input->bytes);
-  TfLiteStatus invoke_status = interpreter.Invoke();
-  if (invoke_status != kTfLiteOk) {
-    TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
-  }
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
-  TfLiteTensor* output = interpreter.output(0);
+  for (int n = 0; n < NUM_INFERENCES; n++) {
+    for (int i = 0; i < interpreter.inputs_size(); ++i) {
+      TfLiteTensor* input = interpreter.input(i);
+      memcpy(input->data.uint8, input_data[i], input->bytes);
+    }
+    TfLiteStatus invoke_status = interpreter.Invoke();
+    if (invoke_status != kTfLiteOk) {
+      TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed\n");
+      return kTfLiteError;
+    }
+    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);
 #ifdef NUM_BYTES_TO_PRINT
-  print_output_data(output);
+    // Print all of the output data, or the first NUM_BYTES_TO_PRINT bytes,
+    // whichever comes first, as well as the output shape.
+    printf("num_of_outputs: %d\n", interpreter.outputs_size());
+    printf("output_begin\n");
+    printf("[\n");
+    for (int i = 0; i < interpreter.outputs_size(); i++) {
+      TfLiteTensor* output = interpreter.output(i);
+      print_output_data(output);
+      if (i != interpreter.outputs_size() - 1) {
+        printf(",\n");
+      }
+    }
+    printf("]\n");
+    printf("output_end\n");
 #endif
 #ifndef NO_COMPARE_OUTPUT_DATA
-  for (int i = 0; i < output->bytes; ++i) {
-    TF_LITE_MICRO_EXPECT_EQ(output->data.uint8[i], expected_output_data[i]);
-  }
+    for (int i = 0; i < interpreter.outputs_size(); i++) {
+      TfLiteTensor* output = interpreter.output(i);
+      for (int j = 0; j < output->bytes; ++j) {
+        TF_LITE_MICRO_EXPECT_EQ(output->data.uint8[j],
+                                expected_output_data[i][j]);
+      }
+    }
 #endif
+  }
   TF_LITE_REPORT_ERROR(error_reporter, "Ran successfully\n");
 }