Modify conv and depthwise conv benchmarks to use TFLM time methods instead of clock() and TF_LITE_REPORT_ERROR instead of printf.

PiperOrigin-RevId: 306276045
Change-Id: I4870b9605b13d453aefb903ea035e708d8a730ee
Author: Nat Jeffries, 2020-04-13 11:43:33 -07:00 (committed by TensorFlower Gardener)
parent 94f88814d1
commit b2c5581d4c
3 changed files with 42 additions and 36 deletions
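
For context, the pattern the benchmarks move to combines tick-based timing from micro_time.h with ErrorReporter-based logging. Below is a minimal sketch of that pattern; it is illustrative only and not part of this commit, and BenchmarkedWork() is a hypothetical stand-in for the registration->invoke() call that the real benchmarks measure.

#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_time.h"

// Hypothetical stand-in for the work being timed (registration->invoke() in
// the real benchmarks).
static void BenchmarkedWork() {}

int main() {
  tflite::MicroErrorReporter micro_reporter;
  tflite::ErrorReporter* reporter = &micro_reporter;

  // GetCurrentTimeTicks() replaces clock(): it reads the port's tick counter
  // rather than relying on clock()/CLOCKS_PER_SEC semantics that differ
  // between x86 and bare-metal targets.
  int32_t start = tflite::GetCurrentTimeTicks();
  BenchmarkedWork();

  // TF_LITE_REPORT_ERROR replaces printf so output is routed through the
  // platform's error reporter.
  TF_LITE_REPORT_ERROR(reporter, "invoke took %d cycles",
                       tflite::GetCurrentTimeTicks() - start);
  return 0;
}

Routing timing and logging through these TFLM abstractions keeps the benchmarks usable on targets where clock() and printf are unavailable or behave inconsistently.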


@@ -11,6 +11,7 @@ cc_binary(
     ],
     deps = [
         "//tensorflow/lite/c:common",
+        "//tensorflow/lite/micro:micro_time",
         "//tensorflow/lite/micro/kernels:micro_ops",
         "//tensorflow/lite/micro/testing:micro_test",
     ],
@@ -23,6 +24,7 @@ cc_binary(
     ],
     deps = [
         "//tensorflow/lite/c:common",
+        "//tensorflow/lite/micro:micro_time",
         "//tensorflow/lite/micro/kernels:micro_ops",
         "//tensorflow/lite/micro/testing:micro_test",
     ],


@@ -13,11 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include <ctime>
-
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/micro_time.h"
 #include "tensorflow/lite/micro/testing/test_utils.h"

 namespace tflite {
@@ -27,14 +26,14 @@ namespace {
 // Takes in quantized tensors along with expected outputs, and runs a single
 // iteration of the conv op with the supplied parameters. Compares outputs vs
 // the expected outputs and logs any differences found. Additionally, logs the
-// number of clocks taken by the invoke call.
+// number of clock ticks taken by the invoke call.
 TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
                                  TfLiteConvParams* conv_params, int tolerance,
                                  int output_length,
-                                 const int8_t* expected_output_data) {
-  MicroErrorReporter micro_reporter;
+                                 const int8_t* expected_output_data,
+                                 ErrorReporter* reporter) {
   TfLiteContext context;
-  PopulateContext(tensors, tensors_size, &micro_reporter, &context);
+  PopulateContext(tensors, tensors_size, reporter, &context);

   const TfLiteRegistration* registration = ops::micro::Register_CONV_2D();
@@ -68,16 +67,17 @@ TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
   node.custom_initial_data_size = 0;
   node.delegate = nullptr;

-  TfLiteStatus prepare_status = registration->prepare(&context, &node);
-  if (prepare_status != kTfLiteOk) {
-    return prepare_status;
+  if (registration->prepare) {
+    TfLiteStatus prepare_status = registration->prepare(&context, &node);
+    if (prepare_status != kTfLiteOk) {
+      return prepare_status;
+    }
   }

-  // On xtensa-hifimini, clock() returns a cyle count. On x86, it returns a time
-  // based on CLOCKS_PER_SECOND.
-  clock_t start = clock();
+  int32_t start = tflite::GetCurrentTimeTicks();
   TfLiteStatus invoke_status = registration->invoke(&context, &node);
-  printf("invoke took %ld cycles\n", clock() - start);
+  TF_LITE_REPORT_ERROR(reporter, "invoke took %d cycles\n",
+                       tflite::GetCurrentTimeTicks() - start);

   if (registration->free) {
     registration->free(&context, user_data);
@@ -90,9 +90,9 @@ TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
   int8_t* output_data = tensors[3].data.int8;
   for (int i = 0; i < output_length; ++i) {
     if (std::abs(expected_output_data[i] - output_data[i]) > tolerance) {
-      printf("output[%d] failed, was %f expected %f\n", i,
-             static_cast<float>(output_data[i]),
-             static_cast<float>(expected_output_data[i]));
+      TF_LITE_REPORT_ERROR(reporter, "output[%d] failed, was %d expected %d\n",
+                           i, static_cast<int>(output_data[i]),
+                           static_cast<int>(expected_output_data[i]));
     }
   }
   return kTfLiteOk;
@@ -103,6 +103,8 @@ TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
 }  // namespace tflite

 int main() {
+  tflite::MicroErrorReporter micro_reporter;
+  tflite::ErrorReporter* reporter = &micro_reporter;
   const int input_shape[] = {4, 1, 1, 1, 32};
   const int filter_shape[] = {4, 32, 1, 1, 32};
   const int bias_shape[] = {1, 32};
@@ -229,9 +231,9 @@ int main() {
   const int num_tensors = sizeof(tensors) / sizeof(TfLiteTensor);
   TfLiteStatus status = tflite::testing::ValidateConvGoldens(
       tensors, num_tensors, &conv_params, kQuantizationTolerance,
-      output_dims_count, golden_quantized);
+      output_dims_count, golden_quantized, reporter);
   if (status != kTfLiteOk) {
-    printf("Model invoke failed\n");
+    TF_LITE_REPORT_ERROR(reporter, "Model invoke failed\n");
   }
   return 0;
 }


@@ -13,11 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include <ctime>
-
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/micro/micro_time.h"
 #include "tensorflow/lite/micro/testing/test_utils.h"

 namespace tflite {
@@ -27,15 +26,15 @@ namespace {
 // Takes in quantized tensors along with expected outputs, and runs a single
 // iteration of the depthwise_conv op with the supplied parameters. Compares
 // outputs vs the expected outputs and logs any differences found. Additionally,
-// logs the number of clocks taken by the invoke call.
+// logs the number of clock ticks taken by the invoke call.
 TfLiteStatus ValidateDepthwiseConvGoldens(TfLiteTensor* tensors,
                                           int tensors_size,
                                           TfLiteFusedActivation activation,
                                           int tolerance, int output_length,
-                                          const int8_t* expected_output_data) {
-  MicroErrorReporter micro_reporter;
+                                          const int8_t* expected_output_data,
+                                          ErrorReporter* reporter) {
   TfLiteContext context;
-  PopulateContext(tensors, tensors_size, &micro_reporter, &context);
+  PopulateContext(tensors, tensors_size, reporter, &context);

   const TfLiteRegistration* registration =
       ops::micro::Register_DEPTHWISE_CONV_2D();
@@ -81,16 +80,17 @@ TfLiteStatus ValidateDepthwiseConvGoldens(TfLiteTensor* tensors,
   node.custom_initial_data_size = 0;
   node.delegate = nullptr;

-  TfLiteStatus prepare_status = registration->prepare(&context, &node);
-  if (prepare_status != kTfLiteOk) {
-    return prepare_status;
+  if (registration->prepare) {
+    TfLiteStatus prepare_status = registration->prepare(&context, &node);
+    if (prepare_status != kTfLiteOk) {
+      return prepare_status;
+    }
   }

-  // On xtensa-hifimini, clock() returns a cyle count. On x86, it returns a time
-  // based on CLOCKS_PER_SECOND.
-  clock_t start = clock();
+  int32_t start = tflite::GetCurrentTimeTicks();
   TfLiteStatus invoke_status = registration->invoke(&context, &node);
-  printf("invoke took %ld cycles\n", clock() - start);
+  TF_LITE_REPORT_ERROR(reporter, "invoke took %d cycles\n",
+                       tflite::GetCurrentTimeTicks() - start);

   if (registration->free) {
     registration->free(&context, user_data);
@@ -103,9 +103,9 @@ TfLiteStatus ValidateDepthwiseConvGoldens(TfLiteTensor* tensors,
   int8_t* output_data = tensors[3].data.int8;
   for (int i = 0; i < output_length; ++i) {
     if (std::abs(expected_output_data[i] - output_data[i]) > tolerance) {
-      printf("outputs[%d] was %f expected %f\n", i,
-             static_cast<float>(output_data[i]),
-             static_cast<float>(expected_output_data[i]));
+      TF_LITE_REPORT_ERROR(reporter, "outputs[%d] was %d expected %d\n", i,
+                           static_cast<int>(output_data[i]),
+                           static_cast<int>(expected_output_data[i]));
     }
   }
   return kTfLiteOk;
@@ -116,6 +116,8 @@ TfLiteStatus ValidateDepthwiseConvGoldens(TfLiteTensor* tensors,
 }  // namespace tflite

 int main() {
+  tflite::MicroErrorReporter micro_reporter;
+  tflite::ErrorReporter* reporter = &micro_reporter;
   const int input_elements = 32 * 4;
   const int filter_elements = 32 * 4;
   const int bias_elements = 32;
@@ -240,9 +242,9 @@ int main() {
   constexpr int kQuantizationTolerance = 1;
   TfLiteStatus status = tflite::testing::ValidateDepthwiseConvGoldens(
       tensors, kTensorsSize, kTfLiteActNone, kQuantizationTolerance,
-      output_elements, golden_quantized);
+      output_elements, golden_quantized, reporter);
   if (status != kTfLiteOk) {
-    printf("Model invoke failed\n");
+    TF_LITE_REPORT_ERROR(reporter, "Model invoke failed\n");
   }
   return 0;
 }