Merge pull request #43513 from vsilyaev:pull_request/Use_std_endl_for_flush
PiperOrigin-RevId: 341725520
commit 0accbd6e73
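Summary: the label_image example previously defined its LOG macro ad hoc in two places (bitmap_helpers.cc and label_image.cc), writing unbuffered to std::cerr. This change introduces a shared header, tensorflow/lite/examples/label_image/log.h, whose Log class accumulates each statement in a std::stringstream and, on destruction, writes the finished line to std::cerr via std::endl, so every message is newline-terminated and flushed. Accordingly, the explicit trailing "\n" literals are dropped from the LOG statements throughout.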
tensorflow/lite/examples/label_image/BUILD
@@ -63,6 +63,7 @@ cc_library(
         "bitmap_helpers.h",
         "bitmap_helpers_impl.h",
         "label_image.h",
+        "log.h",
     ],
     deps = [
         "//tensorflow/lite:builtin_op_data",
tensorflow/lite/examples/label_image/bitmap_helpers.cc
@@ -13,17 +13,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/

+#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
+
+#include <unistd.h>  // NOLINT(build/include_order)
+
 #include <cstdint>
 #include <cstdio>
 #include <cstdlib>
 #include <fstream>
 #include <iostream>

-#include <unistd.h>  // NOLINT(build/include_order)
-
-#include "tensorflow/lite/examples/label_image/bitmap_helpers.h"
-
-#define LOG(x) std::cerr
+#include "tensorflow/lite/examples/label_image/log.h"

 namespace tflite {
 namespace label_image {
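(Besides swapping the old LOG macro for the new header, this hunk reorders the includes: the file's own header, bitmap_helpers.h, and unistd.h now precede the standard library headers, following the related-header-first convention used elsewhere in TensorFlow.)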
@@ -76,7 +76,7 @@ std::vector<uint8_t> read_bmp(const std::string& input_bmp_name, int* width,

   std::ifstream file(input_bmp_name, std::ios::in | std::ios::binary);
   if (!file) {
-    LOG(FATAL) << "input file " << input_bmp_name << " not found\n";
+    LOG(FATAL) << "input file " << input_bmp_name << " not found";
     exit(-1);
   }

@@ -85,7 +85,7 @@ std::vector<uint8_t> read_bmp(const std::string& input_bmp_name, int* width,
   end = file.tellg();
   size_t len = end - begin;

-  if (s->verbose) LOG(INFO) << "len: " << len << "\n";
+  if (s->verbose) LOG(INFO) << "len: " << len;

   std::vector<uint8_t> img_bytes(len);
   file.seekg(0, std::ios::beg);
@@ -100,7 +100,7 @@ std::vector<uint8_t> read_bmp(const std::string& input_bmp_name, int* width,

   if (s->verbose)
     LOG(INFO) << "width, height, channels: " << *width << ", " << *height
-              << ", " << *channels << "\n";
+              << ", " << *channels;

   // there may be padding bytes when the width is not a multiple of 4 bytes
   // 8 * channels == bits per pixel
tensorflow/lite/examples/label_image/label_image.cc
@@ -52,7 +52,7 @@ limitations under the License.
 #include "tensorflow/lite/delegates/gpu/delegate.h"
 #endif

-#define LOG(severity) (std::cerr << (#severity) << ": ")
+#include "tensorflow/lite/examples/label_image/log.h"

 namespace tflite {
 namespace label_image {
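(The macro deleted here wrote straight to std::cerr, so a message was only newline-terminated if the caller appended "\n" by hand, and nothing forced a flush. The Log class that replaces it, added below in log.h, buffers the whole statement and emits it through std::endl, which is why the trailing "\n" literals disappear from every LOG call in the hunks that follow.)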
@@ -98,7 +98,7 @@ class DelegateProviders {
       // It's possible that a delegate of certain type won't be created as
       // user-specified benchmark params tells not to.
       if (ptr == nullptr) continue;
-      LOG(INFO) << delegate->GetName() << " delegate created.\n";
+      LOG(INFO) << delegate->GetName() << " delegate created.";
       delegates_map.emplace(delegate->GetName(), std::move(ptr));
     }
     return delegates_map;
@@ -134,7 +134,7 @@ TfLiteDelegatePtrMap GetDelegates(Settings* s,
   if (s->gl_backend) {
     auto delegate = CreateGPUDelegate(s);
     if (!delegate) {
-      LOG(INFO) << "GPU acceleration is unsupported on this platform.\n";
+      LOG(INFO) << "GPU acceleration is unsupported on this platform.";
     } else {
       delegates.emplace("GPU", std::move(delegate));
     }
@@ -145,7 +145,7 @@ TfLiteDelegatePtrMap GetDelegates(Settings* s,
     options.allow_fp16 = s->allow_fp16;
     auto delegate = evaluation::CreateNNAPIDelegate(options);
     if (!delegate) {
-      LOG(INFO) << "NNAPI acceleration is unsupported on this platform.\n";
+      LOG(INFO) << "NNAPI acceleration is unsupported on this platform.";
     } else {
       delegates.emplace("NNAPI", std::move(delegate));
     }
@@ -157,7 +157,7 @@ TfLiteDelegatePtrMap GetDelegates(Settings* s,
         evaluation::CreateHexagonDelegate(libhexagon_path, s->profiling);

     if (!delegate) {
-      LOG(INFO) << "Hexagon acceleration is unsupported on this platform.\n";
+      LOG(INFO) << "Hexagon acceleration is unsupported on this platform.";
     } else {
       delegates.emplace("Hexagon", std::move(delegate));
     }
@@ -166,7 +166,7 @@ TfLiteDelegatePtrMap GetDelegates(Settings* s,
   if (s->xnnpack_delegate) {
     auto delegate = evaluation::CreateXNNPACKDelegate(s->number_of_threads);
     if (!delegate) {
-      LOG(INFO) << "XNNPACK acceleration is unsupported on this platform.\n";
+      LOG(INFO) << "XNNPACK acceleration is unsupported on this platform.";
     } else {
       delegates.emplace("XNNPACK", std::move(delegate));
     }
@@ -195,7 +195,7 @@ TfLiteStatus ReadLabelsFile(const string& file_name,
                             size_t* found_label_count) {
   std::ifstream file(file_name);
   if (!file) {
-    LOG(ERROR) << "Labels file " << file_name << " not found\n";
+    LOG(ERROR) << "Labels file " << file_name << " not found";
     return kTfLiteError;
   }
   result->clear();
@@ -225,14 +225,13 @@ void PrintProfilingInfo(const profiling::ProfileEvent* e,
             << std::setprecision(3) << op_index << ", OpCode " << std::setw(3)
             << std::setprecision(3) << registration.builtin_code << ", "
             << EnumNameBuiltinOperator(
-                   static_cast<BuiltinOperator>(registration.builtin_code))
-            << "\n";
+                   static_cast<BuiltinOperator>(registration.builtin_code));
 }

 void RunInference(Settings* settings,
                   const DelegateProviders& delegate_providers) {
   if (!settings->model_name.c_str()) {
-    LOG(ERROR) << "no model file name\n";
+    LOG(ERROR) << "no model file name";
     exit(-1);
   }

@@ -240,29 +239,29 @@ void RunInference(Settings* settings,
   std::unique_ptr<tflite::Interpreter> interpreter;
   model = tflite::FlatBufferModel::BuildFromFile(settings->model_name.c_str());
   if (!model) {
-    LOG(ERROR) << "\nFailed to mmap model " << settings->model_name << "\n";
+    LOG(ERROR) << "Failed to mmap model " << settings->model_name;
     exit(-1);
   }
   settings->model = model.get();
-  LOG(INFO) << "Loaded model " << settings->model_name << "\n";
+  LOG(INFO) << "Loaded model " << settings->model_name;
   model->error_reporter();
-  LOG(INFO) << "resolved reporter\n";
+  LOG(INFO) << "resolved reporter";

   tflite::ops::builtin::BuiltinOpResolver resolver;

   tflite::InterpreterBuilder(*model, resolver)(&interpreter);
   if (!interpreter) {
-    LOG(ERROR) << "Failed to construct interpreter\n";
+    LOG(ERROR) << "Failed to construct interpreter";
     exit(-1);
   }

   interpreter->SetAllowFp16PrecisionForFp32(settings->allow_fp16);

   if (settings->verbose) {
-    LOG(INFO) << "tensors size: " << interpreter->tensors_size() << "\n";
-    LOG(INFO) << "nodes size: " << interpreter->nodes_size() << "\n";
-    LOG(INFO) << "inputs: " << interpreter->inputs().size() << "\n";
-    LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0) << "\n";
+    LOG(INFO) << "tensors size: " << interpreter->tensors_size();
+    LOG(INFO) << "nodes size: " << interpreter->nodes_size();
+    LOG(INFO) << "inputs: " << interpreter->inputs().size();
+    LOG(INFO) << "input(0) name: " << interpreter->GetInputName(0);

     int t_size = interpreter->tensors_size();
     for (int i = 0; i < t_size; i++) {
@@ -271,7 +270,7 @@ void RunInference(Settings* settings,
                 << interpreter->tensor(i)->bytes << ", "
                 << interpreter->tensor(i)->type << ", "
                 << interpreter->tensor(i)->params.scale << ", "
-                << interpreter->tensor(i)->params.zero_point << "\n";
+                << interpreter->tensor(i)->params.zero_point;
     }
   }

@@ -286,29 +285,29 @@ void RunInference(Settings* settings,
                 &image_height, &image_channels, settings);

   int input = interpreter->inputs()[0];
-  if (settings->verbose) LOG(INFO) << "input: " << input << "\n";
+  if (settings->verbose) LOG(INFO) << "input: " << input;

   const std::vector<int> inputs = interpreter->inputs();
   const std::vector<int> outputs = interpreter->outputs();

   if (settings->verbose) {
-    LOG(INFO) << "number of inputs: " << inputs.size() << "\n";
-    LOG(INFO) << "number of outputs: " << outputs.size() << "\n";
+    LOG(INFO) << "number of inputs: " << inputs.size();
+    LOG(INFO) << "number of outputs: " << outputs.size();
   }

   auto delegates_ = GetDelegates(settings, delegate_providers);
   for (const auto& delegate : delegates_) {
     if (interpreter->ModifyGraphWithDelegate(delegate.second.get()) !=
         kTfLiteOk) {
-      LOG(ERROR) << "Failed to apply " << delegate.first << " delegate.\n";
+      LOG(ERROR) << "Failed to apply " << delegate.first << " delegate.";
       exit(-1);
     } else {
-      LOG(INFO) << "Applied " << delegate.first << " delegate.\n";
+      LOG(INFO) << "Applied " << delegate.first << " delegate.";
     }
   }

   if (interpreter->AllocateTensors() != kTfLiteOk) {
-    LOG(ERROR) << "Failed to allocate tensors!\n";
+    LOG(ERROR) << "Failed to allocate tensors!";
     exit(-1);
   }

@@ -340,7 +339,7 @@ void RunInference(Settings* settings,
       break;
     default:
       LOG(ERROR) << "cannot handle input type "
-                 << interpreter->tensor(input)->type << " yet\n";
+                 << interpreter->tensor(input)->type << " yet";
       exit(-1);
   }
   auto profiler = absl::make_unique<profiling::Profiler>(
@@ -351,7 +350,7 @@ void RunInference(Settings* settings,
   if (settings->loop_count > 1) {
     for (int i = 0; i < settings->number_of_warmup_runs; i++) {
       if (interpreter->Invoke() != kTfLiteOk) {
-        LOG(ERROR) << "Failed to invoke tflite!\n";
+        LOG(ERROR) << "Failed to invoke tflite!";
         exit(-1);
       }
     }
@@ -361,16 +360,16 @@ void RunInference(Settings* settings,
   gettimeofday(&start_time, nullptr);
   for (int i = 0; i < settings->loop_count; i++) {
     if (interpreter->Invoke() != kTfLiteOk) {
-      LOG(ERROR) << "Failed to invoke tflite!\n";
+      LOG(ERROR) << "Failed to invoke tflite!";
       exit(-1);
     }
   }
   gettimeofday(&stop_time, nullptr);
-  LOG(INFO) << "invoked\n";
+  LOG(INFO) << "invoked";
   LOG(INFO) << "average time: "
             << (get_us(stop_time) - get_us(start_time)) /
                    (settings->loop_count * 1000)
-            << " ms \n";
+            << " ms";

   if (settings->profiling) {
     profiler->StopProfiling();
@@ -413,7 +412,7 @@ void RunInference(Settings* settings,
       break;
     default:
       LOG(ERROR) << "cannot handle output type "
-                 << interpreter->tensor(output)->type << " yet\n";
+                 << interpreter->tensor(output)->type << " yet";
       exit(-1);
   }

@@ -427,7 +426,7 @@ void RunInference(Settings* settings,
   for (const auto& result : top_results) {
     const float confidence = result.first;
     const int index = result.second;
-    LOG(INFO) << confidence << ": " << index << " " << labels[index] << "\n";
+    LOG(INFO) << confidence << ": " << index << " " << labels[index];
   }
 }

@@ -449,8 +448,7 @@ void display_usage() {
             << "--threads, -t: number of threads\n"
             << "--verbose, -v: [0|1] print more information\n"
            << "--warmup_runs, -w: number of warmup runs\n"
-            << "--xnnpack_delegate, -x [0:1]: xnnpack delegate\n"
-            << "\n";
+            << "--xnnpack_delegate, -x [0:1]: xnnpack delegate\n";
 }

 int Main(int argc, char** argv) {
tensorflow/lite/examples/label_image/log.h (new file, 39 lines)
@@ -0,0 +1,39 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_LOG_H_
+#define TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_LOG_H_
+
+#include <iostream>
+#include <sstream>
+
+namespace tflite {
+namespace label_image {
+
+class Log {
+  std::stringstream stream_;
+
+ public:
+  explicit Log(const char* severity) { stream_ << severity << ": "; }
+  std::stringstream& Stream() { return stream_; }
+  ~Log() { std::cerr << stream_.str() << std::endl; }
+};
+
+#define LOG(severity) tflite::label_image::Log(#severity).Stream()
+
+}  // namespace label_image
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_EXAMPLES_LABEL_IMAGE_LOG_H_
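For reference, a minimal self-contained sketch of the mechanism this header relies on; the class body mirrors log.h above, while the main() driver and its sample values are illustrative only and not part of the commit. Each LOG(severity) builds a temporary Log whose destructor runs at the end of the full expression, so the buffered text is emitted as one line and flushed by std::endl:

#include <iostream>
#include <sstream>

// Same mechanism as the Log class above, trimmed of the tflite namespaces.
class Log {
  std::stringstream stream_;

 public:
  // Stringized severity ("INFO", "ERROR", ...) becomes the line prefix.
  explicit Log(const char* severity) { stream_ << severity << ": "; }
  std::stringstream& Stream() { return stream_; }
  // Runs at the end of the full LOG(...) expression: the buffered text is
  // written as one line, and std::endl both terminates and flushes it.
  ~Log() { std::cerr << stream_.str() << std::endl; }
};

#define LOG(severity) Log(#severity).Stream()

int main() {
  int len = 150528;  // hypothetical value, for illustration only
  LOG(INFO) << "len: " << len;         // prints "INFO: len: 150528" and flushes
  LOG(ERROR) << "no model file name";  // prints "ERROR: no model file name"
  return 0;
}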