Support the verbose mode in logging TFLite delegate-related parameter values.
PiperOrigin-RevId: 319892460
Change-Id: Ie1c393dc0a7fcccaccd4399115963de06ff50057
commit 68beb052a5 (parent 7d2a2a6a11)
tensorflow/lite
@@ -29,7 +29,7 @@ class DummyDelegateProvider : public DelegateProvider {
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
 
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
 
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
@@ -43,9 +43,10 @@ std::vector<Flag> DummyDelegateProvider::CreateFlags(ToolParams* params) const {
   return flags;
 }
 
-void DummyDelegateProvider::LogParams(const ToolParams& params) const {
-  TFLITE_LOG(INFO) << "Use dummy test delegate : ["
-                   << params.Get<bool>("use_dummy_delegate") << "]";
+void DummyDelegateProvider::LogParams(const ToolParams& params,
+                                      bool verbose) const {
+  LOG_TOOL_PARAM(params, bool, "use_dummy_delegate", "Use dummy test delegate",
+                 verbose);
 }
 
 TfLiteDelegatePtr DummyDelegateProvider::CreateTfLiteDelegate(
@@ -53,6 +53,10 @@ and the following optional parameters:
     `stdout` if option is not set. Requires `enable_op_profiling` to be `true`
     and the path to include the name of the output CSV; otherwise results are
     printed to `stdout`.
+*   `verbose`: `bool` (default=false) \
+    Whether to log parameters whose values are not set. By default, only log
+    those parameters that are set by parsing their values from the commandline
+    flags.
 
 ### TFLite delegate parameters
 The tool supports all runtime/delegate parameters introduced by
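
The rule this new `verbose` option controls can be summarized in a few lines of plain C++. The sketch below is a standalone illustration, not TFLite code; `Param` and `has_value_set` are made-up stand-ins for `ToolParams` and its `HasValueSet` query:

// Standalone sketch of the verbose-logging rule: a parameter is logged when
// verbose is on OR when its value was explicitly set (e.g. via a flag).
#include <iostream>
#include <string>
#include <vector>

struct Param {
  std::string description;
  std::string value;
  bool has_value_set;  // Stand-in for ToolParams::HasValueSet<T>(name).
};

void LogParams(const std::vector<Param>& params, bool verbose) {
  for (const auto& p : params) {
    if (verbose || p.has_value_set) {
      std::cout << p.description << ": [" << p.value << "]\n";
    }
  }
}

int main() {
  std::vector<Param> params = {
      {"Use gpu", "1", /*has_value_set=*/true},      // set via a flag
      {"Use xnnpack", "0", /*has_value_set=*/false}  // left at its default
  };
  LogParams(params, /*verbose=*/false);  // logs only "Use gpu"
  LogParams(params, /*verbose=*/true);   // logs both parameters
  return 0;
}
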
@@ -104,28 +104,28 @@ std::vector<Flag> BenchmarkModel::GetFlags() {
       CreateFlag<bool>("verbose", &params_,
                        "Whether to log parameters whose values are not set. "
                        "By default, only log those parameters that are set by "
-                       "parsing their values from the commandline flag.."),
+                       "parsing their values from the commandline flags."),
   };
 }
 
-#define LOG_PARAM(type, name, prefix, suffix) \
-  LOG_BENCHMARK_PARAM(params_, type, name, prefix, suffix, verbose)
 void BenchmarkModel::LogParams() {
   const bool verbose = params_.Get<bool>("verbose");
-  LOG_PARAM(int32_t, "num_runs", "Min num runs: [", "]");
-  LOG_PARAM(float, "min_secs", "Min runs duration (seconds): [", "]");
-  LOG_PARAM(float, "max_secs", "Max runs duration (seconds): [", "]");
-  LOG_PARAM(float, "run_delay", "Inter-run delay (seconds): [", "]");
-  LOG_PARAM(int32_t, "num_threads", "Num threads: [", "]");
-  LOG_PARAM(bool, "use_caching", "Use caching: [", "]");
-  LOG_PARAM(std::string, "benchmark_name", "Benchmark name: [", "]");
-  LOG_PARAM(std::string, "output_prefix", "Output prefix: [", "]");
-  LOG_PARAM(int32_t, "warmup_runs", "Min warmup runs: [", "]");
-  LOG_PARAM(float, "warmup_min_secs", "Min warmup runs duration (seconds): [",
-            "]");
+  TFLITE_LOG(INFO) << "Log parameter values verbosely: [" << verbose << "]";
+
+  LOG_BENCHMARK_PARAM(int32_t, "num_runs", "Min num runs", verbose);
+  LOG_BENCHMARK_PARAM(float, "min_secs", "Min runs duration (seconds)",
+                      verbose);
+  LOG_BENCHMARK_PARAM(float, "max_secs", "Max runs duration (seconds)",
+                      verbose);
+  LOG_BENCHMARK_PARAM(float, "run_delay", "Inter-run delay (seconds)", verbose);
+  LOG_BENCHMARK_PARAM(int32_t, "num_threads", "Num threads", verbose);
+  LOG_BENCHMARK_PARAM(bool, "use_caching", "Use caching", verbose);
+  LOG_BENCHMARK_PARAM(std::string, "benchmark_name", "Benchmark name", verbose);
+  LOG_BENCHMARK_PARAM(std::string, "output_prefix", "Output prefix", verbose);
+  LOG_BENCHMARK_PARAM(int32_t, "warmup_runs", "Min warmup runs", verbose);
+  LOG_BENCHMARK_PARAM(float, "warmup_min_secs",
+                      "Min warmup runs duration (seconds)", verbose);
 }
-#undef LOG_PARAM
 
 TfLiteStatus BenchmarkModel::PrepareInputData() { return kTfLiteOk; }
@@ -22,11 +22,10 @@ namespace benchmark {
 using BenchmarkParam = tflite::tools::ToolParam;
 using BenchmarkParams = tflite::tools::ToolParams;
 
-#define LOG_BENCHMARK_PARAM(params, type, name, prefix, suffix, verbose) \
-  do {                                                                   \
-    TFLITE_MAY_LOG(INFO, verbose || params.HasValueSet<type>(name))      \
-        << prefix << params.Get<type>(name) << suffix;                   \
-  } while (0)
+// To be used in BenchmarkModel::LogParams() and its overrides as we assume
+// logging the parameters defined in BenchmarkModel as 'params_'.
+#define LOG_BENCHMARK_PARAM(type, name, description, verbose) \
+  LOG_TOOL_PARAM(params_, type, name, description, verbose)
 }  // namespace benchmark
 }  // namespace tflite
 #endif  // TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_PARAMS_H_
@@ -352,38 +352,35 @@ std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
 
 void BenchmarkTfLiteModel::LogParams() {
   BenchmarkModel::LogParams();
-  TFLITE_LOG(INFO) << "Graph: [" << params_.Get<std::string>("graph") << "]";
-
   const bool verbose = params_.Get<bool>("verbose");
-
-#define LOG_PARAM(type, name, prefix, suffix) \
-  LOG_BENCHMARK_PARAM(params_, type, name, prefix, suffix, verbose)
-
-  LOG_PARAM(std::string, "input_layer", "Input layers: [", "]");
-  LOG_PARAM(std::string, "input_layer_shape", "Input shapes: [", "]");
-  LOG_PARAM(std::string, "input_layer_value_range", "Input value ranges: [",
-            "]");
-  LOG_PARAM(std::string, "input_layer_value_files", "Input value files: [",
-            "]");
+  // Always log the value of --graph.
+  LOG_BENCHMARK_PARAM(std::string, "graph", "Graph", /*verbose*/ true);
+  LOG_BENCHMARK_PARAM(std::string, "input_layer", "Input layers", verbose);
+  LOG_BENCHMARK_PARAM(std::string, "input_layer_shape", "Input shapes",
+                      verbose);
+  LOG_BENCHMARK_PARAM(std::string, "input_layer_value_range",
+                      "Input value ranges", verbose);
+  LOG_BENCHMARK_PARAM(std::string, "input_layer_value_files",
+                      "Input value files", verbose);
 
 #if defined(__ANDROID__)
-  LOG_PARAM(bool, "use_legacy_nnapi", "Use legacy nnapi: [", "]");
+  LOG_BENCHMARK_PARAM(bool, "use_legacy_nnapi", "Use legacy nnapi", verbose);
 #endif
-  LOG_PARAM(bool, "allow_fp16", "Allow fp16: [", "]");
-  LOG_PARAM(bool, "require_full_delegation", "Require full delegation: [", "]");
-  LOG_PARAM(bool, "enable_op_profiling", "Enable op profiling: [", "]");
-  LOG_PARAM(int32_t, "max_profiling_buffer_entries",
-            "Max profiling buffer entries: [", "]");
-  LOG_PARAM(std::string, "profiling_output_csv_file",
-            "CSV File to export profiling data to: [", "]");
-  LOG_PARAM(bool, "enable_platform_tracing", "Enable platform-wide tracing: [",
-            "]");
-
-#undef LOG_PARAM
+  LOG_BENCHMARK_PARAM(bool, "allow_fp16", "Allow fp16", verbose);
+  LOG_BENCHMARK_PARAM(bool, "require_full_delegation",
+                      "Require full delegation", verbose);
+  LOG_BENCHMARK_PARAM(bool, "enable_op_profiling", "Enable op profiling",
+                      verbose);
+  LOG_BENCHMARK_PARAM(int32_t, "max_profiling_buffer_entries",
+                      "Max profiling buffer entries", verbose);
+  LOG_BENCHMARK_PARAM(std::string, "profiling_output_csv_file",
+                      "CSV File to export profiling data to", verbose);
+  LOG_BENCHMARK_PARAM(bool, "enable_platform_tracing",
+                      "Enable platform-wide tracing", verbose);
 
   for (const auto& delegate_provider :
        tools::GetRegisteredDelegateProviders()) {
-    delegate_provider->LogParams(params_);
+    delegate_provider->LogParams(params_, verbose);
   }
 }
@@ -38,7 +38,7 @@ class CoreMlDelegateProvider : public DelegateProvider {
   }
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
 
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
 
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
@@ -62,10 +62,11 @@ std::vector<Flag> CoreMlDelegateProvider::CreateFlags(
 #endif
 }
 
-void CoreMlDelegateProvider::LogParams(const ToolParams& params) const {
+void CoreMlDelegateProvider::LogParams(const ToolParams& params,
+                                       bool verbose) const {
 #if defined(REAL_IPHONE_DEVICE)
-  TFLITE_LOG(INFO) << "Use Core ML : [" << params.Get<bool>("use_coreml")
-                   << "]";
+  LOG_TOOL_PARAM(params, bool, "use_coreml", "Use CoreML", verbose);
+  LOG_TOOL_PARAM(params, int, "coreml_version", "CoreML version", verbose);
 #endif
 }
 
@@ -33,7 +33,7 @@ class DefaultExecutionProvider : public DelegateProvider {
   }
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
   std::string GetName() const final { return "Default-NoDelegate"; }
 };
@@ -54,13 +54,14 @@ std::vector<Flag> DefaultExecutionProvider::CreateFlags(
   return flags;
 }
 
-void DefaultExecutionProvider::LogParams(const ToolParams& params) const {
-  TFLITE_LOG(INFO) << "#threads used for CPU inference: ["
-                   << params.Get<int32_t>("num_threads") << "]";
-  TFLITE_LOG(INFO) << "Max number of delegated partitions : ["
-                   << params.Get<int32_t>("max_delegated_partitions") << "]";
-  TFLITE_LOG(INFO) << "Min nodes per partition : ["
-                   << params.Get<int32_t>("min_nodes_per_partition") << "]";
+void DefaultExecutionProvider::LogParams(const ToolParams& params,
+                                         bool verbose) const {
+  LOG_TOOL_PARAM(params, int32_t, "num_threads",
+                 "#threads used for CPU inference", verbose);
+  LOG_TOOL_PARAM(params, int32_t, "max_delegated_partitions",
+                 "Max number of delegated partitions", verbose);
+  LOG_TOOL_PARAM(params, int32_t, "min_nodes_per_partition",
+                 "Min nodes per partition", verbose);
 }
 
 TfLiteDelegatePtr DefaultExecutionProvider::CreateTfLiteDelegate(
@@ -40,8 +40,10 @@ class DelegateProvider {
   // 'params' whose value will be set to the corresponding runtime flag value.
   virtual std::vector<Flag> CreateFlags(ToolParams* params) const = 0;
 
-  // Log tool params.
-  virtual void LogParams(const ToolParams& params) const = 0;
+  // Log tool params. If 'verbose' is set to false, the param is going to be
+  // only logged if its value has been set, say via being parsed from
+  // commandline flags.
+  virtual void LogParams(const ToolParams& params, bool verbose) const = 0;
 
   // Create a TfLiteDelegate based on tool params.
   virtual TfLiteDelegatePtr CreateTfLiteDelegate(
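
For an out-of-tree delegate provider, adopting this change means overriding the new two-argument LogParams. Below is a minimal sketch that mirrors the in-tree providers updated in this commit; the class name and flag are made up, and the include paths, default-param registration, and REGISTER_DELEGATE_PROVIDER usage are assumptions based on the surrounding code rather than a verified template:

#include <string>
#include <vector>

#include "tensorflow/lite/tools/delegates/delegate_provider.h"  // assumed path
#include "tensorflow/lite/tools/tool_params.h"                   // assumed path

namespace tflite {
namespace tools {

class MyDelegateProvider : public DelegateProvider {
 public:
  MyDelegateProvider() {
    // Register a default so the param always has a value to log/query.
    default_params_.AddParam("use_my_delegate", ToolParam::Create<bool>(false));
  }

  std::vector<Flag> CreateFlags(ToolParams* params) const final {
    return {CreateFlag<bool>("use_my_delegate", params, "use my delegate")};
  }

  void LogParams(const ToolParams& params, bool verbose) const final {
    // With verbose == false this is logged only if --use_my_delegate was passed.
    LOG_TOOL_PARAM(params, bool, "use_my_delegate", "Use my delegate", verbose);
  }

  TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final {
    // A real provider would construct and return its delegate here.
    return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
  }

  std::string GetName() const final { return "MY_DELEGATE"; }
};
REGISTER_DELEGATE_PROVIDER(MyDelegateProvider);

}  // namespace tools
}  // namespace tflite
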
@@ -33,7 +33,6 @@ std::vector<std::string> SplitString(const std::string& str, char delimiter) {
   return tokens;
 }
 
-
 // External delegate provider used to dynamically load delegate libraries
 // Note: Assumes the lifetime of the provider exceeds the usage scope of
 // the generated delegates.
@@ -48,7 +47,7 @@ class ExternalDelegateProvider : public DelegateProvider {
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
 
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
 
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
@@ -63,16 +62,18 @@ std::vector<Flag> ExternalDelegateProvider::CreateFlags(
           "The library path for the underlying external."),
       CreateFlag<std::string>(
           "external_delegate_options", params,
-          "Comma-separated options to be passed to the external delegate")};
+          "A list of comma-separated options to be passed to the external "
+          "delegate. Each option is a colon-separated key-value pair, e.g. "
+          "option_name:option_value.")};
   return flags;
 }
 
-void ExternalDelegateProvider::LogParams(const ToolParams& params) const {
-  TFLITE_LOG(INFO) << "External delegate path : ["
-                   << params.Get<std::string>("external_delegate_path") << "]";
-  TFLITE_LOG(INFO) << "External delegate options : ["
-                   << params.Get<std::string>("external_delegate_options")
-                   << "]";
+void ExternalDelegateProvider::LogParams(const ToolParams& params,
+                                         bool verbose) const {
+  LOG_TOOL_PARAM(params, std::string, "external_delegate_path",
+                 "External delegate path", verbose);
+  LOG_TOOL_PARAM(params, std::string, "external_delegate_options",
+                 "External delegate options", verbose);
 }
 
 TfLiteDelegatePtr ExternalDelegateProvider::CreateTfLiteDelegate(
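
The option-string format described by the updated flag help ("comma-separated options, each a colon-separated key-value pair") can be parsed as sketched below. This is a standalone illustration with a local Split helper standing in for the SplitString function declared in this file; it is not the provider's actual parsing code:

#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

// Mirrors the SplitString(str, delimiter) helper visible in this hunk.
std::vector<std::string> Split(const std::string& str, char delimiter) {
  std::vector<std::string> tokens;
  std::string token;
  std::istringstream stream(str);
  while (std::getline(stream, token, delimiter)) tokens.push_back(token);
  return tokens;
}

int main() {
  // e.g. --external_delegate_options="option_a:1,option_b:some_value"
  const std::string options = "option_a:1,option_b:some_value";
  std::map<std::string, std::string> parsed;
  for (const std::string& option : Split(options, ',')) {
    std::vector<std::string> kv = Split(option, ':');
    if (kv.size() == 2) parsed[kv[0]] = kv[1];
  }
  for (const auto& kv : parsed) {
    std::cout << kv.first << " = " << kv.second << "\n";
  }
  return 0;
}
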
@@ -51,7 +51,7 @@ class GpuDelegateProvider : public DelegateProvider {
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
 
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
 
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
@@ -86,21 +86,21 @@ std::vector<Flag> GpuDelegateProvider::CreateFlags(ToolParams* params) const {
   return flags;
 }
 
-void GpuDelegateProvider::LogParams(const ToolParams& params) const {
-  TFLITE_LOG(INFO) << "Use gpu : [" << params.Get<bool>("use_gpu") << "]";
+void GpuDelegateProvider::LogParams(const ToolParams& params,
+                                    bool verbose) const {
+  LOG_TOOL_PARAM(params, bool, "use_gpu", "Use gpu", verbose);
 #if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
-  TFLITE_LOG(INFO) << "Allow lower precision in gpu : ["
-                   << params.Get<bool>("gpu_precision_loss_allowed") << "]";
-  TFLITE_LOG(INFO) << "Enable running quant models in gpu : ["
-                   << params.Get<bool>("gpu_experimental_enable_quant") << "]";
+  LOG_TOOL_PARAM(params, bool, "gpu_precision_loss_allowed",
+                 "Allow lower precision in gpu", verbose);
+  LOG_TOOL_PARAM(params, bool, "gpu_experimental_enable_quant",
+                 "Enable running quant models in gpu", verbose);
 #endif
 #if defined(__ANDROID__)
-  TFLITE_LOG(INFO) << "GPU backend : ["
-                   << params.Get<std::string>("gpu_backend") << "]";
+  LOG_TOOL_PARAM(params, std::string, "gpu_backend", "GPU backend", verbose);
 #endif
 #if defined(REAL_IPHONE_DEVICE)
-  TFLITE_LOG(INFO) << "GPU delegate wait type : ["
-                   << params.Get<std::string>("gpu_wait_type") << "]";
+  LOG_TOOL_PARAM(params, std::string, "gpu_wait_type", "GPU delegate wait type",
+                 verbose);
 #endif
 }
 
@@ -43,7 +43,7 @@ class HexagonDelegateProvider : public DelegateProvider {
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
 
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
 
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
@@ -67,14 +67,14 @@ std::vector<Flag> HexagonDelegateProvider::CreateFlags(
 #endif
 }
 
-void HexagonDelegateProvider::LogParams(const ToolParams& params) const {
+void HexagonDelegateProvider::LogParams(const ToolParams& params,
+                                        bool verbose) const {
 #if defined(TFLITE_ENABLE_HEXAGON)
-  TFLITE_LOG(INFO) << "Use Hexagon : [" << params.Get<bool>("use_hexagon")
-                   << "]";
-  TFLITE_LOG(INFO) << "Hexagon lib path : ["
-                   << params.Get<std::string>("hexagon_lib_path") << "]";
-  TFLITE_LOG(INFO) << "Hexagon Profiling : ["
-                   << params.Get<bool>("hexagon_profiling") << "]";
+  LOG_TOOL_PARAM(params, bool, "use_hexagon", "Use Hexagon", verbose);
+  LOG_TOOL_PARAM(params, std::string, "hexagon_lib_path", "Hexagon lib path",
+                 verbose);
+  LOG_TOOL_PARAM(params, bool, "hexagon_profiling", "Hexagon profiling",
+                 verbose);
 #endif
 }
 
@@ -43,7 +43,7 @@ class NnapiDelegateProvider : public DelegateProvider {
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
 
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
 
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
@@ -76,38 +76,30 @@ std::vector<Flag> NnapiDelegateProvider::CreateFlags(ToolParams* params) const {
   return flags;
 }
 
-void NnapiDelegateProvider::LogParams(const ToolParams& params) const {
+void NnapiDelegateProvider::LogParams(const ToolParams& params,
+                                      bool verbose) const {
 #if defined(__ANDROID__)
-  TFLITE_LOG(INFO) << "Use nnapi : [" << params.Get<bool>("use_nnapi") << "]";
-  if (params.Get<bool>("use_nnapi")) {
-    if (!params.Get<std::string>("nnapi_execution_preference").empty()) {
-      TFLITE_LOG(INFO) << "nnapi execution preference: ["
-                       << params.Get<std::string>("nnapi_execution_preference")
-                       << "]";
-    }
-    if (!params.Get<std::string>("nnapi_execution_priority").empty()) {
-      TFLITE_LOG(INFO) << "model execution priority in nnapi: ["
-                       << params.Get<std::string>("nnapi_execution_priority")
-                       << "]";
-    }
-    std::string log_string = "nnapi accelerator name: [" +
-                             params.Get<std::string>("nnapi_accelerator_name") +
-                             "]";
-    std::string string_device_names_list = nnapi::GetStringDeviceNamesList();
-    // Print available devices when possible
-    if (!string_device_names_list.empty()) {
-      log_string += " (Available: " + string_device_names_list + ")";
-    }
-    TFLITE_LOG(INFO) << log_string;
-    if (params.Get<bool>("disable_nnapi_cpu")) {
-      TFLITE_LOG(INFO) << "disable_nnapi_cpu: ["
-                       << params.Get<bool>("disable_nnapi_cpu") << "]";
-    }
-    if (params.Get<bool>("nnapi_allow_fp16")) {
-      TFLITE_LOG(INFO) << "Allow fp16 in NNAPI: ["
-                       << params.Get<bool>("nnapi_allow_fp16") << "]";
-    }
-  }
+  LOG_TOOL_PARAM(params, bool, "use_nnapi", "Use NNAPI", verbose);
+  if (!params.Get<bool>("use_nnapi")) return;
+
+  LOG_TOOL_PARAM(params, std::string, "nnapi_execution_preference",
+                 "NNAPI execution preference", verbose);
+  LOG_TOOL_PARAM(params, std::string, "nnapi_execution_priority",
+                 "Model execution priority in nnapi", verbose);
+  LOG_TOOL_PARAM(params, std::string, "nnapi_accelerator_name",
+                 "NNAPI accelerator name", verbose);
+
+  std::string string_device_names_list = nnapi::GetStringDeviceNamesList();
+  // Print available devices when possible as it's informative.
+  if (!string_device_names_list.empty()) {
+    TFLITE_LOG(INFO) << "NNAPI accelerators available: ["
+                     << string_device_names_list << "]";
+  }
+
+  LOG_TOOL_PARAM(params, bool, "disable_nnapi_cpu", "Disable NNAPI cpu",
+                 verbose);
+  LOG_TOOL_PARAM(params, bool, "nnapi_allow_fp16", "Allow fp16 in NNAPI",
+                 verbose);
 #endif
 }
 
@@ -28,7 +28,7 @@ class XnnpackDelegateProvider : public DelegateProvider {
 
   std::vector<Flag> CreateFlags(ToolParams* params) const final;
 
-  void LogParams(const ToolParams& params) const final;
+  void LogParams(const ToolParams& params, bool verbose) const final;
 
   TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
@@ -43,9 +43,9 @@ std::vector<Flag> XnnpackDelegateProvider::CreateFlags(
   return flags;
 }
 
-void XnnpackDelegateProvider::LogParams(const ToolParams& params) const {
-  TFLITE_LOG(INFO) << "Use xnnpack : [" << params.Get<bool>("use_xnnpack")
-                   << "]";
+void XnnpackDelegateProvider::LogParams(const ToolParams& params,
+                                        bool verbose) const {
+  LOG_TOOL_PARAM(params, bool, "use_xnnpack", "Use xnnpack", verbose);
 }
 
 TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate(
@@ -143,6 +143,12 @@ class ToolParams {
   std::unordered_map<std::string, std::unique_ptr<ToolParam>> params_;
 };
 
+#define LOG_TOOL_PARAM(params, type, name, description, verbose)      \
+  do {                                                                \
+    TFLITE_MAY_LOG(INFO, (verbose) || params.HasValueSet<type>(name)) \
+        << description << ": [" << params.Get<type>(name) << "]";     \
+  } while (0)
+
 }  // namespace tools
 }  // namespace tflite
 #endif  // TENSORFLOW_LITE_TOOLS_TOOL_PARAMS_H_
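
Written out for one hypothetical call, LOG_TOOL_PARAM(params, bool, "use_gpu", "Use gpu", verbose) expands to roughly the following fragment (whitespace aside), which makes the verbose gating explicit:

// Manual expansion of LOG_TOOL_PARAM(params, bool, "use_gpu", "Use gpu", verbose).
// "use_gpu"/"Use gpu" are illustrative arguments, not part of this header.
do {
  TFLITE_MAY_LOG(INFO, (verbose) || params.HasValueSet<bool>("use_gpu"))
      << "Use gpu" << ": [" << params.Get<bool>("use_gpu") << "]";
} while (0);
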