diff --git a/tensorflow/lite/tools/benchmark/benchmark_test.cc b/tensorflow/lite/tools/benchmark/benchmark_test.cc
index 563bf9e6eef..5d94d86d855 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_test.cc
+++ b/tensorflow/lite/tools/benchmark/benchmark_test.cc
@@ -61,7 +61,7 @@ class TestBenchmark : public BenchmarkTfLiteModel {
  public:
   explicit TestBenchmark(BenchmarkParams params)
       : BenchmarkTfLiteModel(std::move(params)) {}
-  const tflite::Interpreter* GetInterpreter() { return interpreter.get(); }
+  const tflite::Interpreter* GetInterpreter() { return interpreter_.get(); }
 
   void Prepare() {
     PrepareInputData();
diff --git a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
index e527796664f..0035a0b4373 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
+++ b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
@@ -318,27 +318,27 @@ bool BenchmarkTfLiteModel::ValidateParams() {
   }
   return PopulateInputLayerInfo(params_.Get<std::string>("input_layer"),
                                 params_.Get<std::string>("input_layer_shape"),
-                                &inputs);
+                                &inputs_);
 }
 
 uint64_t BenchmarkTfLiteModel::ComputeInputBytes() {
-  TFLITE_BENCHMARK_CHECK(interpreter);
+  TFLITE_BENCHMARK_CHECK(interpreter_);
   uint64_t total_input_bytes = 0;
-  for (int input : interpreter->inputs()) {
-    auto* t = interpreter->tensor(input);
+  for (int input : interpreter_->inputs()) {
+    auto* t = interpreter_->tensor(input);
     total_input_bytes += t->bytes;
   }
   return total_input_bytes;
 }
 
 void BenchmarkTfLiteModel::PrepareInputData() {
-  auto interpreter_inputs = interpreter->inputs();
+  auto interpreter_inputs = interpreter_->inputs();
   const size_t input_size = interpreter_inputs.size();
   CleanUp();
 
   for (int j = 0; j < input_size; ++j) {
     int i = interpreter_inputs[j];
-    TfLiteTensor* t = interpreter->tensor(i);
+    TfLiteTensor* t = interpreter_->tensor(i);
     std::vector<int> sizes = TfLiteIntArrayToVector(t->dims);
     int num_elements = 1;
     for (int i = 0; i < sizes.size(); ++i) {
@@ -388,25 +388,25 @@ void BenchmarkTfLiteModel::PrepareInputData() {
 }
 
 void BenchmarkTfLiteModel::ResetInputsAndOutputs() {
-  auto interpreter_inputs = interpreter->inputs();
+  auto interpreter_inputs = interpreter_->inputs();
   // Set the values of the input tensors from inputs_data_.
   for (int j = 0; j < interpreter_inputs.size(); ++j) {
     int i = interpreter_inputs[j];
-    TfLiteTensor* t = interpreter->tensor(i);
+    TfLiteTensor* t = interpreter_->tensor(i);
     if (t->type == kTfLiteFloat32) {
-      std::memcpy(interpreter->typed_tensor<float>(i), inputs_data_[j].data.f,
+      std::memcpy(interpreter_->typed_tensor<float>(i), inputs_data_[j].data.f,
                   inputs_data_[j].bytes);
     } else if (t->type == kTfLiteInt32) {
-      std::memcpy(interpreter->typed_tensor<int32_t>(i),
+      std::memcpy(interpreter_->typed_tensor<int32_t>(i),
                   inputs_data_[j].data.i32, inputs_data_[j].bytes);
     } else if (t->type == kTfLiteInt16) {
-      std::memcpy(interpreter->typed_tensor<int16_t>(i),
+      std::memcpy(interpreter_->typed_tensor<int16_t>(i),
                   inputs_data_[j].data.i16, inputs_data_[j].bytes);
     } else if (t->type == kTfLiteUInt8) {
-      std::memcpy(interpreter->typed_tensor<uint8_t>(i),
+      std::memcpy(interpreter_->typed_tensor<uint8_t>(i),
                   inputs_data_[j].data.uint8, inputs_data_[j].bytes);
     } else if (t->type == kTfLiteInt8) {
-      std::memcpy(interpreter->typed_tensor<int8_t>(i),
+      std::memcpy(interpreter_->typed_tensor<int8_t>(i),
                   inputs_data_[j].data.int8, inputs_data_[j].bytes);
     } else if (t->type == kTfLiteString) {
       tflite::DynamicBuffer buffer;
@@ -414,7 +414,7 @@ void BenchmarkTfLiteModel::ResetInputsAndOutputs() {
       FillRandomString(&buffer, sizes, []() {
        return "we're have some friends over saturday to hang out in the yard";
      });
-      buffer.WriteToTensor(interpreter->tensor(i), /*new_shape=*/nullptr);
+      buffer.WriteToTensor(interpreter_->tensor(i), /*new_shape=*/nullptr);
     } else {
       TFLITE_LOG(FATAL) << "Don't know how to populate tensor " << t->name
                         << " of type " << t->type;
@@ -424,27 +424,27 @@ void BenchmarkTfLiteModel::ResetInputsAndOutputs() {
 
 void BenchmarkTfLiteModel::Init() {
   std::string graph = params_.Get<std::string>("graph");
-  model = tflite::FlatBufferModel::BuildFromFile(graph.c_str());
-  if (!model) {
+  model_ = tflite::FlatBufferModel::BuildFromFile(graph.c_str());
+  if (!model_) {
     TFLITE_LOG(FATAL) << "Failed to mmap model " << graph;
   }
   TFLITE_LOG(INFO) << "Loaded model " << graph;
-  model->error_reporter();
+  model_->error_reporter();
   TFLITE_LOG(INFO) << "resolved reporter";
 
   auto resolver = GetOpResolver();
   const int32_t num_threads = params_.Get<int32_t>("num_threads");
-  tflite::InterpreterBuilder(*model, *resolver)(&interpreter, num_threads);
-  if (!interpreter) {
+  tflite::InterpreterBuilder(*model_, *resolver)(&interpreter_, num_threads);
+  if (!interpreter_) {
     TFLITE_LOG(FATAL) << "Failed to construct interpreter";
   }
 
-  interpreter->UseNNAPI(params_.Get<bool>("use_legacy_nnapi"));
+  interpreter_->UseNNAPI(params_.Get<bool>("use_legacy_nnapi"));
 
   delegates_ = GetDelegates();
   for (const auto& delegate : delegates_) {
-    if (interpreter->ModifyGraphWithDelegate(delegate.second.get()) !=
+    if (interpreter_->ModifyGraphWithDelegate(delegate.second.get()) !=
        kTfLiteOk) {
      TFLITE_LOG(FATAL) << "Failed to apply " << delegate.first << " delegate.";
    } else {
@@ -452,23 +452,23 @@ void BenchmarkTfLiteModel::Init() {
     }
   }
 
-  interpreter->SetAllowFp16PrecisionForFp32(params_.Get<bool>("allow_fp16"));
+  interpreter_->SetAllowFp16PrecisionForFp32(params_.Get<bool>("allow_fp16"));
 
-  auto interpreter_inputs = interpreter->inputs();
+  auto interpreter_inputs = interpreter_->inputs();
 
-  if (!inputs.empty()) {
-    TFLITE_BENCHMARK_CHECK_EQ(inputs.size(), interpreter_inputs.size())
+  if (!inputs_.empty()) {
+    TFLITE_BENCHMARK_CHECK_EQ(inputs_.size(), interpreter_inputs.size())
         << "Inputs mismatch: Model inputs #:" << interpreter_inputs.size()
-        << " expected: " << inputs.size();
+        << " expected: " << inputs_.size();
   }
 
   // Check if the tensor names match, and log a warning if it doesn't.
   // TODO(ycling): Consider to make this an error again when the new converter
   // create tensors with consistent naming.
-  for (int j = 0; j < inputs.size(); ++j) {
-    const InputLayerInfo& input = inputs[j];
+  for (int j = 0; j < inputs_.size(); ++j) {
+    const InputLayerInfo& input = inputs_[j];
     int i = interpreter_inputs[j];
-    TfLiteTensor* t = interpreter->tensor(i);
+    TfLiteTensor* t = interpreter_->tensor(i);
     if (input.name != t->name) {
       TFLITE_LOG(WARN) << "Tensor # " << i << " is named " << t->name
                        << " but flags call it " << input.name;
@@ -476,23 +476,23 @@ void BenchmarkTfLiteModel::Init() {
     }
   }
 
   // Resize all non-string tensors.
-  for (int j = 0; j < inputs.size(); ++j) {
-    const InputLayerInfo& input = inputs[j];
+  for (int j = 0; j < inputs_.size(); ++j) {
+    const InputLayerInfo& input = inputs_[j];
     int i = interpreter_inputs[j];
-    TfLiteTensor* t = interpreter->tensor(i);
+    TfLiteTensor* t = interpreter_->tensor(i);
     if (t->type != kTfLiteString) {
-      interpreter->ResizeInputTensor(i, input.shape);
+      interpreter_->ResizeInputTensor(i, input.shape);
     }
   }
 
-  if (interpreter->AllocateTensors() != kTfLiteOk) {
+  if (interpreter_->AllocateTensors() != kTfLiteOk) {
     TFLITE_LOG(FATAL) << "Failed to allocate tensors!";
   }
 
   // Install profilers if necessary.
   if (params_.Get<bool>("enable_op_profiling")) {
     profiling_listener_.reset(new ProfilingListener(
-        interpreter.get(),
+        interpreter_.get(),
         params_.Get<int32_t>("max_profiling_buffer_entries")));
     AddListener(profiling_listener_.get());
   }
@@ -507,7 +507,7 @@ BenchmarkTfLiteModel::TfLiteDelegatePtrMap BenchmarkTfLiteModel::GetDelegates()
   TfLiteDelegatePtrMap delegates;
   if (params_.Get<bool>("use_gpu")) {
     Interpreter::TfLiteDelegatePtr delegate =
-        evaluation::CreateGPUDelegate(model.get());
+        evaluation::CreateGPUDelegate(model_.get());
     if (!delegate) {
       TFLITE_LOG(WARN) << "GPU acceleration is unsupported on this platform.";
     } else {
@@ -551,7 +551,7 @@ std::unique_ptr<tflite::OpResolver> BenchmarkTfLiteModel::GetOpResolver()
 }
 
 void BenchmarkTfLiteModel::RunImpl() {
-  if (interpreter->Invoke() != kTfLiteOk) {
+  if (interpreter_->Invoke() != kTfLiteOk) {
     TFLITE_LOG(FATAL) << "Failed to invoke!";
   }
 }
diff --git a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.h b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.h
index 04d190531b8..79b59474235 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.h
+++ b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.h
@@ -62,15 +62,15 @@ class BenchmarkTfLiteModel : public BenchmarkModel {
 
   void CleanUp();
 
-  std::unique_ptr<tflite::FlatBufferModel> model;
-  std::unique_ptr<tflite::Interpreter> interpreter;
+  std::unique_ptr<tflite::FlatBufferModel> model_;
+  std::unique_ptr<tflite::Interpreter> interpreter_;
 
  private:
   struct InputTensorData {
     TfLitePtrUnion data;
     size_t bytes;
   };
-  std::vector<InputLayerInfo> inputs;
+  std::vector<InputLayerInfo> inputs_;
   std::vector<InputTensorData> inputs_data_;
   std::unique_ptr<ProfilingListener> profiling_listener_;
   std::unique_ptr<GemmlowpProfilingListener> gemmlowp_profiling_listener_;
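
For orientation only, here is a minimal sketch, not part of the patch, of how a subclass consumes the renamed protected members, in the same spirit as the TestBenchmark helper patched above. The class name InspectingBenchmark and its accessor are hypothetical; BenchmarkTfLiteModel, BenchmarkParams, and interpreter_ come from the files above, and the trailing underscore now marks them as member fields.

#include <utility>

#include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"

// Hypothetical subclass, used only to illustrate that model_, interpreter_
// and inputs_ now follow the trailing-underscore member naming convention.
class InspectingBenchmark : public tflite::benchmark::BenchmarkTfLiteModel {
 public:
  explicit InspectingBenchmark(tflite::benchmark::BenchmarkParams params)
      : BenchmarkTfLiteModel(std::move(params)) {}

  // Only meaningful after Init() has loaded the model and built the
  // interpreter; before that, interpreter_ is still null.
  const tflite::Interpreter* GetInterpreter() const {
    return interpreter_.get();
  }
};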