diff --git a/tensorflow/lite/kernels/test_util.cc b/tensorflow/lite/kernels/test_util.cc
index dd3ff589a54..1e1519befe2 100644
--- a/tensorflow/lite/kernels/test_util.cc
+++ b/tensorflow/lite/kernels/test_util.cc
@@ -176,6 +176,21 @@ void SingleOpModel::Invoke() { ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk); }
 
 TfLiteStatus SingleOpModel::InvokeUnchecked() { return interpreter_->Invoke(); }
 
+void SingleOpModel::BuildInterpreter(
+    std::vector<std::vector<int>> input_shapes) {
+  BuildInterpreter(input_shapes, -1, false);
+}
+
+void SingleOpModel::BuildInterpreter(std::vector<std::vector<int>> input_shapes,
+                                     int num_threads) {
+  BuildInterpreter(input_shapes, num_threads, false);
+}
+
+void SingleOpModel::BuildInterpreter(std::vector<std::vector<int>> input_shapes,
+                                     bool allow_fp32_relax_to_fp16) {
+  BuildInterpreter(input_shapes, -1, allow_fp32_relax_to_fp16);
+}
+
 // static
 void SingleOpModel::SetForceUseNnapi(bool use_nnapi) {
   force_use_nnapi = use_nnapi;
diff --git a/tensorflow/lite/kernels/test_util.h b/tensorflow/lite/kernels/test_util.h
index b9780c81a01..5334e39082e 100644
--- a/tensorflow/lite/kernels/test_util.h
+++ b/tensorflow/lite/kernels/test_util.h
@@ -250,8 +250,15 @@ class SingleOpModel {
   // Build the interpreter for this model. Also, resize and allocate all
   // tensors given the shapes of the inputs.
   void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
-                        int num_threads = -1,
-                        bool allow_fp32_relax_to_fp16 = false);
+                        int num_threads, bool allow_fp32_relax_to_fp16);
+
+  void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
+                        int num_threads);
+
+  void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
+                        bool allow_fp32_relax_to_fp16);
+
+  void BuildInterpreter(std::vector<std::vector<int>> input_shapes);
 
   // Executes inference, asserting success.
   void Invoke();