Remove the default parameters of BuildInterpreter, keeping only two overloads: one taking just the input shapes, and one taking the input shapes plus all optional settings.

Some call sites were passing bools to the num_threads parameter. This change forces callers to spell out all of the optional parameters (or none of them), preventing such mistakes.
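For illustration, a minimal standalone sketch of the bug class this prevents. The stand-in signatures are copied from the header diff below; the free functions and their bodies are hypothetical, existing only to report which overload wins:

#include <iostream>
#include <vector>

// Stand-ins for two of the old overloads (signatures from the header diff).
void BuildInterpreter(std::vector<std::vector<int>> /*input_shapes*/,
                      int num_threads) {
  std::cout << "num_threads overload, num_threads=" << num_threads << "\n";
}
void BuildInterpreter(std::vector<std::vector<int>> /*input_shapes*/,
                      bool allow_fp32_relax_to_fp16, bool apply_delegate) {
  std::cout << "fp16-relaxation overload\n";
}

int main() {
  bool allow_fp32_relax_to_fp16 = true;
  // Intended to enable fp32->fp16 relaxation, but the two-bool overload
  // needs a second argument, so the lone bool converts to int and this
  // silently picks the num_threads overload with num_threads == 1.
  BuildInterpreter({{1, 2}}, allow_fp32_relax_to_fp16);
}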

PiperOrigin-RevId: 314746668
Change-Id: I445364bcc7bec8f0030b9c04ed140394df25edb7
commit 8b61a1e246
parent ce2f9824ee
Author: Robert David (2020-06-04 09:46:05 -07:00)
Committer: TensorFlower Gardener
7 changed files with 21 additions and 35 deletions


@@ -76,8 +76,8 @@ class FloatAddOpModel : public SingleOpModelWithNNAPI {
     output_ = AddOutput(output);
     SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                  CreateAddOptions(builder_, activation_type).Union());
-    BuildInterpreter({GetShape(input1_), GetShape(input2_)},
-                     allow_fp32_relax_to_fp16);
+    BuildInterpreter({GetShape(input1_), GetShape(input2_)}, /*num_threads=*/-1,
+                     allow_fp32_relax_to_fp16, /*apply_delegate=*/true);
   }

   int input1() { return input1_; }
@@ -433,7 +433,8 @@ class AddSubOpsAcceleratedModel : public MultiOpModel, public AcceleratedModel {
                  CreateSubOptions(builder_, activation_type).Union(),
                  {add_output, input3_}, {output_});
-    BuildInterpreter({GetShape(input1_), GetShape(input2_), GetShape(input3_)},
-                     allow_fp32_relax_to_fp16);
+    BuildInterpreter({GetShape(input1_), GetShape(input2_), GetShape(input3_)},
+                     /*num_threads=*/-1, allow_fp32_relax_to_fp16,
+                     /*apply_delegate=*/true);
   }
 };
@@ -616,8 +617,8 @@ class HardSwishAddOpsAcceleratedModel : public MultiOpModel,
     AddBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                  CreateAddOptions(builder_, activation_type).Union(),
                  {input1_, hard_swish_output}, {output_});
-    BuildInterpreter({GetShape(input1_), GetShape(input2_)},
-                     allow_fp32_relax_to_fp16);
+    BuildInterpreter({GetShape(input1_), GetShape(input2_)}, /*num_threads=*/-1,
+                     allow_fp32_relax_to_fp16, /*apply_delegate=*/true);
   }
 };
@@ -749,7 +750,8 @@ class QuantizedWeightsConvolutionOpModel : public SingleOpModel,
                      .Union());
-    BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)},
-                     num_threads);
+    BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)},
+                     num_threads, /*allow_fp32_relax_to_fp16=*/false,
+                     /*apply_delegate=*/true);
   }

   void SetInput(std::initializer_list<float> data) {


@@ -77,8 +77,8 @@ class FloatAddOpModel : public SingleOpModelWithNNAPI {
     output_ = AddOutput(output);
     SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                  CreateAddOptions(builder_, activation_type).Union());
-    BuildInterpreter({GetShape(input1_), GetShape(input2_)},
-                     allow_fp32_relax_to_fp16);
+    BuildInterpreter({GetShape(input1_), GetShape(input2_)}, /*num_threads=*/-1,
+                     allow_fp32_relax_to_fp16, /*apply_delegate=*/true);
   }
 };


@@ -154,8 +154,8 @@ class FloatAddOpModel : public SingleOpModelWithNNAPI {
     output_ = AddOutput(output);
     SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                  CreateAddOptions(builder_, activation_type).Union());
-    BuildInterpreter({GetShape(input1_), GetShape(input2_)},
-                     allow_fp32_relax_to_fp16);
+    BuildInterpreter({GetShape(input1_), GetShape(input2_)}, /*num_threads=*/-1,
+                     allow_fp32_relax_to_fp16, /*apply_delegate=*/true);
   }
 };


@@ -113,7 +113,8 @@ class BaseConvolutionOpModel : public SingleOpModel {
     resolver_ = absl::make_unique<SingleOpResolver>(BuiltinOperator_CONV_2D,
                                                     registration);
-    BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)},
-                     num_threads);
+    BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)},
+                     num_threads, /*allow_fp32_relax_to_fp16=*/false,
+                     /*apply_delegate=*/true);
   }

  protected:


@@ -139,7 +139,8 @@ class LSTMOpModel : public SingleOpModel {
     // Do not apply delegate yet since tensor values are not known (and more
     // specifically scales in quantized tensors are not known).
-    BuildInterpreter(input_shapes, /*allow_fp32_relax_to_fp16=*/false,
+    BuildInterpreter(input_shapes, /*num_threads=*/-1,
+                     /*allow_fp32_relax_to_fp16=*/false,
                      /*apply_delegate=*/false);
   }
@@ -2259,7 +2260,8 @@ class LSTMIntegerOpModel : public SingleOpModel {
     // Do not apply delegate yet since tensor values are not known (and more
     // specifically scales in quantized tensors are not known).
-    BuildInterpreter(input_shapes, /*allow_fp32_relax_to_fp16=*/false,
+    BuildInterpreter(input_shapes, /*num_threads=*/-1,
+                     /*allow_fp32_relax_to_fp16=*/false,
                      /*apply_delegate=*/false);
   }
@@ -2938,7 +2940,8 @@ class LSTMIntegerOpModel8x8_8 : public SingleOpModel {
     // Do not apply delegate yet since tensor values are not known (and more
     // specifically scales in quantized tensors are not known).
-    BuildInterpreter(input_shapes, /*allow_fp32_relax_to_fp16=*/false,
+    BuildInterpreter(input_shapes, /*num_threads=*/-1,
+                     /*allow_fp32_relax_to_fp16=*/false,
                      /*apply_delegate=*/false);
   }


@@ -221,20 +221,6 @@ void SingleOpModel::BuildInterpreter(
                    /*apply_delegate=*/true);
 }

-void SingleOpModel::BuildInterpreter(std::vector<std::vector<int>> input_shapes,
-                                     bool allow_fp32_relax_to_fp16,
-                                     bool apply_delegate) {
-  BuildInterpreter(input_shapes, /*num_threads=*/-1, allow_fp32_relax_to_fp16,
-                   apply_delegate);
-}
-
-void SingleOpModel::BuildInterpreter(std::vector<std::vector<int>> input_shapes,
-                                     int num_threads) {
-  BuildInterpreter(input_shapes, num_threads,
-                   /*allow_fp32_relax_to_fp16=*/false,
-                   /*apply_delegate=*/true);
-}
-
 // static
 void SingleOpModel::SetForceUseNnapi(bool use_nnapi) {
   force_use_nnapi = use_nnapi;


@@ -382,13 +382,7 @@ class SingleOpModel {
   // tensors given the shapes of the inputs.
   void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
                         int num_threads, bool allow_fp32_relax_to_fp16,
-                        bool apply_delegate = true);
-
-  void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
-                        int num_threads);
-
-  void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
-                        bool allow_fp32_relax_to_fp16, bool apply_delegate);
+                        bool apply_delegate);

   void BuildInterpreter(std::vector<std::vector<int>> input_shapes);
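With the default argument gone, only two call shapes remain. A compilable sketch of the surviving pair, with hypothetical free functions standing in for the real SingleOpModel methods:

#include <vector>

// The two overloads that remain after this change (see the header diff above);
// empty bodies stand in for the real implementations.
void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
                      int num_threads, bool allow_fp32_relax_to_fp16,
                      bool apply_delegate) {}
void BuildInterpreter(std::vector<std::vector<int>> input_shapes) {}

int main() {
  std::vector<std::vector<int>> shapes = {{1, 2}, {1, 2}};
  // Either take every optional setting at its default...
  BuildInterpreter(shapes);
  // ...or spell out every optional setting explicitly:
  BuildInterpreter(shapes, /*num_threads=*/-1,
                   /*allow_fp32_relax_to_fp16=*/true,
                   /*apply_delegate=*/true);
  // BuildInterpreter(shapes, true);  // no longer compiles: no matching overload
}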