Skip applying default TfLite delegates for nnapi delegate tests.

PiperOrigin-RevId: 337440144
Change-Id: I1e1ea6259df21c8a3c001e1138d729f5606f6d32
parent fe651f1252
commit 15dd772865
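
The hunks below change each accelerated-model test fixture to build its interpreter with apply_delegate=false and then call ApplyDelegate() itself, so that only the delegate under test (NNAPI) is attached and no default TfLite delegates are applied on the fixture's behalf. A minimal sketch of the resulting fixture shape, with a hypothetical ExampleOpModel standing in for classes such as ArgMaxOpModel (the real fixtures also derive from AcceleratedModel, which supplies the NNAPI delegate):

// Sketch only: the build-then-apply pattern adopted by the fixtures below.
// 'ExampleOpModel' is illustrative and assumes the delegate under test has
// already been configured on the model, as the NNAPI fixtures do.
class ExampleOpModel : public SingleOpModel {
 public:
  ExampleOpModel(const TensorData& input, const TensorData& output) {
    input_ = AddInput(input);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_ABS, BuiltinOptions_NONE, 0);
    // Build the interpreter without letting it apply any delegate implicitly,
    // which keeps default TfLite delegates out of the test graph.
    BuildInterpreter({GetShape(input_)}, /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/false);
    // Explicitly apply only the delegate under test.
    ApplyDelegate();
  }

 protected:
  int input_;
  int output_;
};

The same build-without-delegating-then-ApplyDelegate() sequence appears in every fixture hunk that follows.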
@@ -270,7 +270,10 @@ class ArgMaxOpModel : public SingleOpModel, public AcceleratedModel {
 
     SetBuiltinOp(BuiltinOperator_ARG_MAX, BuiltinOptions_ArgMaxOptions,
                  CreateArgMaxOptions(builder_, output_type).Union());
-    BuildInterpreter({input_shape, {1}});
+    BuildInterpreter({input_shape, {1}}, /*num_threads*/ -1,
+                     /*allow_fp32_relax_to_fp16=*/false,
+                     /*apply_delegate=*/false);
+    ApplyDelegate();
   }
 };
 
@@ -410,7 +413,8 @@ class AddSubOpsAcceleratedModel : public MultiOpModel, public AcceleratedModel {
                  {add_output, input3_}, {output_});
     BuildInterpreter({GetShape(input1_), GetShape(input2_), GetShape(input3_)},
                      /*num_threads=*/-1, allow_fp32_relax_to_fp16,
-                     /*apply_delegate=*/true);
+                     /*apply_delegate=*/false);
+    ApplyDelegate();
   }
 };
 
@@ -591,7 +595,8 @@ class HardSwishAddOpsAcceleratedModel : public MultiOpModel,
                  CreateAddOptions(builder_, activation_type).Union(),
                  {input1_, hard_swish_output}, {output_});
     BuildInterpreter({GetShape(input1_), GetShape(input2_)}, /*num_threads=*/-1,
-                     allow_fp32_relax_to_fp16, /*apply_delegate=*/true);
+                     allow_fp32_relax_to_fp16, /*apply_delegate=*/false);
+    ApplyDelegate();
   }
 };
 
@@ -721,7 +726,8 @@ class QuantizedWeightsConvolutionOpModel : public SingleOpModel,
 
     BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)},
                      num_threads, /*allow_fp32_relax_to_fp16=*/false,
-                     /*apply_delegate=*/true);
+                     /*apply_delegate=*/false);
+    ApplyDelegate();
   }
 
   void SetInput(std::initializer_list<float> data) {
@@ -867,7 +873,11 @@ class LongIdentityModel : public MultiOpModel, public AcceleratedModel {
         {intermediate_outputs[intermediate_outputs.size() - 1], zero_input_},
         {output_});
 
-    BuildInterpreter({GetShape(input_), GetShape(zero_input_)});
+    BuildInterpreter({GetShape(input_), GetShape(zero_input_)},
+                     /*num_threads*/ -1,
+                     /*allow_fp32_relax_to_fp16=*/false,
+                     /*apply_delegate=*/false);
+    ApplyDelegate();
 
     std::vector<float> zero(GetTensorSize(input_), 0.0);
     PopulateTensor(zero_input_, zero);
@@ -75,7 +75,11 @@ class FloatAddOpModel : public SingleOpModelWithNNAPI {
     SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                  CreateAddOptions(builder_, activation_type).Union());
     BuildInterpreter({GetShape(input1_), GetShape(input2_)}, /*num_threads=*/-1,
-                     allow_fp32_relax_to_fp16, /*apply_delegate=*/true);
+                     allow_fp32_relax_to_fp16, /*apply_delegate=*/false);
+    // We defer applying the 'stateful_delegate_' till now (i.e. via setting
+    // 'apply_delegate=false' above) so that default TfLite delegates won't be
+    // applied.
+    ApplyDelegate();
   }
 };
 
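For context, a test case exercising a fixture like the FloatAddOpModel changed above might look as follows; the test name, tensor values, and expectations are illustrative, not part of this commit:

#include <gmock/gmock.h>
#include <gtest/gtest.h>

// Illustrative only: with the change above, only the NNAPI delegate is
// applied to 'm'; no default TfLite delegate touches the graph.
TEST(NnapiDelegateTest, AddWithoutDefaultDelegates) {
  FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
  m.Invoke();
  // Element-wise sums: -2.0+0.1, 0.2+0.2, 0.7+0.3, 0.8+0.5.
  EXPECT_THAT(m.GetOutput(),
              ::testing::ElementsAreArray(ArrayFloatNear({-1.9, 0.4, 1.0, 1.3})));
}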
@@ -193,7 +193,10 @@ void SingleOpModel::BuildInterpreter(std::vector<std::vector<int>> input_shapes,
   UpdateOpVersion(buffer_pointer);
 
   if (!resolver_) {
-    auto resolver = new ops::builtin::BuiltinOpResolver();
+    MutableOpResolver* resolver =
+        apply_delegate
+            ? new ops::builtin::BuiltinOpResolver()
+            : new ops::builtin::BuiltinOpResolverWithoutDefaultDelegates();
     for (const auto& reg : custom_registrations_) {
       resolver->AddCustom(reg.first.data(), reg.second());
     }
@@ -485,6 +485,10 @@ class SingleOpModel {
 
   // Build the interpreter for this model. Also, resize and allocate all
   // tensors given the shapes of the inputs.
+  // Note: 'apply_delegate' also serves to tell whether default TfLite delegates
+  // should be applied implicitly for a test case. For example, when testing the
+  // specific implementation of a TfLite delegate, it might be necessary to set
+  // this to false.
   void BuildInterpreter(std::vector<std::vector<int>> input_shapes,
                         int num_threads, bool allow_fp32_relax_to_fp16,
                         bool apply_delegate, bool allocate_and_delegate = true);
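For reference, the resolver selection in the SingleOpModel::BuildInterpreter hunk above is what actually keeps default delegates out: BuiltinOpResolver can register default delegate creators (typically XNNPACK, depending on build flags), while BuiltinOpResolverWithoutDefaultDelegates does not. A minimal sketch of the same idea outside the test utilities; the helper name and error handling are illustrative, not from this commit:

#include <memory>

#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

// Builds an interpreter that skips default TfLite delegates and applies only
// the NNAPI delegate, mirroring apply_delegate=false + ApplyDelegate() above.
std::unique_ptr<tflite::Interpreter> BuildNnapiOnlyInterpreter(
    const tflite::FlatBufferModel& model) {
  // Resolver variant that registers no default delegate creators.
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(model, resolver)(&interpreter) != kTfLiteOk) {
    return nullptr;
  }
  // Attach only the delegate we care about, analogous to ApplyDelegate().
  if (interpreter->ModifyGraphWithDelegate(tflite::NnApiDelegate()) != kTfLiteOk) {
    return nullptr;
  }
  if (interpreter->AllocateTensors() != kTfLiteOk) {
    return nullptr;
  }
  return interpreter;
}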