Always clear lazy delegate providers to ensure they are applied only once.

PiperOrigin-RevId: 339630704
Change-Id: I0f4dedf17ea49b047bd0b7a2bdf022eb7a772ea6
Diff (files under tensorflow/lite):
@@ -180,17 +180,20 @@ TfLiteStatus Interpreter::AllocateTensors() {
   // Apply the default delegate that TFLite will enable at this point to allow
   // other user-level delegates to be applied first.
   if (!lazy_delegate_providers_.empty()) {
+    // We only apply lazy delegate providers once.
+    std::vector<TfLiteDelegatePtr> delegate_providers;
+    delegate_providers.swap(lazy_delegate_providers_);
+
     TFLITE_LOG(TFLITE_LOG_INFO,
                "Applying %zu TensorFlow Lite delegate(s) lazily.",
-               lazy_delegate_providers_.size());
+               delegate_providers.size());
     // At the moment, the XNNPACK delegate is the only one that might be applied
     // by default, in which case execution will fall back to the default
     // implementation if the XNNPACK delegate fails to be applied. Therefore, we
     // ignore the return status here and let execution fall through to the rest
     // of the code.
-    for (size_t i = 0; i < lazy_delegate_providers_.size(); ++i) {
-      auto status =
-          ModifyGraphWithDelegate(std::move(lazy_delegate_providers_[i]));
+    for (size_t i = 0; i < delegate_providers.size(); ++i) {
+      auto status = ModifyGraphWithDelegate(std::move(delegate_providers[i]));
       switch (status) {
         case kTfLiteOk:
           TFLITE_LOG(TFLITE_LOG_INFO,
@@ -225,7 +228,6 @@ TfLiteStatus Interpreter::AllocateTensors() {
           return kTfLiteError;
       }
     }
-    lazy_delegate_providers_.clear();
   }

   return primary_subgraph().AllocateTensors();
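The key idiom in the two hunks above is swapping the pending providers into a local vector before applying them: the member list is already empty when the first provider runs, so a repeated (or re-entrant) call to AllocateTensors() can never apply the same provider twice, and the trailing clear() becomes unnecessary. Below is a minimal standalone sketch of that idiom; Provider and ApplyPendingProviders are illustrative names, not TensorFlow Lite API.

// Minimal sketch of the "swap, then apply once" idiom used above.
#include <cstdio>
#include <functional>
#include <vector>

using Provider = std::function<bool()>;  // stands in for a delegate provider

void ApplyPendingProviders(std::vector<Provider>& lazy_providers) {
  if (lazy_providers.empty()) return;
  // Move the pending work into a local vector first: the caller's list is
  // empty before any provider runs, so a repeated call cannot apply the
  // same provider twice, and no trailing clear() is needed.
  std::vector<Provider> providers;
  providers.swap(lazy_providers);
  std::printf("Applying %zu provider(s) lazily.\n", providers.size());
  for (auto& provider : providers) {
    if (!provider()) {
      std::printf("A provider failed; it will not be retried later.\n");
    }
  }
}

int main() {
  std::vector<Provider> pending;
  pending.push_back([] { return true; });
  pending.push_back([] { return false; });
  ApplyPendingProviders(pending);  // applies both, leaves `pending` empty
  ApplyPendingProviders(pending);  // no-op: the queue was already drained
}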
@@ -1804,8 +1804,14 @@ class TestLazyDelegateProvider : public InterpreterTest {
   };

   void InitWithLazyDelegate(int64_t delegate_flags,
-                            bool create_dyanmic_tensor = false) {
-    TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
+                            bool create_dyanmic_tensor = false,
+                            bool return_error = false) {
+    TfLiteRegistration reg = {nullptr};
+    if (return_error) {
+      reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
+        return kTfLiteError;
+      };
+    }
     ASSERT_EQ(interpreter_.AddTensors(2), kTfLiteOk);
     interpreter_.SetInputs({0});
     interpreter_.SetOutputs({1});
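In the fixture change above, the failure is injected entirely through the registration struct: TfLiteRegistration is a plain C struct, so `{nullptr}` zero-initializes the remaining members, and a captureless lambda can then be assigned to the `prepare` function pointer to make graph preparation fail. The sketch below shows the same pattern in isolation; Registration, Status, Context, and Node are illustrative stand-ins, not the real TfLiteRegistration layout.

// Standalone sketch of "zero-init the registration, then override one hook".
#include <cstdio>

enum Status { kOk = 0, kError = 1 };
struct Context {};
struct Node {};

struct Registration {
  Status (*init)(Context*, Node*);
  Status (*prepare)(Context*, Node*);
  Status (*invoke)(Context*, Node*);
};

int main() {
  Registration reg = {nullptr};  // all hooks start out null
  bool return_error = true;
  if (return_error) {
    // A captureless lambda converts to a plain function pointer, so it can
    // be assigned directly to the prepare hook to force a failure.
    reg.prepare = [](Context*, Node*) { return kError; };
  }
  Status s = reg.prepare ? reg.prepare(nullptr, nullptr) : kOk;
  std::printf("prepare status: %d\n", static_cast<int>(s));
}

The real fixture does the same thing with TfLiteContext* and TfLiteNode* parameters and kTfLiteError.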
@@ -1836,6 +1842,16 @@ TEST_F(TestLazyDelegateProvider, ApplicationSuccess) {
   EXPECT_TRUE(HasDelegates());
 }

+TEST_F(TestLazyDelegateProvider, ApplicationFailure) {
+  InitWithLazyDelegate(kTfLiteDelegateFlagsNone,
+                       false /* create_dyanmic_tensor */,
+                       true /* return_error */);
+  EXPECT_EQ(kTfLiteError, interpreter_.AllocateTensors());
+  // We clear Interpreter::lazy_delegate_providers_ after they are tried out.
+  EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
+  EXPECT_FALSE(HasDelegates());
+}
+
 TEST_F(TestLazyDelegateProvider, ApplicationSkipped) {
   InitWithLazyDelegate(kTfLiteDelegateFlagsNone,
                        true /* create_dyanmic_tensor */);
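The new ApplicationFailure test pins down the contract the production change guarantees: even when AllocateTensors() fails (here because the node's prepare hook returns kTfLiteError), the pending providers are still drained exactly once and no delegate remains applied. The googletest sketch below expresses that contract against a toy host, not the real Interpreter; FakeHost and its members are invented for illustration.

// Toy googletest sketch of the "failure still drains the queue" contract.
#include <functional>
#include <utility>
#include <vector>

#include <gtest/gtest.h>

class FakeHost {
 public:
  void AddLazyProvider(std::function<bool()> p) {
    lazy_providers_.push_back(std::move(p));
  }

  // Mirrors the swap-then-apply idiom: pending providers are drained exactly
  // once, whether or not any of them succeeds.
  bool AllocateTensors() {
    std::vector<std::function<bool()>> providers;
    providers.swap(lazy_providers_);
    bool ok = true;
    for (auto& provider : providers) {
      if (provider()) {
        ++applied_count_;
      } else {
        ok = false;
      }
    }
    return ok;
  }

  size_t pending_count() const { return lazy_providers_.size(); }
  int applied_count() const { return applied_count_; }

 private:
  std::vector<std::function<bool()>> lazy_providers_;
  int applied_count_ = 0;
};

TEST(FakeHostTest, ApplicationFailure) {
  FakeHost host;
  host.AddLazyProvider([] { return false; });  // simulate a failing delegate
  EXPECT_FALSE(host.AllocateTensors());
  // The pending queue is cleared even though application failed.
  EXPECT_EQ(0u, host.pending_count());
  // And nothing was actually applied.
  EXPECT_EQ(0, host.applied_count());
  // A later call does not retry the failed provider.
  EXPECT_TRUE(host.AllocateTensors());
  EXPECT_EQ(0, host.applied_count());
}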