Always clear lazy delegate providers to ensure they are applied only once.

PiperOrigin-RevId: 339630704
Change-Id: I0f4dedf17ea49b047bd0b7a2bdf022eb7a772ea6
Author:    Chao Mei
Date:      2020-10-29 02:58:31 -07:00
Committer: TensorFlower Gardener
Commit:    849682d6a0 (parent 045f110f33)
2 changed files with 25 additions and 7 deletions

tensorflow/lite/interpreter.cc

@@ -180,17 +180,20 @@ TfLiteStatus Interpreter::AllocateTensors() {
   // Apply the default delegate that TFLite will enable at this point to allow
   // other user-level delegates to be applied first.
   if (!lazy_delegate_providers_.empty()) {
+    // We only apply lazy delegate providers once.
+    std::vector<TfLiteDelegatePtr> delegate_providers;
+    delegate_providers.swap(lazy_delegate_providers_);
+
     TFLITE_LOG(TFLITE_LOG_INFO,
                "Applying %zu TensorFlow Lite delegate(s) lazily.",
-               lazy_delegate_providers_.size());
+               delegate_providers.size());
     // At the moment, XNNPACK delegate is the only one that might be applied
     // by default, in which case, the execution will fall back to default
     // implementation if the XNNPACK delegate fails to be applied. Therefore, we
     // ignore the return status here and let it fall through the rest of the
     // code.
-    for (size_t i = 0; i < lazy_delegate_providers_.size(); ++i) {
-      auto status =
-          ModifyGraphWithDelegate(std::move(lazy_delegate_providers_[i]));
+    for (size_t i = 0; i < delegate_providers.size(); ++i) {
+      auto status = ModifyGraphWithDelegate(std::move(delegate_providers[i]));
       switch (status) {
         case kTfLiteOk:
           TFLITE_LOG(TFLITE_LOG_INFO,
@@ -225,7 +228,6 @@ TfLiteStatus Interpreter::AllocateTensors() {
           return kTfLiteError;
       }
     }
-    lazy_delegate_providers_.clear();
   }
   return primary_subgraph().AllocateTensors();
 }
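
The hunk above adopts a common apply-once idiom: the pending providers are swapped into a local vector before any of them is applied, so the member list is already empty even if application fails partway through, and a later call never retries them. A minimal standalone sketch of the idiom, using hypothetical stand-ins (Runtime, DelegateProvider, Status) rather than TFLite's real classes:

#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

// Hypothetical stand-ins for TfLiteStatus and TfLiteDelegatePtr.
enum Status { kOk, kError };
using DelegateProvider = std::function<Status()>;

class Runtime {
 public:
  void AddLazyDelegateProvider(DelegateProvider p) {
    lazy_delegate_providers_.push_back(std::move(p));
  }

  Status AllocateTensors() {
    if (!lazy_delegate_providers_.empty()) {
      // Move the pending providers into a local vector first; the member is
      // now empty, so the providers run at most once no matter how often
      // AllocateTensors() is called or whether a provider fails.
      std::vector<DelegateProvider> providers;
      providers.swap(lazy_delegate_providers_);
      for (auto& apply : providers) {
        if (apply() != kOk) {
          // Failure of a default delegate is not fatal here; fall through.
          std::puts("delegate application failed; falling back");
        }
      }
    }
    return kOk;  // Real code would go on to allocate tensors.
  }

 private:
  std::vector<DelegateProvider> lazy_delegate_providers_;
};

int main() {
  Runtime rt;
  rt.AddLazyDelegateProvider([] { return kError; });
  rt.AllocateTensors();  // Tries (and fails) the provider once.
  rt.AllocateTensors();  // Prints nothing: the provider is not retried.
  return 0;
}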

tensorflow/lite/interpreter_test.cc

@@ -1804,8 +1804,14 @@ class TestLazyDelegateProvider : public InterpreterTest {
   };
 
   void InitWithLazyDelegate(int64_t delegate_flags,
-                            bool create_dyanmic_tensor = false) {
-    TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
+                            bool create_dyanmic_tensor = false,
+                            bool return_error = false) {
+    TfLiteRegistration reg = {nullptr};
+    if (return_error) {
+      reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
+        return kTfLiteError;
+      };
+    }
     ASSERT_EQ(interpreter_.AddTensors(2), kTfLiteOk);
     interpreter_.SetInputs({0});
     interpreter_.SetOutputs({1});
@@ -1836,6 +1842,16 @@ TEST_F(TestLazyDelegateProvider, ApplicationSuccess) {
   EXPECT_TRUE(HasDelegates());
 }
 
+TEST_F(TestLazyDelegateProvider, ApplicationFailure) {
+  InitWithLazyDelegate(kTfLiteDelegateFlagsNone,
+                       false /* create_dyanmic_tensor */,
+                       true /* return_error */);
+  EXPECT_EQ(kTfLiteError, interpreter_.AllocateTensors());
+  // We clear Interpreter::lazy_delegate_providers_ after they are tried out.
+  EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
+  EXPECT_FALSE(HasDelegates());
+}
+
 TEST_F(TestLazyDelegateProvider, ApplicationSkipped) {
   InitWithLazyDelegate(kTfLiteDelegateFlagsNone,
                        true /* create_dyanmic_tensor */);
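
The new ApplicationFailure test pins down the caller-visible contract: a failing lazy delegate makes AllocateTensors() return an error, but the provider list is still cleared, so nothing is retried and no delegate is left applied. Continuing the hypothetical Runtime sketch above (not TFLite's real test fixture), the same contract could be checked with plain asserts:

#include <cassert>

int main() {
  int attempts = 0;
  Runtime rt;
  rt.AddLazyDelegateProvider([&attempts] {
    ++attempts;
    return kError;  // Simulate a delegate whose preparation fails.
  });
  rt.AllocateTensors();  // First call tries the provider and clears the list.
  rt.AllocateTensors();  // Second call finds the provider list empty.
  // The failing provider ran exactly once; it was cleared, not retried.
  assert(attempts == 1);
  return 0;
}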