Revert pruning of old devices (#15360)

* Revert "Fix registering a device on an account with lots of devices (#15348)"
  This reverts commit f0d8f66eaa.
* Revert "Delete stale non-e2e devices for users, take 3 (#15183)"
  This reverts commit 78cdb72cd6.

parent 72d2ceaa9a
commit 6204c3663e
changelog.d (one entry per reverted PR is deleted):
@@ -1 +0,0 @@
-Prune user's old devices on login if they have too many.
@@ -1 +0,0 @@
-Prune user's old devices on login if they have too many.
synapse/handlers/device.py:
@@ -485,7 +485,7 @@ class DeviceHandler(DeviceWorkerHandler):
         device_ids = [d for d in device_ids if d != except_device_id]
 
         await self.delete_devices(user_id, device_ids)
 
-    async def delete_devices(self, user_id: str, device_ids: StrCollection) -> None:
+    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
         """Delete several devices
 
         Args:
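An aside on the type change above: #15348 had widened this signature to StrCollection because the pruning loop fed tuples from batch_iter into delete_devices; with that loop reverted, List[str] suffices again. If memory serves, StrCollection was defined in synapse.types roughly as below; treat this as an assumption, not a quote of the tree at this commit:

# Assumed definition of StrCollection (synapse.types), reproduced from
# memory for context; check the tree at this commit for the real one.
from typing import AbstractSet, List, Tuple, Union

StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]]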
synapse/handlers/register.py:
@@ -16,7 +16,7 @@
 """Contains functions for registering clients."""
 
 import logging
-from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
 
 from prometheus_client import Counter
 from typing_extensions import TypedDict
@@ -40,7 +40,6 @@ from synapse.appservice import ApplicationService
 from synapse.config.server import is_threepid_reserved
 from synapse.handlers.device import DeviceHandler
 from synapse.http.servlet import assert_params_in_dict
-from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.login import RegisterDeviceReplicationServlet
 from synapse.replication.http.register import (
     ReplicationPostRegisterActionsServlet,
@@ -49,7 +48,6 @@ from synapse.replication.http.register import (
 from synapse.spam_checker_api import RegistrationBehaviour
 from synapse.types import RoomAlias, UserID, create_requester
 from synapse.types.state import StateFilter
-from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -112,10 +110,6 @@ class RegistrationHandler:
         self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
         self._server_name = hs.hostname
 
-        # The set of users that we're currently pruning devices for. Ensures
-        # that we don't have two such jobs for the same user running at once.
-        self._currently_pruning_devices_for_users: Set[str] = set()
-
         self.spam_checker = hs.get_spam_checker()
 
         if hs.config.worker.worker_app:
@@ -127,10 +121,7 @@
                 ReplicationPostRegisterActionsServlet.make_client(hs)
             )
         else:
-            device_handler = hs.get_device_handler()
-            assert isinstance(device_handler, DeviceHandler)
-            self.device_handler = device_handler
-
+            self.device_handler = hs.get_device_handler()
             self._register_device_client = self.register_device_inner
             self.pusher_pool = hs.get_pusherpool()
 
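Context for the removed assert: as far as I recall, hs.get_device_handler() is annotated as returning the worker variant, so #15348 narrowed the type before using main-process-only functionality; the revert folds this back into a direct assignment. A sketch of that narrowing pattern (class bodies are illustrative stand-ins, not Synapse's actual implementations):

class DeviceWorkerHandler:
    """Subset of device functionality available on worker processes."""

class DeviceHandler(DeviceWorkerHandler):
    """Full handler; only instantiated on the main process."""
    async def delete_devices(self, user_id: str, device_ids: list) -> None:
        ...

def get_device_handler() -> DeviceWorkerHandler:
    return DeviceHandler()

handler = get_device_handler()
# isinstance narrows DeviceWorkerHandler to DeviceHandler for mypy, so
# main-process-only methods can be called without a type error.
assert isinstance(handler, DeviceHandler)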
@@ -860,9 +851,6 @@ class RegistrationHandler:
             # This can only run on the main process.
             assert isinstance(self.device_handler, DeviceHandler)
 
-            # Prune the user's device list if they already have a lot of devices.
-            await self._maybe_prune_too_many_devices(user_id)
-
             registered_device_id = await self.device_handler.check_device_registered(
                 user_id,
                 device_id,
@@ -931,42 +919,6 @@
             "refresh_token": refresh_token,
         }
 
-    async def _maybe_prune_too_many_devices(self, user_id: str) -> None:
-        """Delete any excess old devices this user may have."""
-
-        if user_id in self._currently_pruning_devices_for_users:
-            return
-
-        # We also cap the number of users whose devices we prune at the same
-        # time, to avoid performance problems.
-        if len(self._currently_pruning_devices_for_users) > 5:
-            return
-
-        device_ids = await self.store.check_too_many_devices_for_user(user_id)
-        if not device_ids:
-            return
-
-        logger.info("Pruning %d stale devices for %s", len(device_ids), user_id)
-
-        # Now spawn a background loop that deletes said devices.
-        async def _prune_too_many_devices_loop() -> None:
-            if user_id in self._currently_pruning_devices_for_users:
-                return
-
-            self._currently_pruning_devices_for_users.add(user_id)
-
-            try:
-                for batch in batch_iter(device_ids, 10):
-                    await self.device_handler.delete_devices(user_id, batch)
-
-                    await self.clock.sleep(60)
-            finally:
-                self._currently_pruning_devices_for_users.discard(user_id)
-
-        run_as_background_process(
-            "_prune_too_many_devices_loop", _prune_too_many_devices_loop
-        )
-
     async def post_registration_actions(
         self, user_id: str, auth_result: dict, access_token: Optional[str]
     ) -> None:
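The removed loop deleted devices via batch_iter, whose import is also dropped in the hunk further up. For readers unfamiliar with it, a minimal re-implementation matching the upstream behaviour as I understand it (yields fixed-size tuples, the last possibly short):

from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")

def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    # Yield successive chunks of at most `size` items from `iterable`.
    it = iter(iterable)
    while True:
        chunk = tuple(islice(it, size))
        if not chunk:
            return
        yield chunk

# Example: list(batch_iter(range(25), 10)) yields tuples of 10, 10 and 5
# items, which is why a large prune spanned several 60-second sleeps.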
synapse/storage/databases/main/devices.py:
@@ -1599,76 +1599,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
 
         return rows
 
-    async def check_too_many_devices_for_user(self, user_id: str) -> List[str]:
-        """Check if the user has a lot of devices, and if so return the set of
-        devices we can prune.
-
-        This does *not* return hidden devices or devices with E2E keys.
-        """
-
-        num_devices = await self.db_pool.simple_select_one_onecol(
-            table="devices",
-            keyvalues={"user_id": user_id, "hidden": False},
-            retcol="COALESCE(COUNT(*), 0)",
-            desc="count_devices",
-        )
-
-        # We let users have up to ten devices without pruning.
-        if num_devices <= 10:
-            return []
-
-        # We always prune devices not seen in the last 14 days...
-        max_last_seen = self._clock.time_msec() - 14 * 24 * 60 * 60 * 1000
-
-        # ... but we also cap the maximum number of devices the user can have to
-        # 50.
-        if num_devices > 50:
-            # Choose a last seen that ensures we keep at most 50 devices.
-            sql = """
-                SELECT last_seen FROM devices
-                LEFT JOIN e2e_device_keys_json USING (user_id, device_id)
-                WHERE
-                    user_id = ?
-                    AND NOT hidden
-                    AND last_seen IS NOT NULL
-                    AND key_json IS NULL
-                ORDER BY last_seen DESC
-                LIMIT 1
-                OFFSET 50
-            """
-
-            rows = await self.db_pool.execute(
-                "check_too_many_devices_for_user_last_seen",
-                None,
-                sql,
-                user_id,
-            )
-            if rows:
-                max_last_seen = max(rows[0][0], max_last_seen)
-
-        # Fetch the devices to delete.
-        sql = """
-            SELECT device_id FROM devices
-            LEFT JOIN e2e_device_keys_json USING (user_id, device_id)
-            WHERE
-                user_id = ?
-                AND NOT hidden
-                AND last_seen <= ?
-                AND key_json IS NULL
-            ORDER BY last_seen
-        """
-
-        def check_too_many_devices_for_user_txn(
-            txn: LoggingTransaction,
-        ) -> List[str]:
-            txn.execute(sql, (user_id, max_last_seen))
-            return [device_id for device_id, in txn]
-
-        return await self.db_pool.runInteraction(
-            "check_too_many_devices_for_user",
-            check_too_many_devices_for_user_txn,
-        )
-
-
 class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
     # Because we have write access, this will be a StreamIdGenerator
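Restating the policy encoded in the removed SQL as a self-contained sketch, for illustration only (the real queries also excluded hidden devices and, via the e2e_device_keys_json join, any device holding E2E keys):

from typing import List, Tuple

DAY_MS = 24 * 60 * 60 * 1000

def prunable_devices(
    devices: List[Tuple[str, int]],  # (device_id, last_seen_ms)
    now_ms: int,
) -> List[str]:
    # Up to ten devices were allowed unconditionally.
    if len(devices) <= 10:
        return []

    # Devices unseen for 14 days were always prunable...
    cutoff = now_ms - 14 * DAY_MS

    # ...and above 50 devices, the cutoff was raised to the last_seen of
    # the 51st most recently seen device (the OFFSET 50 trick above),
    # capping the number of devices kept at 50.
    if len(devices) > 50:
        by_recency = sorted(devices, key=lambda d: d[1], reverse=True)
        cutoff = max(cutoff, by_recency[50][1])

    return [device_id for device_id, last_seen in devices if last_seen <= cutoff]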
@@ -1727,7 +1657,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                 values={},
                 insertion_values={
                     "display_name": initial_device_display_name,
-                    "last_seen": self._clock.time_msec(),
                     "hidden": False,
                 },
                 desc="store_device",
@@ -1773,15 +1702,7 @@
             )
             raise StoreError(500, "Problem storing device.")
 
-    @cached(max_entries=0)
-    async def delete_device(self, user_id: str, device_id: str) -> None:
-        raise NotImplementedError()
-
-    # Note: sometimes deleting rows out of `device_inbox` can take a long time,
-    # so we use a cache so that we deduplicate in flight requests to delete
-    # devices.
-    @cachedList(cached_method_name="delete_device", list_name="device_ids")
-    async def delete_devices(self, user_id: str, device_ids: Collection[str]) -> dict:
+    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
         """Deletes several devices.
 
         Args:
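The decorators removed above are a Synapse caching idiom worth noting: with max_entries=0 nothing is retained, so per the removed comment the only effect is that concurrent delete requests for the same devices share one in-flight call. A generic asyncio sketch of that deduplication idea (a hypothetical helper, not the Synapse cache machinery):

import asyncio
from typing import Awaitable, Callable, Dict, Tuple

_inflight: Dict[Tuple[str, str], "asyncio.Task[None]"] = {}

async def delete_device_deduped(
    user_id: str,
    device_id: str,
    do_delete: Callable[[str, str], Awaitable[None]],
) -> None:
    # Concurrent callers for the same (user, device) await one shared
    # task; nothing is kept once the task completes.
    key = (user_id, device_id)
    task = _inflight.get(key)
    if task is None:
        task = asyncio.create_task(do_delete(user_id, device_id))
        _inflight[key] = task
        task.add_done_callback(lambda _t: _inflight.pop(key, None))
    await task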
@@ -1818,8 +1739,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         for device_id in device_ids:
             self.device_id_exists_cache.invalidate((user_id, device_id))
 
-        return {}
-
     async def update_device(
         self, user_id: str, device_id: str, new_display_name: Optional[str] = None
     ) -> None:
tests/handlers/test_admin.py:
@@ -272,7 +272,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.assertIn("device_id", args[0][0])
         self.assertIsNone(args[0][0]["display_name"])
         self.assertIsNone(args[0][0]["last_seen_user_agent"])
-        self.assertEqual(args[0][0]["last_seen_ts"], 600)
+        self.assertIsNone(args[0][0]["last_seen_ts"])
         self.assertIsNone(args[0][0]["last_seen_ip"])
 
     def test_connections(self) -> None:
tests/handlers/test_device.py:
@@ -115,7 +115,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
                 "device_id": "xyz",
                 "display_name": "display 0",
                 "last_seen_ip": None,
-                "last_seen_ts": 1000000,
+                "last_seen_ts": None,
             },
             device_map["xyz"],
         )
tests/rest/client/test_register.py:
@@ -794,53 +794,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
             ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"]
         )
 
-    def test_check_stale_devices_get_pruned(self) -> None:
-        """Check that if a user has some stale devices we log them out when they
-        log in a new device."""
-
-        # Register some devices, but not too many that we go over the threshold
-        # where we prune more aggressively.
-        user_id = self.register_user("user", "pass")
-        for _ in range(0, 50):
-            self.login(user_id, "pass")
-
-        store = self.hs.get_datastores().main
-
-        res = self.get_success(store.get_devices_by_user(user_id))
-        self.assertEqual(len(res), 50)
-
-        # Advance time so that the above devices are considered "old".
-        self.reactor.advance(30 * 24 * 60 * 60 * 1000)
-
-        self.login(user_id, "pass")
-
-        self.reactor.pump([60] * 10)  # Ensure background job runs
-
-        # We expect all old devices to have been logged out
-        res = self.get_success(store.get_devices_by_user(user_id))
-        self.assertEqual(len(res), 1)
-
-    def test_check_recent_devices_get_pruned(self) -> None:
-        """Check that if a user has many devices we log out the last oldest
-        ones.
-
-        Note: this is similar to above, except if we lots of devices we prune
-        devices even if they're not old.
-        """
-
-        # Register a lot of devices in a short amount of time
-        user_id = self.register_user("user", "pass")
-        for _ in range(0, 100):
-            self.login(user_id, "pass")
-            self.reactor.advance(100)
-
-        store = self.hs.get_datastores().main
-
-        # We keep up to 50 devices that have been used in the last week, plus
-        # the device that was last logged in.
-        res = self.get_success(store.get_devices_by_user(user_id))
-        self.assertEqual(len(res), 51)
-
 
 class AccountValidityTestCase(unittest.HomeserverTestCase):
     servlets = [
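A note on the removed tests' use of self.reactor.pump([60] * 10): Synapse's HomeserverTestCase runs on a fake Twisted reactor, and pump() is a series of advance() steps, each firing any delayed calls that have come due; here, enough 60-second steps for the background loop's per-batch clock.sleep(60) calls to elapse. A minimal demonstration against twisted.internet.task.Clock, which offers the same pump/advance API:

from twisted.internet.task import Clock

clock = Clock()
fired = []
clock.callLater(60, fired.append, "batch 1")
clock.callLater(120, fired.append, "batch 2")

clock.pump([60, 60])  # advance(60) twice, firing both delayed calls
assert fired == ["batch 1", "batch 2"]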
tests/storage/test_client_ips.py:
@@ -170,8 +170,6 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
             )
         )
 
-        last_seen = self.clock.time_msec()
-
         if after_persisting:
             # Trigger the storage loop
             self.reactor.advance(10)
@@ -192,7 +190,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
                    "device_id": device_id,
                    "ip": None,
                    "user_agent": None,
-                   "last_seen": last_seen,
+                   "last_seen": None,
                },
            ],
        )