Some housekeeping on `maybe_backfill()` functions (#15709)

Eric Eastwood 2023-06-05 23:38:52 -05:00 committed by GitHub
parent ca8906be2c
commit f9561b9e37
2 changed files with 18 additions and 0 deletions

changelog.d/15709.misc

@@ -0,0 +1 @@
Update docstring and traces on `maybe_backfill()` functions.

synapse/handlers/federation.py

@@ -200,6 +200,7 @@ class FederationHandler:
)
@trace
@tag_args
async def maybe_backfill(
self, room_id: str, current_depth: int, limit: int
) -> bool:
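
For readers unfamiliar with the decorators being added above: `@trace` starts a tracing span around the call and `@tag_args` records the function's arguments on that span (both are Synapse OpenTracing helpers). Below is a minimal, hypothetical sketch of what such decorators look like for an async function, using plain logging instead of a real tracer; it is not Synapse's actual implementation.

import functools
import inspect
import logging

logger = logging.getLogger(__name__)


def trace(func):
    # Stand-in for a span-creating decorator: mark the start and end of each
    # call to the wrapped async function.
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        logger.debug("span start: %s", func.__qualname__)
        try:
            return await func(*args, **kwargs)
        finally:
            logger.debug("span end: %s", func.__qualname__)
    return wrapper


def tag_args(func):
    # Stand-in for an argument-tagging decorator: bind the call arguments to
    # the function's signature and record them alongside the span.
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        bound = inspect.signature(func).bind(*args, **kwargs)
        logger.debug("%s args: %s", func.__qualname__, dict(bound.arguments))
        return await func(*args, **kwargs)
    return wrapper
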
@@ -214,6 +215,9 @@
limit: The number of events that the pagination request will
return. This is used as part of the heuristic to decide if we
should back paginate.
Returns:
True if we actually tried to backfill something, otherwise False.
"""
# Starting the processing time here so we can include the room backfill
# linearizer lock queue in the timing
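
The newly documented return value is what lets callers know whether a backfill attempt actually happened. A hedged caller sketch follows; the names here are illustrative and not Synapse's real pagination code.

async def paginate_backwards(
    handler, room_id: str, current_depth: int, limit: int
) -> bool:
    # If maybe_backfill() reports that it actually tried to backfill, the
    # local database may now hold newly fetched events, so a caller would
    # re-run its local query before responding.
    did_backfill = await handler.maybe_backfill(room_id, current_depth, limit)
    if did_backfill:
        ...  # re-query the local store here
    return did_backfill
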
@@ -227,6 +231,8 @@
processing_start_time=processing_start_time,
)
@trace
@tag_args
async def _maybe_backfill_inner(
self,
room_id: str,
@@ -247,6 +253,9 @@
limit: The max number of events to request from the remote federated server.
processing_start_time: The time when `maybe_backfill` started processing.
Only used for timing. If `None`, no timing observation will be made.
Returns:
True if we actually tried to backfill something, otherwise False.
"""
backwards_extremities = [
_BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
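
As the docstring notes, `processing_start_time` is optional and only drives a timing observation when supplied. A minimal sketch of that pattern, assuming the start time was captured with `time.monotonic()` and using a hypothetical Prometheus histogram (the metric name is illustrative, not necessarily the one Synapse uses):

import time
from typing import Optional

from prometheus_client import Histogram

# Hypothetical metric; the real name and buckets may differ.
backfill_processing_time = Histogram(
    "backfill_processing_time_seconds",
    "Time spent handling a maybe_backfill() request",
)


async def _maybe_backfill_inner_sketch(
    processing_start_time: Optional[float],
) -> bool:
    tried_backfill = False
    # ... decide whether to backfill and, if so, do the work ...
    if processing_start_time is not None:
        # Only the outer maybe_backfill() call passes a start time, so the
        # observation includes time spent queued on the room's backfill
        # linearizer lock without double-counting nested calls.
        backfill_processing_time.observe(time.monotonic() - processing_start_time)
    return tried_backfill
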
@@ -302,6 +311,14 @@
len(sorted_backfill_points),
sorted_backfill_points,
)
set_tag(
SynapseTags.RESULT_PREFIX + "sorted_backfill_points",
str(sorted_backfill_points),
)
set_tag(
SynapseTags.RESULT_PREFIX + "sorted_backfill_points.length",
str(len(sorted_backfill_points)),
)
# If we have no backfill points lower than the `current_depth` then
# either we can a) bail or b) still attempt to backfill. We opt to try