Merge remote-tracking branch 'origin/develop' into clokep/psycopg3

commit 396fa974a1
@@ -47,7 +47,7 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.9", "3.10", "3.11", "3.12.0-rc.2")
+        for version in ("3.9", "3.10", "3.11", "3.12")
     )

 trial_postgres_tests = [
@@ -62,7 +62,7 @@ trial_postgres_tests = [
 if not IS_PR:
     trial_postgres_tests.append(
         {
-            "python-version": "3.11",
+            "python-version": "3.12",
            "database": "postgres",
            "postgres-version": "16",
            "extras": "all",
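The matrix entries in these hunks come from a job-generation script. As a rough sketch of that pattern (the `IS_PR` detection, the baseline Postgres entry, and the `set_output` helper are illustrative assumptions, not the script's actual code):

```python
# Hypothetical sketch in the style of .ci/scripts/calculate_jobs.py;
# IS_PR detection, the baseline entry, and set_output are assumptions.
import json
import os

IS_PR = os.environ.get("GITHUB_REF", "").startswith("refs/pull/")

trial_sqlite_tests = [
    {"python-version": version, "database": "sqlite", "extras": "all"}
    for version in ("3.9", "3.10", "3.11", "3.12")
]

trial_postgres_tests = [
    {"python-version": "3.9", "database": "postgres", "postgres-version": "11", "extras": "all"},
]
if not IS_PR:
    # Full (non-PR) runs also exercise the newest Python against Postgres 16.
    trial_postgres_tests.append(
        {"python-version": "3.12", "database": "postgres", "postgres-version": "16", "extras": "all"}
    )


def set_output(key: str, value: str) -> None:
    # GitHub Actions collects step outputs appended to the $GITHUB_OUTPUT file.
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{key}={value}\n")


set_output("trial_test_matrix", json.dumps(trial_sqlite_tests + trial_postgres_tests))
```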
@@ -197,11 +197,14 @@ jobs:
        with:
          path: synapse

-      - uses: actions/setup-go@v4
-
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

+      - uses: actions/setup-go@v4
+        with:
+          cache-dependency-path: complement/go.sum
+          go-version-file: complement/go.mod
+
      - run: |
          set -o pipefail
          TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
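The `${{ condition && 1 || '' }}` expressions in the `run` step are GitHub Actions' ternary idiom for deriving environment variables from the job matrix; roughly, in Python terms (the matrix values here are illustrative):

```python
# Rough Python equivalent of the workflow's `&& 1 || ''` expressions;
# the matrix values are illustrative.
matrix = {"database": "Postgres", "arrangement": "workers"}

env = {
    "POSTGRES": 1 if matrix["database"] == "Postgres" else "",
    "WORKERS": 1 if matrix["arrangement"] == "workers" else "",
}
print(env)  # {'POSTGRES': 1, 'WORKERS': 1}
```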
@@ -37,15 +37,18 @@ jobs:
            - 'Cargo.toml'
            - 'Cargo.lock'
            - '.rustfmt.toml'
+            - '.github/workflows/tests.yml'

          trial:
            - 'synapse/**'
            - 'tests/**'
            - 'rust/**'
+            - '.ci/scripts/calculate_jobs.py'
            - 'Cargo.toml'
            - 'Cargo.lock'
            - 'pyproject.toml'
            - 'poetry.lock'
+            - '.github/workflows/tests.yml'

          integration:
            - 'synapse/**'
@@ -56,6 +59,9 @@ jobs:
            - 'pyproject.toml'
            - 'poetry.lock'
            - 'docker/**'
+            - '.ci/**'
+            - 'scripts-dev/complement.sh'
+            - '.github/workflows/tests.yml'

          linting:
            - 'synapse/**'
@@ -69,6 +75,7 @@ jobs:
            - 'mypy.ini'
            - 'pyproject.toml'
            - 'poetry.lock'
+            - '.github/workflows/tests.yml'

  check-sampleconfig:
    runs-on: ubuntu-latest
@@ -280,7 +287,6 @@ jobs:
      - check-lockfile
      - lint-clippy
      - lint-rustfmt
-      - check-signoff
    runs-on: ubuntu-latest
    steps:
      - run: "true"
@@ -627,14 +633,18 @@ jobs:
        uses: dtolnay/rust-toolchain@1.61.0
      - uses: Swatinem/rust-cache@v2

-      - uses: actions/setup-go@v4
-
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

+      - uses: actions/setup-go@v4
+        with:
+          cache-dependency-path: complement/go.sum
+          go-version-file: complement/go.mod
+
+      # use p=1 concurrency as GHA boxes are underpowered and don't like running tons of synapses at once.
      - run: |
          set -o pipefail
-          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
+          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | synapse/.ci/scripts/gotestfmt
        shell: bash
        env:
          POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
@@ -168,11 +168,14 @@ jobs:
        with:
          path: synapse

-      - uses: actions/setup-go@v4
-
      - name: Prepare Complement's Prerequisites
        run: synapse/.ci/scripts/setup_complement_prerequisites.sh

+      - uses: actions/setup-go@v4
+        with:
+          cache-dependency-path: complement/go.sum
+          go-version-file: complement/go.mod
+
      # This step is specific to the 'Twisted trunk' test run:
      - name: Patch dependencies
        run: |
CHANGES.md (200 changed lines)
@@ -1,3 +1,203 @@
+# Synapse 1.96.0rc1 (2023-10-31)
+
+### Features
+
+- Add experimental support to allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432))
+- Add a new module API for controller presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544))
+- Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549))
+- Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570))
+
+### Bugfixes
+
+- Fixed a bug in the example Grafana dashboard that prevents it from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471))
+- Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580))
+- Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync request when there were missing remote events. ([\#16485](https://github.com/matrix-org/synapse/issues/16485))
+- Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. ([\#16504](https://github.com/matrix-org/synapse/issues/16504))
+- Force TLS certificate verification in user registration script. ([\#16530](https://github.com/matrix-org/synapse/issues/16530))
+- Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. ([\#16540](https://github.com/matrix-org/synapse/issues/16540))
+- Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. ([\#16558](https://github.com/matrix-org/synapse/issues/16558))
+- Fix a long-standing bug where invited/knocking users would not leave during a room purge. ([\#16559](https://github.com/matrix-org/synapse/issues/16559))
+
+### Improved Documentation
+
+- Improve documentation of presence router. ([\#16529](https://github.com/matrix-org/synapse/issues/16529))
+- Add a sentence to the [opentracing docs](https://matrix-org.github.io/synapse/latest/opentracing.html) on how you can have jaeger in a different place than synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531))
+- Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541))
+- Pin the recommended poetry version in [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). ([\#16550](https://github.com/matrix-org/synapse/issues/16550))
+- Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. ([\#16569](https://github.com/matrix-org/synapse/issues/16569))
+
+### Internal Changes
+
+- Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). ([\#16492](https://github.com/matrix-org/synapse/issues/16492))
+- Reduce memory allocations. ([\#16505](https://github.com/matrix-org/synapse/issues/16505))
+- Improve replication performance when purging rooms. ([\#16510](https://github.com/matrix-org/synapse/issues/16510))
+- Run tests against Python 3.12. ([\#16511](https://github.com/matrix-org/synapse/issues/16511))
+- Run trial & integration tests in continuous integration when `.ci` directory is modified. ([\#16512](https://github.com/matrix-org/synapse/issues/16512))
+- Remove duplicate call to mark remote server 'awake' when using a federation sending worker. ([\#16515](https://github.com/matrix-org/synapse/issues/16515))
+- Enable dirty runs on Complement CI, which is significantly faster. ([\#16520](https://github.com/matrix-org/synapse/issues/16520))
+- Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521))
+- Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551))
+- Fix running unit tests on Twisted trunk. ([\#16528](https://github.com/matrix-org/synapse/issues/16528))
+- Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555))
+- Stop porting a table in port db that we're going to nuke and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563))
+- Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567))
+- Allow building with `setuptools_rust` 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574))
+
+### Updates to locked dependencies
+
+* Bump black from 23.10.0 to 23.10.1. ([\#16575](https://github.com/matrix-org/synapse/issues/16575))
+* Bump black from 23.9.1 to 23.10.0. ([\#16538](https://github.com/matrix-org/synapse/issues/16538))
+* Bump cryptography from 41.0.4 to 41.0.5. ([\#16572](https://github.com/matrix-org/synapse/issues/16572))
+* Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534))
+* Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576))
+* Bump pygithub from 1.59.1 to 2.1.1. ([\#16535](https://github.com/matrix-org/synapse/issues/16535))
+* Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539))
+* Bump serde from 1.0.189 to 1.0.190. ([\#16577](https://github.com/matrix-org/synapse/issues/16577))
+* Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574))
+* Bump types-pillow from 10.0.0.3 to 10.1.0.0. ([\#16536](https://github.com/matrix-org/synapse/issues/16536))
+* Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15. ([\#16573](https://github.com/matrix-org/synapse/issues/16573))
+* Bump types-requests from 2.31.0.2 to 2.31.0.10. ([\#16537](https://github.com/matrix-org/synapse/issues/16537))
+* Bump urllib3 from 1.26.17 to 1.26.18. ([\#16516](https://github.com/matrix-org/synapse/issues/16516))
+
+
+# Synapse 1.95.1 (2023-10-31)
+
+## Security advisory
+
+The following issue is fixed in 1.95.1.
+
+- [GHSA-mp92-3jfm-3575](https://github.com/matrix-org/synapse/security/advisories/GHSA-mp92-3jfm-3575) / [CVE-2023-43796](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-43796) — Moderate Severity
+
+Cached device information of remote users can be queried from Synapse. This can be used to enumerate the remote users known to a homeserver.
+
+See the advisory for more details. If you have any questions, email security@matrix.org.
+
+
+# Synapse 1.95.0 (2023-10-24)
+
+### Internal Changes
+
+- Build Debian packages for [Ubuntu 23.10 Mantic Minotaur](https://canonical.com/blog/canonical-releases-ubuntu-23-10-mantic-minotaur). ([\#16524](https://github.com/matrix-org/synapse/issues/16524))
+
+
+# Synapse 1.95.0rc1 (2023-10-17)
+
+### Bugfixes
+
+- Remove legacy unspecced `knock_state_events` field returned in some responses. ([\#16403](https://github.com/matrix-org/synapse/issues/16403))
+- Fix a bug introduced in Synapse 1.81.0 where an `AttributeError` would be raised when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch. ([\#16404](https://github.com/matrix-org/synapse/issues/16404))
+- Properly return inline media when content types have parameters. ([\#16440](https://github.com/matrix-org/synapse/issues/16440))
+- Prevent the purging of large rooms from timing out when Postgres is in use. The timeout which causes this issue was introduced in Synapse 1.88.0. ([\#16455](https://github.com/matrix-org/synapse/issues/16455))
+- Improve the performance of purging rooms, particularly encrypted rooms. ([\#16457](https://github.com/matrix-org/synapse/issues/16457))
+- Fix a bug introduced in Synapse 1.59.0 where servers could be incorrectly marked as available after an error response was received. ([\#16506](https://github.com/matrix-org/synapse/issues/16506))
+
+### Improved Documentation
+
+- Document internal background update mechanism. ([\#16420](https://github.com/matrix-org/synapse/issues/16420))
+- Fix a typo in the sql for [useful SQL for admins document](https://matrix-org.github.io/synapse/latest/usage/administration/useful_sql_for_admins.html). ([\#16477](https://github.com/matrix-org/synapse/issues/16477))
+
+### Internal Changes
+
+- Bump pyo3 from 0.17.1 to 0.19.2. ([\#16162](https://github.com/matrix-org/synapse/issues/16162))
+- Update registration of media repository URLs. ([\#16419](https://github.com/matrix-org/synapse/issues/16419))
+- Improve type hints. ([\#16421](https://github.com/matrix-org/synapse/issues/16421), [\#16468](https://github.com/matrix-org/synapse/issues/16468), [\#16469](https://github.com/matrix-org/synapse/issues/16469), [\#16507](https://github.com/matrix-org/synapse/issues/16507))
+- Refactor some code to simplify and better type receipts stream adjacent code. ([\#16426](https://github.com/matrix-org/synapse/issues/16426))
+- Factor out `MultiWriter` token from `RoomStreamToken`. ([\#16427](https://github.com/matrix-org/synapse/issues/16427))
+- Improve code comments. ([\#16428](https://github.com/matrix-org/synapse/issues/16428))
+- Reduce memory allocations. ([\#16429](https://github.com/matrix-org/synapse/issues/16429), [\#16431](https://github.com/matrix-org/synapse/issues/16431), [\#16433](https://github.com/matrix-org/synapse/issues/16433), [\#16434](https://github.com/matrix-org/synapse/issues/16434), [\#16438](https://github.com/matrix-org/synapse/issues/16438), [\#16444](https://github.com/matrix-org/synapse/issues/16444))
+- Remove unused method. ([\#16435](https://github.com/matrix-org/synapse/issues/16435))
+- Improve rate limiting logic. ([\#16441](https://github.com/matrix-org/synapse/issues/16441))
+- Do not block running of CI behind the check for sign-off on PRs. ([\#16454](https://github.com/matrix-org/synapse/issues/16454))
+- Update the release script to remind releaser to check for special release notes. ([\#16461](https://github.com/matrix-org/synapse/issues/16461))
+- Update complement.sh to match new public API shape. ([\#16466](https://github.com/matrix-org/synapse/issues/16466))
+- Clean up logging on event persister endpoints. ([\#16488](https://github.com/matrix-org/synapse/issues/16488))
+- Remove useless async job to delete device messages on sync, since we only deliver (and hence delete) up to 100 device messages at a time. ([\#16491](https://github.com/matrix-org/synapse/issues/16491))
+
+### Updates to locked dependencies
+
+* Bump bleach from 6.0.0 to 6.1.0. ([\#16451](https://github.com/matrix-org/synapse/issues/16451))
+* Bump jsonschema from 4.19.0 to 4.19.1. ([\#16500](https://github.com/matrix-org/synapse/issues/16500))
+* Bump netaddr from 0.8.0 to 0.9.0. ([\#16453](https://github.com/matrix-org/synapse/issues/16453))
+* Bump packaging from 23.1 to 23.2. ([\#16497](https://github.com/matrix-org/synapse/issues/16497))
+* Bump pillow from 10.0.1 to 10.1.0. ([\#16498](https://github.com/matrix-org/synapse/issues/16498))
+* Bump psycopg2 from 2.9.8 to 2.9.9. ([\#16452](https://github.com/matrix-org/synapse/issues/16452))
+* Bump pyo3-log from 0.8.3 to 0.8.4. ([\#16495](https://github.com/matrix-org/synapse/issues/16495))
+* Bump ruff from 0.0.290 to 0.0.292. ([\#16449](https://github.com/matrix-org/synapse/issues/16449))
+* Bump sentry-sdk from 1.31.0 to 1.32.0. ([\#16496](https://github.com/matrix-org/synapse/issues/16496))
+* Bump serde from 1.0.188 to 1.0.189. ([\#16494](https://github.com/matrix-org/synapse/issues/16494))
+* Bump types-bleach from 6.0.0.4 to 6.1.0.0. ([\#16450](https://github.com/matrix-org/synapse/issues/16450))
+* Bump types-jsonschema from 4.17.0.10 to 4.19.0.3. ([\#16499](https://github.com/matrix-org/synapse/issues/16499))
+
+
+# Synapse 1.94.0 (2023-10-10)
+
+No significant changes since 1.94.0rc1.
+However, please take note of the security advisory that follows.
+
+## Security advisory
+
+The following issue is fixed in 1.94.0 (and RC).
+
+- [GHSA-5chr-wjw5-3gq4](https://github.com/matrix-org/synapse/security/advisories/GHSA-5chr-wjw5-3gq4) / [CVE-2023-45129](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-45129) — Moderate Severity
+
+A malicious server ACL event can impact performance temporarily or permanently leading to a persistent denial of service.
+
+Homeservers running on a closed federation (which presumably do not need to use server ACLs) are not affected.
+
+See the advisory for more details. If you have any questions, email security@matrix.org.
+
+
+# Synapse 1.94.0rc1 (2023-10-03)
+
+### Features
+
+- Render plain, CSS, CSV, JSON and common image formats in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988))
+- Add experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361))
+- Minor performance improvement when sending presence to federated servers. ([\#16385](https://github.com/matrix-org/synapse/issues/16385))
+- Minor performance improvement by caching server ACL checking. ([\#16360](https://github.com/matrix-org/synapse/issues/16360))
+
+### Improved Documentation
+
+- Add developer documentation concerning gradual schema migrations with column alterations. ([\#15691](https://github.com/matrix-org/synapse/issues/15691))
+- Improve documentation of the user directory search algorithm. ([\#16320](https://github.com/matrix-org/synapse/issues/16320))
+- Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0. ([\#16355](https://github.com/matrix-org/synapse/issues/16355))
+- Update documentation around message retention policies. ([\#16382](https://github.com/matrix-org/synapse/issues/16382))
+- Add note to `federation_domain_whitelist` config option to clarify its usage. ([\#16416](https://github.com/matrix-org/synapse/issues/16416))
+- Improve legacy release notes. ([\#16418](https://github.com/matrix-org/synapse/issues/16418))
+
+### Deprecations and Removals
+
+- Remove Python version from `/_synapse/admin/v1/server_version`. ([\#16380](https://github.com/matrix-org/synapse/issues/16380))
+
+### Internal Changes
+
+- Avoid running CI steps when the files they check have not been changed. ([\#14745](https://github.com/matrix-org/synapse/issues/14745), [\#16387](https://github.com/matrix-org/synapse/issues/16387))
+- Improve type hints. ([\#14911](https://github.com/matrix-org/synapse/issues/14911), [\#16350](https://github.com/matrix-org/synapse/issues/16350), [\#16356](https://github.com/matrix-org/synapse/issues/16356), [\#16395](https://github.com/matrix-org/synapse/issues/16395))
+- Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). ([\#16332](https://github.com/matrix-org/synapse/issues/16332))
+- Get CI to check PRs have been signed-off. ([\#16348](https://github.com/matrix-org/synapse/issues/16348))
+- Add missing licence header. ([\#16359](https://github.com/matrix-org/synapse/issues/16359))
+- Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381))
+- Improve comments in `StateGroupBackgroundUpdateStore`. ([\#16383](https://github.com/matrix-org/synapse/issues/16383))
+- Update maturin configuration. ([\#16394](https://github.com/matrix-org/synapse/issues/16394))
+- Downgrade replication stream time out error log lines to warning. ([\#16401](https://github.com/matrix-org/synapse/issues/16401))
+
+### Updates to locked dependencies
+
+* Bump actions/checkout from 3 to 4. ([\#16250](https://github.com/matrix-org/synapse/issues/16250))
+* Bump cryptography from 41.0.3 to 41.0.4. ([\#16362](https://github.com/matrix-org/synapse/issues/16362))
+* Bump dawidd6/action-download-artifact from 2.27.0 to 2.28.0. ([\#16374](https://github.com/matrix-org/synapse/issues/16374))
+* Bump docker/setup-buildx-action from 2 to 3. ([\#16375](https://github.com/matrix-org/synapse/issues/16375))
+* Bump gitpython from 3.1.35 to 3.1.37. ([\#16376](https://github.com/matrix-org/synapse/issues/16376))
+* Bump msgpack from 1.0.5 to 1.0.6. ([\#16377](https://github.com/matrix-org/synapse/issues/16377))
+* Bump msgpack from 1.0.6 to 1.0.7. ([\#16412](https://github.com/matrix-org/synapse/issues/16412))
+* Bump phonenumbers from 8.13.19 to 8.13.22. ([\#16413](https://github.com/matrix-org/synapse/issues/16413))
+* Bump psycopg2 from 2.9.7 to 2.9.8. ([\#16409](https://github.com/matrix-org/synapse/issues/16409))
+* Bump pydantic from 2.3.0 to 2.4.2. ([\#16410](https://github.com/matrix-org/synapse/issues/16410))
+* Bump regex from 1.9.5 to 1.9.6. ([\#16408](https://github.com/matrix-org/synapse/issues/16408))
+* Bump sentry-sdk from 1.30.0 to 1.31.0. ([\#16378](https://github.com/matrix-org/synapse/issues/16378))
+* Bump types-netaddr from 0.8.0.9 to 0.9.0.1. ([\#16411](https://github.com/matrix-org/synapse/issues/16411))
+* Bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381))
+* Bump urllib3 from 1.26.15 to 1.26.17. ([\#16422](https://github.com/matrix-org/synapse/issues/16422))
+
+
 # Synapse 1.93.0 (2023-09-26)

 No significant changes since 1.93.0rc1.
@@ -144,9 +144,9 @@ checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c"

 [[package]]
 name = "memoffset"
-version = "0.6.5"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
 dependencies = [
  "autocfg",
 ]
@@ -191,9 +191,9 @@ dependencies = [

 [[package]]
 name = "pyo3"
-version = "0.17.3"
+version = "0.19.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
+checksum = "e681a6cfdc4adcc93b4d3cf993749a4552018ee0a9b65fc0ccfad74352c72a38"
 dependencies = [
  "anyhow",
  "cfg-if",
@@ -209,9 +209,9 @@ dependencies = [

 [[package]]
 name = "pyo3-build-config"
-version = "0.17.3"
+version = "0.19.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
+checksum = "076c73d0bc438f7a4ef6fdd0c3bb4732149136abd952b110ac93e4edb13a6ba5"
 dependencies = [
  "once_cell",
  "target-lexicon",
@@ -219,9 +219,9 @@ dependencies = [

 [[package]]
 name = "pyo3-ffi"
-version = "0.17.3"
+version = "0.19.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
+checksum = "e53cee42e77ebe256066ba8aa77eff722b3bb91f3419177cf4cd0f304d3284d9"
 dependencies = [
  "libc",
  "pyo3-build-config",
@@ -229,9 +229,9 @@ dependencies = [

 [[package]]
 name = "pyo3-log"
-version = "0.8.3"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
+checksum = "c09c2b349b6538d8a73d436ca606dab6ce0aaab4dad9e6b7bdd57a4f556c3bc3"
 dependencies = [
  "arc-swap",
  "log",
@@ -240,9 +240,9 @@ dependencies = [

 [[package]]
 name = "pyo3-macros"
-version = "0.17.3"
+version = "0.19.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
+checksum = "dfeb4c99597e136528c6dd7d5e3de5434d1ceaf487436a3f03b2d56b6fc9efd1"
 dependencies = [
  "proc-macro2",
  "pyo3-macros-backend",
@@ -252,9 +252,9 @@ dependencies = [

 [[package]]
 name = "pyo3-macros-backend"
-version = "0.17.3"
+version = "0.19.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
+checksum = "947dc12175c254889edc0c02e399476c2f652b4b9ebd123aa655c224de259536"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -263,9 +263,9 @@ dependencies = [

 [[package]]
 name = "pythonize"
-version = "0.17.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
+checksum = "8e35b716d430ace57e2d1b4afb51c9e5b7c46d2bce72926e07f9be6a98ced03e"
 dependencies = [
  "pyo3",
  "serde",
@@ -291,9 +291,9 @@ dependencies = [

 [[package]]
 name = "regex"
-version = "1.9.5"
+version = "1.9.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47"
+checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -303,9 +303,9 @@ dependencies = [

 [[package]]
 name = "regex-automata"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795"
+checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

 [[package]]
 name = "serde"
-version = "1.0.188"
+version = "1.0.190"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e"
+checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.188"
+version = "1.0.190"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
+checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -352,9 +352,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.107"
+version = "1.0.108"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
+checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
 dependencies = [
  "itoa",
  "ryu",
@@ -122,7 +122,7 @@ You will need to change the server you are logging into from ``matrix.org``
 and instead specify a Homeserver URL of ``https://<server_name>:8448``
 (or just ``https://<server_name>`` if you are using a reverse proxy).
 If you prefer to use another client, refer to our
-`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
+`client breakdown <https://matrix.org/ecosystem/clients/>`_.

 If all goes well you should at least be able to log in, create a room, and
 start sending messages.
@@ -1 +0,0 @@
-Avoid running CI steps when the files they check have not been changed.
@@ -1 +0,0 @@
-Add developer documentation concerning gradual schema migrations with column alterations.
@@ -1 +0,0 @@
-Improve documentation of the user directory search algorithm.
@@ -1 +0,0 @@
-Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23).
@@ -1 +0,0 @@
-Get CI to check PRs have been signed-off.
@@ -1 +0,0 @@
-Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0.
@@ -1 +0,0 @@
-Improve type hints.
@@ -1 +0,0 @@
-Add missing licence header.
@@ -1 +0,0 @@
-Cache server ACL checking.
@@ -1 +0,0 @@
-Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients.
@@ -1 +0,0 @@
-Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14.
@@ -1 +0,0 @@
-Update documentation around message retention policies.
@@ -1 +0,0 @@
-Improve comments in `StateGroupBackgroundUpdateStore`.
@@ -1 +0,0 @@
-Minor performance improvement when sending presence to federated servers.
@@ -1 +0,0 @@
-Avoid running CI steps when the files they check have not been changed.
@@ -1 +0,0 @@
-Update maturin configuration.
@@ -1 +0,0 @@
-Improve type hints.
@@ -0,0 +1 @@
+Support reactor tick timings on more types of event loops.
@@ -0,0 +1 @@
+Avoid executing no-op queries.
@@ -0,0 +1 @@
+Simplify persistence code to be per-room.
@@ -0,0 +1 @@
+Use standard SQL helpers in persistence code.
@@ -0,0 +1 @@
+Avoid updating the stream cache unnecessarily.
@@ -0,0 +1 @@
+Bump twisted from 23.8.0 to 23.10.0.
@@ -0,0 +1 @@
+Improve performance when using opentracing.
@@ -0,0 +1 @@
+Run push rule evaluator setup in parallel.
@@ -0,0 +1 @@
+Improve tests of the SQL generator.
@@ -0,0 +1 @@
+Bump setuptools-rust from 1.8.0 to 1.8.1.
@@ -0,0 +1 @@
+Fix a long-standing bug where some queries updated the same row twice. Introduced in Synapse 1.57.0.
File diff suppressed because it is too large
@@ -1,3 +1,39 @@
+matrix-synapse-py3 (1.96.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.96.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 31 Oct 2023 14:09:09 +0000
+
+matrix-synapse-py3 (1.95.1) stable; urgency=medium
+
+  * New Synapse release 1.95.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 31 Oct 2023 14:00:00 +0000
+
+matrix-synapse-py3 (1.95.0) stable; urgency=medium
+
+  * New Synapse release 1.95.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 24 Oct 2023 13:00:46 +0100
+
+matrix-synapse-py3 (1.95.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.95.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 17 Oct 2023 15:50:17 +0000
+
+matrix-synapse-py3 (1.94.0) stable; urgency=medium
+
+  * New Synapse release 1.94.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 10 Oct 2023 10:57:41 +0100
+
+matrix-synapse-py3 (1.94.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.94.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 03 Oct 2023 11:48:18 +0100
+
 matrix-synapse-py3 (1.93.0) stable; urgency=medium

   * New Synapse release 1.93.0.
@@ -68,6 +68,11 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then

   fi
   log "Workers requested: $SYNAPSE_WORKER_TYPES"
+  # adjust connection pool limits on worker mode as otherwise running lots of worker synapses
+  # can make docker unhappy (in GHA)
+  export POSTGRES_CP_MIN=1
+  export POSTGRES_CP_MAX=3
+  echo "using reduced connection pool limits for worker mode"
   # Improve startup times by using a launcher based on fork()
   export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
 else
@@ -67,8 +67,8 @@ database:
     host: "{{ POSTGRES_HOST or "db" }}"
     port: "{{ POSTGRES_PORT or "5432" }}"
 {% endif %}
-    cp_min: 5
-    cp_max: 10
+    cp_min: {{ POSTGRES_CP_MIN or 5 }}
+    cp_max: {{ POSTGRES_CP_MAX or 10 }}
 {% else %}
 database:
   name: "sqlite3"
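With this change, the `POSTGRES_CP_MIN`/`POSTGRES_CP_MAX` values exported by the Complement startup script above flow through to the rendered config, falling back to the old defaults when unset. A minimal sketch of that `{{ VAR or default }}` behaviour, assuming the `jinja2` package:

```python
# Minimal sketch of the `{{ VAR or default }}` fallback used in the template
# above; the values passed to render() are illustrative.
from jinja2 import Template

tmpl = Template("cp_min: {{ POSTGRES_CP_MIN or 5 }}\ncp_max: {{ POSTGRES_CP_MAX or 10 }}")

print(tmpl.render())
# cp_min: 5
# cp_max: 10

print(tmpl.render(POSTGRES_CP_MIN=1, POSTGRES_CP_MAX=3))
# cp_min: 1
# cp_max: 3
```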
@@ -19,7 +19,7 @@
 # Usage
   - [Federation](federate.md)
   - [Configuration](usage/configuration/README.md)
-  - [Configuration Manual](usage/configuration/config_documentation.md)
+    - [Configuration Manual](usage/configuration/config_documentation.md)
   - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
   - [Logging Sample Config File](usage/configuration/logging_sample_config.md)
   - [Structured Logging](structured_logging.md)
@@ -48,6 +48,7 @@
   - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
   - [Background update controller callbacks](modules/background_update_controller_callbacks.md)
   - [Account data callbacks](modules/account_data_callbacks.md)
+  - [Add extra fields to client events unsigned section callbacks](modules/add_extra_fields_to_client_events_unsigned.md)
   - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
 - [Workers](workers.md)
   - [Using `synctl` with Workers](synctl_workers.md)
@@ -1,7 +1,7 @@
 # Version API

-This API returns the running Synapse version and the Python version
-on which Synapse is being run. This is useful when a Synapse instance
+This API returns the running Synapse version.
+This is useful when a Synapse instance
 is behind a proxy that does not forward the 'Server' header (which also
 contains Synapse version information).

@@ -15,7 +15,9 @@ It returns a JSON body like the following:

 ```json
 {
-    "server_version": "0.99.2rc1 (b=develop, abcdef123)",
-    "python_version": "3.7.8"
+    "server_version": "0.99.2rc1 (b=develop, abcdef123)"
 }
 ```
+
+*Changed in Synapse 1.94.0:* The `python_version` key was removed from the
+response body.
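For reference, the endpoint is `GET /_synapse/admin/v1/server_version` and needs no request body; a small usage sketch with the standard library (the homeserver base URL is an assumption):

```python
# Hypothetical usage of the version API documented above; adjust the base
# URL for your deployment.
import json
from urllib.request import urlopen

with urlopen("http://localhost:8008/_synapse/admin/v1/server_version") as resp:
    body = json.load(resp)

print(body["server_version"])  # e.g. "0.99.2rc1 (b=develop, abcdef123)"
```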
@@ -1186,9 +1186,9 @@ Synapse 0.33.0rc1 (2018-07-18)
 Features
 --------

-- Enforce the specified API for report\_event. ([\#3316](https://github.com/matrix-org/synapse/issues/3316))
+- Enforce the specified API for `report_event`. ([\#3316](https://github.com/matrix-org/synapse/issues/3316))
 - Include CPU time from database threads in request/block metrics. ([\#3496](https://github.com/matrix-org/synapse/issues/3496), [\#3501](https://github.com/matrix-org/synapse/issues/3501))
-- Add CPU metrics for \_fetch\_event\_list. ([\#3497](https://github.com/matrix-org/synapse/issues/3497))
+- Add CPU metrics for `_fetch_event_list`. ([\#3497](https://github.com/matrix-org/synapse/issues/3497))
 - Optimisation to make handling incoming federation requests more efficient. ([\#3541](https://github.com/matrix-org/synapse/issues/3541))

 Bugfixes
@@ -1238,19 +1238,19 @@ Features
 - Add metrics to track appservice transactions ([\#3344](https://github.com/matrix-org/synapse/issues/3344))
 - Try to log more helpful info when a sig verification fails ([\#3372](https://github.com/matrix-org/synapse/issues/3372))
 - Synapse now uses the best performing JSON encoder/decoder according to your runtime (simplejson on CPython, stdlib json on PyPy). ([\#3462](https://github.com/matrix-org/synapse/issues/3462))
-- Add optional ip\_range\_whitelist param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465))
+- Add optional `ip_range_whitelist` param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465))
 - Reject invalid server names in federation requests ([\#3480](https://github.com/matrix-org/synapse/issues/3480))
 - Reject invalid server names in homeserver.yaml ([\#3483](https://github.com/matrix-org/synapse/issues/3483))

 Bugfixes
 --------

-- Strip access\_token from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327))
+- Strip `access_token` from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327))
 - Redact AS tokens in logs ([\#3349](https://github.com/matrix-org/synapse/issues/3349))
 - Fix federation backfill from SQLite servers ([\#3355](https://github.com/matrix-org/synapse/issues/3355))
 - Fix event-purge-by-ts admin API ([\#3363](https://github.com/matrix-org/synapse/issues/3363))
-- Fix event filtering in get\_missing\_events handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371))
-- Synapse is now stricter regarding accepting events which it cannot retrieve the prev\_events for. ([\#3456](https://github.com/matrix-org/synapse/issues/3456))
+- Fix event filtering in `get_missing_events` handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371))
+- Synapse is now stricter regarding accepting events which it cannot retrieve the `prev_events` for. ([\#3456](https://github.com/matrix-org/synapse/issues/3456))
 - Fix bug where synapse would explode when receiving unicode in HTTP User-Agent header ([\#3470](https://github.com/matrix-org/synapse/issues/3470))
 - Invalidate cache on correct thread to avoid race ([\#3473](https://github.com/matrix-org/synapse/issues/3473))

@@ -1262,7 +1262,7 @@ Improved Documentation
 Deprecations and Removals
 -------------------------

-- Remove was\_forgotten\_at ([\#3324](https://github.com/matrix-org/synapse/issues/3324))
+- Remove `was_forgotten_at` ([\#3324](https://github.com/matrix-org/synapse/issues/3324))

 Misc
 ----
@@ -1285,7 +1285,7 @@ We are not aware of it being actively exploited but please upgrade asap.

 Bug Fixes:

-- Fix event filtering in get\_missing\_events handler (PR #3371)
+- Fix event filtering in `get_missing_events` handler (PR #3371)

 Changes in synapse v0.31.0 (2018-06-06)
 =======================================
@@ -1309,7 +1309,7 @@ Features:
 Changes:

 - daily user type phone home stats (PR #3264)
-- Use iter\* methods for \_filter\_events\_for\_server (PR #3267)
+- Use `iter*` methods for `_filter_events_for_server` (PR #3267)
 - Docs on consent bits (PR #3268)
 - Remove users from user directory on deactivate (PR #3277)
 - Avoid sending consent notice to guest users (PR #3288)
@@ -1323,10 +1323,10 @@ Changes, python 3 migration:

 - Replace some more comparisons with six (PR #3243) Thanks to @NotAFile!
 - replace some iteritems with six (PR #3244) Thanks to @NotAFile!
-- Add batch\_iter to utils (PR #3245) Thanks to @NotAFile!
+- Add `batch_iter` to utils (PR #3245) Thanks to @NotAFile!
 - use repr, not str (PR #3246) Thanks to @NotAFile!
 - Misc Python3 fixes (PR #3247) Thanks to @NotAFile!
-- Py3 storage/\_base.py (PR #3278) Thanks to @NotAFile!
+- Py3 `storage/_base.py` (PR #3278) Thanks to @NotAFile!
 - more six iteritems (PR #3279) Thanks to @NotAFile!
 - More Misc. py3 fixes (PR #3280) Thanks to @NotAFile!
 - remaining isintance fixes (PR #3281) Thanks to @NotAFile!
@@ -1342,7 +1342,7 @@ Bugs:
 Changes in synapse v0.30.0 (2018-05-24)
 =======================================

-\'Server Notices\' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server.
+"Server Notices" are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server.

 They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as "Message of the day".

@@ -1350,9 +1350,9 @@ This feature is specific to Synapse, but uses standard Matrix communication mech

 Further Server Notices/Consent Tracking Support:

-- Allow overriding the server\_notices user's avatar (PR #3273)
+- Allow overriding the `server_notices` user's avatar (PR #3273)
 - Use the localpart in the consent uri (PR #3272)
-- Support for putting %(consent\_uri)s in messages (PR #3271)
+- Support for putting `%(consent_uri)s` in messages (PR #3271)
 - Block attempts to send server notices to remote users (PR #3270)
 - Docs on consent bits (PR #3268)

@@ -1366,7 +1366,7 @@ Server Notices/Consent Tracking Support:
 - Infrastructure for a server notices room (PR #3232)
 - Send users a server notice about consent (PR #3236)
 - Reject attempts to send event before privacy consent is given (PR #3257)
-- Add a \'has\_consented\' template var to consent forms (PR #3262)
+- Add a `has_consented` template var to consent forms (PR #3262)
 - Fix dependency on jinja2 (PR #3263)

 Features:
@@ -1377,9 +1377,9 @@ Features:

 Changes:

-- Remove unused update\_external\_syncs (PR #3233)
+- Remove unused `update_external_syncs` (PR #3233)
 - Use stream rather depth ordering for push actions (PR #3212)
-- Make purge\_history operate on tokens (PR #3221)
+- Make `purge_history` operate on tokens (PR #3221)
 - Don't support limitless pagination (PR #3265)

 Bug Fixes:
@@ -1421,29 +1421,29 @@ Changes - General:

 - nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
 - Part user from rooms on account deactivate (PR #3201)
-- Make \'unexpected logging context\' into warnings (PR #3007)
+- Make "unexpected logging context" into warnings (PR #3007)
 - Set Server header in SynapseRequest (PR #3208)
 - remove duplicates from groups tables (PR #3129)
 - Improve exception handling for background processes (PR #3138)
 - Add missing consumeErrors to improve exception handling (PR #3139)
 - reraise exceptions more carefully (PR #3142)
-- Remove redundant call to preserve\_fn (PR #3143)
-- Trap exceptions thrown within run\_in\_background (PR #3144)
+- Remove redundant call to `preserve_fn` (PR #3143)
+- Trap exceptions thrown within `run_in_background` (PR #3144)

 Changes - Refactors:

 - Refactor /context to reuse pagination storage functions (PR #3193)
 - Refactor recent events func to use pagination func (PR #3195)
 - Refactor pagination DB API to return concrete type (PR #3196)
-- Refactor get\_recent\_events\_for\_room return type (PR #3198)
+- Refactor `get_recent_events_for_room` return type (PR #3198)
 - Refactor sync APIs to reuse pagination API (PR #3199)
 - Remove unused code path from member change DB func (PR #3200)
 - Refactor request handling wrappers (PR #3203)
-- transaction\_id, destination defined twice (PR #3209) Thanks to @damir-manapov!
+- `transaction_id`, destination defined twice (PR #3209) Thanks to @damir-manapov!
 - Refactor event storage to prepare for changes in state calculations (PR #3141)
 - Set Server header in SynapseRequest (PR #3208)
-- Use deferred.addTimeout instead of time\_bound\_deferred (PR #3127, #3178)
-- Use run\_in\_background in preference to preserve\_fn (PR #3140)
+- Use deferred.addTimeout instead of `time_bound_deferred` (PR #3127, #3178)
+- Use `run_in_background` in preference to `preserve_fn` (PR #3140)

 Changes - Python 3 migration:
@@ -1463,29 +1463,29 @@ Changes - Python 3 migration:

 Bug Fixes:

-- synapse fails to start under Twisted \>= 18.4 (PR #3157)
+- synapse fails to start under Twisted >= 18.4 (PR #3157)
 - Fix a class of logcontext leaks (PR #3170)
 - Fix a couple of logcontext leaks in unit tests (PR #3172)
 - Fix logcontext leak in media repo (PR #3174)
 - Escape label values in prometheus metrics (PR #3175, #3186)
-- Fix \'Unhandled Error\' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
+- Fix "Unhandled Error" logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
 - Fix logcontext leaks in rate limiter (PR #3183)
-- notifications: Convert next\_token to string according to the spec (PR #3190) Thanks to @mujx!
+- notifications: Convert `next_token` to string according to the spec (PR #3190) Thanks to @mujx!
 - nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
-- add guard for None on purge\_history api (PR #3160) Thanks to @krombel!
+- add guard for None on `purge_history` api (PR #3160) Thanks to @krombel!

 Changes in synapse v0.28.1 (2018-05-01)
 =======================================

 SECURITY UPDATE

-- Clamp the allowed values of event depth received over federation to be \[0, 2\^63 - 1\]. This mitigates an attack where malicious events injected with depth = 2\^63 - 1 render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using stream\_ordering rather than depth (topological\_ordering).
+- Clamp the allowed values of event depth received over federation to be `[0, 2^63 - 1]`. This mitigates an attack where malicious events injected with `depth = 2^63 - 1` render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using `stream_ordering` rather than `depth` (topological ordering).

 This is a temporary solution to mitigate abuse in the wild, whilst a long term solution is being implemented to improve how the depth parameter is used.

 Full details at <https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI>

-- Pin Twisted to \<18.4 until we stop using the private \_OpenSSLECCurve API.
+- Pin Twisted to <18.4 until we stop using the private `_OpenSSLECCurve` API.

 Changes in synapse v0.28.0 (2018-04-26)
 =======================================
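The clamp described in the security note above is straightforward to state precisely; an illustrative sketch (not Synapse's actual implementation):

```python
# Illustrative sketch of clamping federation event depth to [0, 2**63 - 1];
# not Synapse's actual code.
MAX_DEPTH = 2**63 - 1


def clamp_depth(depth: int) -> int:
    # Out-of-range depths are pulled back into the allowed interval.
    return max(0, min(depth, MAX_DEPTH))


assert clamp_depth(-5) == 0
assert clamp_depth(2**63) == MAX_DEPTH
assert clamp_depth(42) == 42
```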
@@ -1510,7 +1510,7 @@ Features:
 Changes:

 - Synapse on PyPy (PR #2760) Thanks to @Valodim!
-- move handling of auto\_join\_rooms to RegisterHandler (PR #2996) Thanks to @krombel!
+- move handling of `auto_join_rooms` to RegisterHandler (PR #2996) Thanks to @krombel!
 - Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
 - Document the behaviour of ResponseCache (PR #3059)
 - Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
@@ -1524,15 +1524,15 @@ Changes:
 - Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
 - Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
 - Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
-- Refactor store.have\_events (PR #3117)
+- Refactor `store.have_events` (PR #3117)

 Bug Fixes:

-- Return 401 for invalid access\_token on logout (PR #2938) Thanks to @dklug!
+- Return 401 for invalid `access_token` on logout (PR #2938) Thanks to @dklug!
 - Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
-- fix federation\_domain\_whitelist (PR #3099)
-- Avoid creating events with huge numbers of prev\_events (PR #3113)
-- Reject events which have lots of prev\_events (PR #3118)
+- fix `federation_domain_whitelist` (PR #3099)
+- Avoid creating events with huge numbers of `prev_events` (PR #3113)
+- Reject events which have lots of `prev_events` (PR #3118)

 Changes in synapse v0.27.4 (2018-04-13)
 =======================================
|
@ -1556,12 +1556,13 @@ v0.27.3-rc1 used a stale version of the develop branch so the changelog overstat
|
|||
Changes in synapse v0.27.3-rc1 (2018-04-09)
|
||||
===========================================
|
||||
|
||||
Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording \'r30\' stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:-
|
||||
Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording "r30" stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:-
|
||||
|
||||
Counts the number of native 30 day retained users, defined as:- \* Users who have created their accounts more than 30 days
|
||||
Counts the number of native 30 day retained users, defined as:
|
||||
|
||||
: - Where last seen at most 30 days ago
|
||||
- Where account creation and last\_seen are \> 30 days\"
|
||||
- Users who have created their accounts more than 30 days
|
||||
- Where last seen at most 30 days ago
|
||||
- Where account creation and `last_seen` are > 30 days
|
||||
|
||||
Features:
|
||||
|
||||
|
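The cleaned-up "r30" definition above reads more clearly as a predicate; an illustrative sketch (the function and field names are assumptions, all timestamps in seconds):

```python
# Illustrative predicate for the "r30" retained-user definition above;
# names are hypothetical, timestamps in seconds.
THIRTY_DAYS = 30 * 24 * 60 * 60


def is_r30_user(created_ts: int, last_seen_ts: int, now: int) -> bool:
    return (
        now - created_ts > THIRTY_DAYS                 # account older than 30 days
        and now - last_seen_ts <= THIRTY_DAYS          # seen within the last 30 days
        and last_seen_ts - created_ts > THIRTY_DAYS    # retained: active >30 days after creation
    )
```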
@@ -1577,9 +1578,9 @@ Features:
 Changes:

 - Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
-- Replace old style error catching with \'as\' keyword (PR #3000) Thanks to @NotAFile!
-- Use .iter\* to avoid copies in StateHandler (PR #3006)
-- Linearize calls to \_generate\_user\_id (PR #3029)
+- Replace old style error catching with `as` keyword (PR #3000) Thanks to @NotAFile!
+- Use `.iter*` to avoid copies in StateHandler (PR #3006)
+- Linearize calls to `_generate_user_id` (PR #3029)
 - Remove last usage of ujson (PR #3030)
 - Use simplejson throughout (PR #3048)
 - Use static JSONEncoders (PR #3049)
@@ -1588,13 +1589,13 @@ Changes:

 Bug fixes:

-- Add room\_id to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte!
+- Add `room_id` to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte!
 - Fix replication after switch to simplejson (PR #3015)
 - 404 correctly on missing paths via NoResource (PR #3022)
 - Fix error when claiming e2e keys from offline servers (PR #3034)
-- fix tests/storage/test\_user\_directory.py (PR #3042)
-- use PUT instead of POST for federating groups/m.join\_policy (PR #3070) Thanks to @krombel!
-- postgres port script: fix state\_groups\_pkey error (PR #3072)
+- fix `tests/storage/test_user_directory.py` (PR #3042)
+- use `PUT` instead of `POST` for federating `groups`/`m.join_policy` (PR #3070) Thanks to @krombel!
+- postgres port script: fix `state_groups_pkey` error (PR #3072)

 Changes in synapse v0.27.2 (2018-03-26)
 =======================================
@@ -1640,7 +1641,7 @@ Features:

 - Add ability for ASes to override message send time (PR #2754)
 - Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
-- Add purge API features, see [docs/admin\_api/purge\_history\_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
+- Add purge API features, see [docs/admin_api/purge_history_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
 - Add support for whitelisting 3PIDs that users can register. (PR #2813)
 - Add `/room/{id}/event/{id}` API (PR #2766)
 - Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
@@ -1669,8 +1670,8 @@ Bug fixes:
 - Fix publicised groups GET API (singular) over federation (PR #2772)
 - Fix user directory when using `user_directory_search_all_users` config option (PR #2803, #2831)
 - Fix error on `/publicRooms` when no rooms exist (PR #2827)
-- Fix bug in quarantine\_media (PR #2837)
-- Fix url\_previews when no Content-Type is returned from URL (PR #2845)
+- Fix bug in `quarantine_media` (PR #2837)
+- Fix `url_previews` when no `Content-Type` is returned from URL (PR #2845)
 - Fix rare race in sync API when joining room (PR #2944)
 - Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)

@@ -1685,27 +1686,27 @@ Changes in synapse v0.26.0-rc1 (2017-12-13)
 Features:

 - Add ability for ASes to publicise groups for their users (PR #2686)
-- Add all local users to the user\_directory and optionally search them (PR #2723)
+- Add all local users to the `user_directory` and optionally search them (PR #2723)
 - Add support for custom login types for validating users (PR #2729)

 Changes:

 - Update example Prometheus config to new format (PR #2648) Thanks to @krombel!
-- Rename redact\_content option to include\_content in Push API (PR #2650)
+- Rename `redact_content` option to `include_content` in Push API (PR #2650)
 - Declare support for r0.3.0 (PR #2677)
 - Improve upserts (PR #2684, #2688, #2689, #2713)
 - Improve documentation of workers (PR #2700)
 - Improve tracebacks on exceptions (PR #2705)
 - Allow guest access to group APIs for reading (PR #2715)
-- Support for posting content in federation\_client script (PR #2716)
+- Support for posting content in `federation_client` script (PR #2716)
 - Delete devices and pushers on logouts etc (PR #2722)

 Bug fixes:

 - Fix database port script (PR #2673)
-- Fix internal server error on login with ldap\_auth\_provider (PR #2678) Thanks to @jkolo!
+- Fix internal server error on login with `ldap_auth_provider` (PR #2678) Thanks to @jkolo!
 - Fix error on sqlite 3.7 (PR #2697)
-- Fix OPTIONS on preview\_url (PR #2707)
+- Fix `OPTIONS` on `preview_url` (PR #2707)
 - Fix error handling on dns lookup (PR #2711)
 - Fix wrong avatars when inviting multiple users when creating room (PR #2717)
 - Fix 500 when joining matrix-dev (PR #2719)
@ -1729,7 +1730,7 @@ Changes in synapse v0.25.0-rc1 (2017-11-14)
|
|||
|
||||
Features:
|
||||
|
||||
- Add is\_public to groups table to allow for private groups (PR #2582)
|
||||
- Add `is_public` to groups table to allow for private groups (PR #2582)
|
||||
- Add a route for determining who you are (PR #2668) Thanks to @turt2live!
|
||||
- Add more features to the password providers (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629)
|
||||
- Add a hook for custom rest endpoints (PR #2627)
|
||||
|
@ -1737,7 +1738,7 @@ Features:
|
|||
|
||||
Changes:
|
||||
|
||||
- Ignore \<noscript\> tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt!
|
||||
- Ignore `<noscript\>` tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt!
|
||||
- Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel!
|
||||
- Support /keys/upload on /r0 as well as /unstable (PR #2585)
|
||||
- Front-end proxy: pass through auth header (PR #2586)
|
||||
|
@ -1745,9 +1746,9 @@ Changes:
|
|||
- Remove refresh tokens (PR #2613)
|
||||
- Automatically set default displayname on register (PR #2617)
|
||||
- Log login requests (PR #2618)
|
||||
- Always return is\_public in the /groups/:group\_id/rooms API (PR #2630)
|
||||
- Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630)
|
||||
- Avoid no-op media deletes (PR #2637) Thanks to @spantaleev!
|
||||
- Fix various embarrassing typos around user\_directory and add some doc. (PR #2643)
|
||||
- Fix various embarrassing typos around `user_directory` and add some doc. (PR #2643)
|
||||
- Return whether a user is an admin within a group (PR #2647)
|
||||
- Namespace visibility options for groups (PR #2657)
|
||||
- Downcase UserIDs on registration (PR #2662)
|
||||
|
@ -1760,7 +1761,7 @@ Bug fixes:
|
|||
- Fix UI auth when deleting devices (PR #2591)
|
||||
- Fix typo when checking if user is invited to group (PR #2599)
|
||||
- Fix the port script to drop NUL values in all tables (PR #2611)
|
||||
- Fix appservices being backlogged and not receiving new events due to a bug in notify\_interested\_services (PR #2631) Thanks to @xyzz!
|
||||
- Fix appservices being backlogged and not receiving new events due to a bug in `notify_interested_services` (PR #2631) Thanks to @xyzz!
|
||||
- Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima!
|
||||
- Fix bug in state group storage (PR #2649)
|
||||
- Fix 500 on invalid utf-8 in request (PR #2663)
|
||||
|
@ -1794,7 +1795,7 @@ Changes:
|
|||
- Ignore incoming events for rooms that we have left (PR #2490)
|
||||
- Allow spam checker to reject invites too (PR #2492)
|
||||
- Add room creation checks to spam checker (PR #2495)
|
||||
- Spam checking: add the invitee to user\_may\_invite (PR #2502)
|
||||
- Spam checking: add the invitee to `user_may_invite` (PR #2502)
|
||||
- Process events from federation for different rooms in parallel (PR #2520)
|
||||
- Allow error strings from spam checker (PR #2531)
|
||||
- Improve error handling for missing files in config (PR #2551)
|
||||
|
@ -1805,7 +1806,7 @@ Bug fixes:
|
|||
- Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline!
|
||||
- Fix notification keywords that start/end with non-word chars (PR #2500)
|
||||
- Fix stack overflow and logcontexts from linearizer (PR #2532)
|
||||
- Fix 500 error when fields missing from power\_levels event (PR #2552)
|
||||
- Fix 500 error when fields missing from `power_levels` event (PR #2552)
|
||||
- Fix 500 error when we get an error handling a PDU (PR #2553)
|
||||
|
||||
Changes in synapse v0.23.1 (2017-10-02)
|
||||
|
@ -1813,7 +1814,7 @@ Changes in synapse v0.23.1 (2017-10-02)
|
|||
|
||||
Changes:
|
||||
|
||||
- Make \'affinity\' package optional, as it is not supported on some platforms
|
||||
- Make `affinity` package optional, as it is not supported on some platforms
|
||||
|
||||
Changes in synapse v0.23.0 (2017-10-02)
|
||||
=======================================
|
||||
|
@ -1833,7 +1834,7 @@ Changes in synapse v0.23.0-rc1 (2017-09-25)
|
|||
Features:
|
||||
|
||||
- Add a frontend proxy worker (PR #2344)
|
||||
- Add support for event\_id\_only push format (PR #2450)
|
||||
- Add support for `event_id_only` push format (PR #2450)
|
||||
- Add a PoC for filtering spammy events (PR #2456)
|
||||
- Add a config option to block all room invites (PR #2457)
|
||||
|
||||
|
@ -1897,12 +1898,12 @@ Changes:
|
|||
- Deduplicate sync filters (PR #2219) Thanks to @krombel!
|
||||
- Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist!
|
||||
- Add count of one time keys to sync stream (PR #2237)
|
||||
- Only store event\_auth for state events (PR #2247)
|
||||
- Only store `event_auth` for state events (PR #2247)
|
||||
- Store URL cache preview downloads separately (PR #2299)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
- Fix users not getting notifications when AS listened to that user\_id (PR #2216) Thanks to @slipeer!
|
||||
- Fix users not getting notifications when AS listened to that `user_id` (PR #2216) Thanks to @slipeer!
|
||||
- Fix users without push set up not getting notifications after joining rooms (PR #2236)
|
||||
- Fix preview url API to trim long descriptions (PR #2243)
|
||||
- Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart (PR #2263)
|
||||
|
@ -1935,7 +1936,7 @@ Changes:
|
|||
- Update username availability checker API (PR #2209, #2213)
|
||||
- When purging, Don't de-delta state groups we're about to delete (PR #2214)
|
||||
- Documentation to check synapse version (PR #2215) Thanks to @hamber-dick!
|
||||
- Add an index to event\_search to speed up purge history API (PR #2218)
|
||||
- Add an index to `event_search` to speed up purge history API (PR #2218)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
|
@ -2004,7 +2005,7 @@ Changes in synapse v0.20.0-rc1 (2017-03-30)
|
|||
|
||||
Features:
|
||||
|
||||
- Add delete\_devices API (PR #1993)
|
||||
- Add `delete_devices` API (PR #1993)
|
||||
- Add phone number registration/login support (PR #1994, #2055)
|
||||
|
||||
Changes:
|
||||
|
@ -2024,12 +2025,12 @@ Changes:
|
|||
|
||||
Bug fixes:
|
||||
|
||||
- Fix bug where current\_state\_events renamed to current\_state\_ids (PR #1849)
|
||||
- Fix bug where `current_state_events` renamed to `current_state_ids` (PR #1849)
|
||||
- Fix routing loop when fetching remote media (PR #1992)
|
||||
- Fix current\_state\_events table to not lie (PR #1996)
|
||||
- Fix `current_state_events` table to not lie (PR #1996)
|
||||
- Fix CAS login to handle PartialDownloadError (PR #1997)
|
||||
- Fix assertion to stop transaction queue getting wedged (PR #2010)
|
||||
- Fix presence to fallback to last\_active\_ts if it beats the last sync time. Thanks @Half-Shot! (PR #2014)
|
||||
- Fix presence to fallback to `last_active_ts` if it beats the last sync time. Thanks @Half-Shot! (PR #2014)
|
||||
- Fix bug when federation received a PDU while a room join is in progress (PR #2016)
|
||||
- Fix resetting state on rejected events (PR #2025)
|
||||
- Fix installation issues in readme. Thanks @ricco386 (PR #2037)
|
||||
|
@ -2064,7 +2065,7 @@ Changes:
|
|||
|
||||
Bug fixes:
|
||||
|
||||
- Fix synapse\_port\_db failure. Thanks to Pneumaticat! (PR #1904)
|
||||
- Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904)
|
||||
- Fix caching to not cache error responses (PR #1913)
|
||||
- Fix APIs to make kick & ban reasons work (PR #1917)
|
||||
- Fix bugs in the /keys/changes api (PR #1921)
|
||||
|
@ -2099,7 +2100,7 @@ Changes in synapse v0.19.0-rc3 (2017-02-02)
|
|||
===========================================
|
||||
|
||||
- Fix email push in pusher worker (PR #1875)
|
||||
- Make presence.get\_new\_events a bit faster (PR #1876)
|
||||
- Make `presence.get_new_events` a bit faster (PR #1876)
|
||||
- Make /keys/changes a bit more performant (PR #1877)
|
||||
|
||||
Changes in synapse v0.19.0-rc2 (2017-02-02)
|
||||
|
@ -2122,14 +2123,14 @@ Features:
|
|||
Changes:
|
||||
|
||||
- Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph!
|
||||
- Log which files we saved attachments to in the media\_repository (PR #1791)
|
||||
- Log which files we saved attachments to in the `media_repository` (PR #1791)
|
||||
- Linearize updates to membership via PUT /state/ to better handle multiple joins (PR #1787)
|
||||
- Limit number of entries to prefill from cache on startup (PR #1792)
|
||||
- Remove full\_twisted\_stacktraces option (PR #1802)
|
||||
- Remove `full_twisted_stacktraces` option (PR #1802)
|
||||
- Measure size of some caches by sum of the size of cached values (PR #1815)
|
||||
- Measure metrics of string\_cache (PR #1821)
|
||||
- Measure metrics of `string_cache` (PR #1821)
|
||||
- Reduce logging verbosity (PR #1822, #1823, #1824)
|
||||
- Don't clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852)
|
||||
- Don't clobber a displayname or `avatar_url` if provided by an m.room.member event (PR #1852)
|
||||
- Better handle 401/404 response for federation /send/ (PR #1866, #1871)
|
||||
|
||||
Fixes:
|
||||
|
@ -2142,7 +2143,7 @@ Fixes:
|
|||
Performance:
|
||||
|
||||
- Don't block messages sending on bumping presence (PR #1789)
|
||||
- Change device\_inbox stream index to include user (PR #1793)
|
||||
- Change `device_inbox` stream index to include user (PR #1793)
|
||||
- Optimise state resolution (PR #1818)
|
||||
- Use DB cache of joined users for presence (PR #1862)
|
||||
- Add an index to make membership queries faster (PR #1867)
|
||||
|
@ -2225,7 +2226,7 @@ Changes:
|
|||
- Enable guest access for private rooms by default (PR #653)
|
||||
- Limit the number of events that can be created on a given room concurrently (PR #1620)
|
||||
- Log the args that we have on UI auth completion (PR #1649)
|
||||
- Stop generating refresh\_tokens (PR #1654)
|
||||
- Stop generating `refresh_tokens` (PR #1654)
|
||||
- Stop putting a time caveat on access tokens (PR #1656)
|
||||
- Remove unspecced GET endpoints for e2e keys (PR #1694)
|
||||
|
||||
|
@ -2250,7 +2251,7 @@ Changes in synapse v0.18.5-rc1 (2016-11-24)
|
|||
|
||||
Features:
|
||||
|
||||
- Implement \"event\_fields\" in filters (PR #1638)
|
||||
- Implement `event_fields` in filters (PR #1638)
|
||||
|
||||
Changes:
|
||||
|
||||
|
@ -2279,7 +2280,7 @@ Bug fixes:
|
|||
|
||||
- Fix media repo to set CORs headers on responses (PR #1190)
|
||||
- Fix registration to not error on non-ascii passwords (PR #1191)
|
||||
- Fix create event code to limit the number of prev\_events (PR #1615)
|
||||
- Fix create event code to limit the number of `prev_events` (PR #1615)
|
||||
- Fix bug in transaction ID deduplication (PR #1624)
|
||||
|
||||
Changes in synapse v0.18.3 (2016-11-08)
|
||||
|
@ -2338,10 +2339,10 @@ Changes in synapse v0.18.2-rc1 (2016-10-17)
|
|||
|
||||
Changes:
|
||||
|
||||
- Remove redundant event\_auth index (PR #1113)
|
||||
- Remove redundant `event_auth` index (PR #1113)
|
||||
- Reduce DB hits for replication (PR #1141)
|
||||
- Implement pluggable password auth (PR #1155)
|
||||
- Remove rate limiting from app service senders and fix get\_or\_create\_user requester, thanks to Patrik Oldsberg (PR #1157)
|
||||
- Remove rate limiting from app service senders and fix `get_or_create_user` requester, thanks to Patrik Oldsberg (PR #1157)
|
||||
- window.postmessage for Interactive Auth fallback (PR #1159)
|
||||
- Use sys.executable instead of hardcoded python, thanks to Pedro Larroy (PR #1162)
|
||||
- Add config option for adding additional TLS fingerprints (PR #1167)
|
||||
|
@ -2349,7 +2350,7 @@ Changes:
|
|||
|
||||
Bug fixes:
|
||||
|
||||
- Fix not being allowed to set your own state\_key, thanks to Patrik Oldsberg (PR #1150)
|
||||
- Fix not being allowed to set your own `state_key`, thanks to Patrik Oldsberg (PR #1150)
|
||||
- Fix interactive auth to return 401 from for incorrect password (PR #1160, #1166)
|
||||
- Fix email push notifs being dropped (PR #1169)
|
||||
|
||||
|
@ -2363,7 +2364,7 @@ Changes in synapse v0.18.1-rc1 (2016-09-30)
|
|||
|
||||
Features:
|
||||
|
||||
- Add total\_room\_count\_estimate to `/publicRooms` (PR #1133)
|
||||
- Add `total_room_count_estimate` to `/publicRooms` (PR #1133)
|
||||
|
||||
Changes:
|
||||
|
||||
|
@ -2398,17 +2399,17 @@ Features:
|
|||
- Add `only=highlight` on `/notifications` (PR #1081)
|
||||
- Add server param to /publicRooms (PR #1082)
|
||||
- Allow clients to ask for the whole of a single state event (PR #1094)
|
||||
- Add is\_direct param to /createRoom (PR #1108)
|
||||
- Add `is_direct` param to /createRoom (PR #1108)
|
||||
- Add pagination support to publicRooms (PR #1121)
|
||||
- Add very basic filter API to /publicRooms (PR #1126)
|
||||
- Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, #1111)
|
||||
|
||||
Changes:
|
||||
|
||||
- Move to storing state\_groups\_state as deltas, greatly reducing DB size (PR #1065)
|
||||
- Move to storing `state_groups_state` as deltas, greatly reducing DB size (PR #1065)
|
||||
- Reduce amount of state pulled out of the DB during common requests (PR #1069)
|
||||
- Allow PDF to be rendered from media repo (PR #1071)
|
||||
- Reindex state\_groups\_state after pruning (PR #1085)
|
||||
- Reindex `state_groups_state` after pruning (PR #1085)
|
||||
- Clobber EDUs in send queue (PR #1095)
|
||||
- Conform better to the CAS protocol specification (PR #1100)
|
||||
- Limit how often we ask for keys from dead servers (PR #1114)
|
||||
|
@ -2442,22 +2443,22 @@ Changes:
|
|||
- Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068)
|
||||
- Don't notify for online to online presence transitions. (PR #1054)
|
||||
- Occasionally persist unpersisted presence updates (PR #1055)
|
||||
- Allow application services to have an optional \'url\' (PR #1056)
|
||||
- Allow application services to have an optional `url` (PR #1056)
|
||||
- Clean up old sent transactions from DB (PR #1059)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
- Fix None check in backfill (PR #1043)
|
||||
- Fix membership changes to be idempotent (PR #1067)
|
||||
- Fix bug in get\_pdu where it would sometimes return events with incorrect signature
|
||||
- Fix bug in `get_pdu` where it would sometimes return events with incorrect signature
|
||||
|
||||
Changes in synapse v0.17.1 (2016-08-24)
|
||||
=======================================
|
||||
|
||||
Changes:
|
||||
|
||||
- Delete old received\_transactions rows (PR #1038)
|
||||
- Pass through user-supplied content in /join/\$room\_id (PR #1039)
|
||||
- Delete old `received_transactions` rows (PR #1038)
|
||||
- Pass through user-supplied content in `/join/$room_id` (PR #1039)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
|
@ -2478,15 +2479,15 @@ Changes:
|
|||
- Move default display name push rule (PR #1011, #1023)
|
||||
- Fix up preview URL API. Add tests. (PR #1015)
|
||||
- Set `Content-Security-Policy` on media repo (PR #1021)
|
||||
- Make notify\_interested\_services faster (PR #1022)
|
||||
- Make `notify_interested_services` faster (PR #1022)
|
||||
- Add usage stats to prometheus monitoring (PR #1037)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
- Fix token login (PR #993)
|
||||
- Fix CAS login (PR #994, #995)
|
||||
- Fix /sync to not clobber status\_msg (PR #997)
|
||||
- Fix redacted state events to include prev\_content (PR #1003)
|
||||
- Fix /sync to not clobber `status_msg` (PR #997)
|
||||
- Fix redacted state events to include `prev_content` (PR #1003)
|
||||
- Fix some bugs in the auth/ldap handler (PR #1007)
|
||||
- Fix backfill request to limit URI length, so that remotes Don't reject the requests due to path length limits (PR #1012)
|
||||
- Fix AS push code to not send duplicate events (PR #1025)
|
||||
|
@ -2527,7 +2528,7 @@ Changes in synapse v0.17.0-rc3 (2016-08-02)
|
|||
|
||||
Changes:
|
||||
|
||||
- Forbid non-ASes from registering users whose names begin with \'\_\' (PR #958)
|
||||
- Forbid non-ASes from registering users whose names begin with `_` (PR #958)
|
||||
- Add some basic admin API docs (PR #963)
|
||||
|
||||
Bug fixes:
|
||||
|
@ -2549,16 +2550,16 @@ This release changes the LDAP configuration format in a backwards incompatible w
|
|||
|
||||
Features:
|
||||
|
||||
- Add purge\_media\_cache admin API (PR #902)
|
||||
- Add `purge_media_cache` admin API (PR #902)
|
||||
- Add deactivate account admin API (PR #903)
|
||||
- Add optional pepper to password hashing (PR #907, #910 by KentShikama)
|
||||
- Add an admin option to shared secret registration (breaks backwards compat) (PR #909)
|
||||
- Add purge local room history API (PR #911, #923, #924)
|
||||
- Add requestToken endpoints (PR #915)
|
||||
- Add an /account/deactivate endpoint (PR #921)
|
||||
- Add filter param to /messages. Add \'contains\_url\' to filter. (PR #922)
|
||||
- Add device\_id support to /login (PR #929)
|
||||
- Add device\_id support to /v2/register flow. (PR #937, #942)
|
||||
- Add filter param to /messages. Add `contains_url` to filter. (PR #922)
|
||||
- Add `device_id` support to /login (PR #929)
|
||||
- Add `device_id` support to /v2/register flow. (PR #937, #942)
|
||||
- Add GET /devices endpoint (PR #939, #944)
|
||||
- Add GET /device/{deviceId} (PR #943)
|
||||
- Add update and delete APIs for devices (PR #949)
|
||||
|
@ -2566,14 +2567,14 @@ Features:
|
|||
Changes:
|
||||
|
||||
- Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
|
||||
- Linearize some federation endpoints based on (origin, room\_id) (PR #879)
|
||||
- Linearize some federation endpoints based on `(origin, room_id)` (PR #879)
|
||||
- Remove the legacy v0 content upload API. (PR #888)
|
||||
- Use similar naming we use in email notifs for push (PR #894)
|
||||
- Optionally include password hash in createUser endpoint (PR #905 by KentShikama)
|
||||
- Use a query that postgresql optimises better for get\_events\_around (PR #906)
|
||||
- Fall back to \'username\' if \'user\' is not given for appservice registration. (PR #927 by Half-Shot)
|
||||
- Use a query that postgresql optimises better for `get_events_around` (PR #906)
|
||||
- Fall back to '`username` if `user` is not given for appservice registration. (PR #927 by Half-Shot)
|
||||
- Add metrics for psutil derived memory usage (PR #936)
|
||||
- Record device\_id in client\_ips (PR #938)
|
||||
- Record `device_id` in `client_ips` (PR #938)
|
||||
- Send the correct host header when fetching keys (PR #941)
|
||||
- Log the hostname the reCAPTCHA was completed on (PR #946)
|
||||
- Make the device id on e2e key upload optional (PR #956)
|
||||
|
@ -2586,8 +2587,8 @@ Bug fixes:
|
|||
- Put most recent 20 messages in email notif (PR #892)
|
||||
- Ensure that the guest user is in the database when upgrading accounts (PR #914)
|
||||
- Fix various edge cases in auth handling (PR #919)
|
||||
- Fix 500 ISE when sending alias event without a state\_key (PR #925)
|
||||
- Fix bug where we stored rejections in the state\_group, persist all rejections (PR #948)
|
||||
- Fix 500 ISE when sending alias event without a `state_key` (PR #925)
|
||||
- Fix bug where we stored rejections in the `state_group`, persist all rejections (PR #948)
|
||||
- Fix lack of check of if the user is banned when handling 3pid invites (PR #952)
|
||||
- Fix a couple of bugs in the transaction and keyring code (PR #954, #955)
|
||||
|
||||
|
@ -2656,7 +2657,7 @@ Changes:
|
|||
|
||||
Bug fixes:
|
||||
|
||||
- Fix \'From\' header in email notifications (PR #843)
|
||||
- Fix `From` header in email notifications (PR #843)
|
||||
- Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842)
|
||||
- Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 8437906)
|
||||
|
||||
|
@ -2677,22 +2678,22 @@ Features:
|
|||
- Add a `url_preview_ip_range_whitelist` config param (PR #760)
|
||||
- Add /report endpoint (PR #762)
|
||||
- Add basic ignore user API (PR #763)
|
||||
- Add an openidish mechanism for proving that you own a given user\_id (PR #765)
|
||||
- Allow clients to specify a server\_name to avoid \'No known servers\' (PR #794)
|
||||
- Add secondary\_directory\_servers option to fetch room list from other servers (PR #808, #813)
|
||||
- Add an openidish mechanism for proving that you own a given `user_id` (PR #765)
|
||||
- Allow clients to specify a `server_name` to avoid "No known servers" (PR #794)
|
||||
- Add `secondary_directory_servers` option to fetch room list from other servers (PR #808, #813)
|
||||
|
||||
Changes:
|
||||
|
||||
- Report per request metrics for all of the things using request\_handler (PR #756)
|
||||
- Report per request metrics for all of the things using `request_handler` (PR #756)
|
||||
- Correctly handle `NULL` password hashes from the database (PR #775)
|
||||
- Allow receipts for events we haven't seen in the db (PR #784)
|
||||
- Make synctl read a cache factor from config file (PR #785)
|
||||
- Increment badge count per missed convo, not per msg (PR #793)
|
||||
- Special case m.room.third\_party\_invite event auth to match invites (PR #814)
|
||||
- Special case `m.room.third_party_invite` event auth to match invites (PR #814)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
- Fix typo in event\_auth servlet path (PR #757)
|
||||
- Fix typo in `event_auth` servlet path (PR #757)
|
||||
- Fix password reset (PR #758)
|
||||
|
||||
Performance improvements:
|
||||
|
@ -2708,7 +2709,7 @@ Performance improvements:
|
|||
- Add `get_users_with_read_receipts_in_room` cache (PR #809)
|
||||
- Use state to calculate `get_users_in_room` (PR #811)
|
||||
- Load push rules in storage layer so that they get cached (PR #825)
|
||||
- Make `get_joined_hosts_for_room` use get\_users\_in\_room (PR #828)
|
||||
- Make `get_joined_hosts_for_room` use `get_users_in_room` (PR #828)
|
||||
- Poke notifier on next reactor tick (PR #829)
|
||||
- Change CacheMetrics to be quicker (PR #830)
|
||||
|
||||
|
@ -2772,19 +2773,19 @@ Changes in synapse v0.14.0-rc1 (2016-03-14)
|
|||
|
||||
Features:
|
||||
|
||||
- Add event\_id to response to state event PUT (PR #581)
|
||||
- Add `event_id` to response to state event PUT (PR #581)
|
||||
- Allow guest users access to messages in rooms they have joined (PR #587)
|
||||
- Add config for what state is included in a room invite (PR #598)
|
||||
- Send the inviter's member event in room invite state (PR #607)
|
||||
- Add error codes for malformed/bad JSON in /login (PR #608)
|
||||
- Add support for changing the actions for default rules (PR #609)
|
||||
- Add environment variable SYNAPSE\_CACHE\_FACTOR, default it to 0.1 (PR #612)
|
||||
- Add environment variable `SYNAPSE_CACHE_FACTOR`, default it to 0.1 (PR #612)
|
||||
- Add ability for alias creators to delete aliases (PR #614)
|
||||
- Add profile information to invites (PR #624)
|
||||
|
||||
Changes:
|
||||
|
||||
- Enforce user\_id exclusivity for AS registrations (PR #572)
|
||||
- Enforce `user_id` exclusivity for AS registrations (PR #572)
|
||||
- Make adding push rules idempotent (PR #587)
|
||||
- Improve presence performance (PR #582, #586)
|
||||
- Change presence semantics for `last_active_ago` (PR #582, #586)
|
||||
|
@ -2792,7 +2793,7 @@ Changes:
|
|||
- Add 800x600 to default list of valid thumbnail sizes (PR #616)
|
||||
- Always include kicks and bans in full /sync (PR #625)
|
||||
- Send history visibility on boundary changes (PR #626)
|
||||
- Register endpoint now returns a refresh\_token (PR #637)
|
||||
- Register endpoint now returns a `refresh_token` (PR #637)
|
||||
|
||||
Bug fixes:
|
||||
|
||||
|
@ -2963,7 +2964,7 @@ Changes in synapse v0.11.0-rc1 (2015-11-11)
|
|||
===========================================
|
||||
|
||||
- Add Search API (PR #307, #324, #327, #336, #350, #359)
|
||||
- Add \'archived\' state to v2 /sync API (PR #316)
|
||||
- Add `archived` state to v2 /sync API (PR #316)
|
||||
- Add ability to reject invites (PR #317)
|
||||
- Add config option to disable password login (PR #322)
|
||||
- Add the login fallback API (PR #330)
|
||||
|
@ -3028,7 +3029,7 @@ Changes in synapse v0.10.0-rc3 (2015-08-25)
|
|||
===========================================
|
||||
|
||||
- Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. (PR #250)
|
||||
- Allow `--config-path` to specify a directory, causing synapse to use all \*.yaml files in the directory as config files. (PR #249)
|
||||
- Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files. (PR #249)
|
||||
- Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. (PR #245)
|
||||
- Add helper utility to synapse to read and parse the config files and extract the value of a given key. For example:
|
||||
|
||||
|
@ -3060,7 +3061,7 @@ General:
|
|||
- Batch various storage request (PR #226, #228)
|
||||
- Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230)
|
||||
- Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232)
|
||||
- Add support for AS's to use v2\_alpha registration API (PR #210)
|
||||
- Add support for AS's to use `v2_alpha` registration API (PR #210)
|
||||
|
||||
Configuration:
|
||||
|
||||
|
@ -3207,7 +3208,7 @@ Configuration:
|
|||
Application services:
|
||||
|
||||
- Reliably retry sending of events from Synapse to application services, as per [Application Services](https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api) spec.
|
||||
- Application services can no longer register via the `/register` API, instead their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. See [docs/application\_services.rst](docs/application_services.rst) for more information.
|
||||
- Application services can no longer register via the `/register` API, instead their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. See [docs/application_services.rst](docs/application_services.rst) for more information.
|
||||
|
||||
Changes in synapse v0.8.1 (2015-03-18)
|
||||
======================================
|
||||
|
@ -3289,7 +3290,7 @@ Changes in synapse 0.6.0 (2014-12-16)
|
|||
=====================================
|
||||
|
||||
- Add new API for media upload and download that supports thumbnailing.
|
||||
- Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the \--content-addr parameter and confusion over accessing content directly from remote homeservers.
|
||||
- Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the `--content-addr` parameter and confusion over accessing content directly from remote homeservers.
|
||||
- Implement exponential backoff when retrying federation requests when sending to remote homeservers which are offline.
|
||||
- Implement typing notifications.
|
||||
- Fix bugs where we sent events with invalid signatures due to bugs where we incorrectly persisted events.
|
||||
|
@ -3304,13 +3305,13 @@ Changes in synapse 0.5.4 (2014-12-03)
|
|||
=====================================
|
||||
|
||||
- Fix presence bug where some rooms did not display presence updates for remote users.
|
||||
- Do not log SQL timing log lines when started with \"-v\"
|
||||
- Do not log SQL timing log lines when started with `-v`
|
||||
- Fix potential memory leak.
|
||||
|
||||
Changes in synapse 0.5.3c (2014-12-02)
|
||||
======================================
|
||||
|
||||
- Change the default value for the content\_addr option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate.
|
||||
- Change the default value for the `content_addr` option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate.
|
||||
|
||||
Changes in synapse 0.5.3 (2014-11-27)
|
||||
=====================================
|
||||
|
@ -3391,7 +3392,7 @@ You will also need an updated syutil and config. See UPGRADES.rst.
|
|||
Homeserver:
|
||||
|
||||
- Sign federation transactions to assert strong identity over federation.
|
||||
- Rename timestamp keys in PDUs and events from \'ts\' and \'hsob\_ts\' to \'origin\_server\_ts\'.
|
||||
- Rename timestamp keys in PDUs and events from `ts` and `hsob_ts` to `origin_server_ts`.
|
||||
|
||||
Changes in synapse 0.3.4 (2014-09-25)
|
||||
=====================================
|
||||
|
@ -3461,9 +3462,9 @@ See UPGRADE for information about changes to the client server API, including br
|
|||
Homeserver:
|
||||
|
||||
- When a user changes their displayname or avatar the server will now update all their join states to reflect this.
|
||||
- The server now adds \"age\" key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else.
|
||||
- The server now adds `age` key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else.
|
||||
- Fix bug where we didn't correctly pull in missing PDUs.
|
||||
- Fix bug where prev\_content key wasn't always returned.
|
||||
- Fix bug where `prev_content` key wasn't always returned.
|
||||
- Add support for password resets.
|
||||
|
||||
Webclient:
|
||||
|
@ -3481,9 +3482,9 @@ Webclient:
|
|||
|
||||
Registration API:
|
||||
|
||||
- The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: \'type\':\'m.login.password\'. See UPGRADE for more information on this.
|
||||
- The \'user\_id\' key has been renamed to \'user\' to better match the login API.
|
||||
- There is an additional login type: \'m.login.email.identity\'.
|
||||
- The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: `type`:`m.login.password`. See UPGRADE for more information on this.
|
||||
- The `user_id` key has been renamed to `user` to better match the login API.
|
||||
- There is an additional login type: `m.login.email.identity`.
|
||||
- The command client and web client have been updated to reflect these changes.
|
||||
|
||||
Changes in synapse 0.2.3 (2014-09-12)
|
||||
|
@ -3516,7 +3517,7 @@ Homeserver:
|
|||
- When the server returns state events it now also includes the previous content.
|
||||
- Add support for inviting people when creating a new room.
|
||||
- Make the homeserver inform the room via m.room.aliases when a new alias is added for a room.
|
||||
- Validate m.room.power\_level events.
|
||||
- Validate `m.room.power_level` events.
|
||||
|
||||
Webclient:
|
||||
|
||||
|
@ -3559,7 +3560,7 @@ Homeserver:
|
|||
- Add support for kicking/banning and power levels.
|
||||
- Allow setting of room names and topics on creation.
|
||||
- Change presence to include last seen time of the user.
|
||||
- Change url path prefix to /\_matrix/\...
|
||||
- Change url path prefix to `/_matrix/...`
|
||||
- Bug fixes to presence.
|
||||
|
||||
Webclient:
|
||||
|
|
|
@@ -66,7 +66,7 @@ Of their installation methods, we recommend

```shell
pip install --user pipx
pipx install poetry
pipx install poetry==1.5.2  # Problems with Poetry 1.6, see https://github.com/matrix-org/synapse/issues/16147
```

but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
@@ -150,6 +150,67 @@ def run_upgrade(
    ...
```

## Background updates

It is sometimes appropriate to perform database migrations as part of a background
process (instead of blocking Synapse until the migration is done). In particular,
this is useful for migrating data when adding new columns or tables.

Pending background updates are stored in the `background_updates` table and are denoted
by a unique name, the current status (stored in JSON), and some dependency information:

* Whether the update requires a previous update to be complete.
* A rough ordering for which to complete updates.

A new background update needs to be added to the `background_updates` table:

```sql
INSERT INTO background_updates (ordering, update_name, depends_on, progress_json) VALUES
    (7706, 'my_background_update', 'a_previous_background_update', '{}');
```

And then needs an associated handler in the appropriate datastore:

```python
self.db_pool.updates.register_background_update_handler(
    "my_background_update",
    update_handler=self._my_background_update,
)
```

There are a few types of updates that can be performed, see the `BackgroundUpdater`:

* `register_background_update_handler`: A generic handler for custom SQL
* `register_background_index_update`: Create an index in the background
* `register_background_validate_constraint`: Validate a constraint in the background
  (PostgreSQL-only)
* `register_background_validate_constraint_and_delete_rows`: Similar to
  `register_background_validate_constraint`, but deletes rows which don't fit
  the constraint.
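For instance, a minimal sketch of `register_background_index_update` (the table, column, and update names here are purely illustrative) would schedule an index build in the background instead of blocking startup:

```python
# Build an index on a (hypothetical) table without blocking Synapse while it
# runs; on PostgreSQL this uses CREATE INDEX CONCURRENTLY under the hood.
self.db_pool.updates.register_background_index_update(
    update_name="my_table_my_column_idx",
    index_name="my_table_my_column_idx",
    table="my_table",
    columns=["my_column"],
)
```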
For `register_background_update_handler`, the generic handler must track progress
and then finalize the background update:

```python
async def _my_background_update(self, progress: JsonDict, batch_size: int) -> int:
    def _do_something(txn: LoggingTransaction) -> int:
        ...
        self.db_pool.updates._background_update_progress_txn(
            txn, "my_background_update", {"last_processed": last_processed}
        )
        return last_processed - prev_last_processed

    num_processed = await self.db_pool.runInteraction("_do_something", _do_something)
    await self.db_pool.updates._end_background_update("my_background_update")

    return num_processed
```

Synapse will attempt to rate-limit how often background updates are run via the
given batch-size and the returned number of processed entries (and how long the
function took to run). See
[background update controller callbacks](../modules/background_update_controller_callbacks.md).

## Boolean columns

Boolean columns require special treatment, since SQLite treats booleans the
@@ -51,17 +51,24 @@ will be inserted with that ID.

For any given stream reader (including writers themselves), we may define a per-writer current stream ID:

> The current stream ID _for a writer W_ is the largest stream ID such that
> A current stream ID _for a writer W_ is the largest stream ID such that
> all transactions added by W with equal or smaller ID have completed.

For example, if W has completed transactions 1 through 5 and 7, but transaction 6 is still in flight, W's current stream ID is 5: everything up to and including 5 has completed, but not everything up to 6.

Similarly, there is a "linear" notion of current stream ID:

> The "linear" current stream ID is the largest stream ID such that
> A "linear" current stream ID is the largest stream ID such that
> all facts (added by any writer) with equal or smaller ID have completed.

Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.

The above definition does not give a unique current stream ID; in fact there can
be a range of current stream IDs. Synapse uses both the minimum and maximum IDs
for different purposes. Most often the maximum is used, as it's generally
beneficial for workers to advance their IDs as soon as possible. However, the
minimum is used in situations where e.g. another worker is going to wait until
the stream advances past a position.

**NB.** For both senses of "current", note that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.

For single-writer streams, the per-writer current ID and the linear current ID are the same.

@@ -114,7 +121,7 @@ Writers need to track:

- their current position (i.e. their own per-writer stream ID).
- their facts currently awaiting completion.

At startup,
- the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
- there are no facts awaiting completion.
@@ -0,0 +1,32 @@

# Add extra fields to client events unsigned section callbacks

_First introduced in Synapse v1.96.0_

This callback allows modules to add extra fields to the unsigned section of
events when they get sent down to clients.

These get called *every* time an event is to be sent to clients, so care should
be taken with respect to performance.

### API

To register the callback, use
`register_add_extra_fields_to_unsigned_client_event_callbacks` on the
`ModuleApi`.

The callback should be of the form

```python
async def add_field_to_unsigned(
    event: EventBase,
) -> JsonDict:
```

where the extra fields to add to the event's unsigned section are returned.
(Modules must not attempt to modify the `event` directly.)

This cannot be used to alter the "core" fields in the unsigned section emitted
by Synapse itself.

If multiple such callbacks try to add the same field to an event's unsigned
section, the last-registered callback wins.
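As a minimal sketch, a module might implement and register such a callback like this (the field name is made up, and the registration keyword argument is an assumption rather than confirmed API):

```python
from synapse.events import EventBase
from synapse.types import JsonDict


async def add_field_to_unsigned(event: EventBase) -> JsonDict:
    # Return only the *extra* fields; Synapse merges them into `unsigned`.
    return {"io.example.event_seen": True}


# Inside the module's __init__ (given `api: ModuleApi`); the keyword
# argument name below is an assumption:
api.register_add_extra_fields_to_unsigned_client_event_callbacks(
    add_field_to_unsigned=add_field_to_unsigned,
)
```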
@@ -1,8 +1,16 @@

# Presence router callbacks

Presence router callbacks allow module developers to specify additional users (local or remote)
to receive certain presence updates from local users. Presence router callbacks can be
registered using the module API's `register_presence_router_callbacks` method.
Presence router callbacks allow module developers to define additional users
which receive presence updates from local users. The additional users
can be local or remote.

For example, it could be used to direct all of `@alice:example.com` (a local user)'s
presence updates to `@bob:matrix.org` (a remote user), even though they don't share a
room. (Note that those presence updates might not make it to `@bob:matrix.org`'s client
unless a similar presence router is running on that homeserver.)

Presence router callbacks can be registered using the module API's
`register_presence_router_callbacks` method.
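As a rough sketch of the `@alice`/`@bob` example above (assuming a module holding a `ModuleApi` instance named `api`; a complete module would likely also register the companion `get_interested_users` callback, and the `UserPresenceState` import path is an assumption):

```python
from typing import Dict, Iterable, Set

from synapse.module_api import UserPresenceState


async def get_users_for_states(
    state_updates: Iterable[UserPresenceState],
) -> Dict[str, Set[UserPresenceState]]:
    # Route every presence update from @alice to @bob as well.
    return {
        "@bob:matrix.org": {
            update
            for update in state_updates
            if update.user_id == "@alice:example.com"
        }
    }


api.register_presence_router_callbacks(get_users_for_states=get_users_for_states)
```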

## Callbacks
@@ -51,6 +51,11 @@ docker run -d --name jaeger \
  jaegertracing/all-in-one:1
```

By default, Synapse will publish traces to Jaeger on localhost.
If Jaeger is hosted elsewhere, point Synapse to the correct host by setting
`opentracing.jaeger_config.local_agent.reporting_host` [in the Synapse configuration](usage/configuration/config_documentation.md#opentracing-1)
or by setting the `JAEGER_AGENT_HOST` environment variable to the desired address.
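For instance, the relevant homeserver.yaml fragment might look like the following sketch (the hostname is a placeholder):

```yaml
opentracing:
  enabled: true
  jaeger_config:
    local_agent:
      # Hypothetical host running the Jaeger agent.
      reporting_host: jaeger.internal.example.com
```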
Latest documentation is probably at
https://www.jaegertracing.io/docs/latest/getting-started.
@@ -193,7 +193,7 @@ SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption,
  rsc.joined_members, rsc.local_users_in_room, rss.join_rules
  FROM room_stats_state rss
  LEFT JOIN room_stats_current rsc USING (room_id)
  WHERE room_id IN ( WHERE room_id IN (
  WHERE room_id IN (
    '!OGEhHVWSdvArJzumhm:matrix.org',
    '!YTvKGNlinIzlkMTVRl:matrix.org'
);
@@ -230,6 +230,13 @@ Example configuration:
presence:
  enabled: false
```

`enabled` can also be set to a special value of "untracked" which ignores updates
received via clients and federation, while still accepting updates from the
[module API](../../modules/index.md).

*The "untracked" option was added in Synapse 1.96.0.*
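A corresponding homeserver.yaml fragment might look like this sketch:

```yaml
presence:
  # Quoted so YAML doesn't coerce the value; presence is then accepted from
  # modules only, while client/federation updates are ignored.
  enabled: "untracked"
```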

---
### `require_auth_for_profile_requests`
@@ -1190,6 +1197,11 @@ inbound federation traffic as early as possible, rather than relying
purely on this application-layer restriction. If not specified, the
default is to whitelist everything.

Note: this does not stop a server from joining rooms that servers not on the
whitelist are in. As such, this option is really only useful to establish a
"private federation", where a group of servers all whitelist each other and have
the same whitelist.

Example configuration:
```yaml
federation_domain_whitelist:
@@ -3792,62 +3804,160 @@ enable_room_list_search: false

---
### `alias_creation_rules`

The `alias_creation_rules` option controls who is allowed to create aliases
on this server.
The `alias_creation_rules` option allows server admins to prevent unwanted
alias creation on this server.

The format of this option is a list of rules that contain globs that
match against user_id, room_id and the new alias (fully qualified with
server name). The action in the first rule that matches is taken,
which can currently either be "allow" or "deny".
This setting is an optional list of 0 or more rules. By default, no list is
provided, meaning that all alias creations are permitted.

Missing user_id/room_id/alias fields default to "*".
Otherwise, requests to create aliases are matched against each rule in order.
The first rule that matches decides if the request is allowed or denied. If no
rule matches, the request is denied. In particular, this means that configuring
an empty list of rules will deny every alias creation request.

If no rules match the request is denied. An empty list means no one
can create aliases.
Each rule is a YAML object containing four fields, each of which is an optional string:

Options for the rules include:
* `user_id`: Matches against the creator of the alias. Defaults to "*".
* `alias`: Matches against the alias being created. Defaults to "*".
* `room_id`: Matches against the room ID the alias is being pointed at. Defaults to "*"
* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.
* `user_id`: a glob pattern that matches against the creator of the alias.
* `alias`: a glob pattern that matches against the alias being created.
* `room_id`: a glob pattern that matches against the room ID the alias is being pointed at.
* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`.

Each of the glob patterns is optional, defaulting to `*` ("match anything").
Note that the patterns match against fully qualified IDs, e.g. against
`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead
of `alice`, `room` and `abcdefghijk`.

Example configuration:

```yaml
# No rule list specified. All alias creations are allowed.
# This is the default behaviour.
alias_creation_rules:
  - user_id: "bad_user"
    alias: "spammy_alias"
    room_id: "*"
    action: deny
```

```yaml
# A list of one rule which allows everything.
# This has the same effect as the previous example.
alias_creation_rules:
  - "action": "allow"
```

```yaml
# An empty list of rules. All alias creations are denied.
alias_creation_rules: []
```

```yaml
# A list of one rule which denies everything.
# This has the same effect as the previous example.
alias_creation_rules:
  - "action": "deny"
```

```yaml
# Prevent a specific user from creating aliases.
# Allow other users to create any alias
alias_creation_rules:
  - user_id: "@bad_user:example.com"
    action: deny

  - action: allow
```

```yaml
# Prevent aliases being created which point to a specific room.
alias_creation_rules:
  - room_id: "!forbiddenRoom:example.com"
    action: deny

  - action: allow
```

---
### `room_list_publication_rules`

The `room_list_publication_rules` option controls who can publish and
which rooms can be published in the public room list.
The `room_list_publication_rules` option allows server admins to prevent
unwanted entries from being published in the public room list.

The format of this option is the same as that for
`alias_creation_rules`.
[`alias_creation_rules`](#alias_creation_rules): an optional list of 0 or more
rules. By default, no list is provided, meaning that all rooms may be
published to the room list.

If the room has one or more aliases associated with it, only one of
the aliases needs to match the alias rule. If there are no aliases
then only rules with `alias: *` match.
Otherwise, requests to publish a room are matched against each rule in order.
The first rule that matches decides if the request is allowed or denied. If no
rule matches, the request is denied. In particular, this means that configuring
an empty list of rules will deny every room publication request.

If no rules match the request is denied. An empty list means no one
can publish rooms.
Each rule is a YAML object containing four fields, each of which is an optional string:

* `user_id`: a glob pattern that matches against the user publishing the room.
* `alias`: a glob pattern that matches against one of the published room's aliases.
  - If the room has no aliases, the alias match fails unless `alias` is unspecified or `*`.
  - If the room has exactly one alias, the alias match succeeds if the `alias` pattern matches that alias.
  - If the room has two or more aliases, the alias match succeeds if the pattern matches at least one of the aliases.
* `room_id`: a glob pattern that matches against the room ID of the room being published.
* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`.

Each of the glob patterns is optional, defaulting to `*` ("match anything").
Note that the patterns match against fully qualified IDs, e.g. against
`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead
of `alice`, `room` and `abcdefghijk`.

Options for the rules include:
* `user_id`: Matches against the creator of the alias. Defaults to "*".
* `alias`: Matches against any current local or canonical aliases associated with the room. Defaults to "*".
* `room_id`: Matches against the room ID being published. Defaults to "*".
* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.

Example configuration:

```yaml
# No rule list specified. Anyone may publish any room to the public list.
# This is the default behaviour.
room_list_publication_rules:
  - user_id: "*"
    alias: "*"
    room_id: "*"
    action: allow
```

```yaml
# A list of one rule which allows everything.
# This has the same effect as the previous example.
room_list_publication_rules:
  - "action": "allow"
```

```yaml
# An empty list of rules. No-one may publish to the room list.
room_list_publication_rules: []
```

```yaml
# A list of one rule which denies everything.
# This has the same effect as the previous example.
room_list_publication_rules:
  - "action": "deny"
```

```yaml
# Prevent a specific user from publishing rooms.
# Allow other users to publish anything.
room_list_publication_rules:
  - user_id: "@bad_user:example.com"
    action: deny

  - action: allow
```

```yaml
# Prevent publication of a specific room.
room_list_publication_rules:
  - room_id: "!forbiddenRoom:example.com"
    action: deny

  - action: allow
```

```yaml
# Prevent publication of rooms with at least one alias containing the word "potato".
room_list_publication_rules:
  - alias: "#*potato*:example.com"
    action: deny

  - action: allow
```

---
@@ -83,7 +83,7 @@ The search term is then split into words:

  available, then the system's [default locale](https://unicode-org.github.io/icu/userguide/locale/#default-locales)
  will be used to break the search term into words. (See the
  [installation instructions](setup/installation.md) for how to install ICU.)
* If unavailable, then runs of ASCII characters, numbers, underscores, and hypens
* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens
  are considered words.
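Where ICU is available, the word splitting behaves roughly like the following sketch (the helper name is hypothetical, and it assumes the PyICU package; a real implementation would also filter out punctuation-only tokens):

```python
from typing import List

import icu  # PyICU


def split_search_term(term: str) -> List[str]:
    # Ask ICU for locale-aware word boundaries, then keep the non-empty runs.
    breaker = icu.BreakIterator.createWordInstance(icu.Locale.getDefault())
    breaker.setText(term)
    words = []
    start = 0
    while True:
        end = breaker.nextBoundary()
        if end < 0:  # -1 signals that there are no more boundaries
            break
        word = term[start:end].strip()
        if word:
            words.append(word)
        start = end
    return words
```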
The queries for PostgreSQL and SQLite are detailed below, by their overall goal

mypy.ini
@@ -32,12 +32,13 @@ files =
  docker/,
  scripts-dev/,
  synapse/,
  synmark/,
  tests/,
  build_rust.py

[mypy-synapse.metrics._reactor_metrics]
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
# See https://github.com/matrix-org/synapse/pull/11771.
# This module pokes at the internals of OS-specific classes, to appease mypy
# on different systems we add additional ignores.
warn_unused_ignores = False

[mypy-synapse.util.caches.treecache]

@@ -80,6 +81,9 @@ ignore_missing_imports = True

[mypy-pympler.*]
ignore_missing_imports = True

[mypy-pyperf.*]
ignore_missing_imports = True

[mypy-rust_python_jaeger_reporter.*]
ignore_missing_imports = True
File diff suppressed because it is too large
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"

[tool.poetry]
name = "matrix-synapse"
version = "1.93.0"
version = "1.96.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

@@ -322,7 +322,7 @@ all = [
# This helps prevent merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.0.290"
ruff = "0.1.4"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"

@@ -382,7 +382,7 @@ furo = ">=2022.12.7,<2024.0.0"
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.7.0"]
requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.8.1"]
build-backend = "poetry.core.masonry.api"
@@ -25,14 +25,14 @@ name = "synapse.synapse_rust"
anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.1", features = [
pyo3 = { version = "0.19.2", features = [
    "macros",
    "anyhow",
    "abi3",
    "abi3-py37",
    "abi3-py38",
] }
pyo3-log = "0.8.1"
pythonize = "0.17.0"
pythonize = "0.19.0"
regex = "1.6.0"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
@@ -105,6 +105,17 @@ impl PushRuleEvaluator {
    /// Create a new `PushRuleEvaluator`. See struct docstring for details.
    #[allow(clippy::too_many_arguments)]
    #[new]
    #[pyo3(signature = (
        flattened_keys,
        has_mentions,
        room_member_count,
        sender_power_level,
        notification_power_levels,
        related_events_flattened,
        related_event_match_enabled,
        room_version_feature_flags,
        msc3931_enabled,
    ))]
    pub fn py_new(
        flattened_keys: BTreeMap<String, JsonValue>,
        has_mentions: bool,
@@ -33,6 +33,7 @@ DISTS = (
    "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
    "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
    "ubuntu:mantic",  # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24)
    "debian:trixie",  # (EOL not specified yet)
)
@@ -214,7 +214,11 @@ fi

extra_test_args=()

test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"
test_packages="./tests/csapi ./tests ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 ./tests/msc3930 ./tests/msc3902"

# Enable dirty runs, so tests will reuse the same container where possible.
# This significantly speeds up tests, but increases the possibility of test pollution.
export COMPLEMENT_ENABLE_DIRTY_RUNS=1

# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)

@@ -274,7 +278,7 @@ fi
export PASS_SYNAPSE_LOG_TESTING=1

# Run the tests!
echo "Images built; running complement"
echo "Images built; running complement with ${extra_test_args[@]} $@ $test_packages"
cd "$COMPLEMENT_DIR"

go test -v -tags $test_tags -count=1 "${extra_test_args[@]}" "$@" ./tests/...
go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" $test_packages
@@ -16,13 +16,24 @@
can crop up, e.g the cache descriptors.
"""

from typing import Callable, Optional, Type
from typing import Callable, Optional, Tuple, Type, Union

import mypy.types
from mypy.erasetype import remove_instance_last_known_values
from mypy.nodes import ARG_NAMED_OPT
from mypy.plugin import MethodSigContext, Plugin
from mypy.errorcodes import ErrorCode
from mypy.nodes import ARG_NAMED_OPT, TempNode, Var
from mypy.plugin import FunctionSigContext, MethodSigContext, Plugin
from mypy.typeops import bind_self
from mypy.types import CallableType, Instance, NoneType, UnionType
from mypy.types import (
    AnyType,
    CallableType,
    Instance,
    NoneType,
    TupleType,
    TypeAliasType,
    UninhabitedType,
    UnionType,
)


class SynapsePlugin(Plugin):
@@ -36,9 +47,37 @@ class SynapsePlugin(Plugin):
            )
        ):
            return cached_function_method_signature

        if fullname in (
            "synapse.util.caches.descriptors._CachedFunctionDescriptor.__call__",
            "synapse.util.caches.descriptors._CachedListFunctionDescriptor.__call__",
        ):
            return check_is_cacheable_wrapper

        return None


def _get_true_return_type(signature: CallableType) -> mypy.types.Type:
    """
    Get the "final" return type of a callable which might return an Awaitable/Deferred.
    """
    if isinstance(signature.ret_type, Instance):
        # If a coroutine, unwrap the coroutine's return type.
        if signature.ret_type.type.fullname == "typing.Coroutine":
            return signature.ret_type.args[2]

        # If an awaitable, unwrap the awaitable's final value.
        elif signature.ret_type.type.fullname == "typing.Awaitable":
            return signature.ret_type.args[0]

        # If a Deferred, unwrap the Deferred's final value.
        elif signature.ret_type.type.fullname == "twisted.internet.defer.Deferred":
            return signature.ret_type.args[0]

    # Otherwise, return the raw value of the function.
    return signature.ret_type


def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
    """Fixes the `CachedFunction.__call__` signature to be correct.
@@ -47,16 +86,17 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
    1. the `self` argument needs to be marked as "bound";
    2. any `cache_context` argument should be removed;
    3. an optional keyword argument `on_invalidated` should be added.
    4. Wrap the return type to always be a Deferred.
    """

    # First we mark this as a bound function signature.
    signature = bind_self(ctx.default_signature)
    # 1. Mark this as a bound function signature.
    signature: CallableType = bind_self(ctx.default_signature)

    # Secondly, we remove any "cache_context" args.
    # 2. Remove any "cache_context" args.
    #
    # Note: We should be only doing this if `cache_context=True` is set, but if
    # it isn't then the code will raise an exception when its called anyway, so
    # its not the end of the world.
    # it's not the end of the world.
    context_arg_index = None
    for idx, name in enumerate(signature.arg_names):
        if name == "cache_context":
@@ -72,7 +112,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
        arg_names.pop(context_arg_index)
        arg_kinds.pop(context_arg_index)

    # Third, we add an optional "on_invalidate" argument.
    # 3. Add an optional "on_invalidate" argument.
    #
    # This is a either
    # - a callable which accepts no input and returns nothing, or
@@ -94,35 +134,16 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
    arg_names.append("on_invalidate")
    arg_kinds.append(ARG_NAMED_OPT)  # Arg is an optional kwarg.

    # Finally we ensure the return type is a Deferred.
    if (
        isinstance(signature.ret_type, Instance)
        and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred"
    ):
        # If it is already a Deferred, nothing to do.
        ret_type = signature.ret_type
    else:
        ret_arg = None
        if isinstance(signature.ret_type, Instance):
            # If a coroutine, wrap the coroutine's return type in a Deferred.
            if signature.ret_type.type.fullname == "typing.Coroutine":
                ret_arg = signature.ret_type.args[2]
    # 4. Ensure the return type is a Deferred.
    ret_arg = _get_true_return_type(signature)

            # If an awaitable, wrap the awaitable's final value in a Deferred.
            elif signature.ret_type.type.fullname == "typing.Awaitable":
                ret_arg = signature.ret_type.args[0]

        # Otherwise, wrap the return value in a Deferred.
        if ret_arg is None:
            ret_arg = signature.ret_type

        # This should be able to use ctx.api.named_generic_type, but that doesn't seem
        # to find the correct symbol for anything more than 1 module deep.
        #
        # modules is not part of CheckerPluginInterface. The following is a combination
        # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo.
        sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred")  # type: ignore[attr-defined]
        ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)])
    # This should be able to use ctx.api.named_generic_type, but that doesn't seem
    # to find the correct symbol for anything more than 1 module deep.
    #
    # modules is not part of CheckerPluginInterface. The following is a combination
    # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo.
    sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred")  # type: ignore[attr-defined]
    ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)])

    signature = signature.copy_modified(
        arg_types=arg_types,
@@ -134,6 +155,198 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
    return signature


def check_is_cacheable_wrapper(ctx: MethodSigContext) -> CallableType:
    """Asserts that the signature of a method returns a value which can be cached.

    Makes no changes to the provided method signature.
    """
    # The true signature, this isn't being modified so this is what will be returned.
    signature: CallableType = ctx.default_signature

    if not isinstance(ctx.args[0][0], TempNode):
        ctx.api.note("Cached function is not a TempNode?!", ctx.context)  # type: ignore[attr-defined]
        return signature

    orig_sig = ctx.args[0][0].type
    if not isinstance(orig_sig, CallableType):
        ctx.api.fail("Cached 'function' is not a callable", ctx.context)
        return signature

    check_is_cacheable(orig_sig, ctx)

    return signature


def check_is_cacheable(
    signature: CallableType,
    ctx: Union[MethodSigContext, FunctionSigContext],
) -> None:
    """
    Check if a callable returns a type which can be cached.

    Args:
        signature: The callable to check.
        ctx: The signature context, used for error reporting.
    """
    # Unwrap the true return type from the cached function.
    return_type = _get_true_return_type(signature)

    verbose = ctx.api.options.verbosity >= 1
    # TODO Technically a cachedList only needs immutable values, but forcing them
    # to return Mapping instead of Dict is fine.
    ok, note = is_cacheable(return_type, signature, verbose)

    if ok:
        message = f"function {signature.name} is @cached, returning {return_type}"
    else:
        message = f"function {signature.name} is @cached, but has mutable return value {return_type}"

    if note:
        message += f" ({note})"
    message = message.replace("builtins.", "").replace("typing.", "")

    if ok and note:
        ctx.api.note(message, ctx.context)  # type: ignore[attr-defined]
    elif not ok:
        ctx.api.fail(message, ctx.context, code=AT_CACHED_MUTABLE_RETURN)


# Immutable simple values.
IMMUTABLE_VALUE_TYPES = {
    "builtins.bool",
    "builtins.int",
    "builtins.float",
    "builtins.str",
    "builtins.bytes",
}

# Types defined in Synapse which are known to be immutable.
IMMUTABLE_CUSTOM_TYPES = {
    "synapse.synapse_rust.acl.ServerAclEvaluator",
    "synapse.synapse_rust.push.FilteredPushRules",
    # This is technically not immutable, but close enough.
    "signedjson.types.VerifyKey",
}

# Immutable containers only if the values are also immutable.
IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS = {
    "builtins.frozenset",
    "builtins.tuple",
    "typing.AbstractSet",
    "typing.Sequence",
    "immutabledict.immutabledict",
}

MUTABLE_CONTAINER_TYPES = {
    "builtins.set",
    "builtins.list",
    "builtins.dict",
}

AT_CACHED_MUTABLE_RETURN = ErrorCode(
    "synapse-@cached-mutable",
    "@cached() should have an immutable return type",
    "General",
)

def is_cacheable(
    rt: mypy.types.Type, signature: CallableType, verbose: bool
) -> Tuple[bool, Optional[str]]:
    """
    Check if a particular type is cachable.

    A type is cachable if it is immutable; for complex types this recurses to
    check each type parameter.

    Returns: a 2-tuple (cacheable, message).
        - cachable: False means the type is definitely not cacheable;
          true means anything else.
        - Optional message.
    """

    # This should probably be done via a TypeVisitor. Apologies to the reader!
    if isinstance(rt, AnyType):
        return True, ("may be mutable" if verbose else None)

    elif isinstance(rt, Instance):
        if (
            rt.type.fullname in IMMUTABLE_VALUE_TYPES
            or rt.type.fullname in IMMUTABLE_CUSTOM_TYPES
        ):
            # "Simple" types are generally immutable.
            return True, None

        elif rt.type.fullname == "typing.Mapping":
            # Generally mapping keys are immutable, but they only *have* to be
            # hashable, which doesn't imply immutability. E.g. Mapping[K, V]
            # is cachable iff K and V are cachable.
            return is_cacheable(rt.args[0], signature, verbose) and is_cacheable(
                rt.args[1], signature, verbose
            )

        elif rt.type.fullname in IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS:
            # E.g. Collection[T] is cachable iff T is cachable.
            return is_cacheable(rt.args[0], signature, verbose)

        elif rt.type.fullname in MUTABLE_CONTAINER_TYPES:
            # Mutable containers are mutable regardless of their underlying type.
            return False, None

        elif "attrs" in rt.type.metadata:
            # attrs classes are only cachable iff it is frozen (immutable itself)
            # and all attributes are cachable.
            frozen = rt.type.metadata["attrs"]["frozen"]
            if frozen:
                for attribute in rt.type.metadata["attrs"]["attributes"]:
                    attribute_name = attribute["name"]
                    symbol_node = rt.type.names[attribute_name].node
                    assert isinstance(symbol_node, Var)
                    assert symbol_node.type is not None
                    ok, note = is_cacheable(symbol_node.type, signature, verbose)
                    if not ok:
                        return False, f"non-frozen attrs property: {attribute_name}"
                # All attributes were frozen.
                return True, None
            else:
                return False, "non-frozen attrs class"

        else:
            # Ensure we fail for unknown types, these generally means that the
            # above code is not complete.
            return (
                False,
                f"Don't know how to handle {rt.type.fullname} return type instance",
            )

    elif isinstance(rt, NoneType):
        # None is cachable.
        return True, None

    elif isinstance(rt, (TupleType, UnionType)):
        # Tuples and unions are cachable iff all their items are cachable.
        for item in rt.items:
            ok, note = is_cacheable(item, signature, verbose)
            if not ok:
                return False, note
        # This discards notes but that's probably fine
        return True, None

    elif isinstance(rt, TypeAliasType):
        # For a type alias, check if the underlying real type is cachable.
        return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose)

    elif isinstance(rt, UninhabitedType) and rt.is_noreturn:
        # There is no return value, just consider it cachable. This is only used
        # in tests.
        return True, None

    else:
        # Ensure we fail for unknown types, these generally means that the
        # above code is not complete.
        return False, f"Don't know how to handle {type(rt).__qualname__} return type"


def plugin(version: str) -> Type[SynapsePlugin]:
    # This is the entry point of the plugin, and lets us deal with the fact
    # that the mypy plugin interface is *not* stable by looking at the version
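Editor's note: the new plugin hooks `_CachedFunctionDescriptor.__call__`, unwraps the awaited
return type via `_get_true_return_type`, and reports the `synapse-@cached-mutable` error code
when a cached value is mutable. A minimal sketch of what it accepts and rejects (illustrative,
not part of the diff; assumes Synapse's `@cached` decorator):

    from typing import Dict, Mapping

    from synapse.util.caches.descriptors import cached


    class ExampleStore:
        @cached()
        async def get_labels(self, room_id: str) -> Mapping[str, str]:
            # Accepted: typing.Mapping is cachable as long as its key and
            # value types are cachable (here both are builtins.str).
            return {"name": "example"}

        @cached()
        async def get_labels_mutable(self, room_id: str) -> Dict[str, str]:
            # Rejected: builtins.dict is in MUTABLE_CONTAINER_TYPES, so mypy
            # fails with "has mutable return value" [synapse-@cached-mutable].
            return {"name": "example"}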
@@ -684,6 +684,10 @@ def full(gh_token: str) -> None:
    click.echo("1. If this is a security release, read the security wiki page.")
    click.echo("2. Check for any release blockers before proceeding.")
    click.echo("   https://github.com/matrix-org/synapse/labels/X-Release-Blocker")
    click.echo(
        "3. Check for any other special release notes, including announcements to add to the changelog or special deployment instructions."
    )
    click.echo("   See the 'Synapse Maintainer Report'.")

    click.confirm("Ready?", abort=True)
@@ -50,7 +50,7 @@ def request_registration(
    url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)

    # Get the nonce
    r = requests.get(url, verify=False)
    r = requests.get(url)

    if r.status_code != 200:
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))

@@ -88,7 +88,7 @@ def request_registration(
    }

    _print("Sending registration request...")
    r = requests.post(url, json=data, verify=False)
    r = requests.post(url, json=data)

    if r.status_code != 200:
        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
@@ -191,7 +191,7 @@ IGNORED_TABLES = {
    "user_directory_search_stat",
    "user_directory_search_pos",
    "users_who_share_private_rooms",
    "users_in_public_room",
    "users_in_public_rooms",
    # UI auth sessions have foreign keys so additional care needs to be taken,
    # the sessions are transient anyway, so ignore them.
    "ui_auth_sessions",
@@ -115,7 +115,7 @@ class InternalAuth(BaseAuth):
        Once get_user_by_req has set up the opentracing span, this does the actual work.
        """
        try:
            ip_addr = request.getClientAddress().host
            ip_addr = request.get_client_ip_if_available()
            user_agent = get_request_user_agent(request)

            access_token = self.get_access_token_from_request(request)
@@ -80,10 +80,6 @@ class UserPresenceState:
    def as_dict(self) -> JsonDict:
        return attr.asdict(self)

    @staticmethod
    def from_dict(d: JsonDict) -> "UserPresenceState":
        return UserPresenceState(**d)

    def copy_and_replace(self, **kwargs: Any) -> "UserPresenceState":
        return attr.evolve(self, **kwargs)
@@ -368,9 +368,14 @@ class ServerConfig(Config):

        # Whether to enable user presence.
        presence_config = config.get("presence") or {}
        self.use_presence = presence_config.get("enabled")
        if self.use_presence is None:
            self.use_presence = config.get("use_presence", True)
        presence_enabled = presence_config.get("enabled")
        if presence_enabled is None:
            presence_enabled = config.get("use_presence", True)

        # Whether presence is enabled *at all*.
        self.presence_enabled = bool(presence_enabled)
        # Whether to internally track presence, requires that presence is enabled,
        self.track_presence = self.presence_enabled and presence_enabled != "untracked"

        # Custom presence router module
        # This is the legacy way of configuring it (the config should now be put in the modules section)
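Editor's note: the rework above splits the old `use_presence` boolean into `presence_enabled`
(presence is served at all) and `track_presence` (presence is tracked internally), with the new
"untracked" config value enabling the former without the latter. A standalone sketch of the
derivation, mirroring the quoted logic:

    def derive_presence_flags(config: dict) -> "tuple[bool, bool]":
        presence_config = config.get("presence") or {}
        presence_enabled = presence_config.get("enabled")
        if presence_enabled is None:
            presence_enabled = config.get("use_presence", True)

        enabled = bool(presence_enabled)  # presence served at all
        track = enabled and presence_enabled != "untracked"  # tracked internally
        return enabled, track

    assert derive_presence_flags({}) == (True, True)
    assert derive_presence_flags({"presence": {"enabled": False}}) == (False, False)
    assert derive_presence_flags({"presence": {"enabled": "untracked"}}) == (True, False)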
@@ -358,9 +358,9 @@ class WorkerConfig(Config):
                "Must only specify one instance to handle `account_data` messages."
            )

        if len(self.writers.receipts) != 1:
        if len(self.writers.receipts) == 0:
            raise ConfigError(
                "Must only specify one instance to handle `receipts` messages."
                "Must specify at least one instance to handle `receipts` messages."
            )

        if len(self.writers.events) == 0:
@@ -17,6 +17,7 @@ import re
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,

@@ -45,6 +46,7 @@ from . import EventBase

if TYPE_CHECKING:
    from synapse.handlers.relations import BundledAggregations
    from synapse.server import HomeServer


# Split strings on "." but not "\." (or "\\\.").

@@ -56,6 +58,13 @@ CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT


# Module API callback that allows adding fields to the unsigned section of
# events that are sent to clients.
ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK = Callable[
    [EventBase], Awaitable[JsonDict]
]


def prune_event(event: EventBase) -> EventBase:
    """Returns a pruned version of the given event, which removes all keys we
    don't know about or think could potentially be dodgy.
@@ -509,7 +518,13 @@ class EventClientSerializer:
    clients.
    """

    def serialize_event(
    def __init__(self, hs: "HomeServer") -> None:
        self._store = hs.get_datastores().main
        self._add_extra_fields_to_unsigned_client_event_callbacks: List[
            ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK
        ] = []

    async def serialize_event(
        self,
        event: Union[JsonDict, EventBase],
        time_now: int,

@@ -535,10 +550,21 @@ class EventClientSerializer:

        serialized_event = serialize_event(event, time_now, config=config)

        new_unsigned = {}
        for callback in self._add_extra_fields_to_unsigned_client_event_callbacks:
            u = await callback(event)
            new_unsigned.update(u)

        if new_unsigned:
            # We do the `update` this way round so that modules can't clobber
            # existing fields.
            new_unsigned.update(serialized_event["unsigned"])
            serialized_event["unsigned"] = new_unsigned

        # Check if there are any bundled aggregations to include with the event.
        if bundle_aggregations:
            if event.event_id in bundle_aggregations:
                self._inject_bundled_aggregations(
                await self._inject_bundled_aggregations(
                    event,
                    time_now,
                    config,
@@ -548,7 +574,7 @@ class EventClientSerializer:

        return serialized_event

    def _inject_bundled_aggregations(
    async def _inject_bundled_aggregations(
        self,
        event: EventBase,
        time_now: int,

@@ -590,7 +616,7 @@ class EventClientSerializer:
        # said that we should only include the `event_id`, `origin_server_ts` and
        # `sender` of the edit; however MSC3925 proposes extending it to the whole
        # of the edit, which is what we do here.
        serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event(
        serialized_aggregations[RelationTypes.REPLACE] = await self.serialize_event(
            event_aggregations.replace,
            time_now,
            config=config,

@@ -600,7 +626,7 @@ class EventClientSerializer:
        if event_aggregations.thread:
            thread = event_aggregations.thread

            serialized_latest_event = self.serialize_event(
            serialized_latest_event = await self.serialize_event(
                thread.latest_event,
                time_now,
                config=config,

@@ -623,7 +649,7 @@ class EventClientSerializer:
            "m.relations", {}
        ).update(serialized_aggregations)

    def serialize_events(
    async def serialize_events(
        self,
        events: Iterable[Union[JsonDict, EventBase]],
        time_now: int,
@@ -645,7 +671,7 @@ class EventClientSerializer:
            The list of serialized events
        """
        return [
            self.serialize_event(
            await self.serialize_event(
                event,
                time_now,
                config=config,

@@ -654,6 +680,14 @@ class EventClientSerializer:
            for event in events
        ]

    def register_add_extra_fields_to_unsigned_client_event_callback(
        self, callback: ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK
    ) -> None:
        """Register a callback that returns additions to the unsigned section of
        serialized events.
        """
        self._add_extra_fields_to_unsigned_client_event_callbacks.append(callback)


_PowerLevel = Union[str, int]
PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
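Editor's note: with the serializer now constructed from the homeserver and fully async, modules
can append to the `unsigned` section through callbacks of type
`Callable[[EventBase], Awaitable[JsonDict]]`. A hypothetical registration (the callback body and
field name are invented for illustration; `get_event_client_serializer` is the existing
HomeServer accessor):

    from synapse.events import EventBase
    from synapse.types import JsonDict


    async def add_custom_unsigned(event: EventBase) -> JsonDict:
        # Whatever is returned here is merged into "unsigned"; existing
        # fields win, so modules cannot clobber server-set values.
        return {"com.example.note": "hello"}


    # From module setup code, given a running HomeServer `hs`:
    # hs.get_event_client_serializer().register_add_extra_fields_to_unsigned_client_event_callback(
    #     add_custom_unsigned
    # )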
@@ -1402,7 +1402,7 @@ class FederationClient(FederationBase):
            The remote homeserver return some state from the room. The response
            dictionary is in the form:

            {"knock_state_events": [<state event dict>, ...]}
            {"knock_room_state": [<state event dict>, ...]}

            The list of state events may be empty.

@@ -1429,7 +1429,7 @@ class FederationClient(FederationBase):
            The remote homeserver can optionally return some state from the room. The response
            dictionary is in the form:

            {"knock_state_events": [<state event dict>, ...]}
            {"knock_room_state": [<state event dict>, ...]}

            The list of state events may be empty.
        """
@@ -84,7 +84,7 @@ from synapse.replication.http.federation import (
from synapse.storage.databases.main.lock import Lock
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
from synapse.util.caches.response_cache import ResponseCache

@@ -850,14 +850,7 @@ class FederationServer(FederationBase):
                context, self._room_prejoin_state_types
            )
        )
        return {
            "knock_room_state": stripped_room_state,
            # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
            # Thus, we also populate a 'knock_state_events' with the same content to
            # support old instances.
            # See https://github.com/matrix-org/synapse/issues/14088.
            "knock_state_events": stripped_room_state,
        }
        return {"knock_room_state": stripped_room_state}

    async def _on_send_membership_event(
        self, origin: str, content: JsonDict, membership_type: str, room_id: str
@@ -1006,6 +999,12 @@ class FederationServer(FederationBase):
    async def on_claim_client_keys(
        self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool
    ) -> Dict[str, Any]:
        if any(
            not self.hs.is_mine(UserID.from_string(user_id))
            for user_id, _, _, _ in query
        ):
            raise SynapseError(400, "User is not hosted on this homeserver")

        log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
        results = await self._e2e_keys_handler.claim_local_one_time_keys(
            query, always_include_fallback_keys=always_include_fallback_keys
@@ -1402,7 +1401,7 @@ class FederationHandlerRegistry:
        self._edu_type_to_instance[edu_type] = instance_names

    async def on_edu(self, edu_type: str, origin: str, content: dict) -> None:
        if not self.config.server.use_presence and edu_type == EduTypes.PRESENCE:
        if not self.config.server.track_presence and edu_type == EduTypes.PRESENCE:
            return

        # Check if we have a handler on this instance
@@ -395,7 +395,7 @@ class PresenceDestinationsRow(BaseFederationRow):
    @staticmethod
    def from_data(data: JsonDict) -> "PresenceDestinationsRow":
        return PresenceDestinationsRow(
            state=UserPresenceState.from_dict(data["state"]), destinations=data["dests"]
            state=UserPresenceState(**data["state"]), destinations=data["dests"]
        )

    def to_data(self) -> JsonDict:
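Editor's note: `UserPresenceState.as_dict`/`from_dict` were thin attrs wrappers, so call sites
now use `attr.asdict` and the constructor directly, as in the `PresenceDestinationsRow` change
above. A small round-trip sketch (assuming `UserPresenceState.default`, the existing factory):

    import attr

    from synapse.api.presence import UserPresenceState

    state = UserPresenceState.default("@alice:example.com")
    wire = attr.asdict(state)  # what to_data() puts on the federation stream
    assert UserPresenceState(**wire) == state  # replaces the removed from_dict()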
@@ -67,7 +67,7 @@ The loop continues so long as there is anything to send. At each iteration of th

When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop*
(`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`.
(Only once the catch-up mode has been exited can the regular tranaction transmission behaviour
(Only once the catch-up mode has been exited can the regular transaction transmission behaviour
be resumed.)

*Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to
@@ -844,7 +844,7 @@ class FederationSender(AbstractFederationSender):
            destinations (list[str])
        """

        if not states or not self.hs.config.server.use_presence:
        if not states or not self.hs.config.server.track_presence:
            # No-op if presence is disabled.
            return
@@ -431,7 +431,7 @@ class TransportLayerClient:
            The remote homeserver can optionally return some state from the room. The response
            dictionary is in the form:

            {"knock_state_events": [<state event dict>, ...]}
            {"knock_room_state": [<state event dict>, ...]}

            The list of state events may be empty.
        """
@@ -212,8 +212,8 @@ class AccountValidityHandler:

        addresses = []
        for threepid in threepids:
            if threepid["medium"] == "email":
                addresses.append(threepid["address"])
            if threepid.medium == "email":
                addresses.append(threepid.address)

        return addresses
@@ -16,6 +16,8 @@ import abc
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set

import attr

from synapse.api.constants import Direction, Membership
from synapse.events import EventBase
from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo

@@ -93,7 +95,7 @@ class AdminHandler:
        ]
        user_info_dict["displayname"] = profile.display_name
        user_info_dict["avatar_url"] = profile.avatar_url
        user_info_dict["threepids"] = threepids
        user_info_dict["threepids"] = [attr.asdict(t) for t in threepids]
        user_info_dict["external_ids"] = external_ids
        user_info_dict["erased"] = await self._store.is_user_erased(user.to_string())

@@ -171,8 +173,8 @@ class AdminHandler:
            else:
                stream_ordering = room.stream_ordering

            from_key = RoomStreamToken(0, 0)
            to_key = RoomStreamToken(None, stream_ordering)
            from_key = RoomStreamToken(topological=0, stream=0)
            to_key = RoomStreamToken(stream=stream_ordering)

            # Events that we've processed in this room
            written_events: Set[str] = set()

@@ -281,7 +283,7 @@ class AdminHandler:
                start, limit, user_id
            )
            for media in media_ids:
                writer.write_media_id(media["media_id"], media)
                writer.write_media_id(media.media_id, attr.asdict(media))

            logger.info(
                "[%s] Written %d media_ids of %s",
@@ -47,6 +47,7 @@ from synapse.types import (
    DeviceListUpdates,
    JsonDict,
    JsonMapping,
    MultiWriterStreamToken,
    RoomAlias,
    RoomStreamToken,
    StreamKeyType,

@@ -216,8 +217,8 @@ class ApplicationServicesHandler:

    def notify_interested_services_ephemeral(
        self,
        stream_key: str,
        new_token: Union[int, RoomStreamToken],
        stream_key: StreamKeyType,
        new_token: Union[int, RoomStreamToken, MultiWriterStreamToken],
        users: Collection[Union[str, UserID]],
    ) -> None:
        """

@@ -259,19 +260,6 @@ class ApplicationServicesHandler:
        ):
            return

        # Assert that new_token is an integer (and not a RoomStreamToken).
        # All of the supported streams that this function handles use an
        # integer to track progress (rather than a RoomStreamToken - a
        # vector clock implementation) as they don't support multiple
        # stream writers.
        #
        # As a result, we simply assert that new_token is an integer.
        # If we do end up needing to pass a RoomStreamToken down here
        # in the future, using RoomStreamToken.stream (the minimum stream
        # position) to convert to an ascending integer value should work.
        # Additional context: https://github.com/matrix-org/synapse/pull/11137
        assert isinstance(new_token, int)

        # Ignore to-device messages if the feature flag is not enabled
        if (
            stream_key == StreamKeyType.TO_DEVICE
@@ -286,6 +274,9 @@ class ApplicationServicesHandler:
        ):
            return

        # We know we're not a `RoomStreamToken` at this point.
        assert not isinstance(new_token, RoomStreamToken)

        # Check whether there are any appservices which have registered to receive
        # ephemeral events.
        #

@@ -326,8 +317,8 @@ class ApplicationServicesHandler:
    async def _notify_interested_services_ephemeral(
        self,
        services: List[ApplicationService],
        stream_key: str,
        new_token: int,
        stream_key: StreamKeyType,
        new_token: Union[int, MultiWriterStreamToken],
        users: Collection[Union[str, UserID]],
    ) -> None:
        logger.debug("Checking interested services for %s", stream_key)

@@ -340,6 +331,7 @@ class ApplicationServicesHandler:
        #
        # Instead we simply grab the latest typing updates in _handle_typing
        # and, if they apply to this application service, send it off.
        assert isinstance(new_token, int)
        events = await self._handle_typing(service, new_token)
        if events:
            self.scheduler.enqueue_for_appservice(service, ephemeral=events)

@@ -350,15 +342,23 @@ class ApplicationServicesHandler:
            (service.id, stream_key)
        ):
            if stream_key == StreamKeyType.RECEIPT:
                assert isinstance(new_token, MultiWriterStreamToken)

                # We store appservice tokens as integers, so we ignore
                # the `instance_map` components and instead simply
                # follow the base stream position.
                new_token = MultiWriterStreamToken(stream=new_token.stream)

                events = await self._handle_receipts(service, new_token)
                self.scheduler.enqueue_for_appservice(service, ephemeral=events)

                # Persist the latest handled stream token for this appservice
                await self.store.set_appservice_stream_type_pos(
                    service, "read_receipt", new_token
                    service, "read_receipt", new_token.stream
                )

            elif stream_key == StreamKeyType.PRESENCE:
                assert isinstance(new_token, int)
                events = await self._handle_presence(service, users, new_token)
                self.scheduler.enqueue_for_appservice(service, ephemeral=events)
@@ -368,6 +368,7 @@ class ApplicationServicesHandler:
                )

            elif stream_key == StreamKeyType.TO_DEVICE:
                assert isinstance(new_token, int)
                # Retrieve a list of to-device message events, as well as the
                # maximum stream token of the messages we were able to retrieve.
                to_device_messages = await self._get_to_device_messages(

@@ -383,6 +384,7 @@ class ApplicationServicesHandler:
                )

            elif stream_key == StreamKeyType.DEVICE_LIST:
                assert isinstance(new_token, int)
                device_list_summary = await self._get_device_list_summary(
                    service, new_token
                )

@@ -432,7 +434,7 @@ class ApplicationServicesHandler:
        return typing

    async def _handle_receipts(
        self, service: ApplicationService, new_token: int
        self, service: ApplicationService, new_token: MultiWriterStreamToken
    ) -> List[JsonMapping]:
        """
        Return the latest read receipts that the given application service should receive.

@@ -455,15 +457,17 @@ class ApplicationServicesHandler:
        from_key = await self.store.get_type_stream_id_for_appservice(
            service, "read_receipt"
        )
        if new_token is not None and new_token <= from_key:
        if new_token is not None and new_token.stream <= from_key:
            logger.debug(
                "Rejecting token lower than or equal to stored: %s" % (new_token,)
            )
            return []

        from_token = MultiWriterStreamToken(stream=from_key)

        receipts_source = self.event_sources.sources.receipt
        receipts, _ = await receipts_source.get_new_events_as(
            service=service, from_key=from_key, to_key=new_token
            service=service, from_key=from_token, to_key=new_token
        )
        return receipts
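Editor's note: receipts now use `MultiWriterStreamToken` rather than a bare integer, but
appservice progress is still persisted as a single integer, so the handler pins tokens to
their base `stream` position before comparing or storing. A standalone sketch:

    from synapse.types import MultiWriterStreamToken

    from_key = 5  # integer position stored for the appservice
    new_token = MultiWriterStreamToken(stream=7)  # token from the receipts stream

    # Drop any per-writer instance_map detail; follow the base position only.
    pinned = MultiWriterStreamToken(stream=new_token.stream)

    if pinned.stream <= from_key:
        pass  # already handled; rejected, as in _handle_receipts above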
@@ -103,10 +103,10 @@ class DeactivateAccountHandler:
        # Attempt to unbind any known bound threepids to this account from identity
        # server(s).
        bound_threepids = await self.store.user_get_bound_threepids(user_id)
        for threepid in bound_threepids:
        for medium, address in bound_threepids:
            try:
                result = await self._identity_handler.try_unbind_threepid(
                    user_id, threepid["medium"], threepid["address"], id_server
                    user_id, medium, address, id_server
                )
            except Exception:
                # Do we want this to be a fatal error or should we carry on?

@@ -117,9 +117,9 @@ class DeactivateAccountHandler:

        # Remove any local threepid associations for this account.
        local_threepids = await self.store.user_get_threepids(user_id)
        for threepid in local_threepids:
        for local_threepid in local_threepids:
            await self._auth_handler.delete_local_threepid(
                user_id, threepid["medium"], threepid["address"]
                user_id, local_threepid.medium, local_threepid.address
            )

        # delete any devices belonging to the user, which will also
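Editor's note: the threepid store methods now return attrs objects instead of dicts, hence
`threepid["medium"]` becoming `threepid.medium` above. A sketch of the access-pattern change,
with an illustrative stand-in for the store's attrs class:

    import attr


    @attr.s(frozen=True, slots=True, auto_attribs=True)
    class ThreepidResult:
        # Stand-in for the attrs class returned by user_get_threepids().
        medium: str
        address: str


    threepid = ThreepidResult(medium="email", address="alice@example.com")
    # Old (dict):  threepid["medium"], threepid["address"]
    # New (attrs): threepid.medium, threepid.address
    assert attr.asdict(threepid) == {"medium": "email", "address": "alice@example.com"}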
@@ -14,17 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Iterable,
    List,
    Mapping,
    Optional,
    Set,
    Tuple,
)
from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Set, Tuple

from synapse.api import errors
from synapse.api.constants import EduTypes, EventTypes

@@ -41,6 +31,7 @@ from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo
from synapse.types import (
    JsonDict,
    JsonMapping,

@@ -337,6 +328,9 @@ class DeviceWorkerHandler:
        return result

    async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
        if not self.hs.is_mine(UserID.from_string(user_id)):
            raise SynapseError(400, "User is not hosted on this homeserver")

        stream_id, devices = await self.store.get_e2e_device_keys_for_federation_query(
            user_id
        )

@@ -601,6 +595,8 @@ class DeviceHandler(DeviceWorkerHandler):
        )

        # Delete device messages asynchronously and in batches using the task scheduler
        # We specify an upper stream id to avoid deleting non delivered messages
        # if an user re-uses a device ID.
        await self._task_scheduler.schedule_task(
            DELETE_DEVICE_MSGS_TASK_NAME,
            resource_id=device_id,

@@ -845,7 +841,6 @@ class DeviceHandler(DeviceWorkerHandler):
                else:
                    assert max_stream_id == stream_id
                    # Avoid moving `room_id` backwards.
                    pass

                if self._handle_new_device_update_new_data:
                    continue
@@ -1009,14 +1004,14 @@ class DeviceHandler(DeviceWorkerHandler):


def _update_device_from_client_ips(
    device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
    device: JsonDict, client_ips: Mapping[Tuple[str, str], DeviceLastConnectionInfo]
) -> None:
    ip = client_ips.get((device["user_id"], device["device_id"]), {})
    ip = client_ips.get((device["user_id"], device["device_id"]))
    device.update(
        {
            "last_seen_user_agent": ip.get("user_agent"),
            "last_seen_ts": ip.get("last_seen"),
            "last_seen_ip": ip.get("ip"),
            "last_seen_user_agent": ip.user_agent if ip else None,
            "last_seen_ts": ip.last_seen if ip else None,
            "last_seen_ip": ip.ip if ip else None,
        }
    )
@@ -542,6 +542,12 @@ class E2eKeysHandler:
        device_keys_query: Dict[str, Optional[List[str]]] = query_body.get(
            "device_keys", {}
        )
        if any(
            not self.is_mine(UserID.from_string(user_id))
            for user_id in device_keys_query
        ):
            raise SynapseError(400, "User is not hosted on this homeserver")

        res = await self.query_local_devices(
            device_keys_query,
            include_displaynames=(

@@ -659,6 +665,20 @@ class E2eKeysHandler:
        timeout: Optional[int],
        always_include_fallback_keys: bool,
    ) -> JsonDict:
        """
        Args:
            query: A chain of maps from (user_id, device_id, algorithm) to the requested
                number of keys to claim.
            user: The user who is claiming these keys.
            timeout: How long to wait for any federation key claim requests before
                giving up.
            always_include_fallback_keys: always include a fallback key for local users'
                devices, even if we managed to claim a one-time-key.

        Returns: a heterogeneous dict with two keys:
            one_time_keys: chain of maps user ID -> device ID -> key ID -> key.
            failures: map from remote destination to a JsonDict describing the error.
        """
        local_query: List[Tuple[str, str, str, int]] = []
        remote_queries: Dict[str, Dict[str, Dict[str, Dict[str, int]]]] = {}

@@ -739,6 +759,16 @@ class E2eKeysHandler:
    async def upload_keys_for_user(
        self, user_id: str, device_id: str, keys: JsonDict
    ) -> JsonDict:
        """
        Args:
            user_id: user whose keys are being uploaded.
            device_id: device whose keys are being uploaded.
            keys: the body of a /keys/upload request.

        Returns a dictionary with one field:
            "one_time_keys": A mapping from algorithm to number of keys for that
                algorithm, including those previously persisted.
        """
        # This can only be called from the main process.
        assert isinstance(self.device_handler, DeviceHandler)
@@ -120,7 +120,7 @@ class EventStreamHandler:

            events.extend(to_add)

            chunks = self._event_serializer.serialize_events(
            chunks = await self._event_serializer.serialize_events(
                events,
                time_now,
                config=SerializeEventConfig(
@@ -868,19 +868,10 @@ class FederationHandler:
        # This is a bit of a hack and is cribbing off of invites. Basically we
        # store the room state here and retrieve it again when this event appears
        # in the invitee's sync stream. It is stripped out for all other local users.
        stripped_room_state = (
            knock_response.get("knock_room_state")
            # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
            # Thus, we also check for a 'knock_state_events' to support old instances.
            # See https://github.com/matrix-org/synapse/issues/14088.
            or knock_response.get("knock_state_events")
        )
        stripped_room_state = knock_response.get("knock_room_state")

        if stripped_room_state is None:
            raise KeyError(
                "Missing 'knock_room_state' (or legacy 'knock_state_events') field in "
                "send_knock response"
            )
            raise KeyError("Missing 'knock_room_state' field in send_knock response")

        event.unsigned["knock_room_state"] = stripped_room_state

@@ -1506,7 +1497,6 @@ class FederationHandler:
            # in the meantime and context needs to be recomputed, so let's do so.
            if i == max_retries - 1:
                raise e
            pass
        else:
            destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)}

@@ -1582,7 +1572,6 @@ class FederationHandler:
            # in the meantime and context needs to be recomputed, so let's do so.
            if i == max_retries - 1:
                raise e
            pass

    async def add_display_name_to_third_party_invite(
        self,
@@ -19,6 +19,8 @@ import logging
import urllib.parse
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple

import attr

from synapse.api.errors import (
    CodeMessageException,
    Codes,

@@ -357,9 +359,9 @@ class IdentityHandler:

        # Check to see if a session already exists and that it is not yet
        # marked as validated
        if session and session.get("validated_at") is None:
            session_id = session["session_id"]
            last_send_attempt = session["last_send_attempt"]
        if session and session.validated_at is None:
            session_id = session.session_id
            last_send_attempt = session.last_send_attempt

            # Check that the send_attempt is higher than previous attempts
            if send_attempt <= last_send_attempt:

@@ -480,7 +482,6 @@ class IdentityHandler:

        # We don't actually know which medium this 3PID is. Thus we first assume it's email,
        # and if validation fails we try msisdn
        validation_session = None

        # Try to validate as email
        if self.hs.config.email.can_verify_email:

@@ -488,19 +489,18 @@ class IdentityHandler:
            validation_session = await self.store.get_threepid_validation_session(
                "email", client_secret, sid=sid, validated=True
            )

            if validation_session:
                return validation_session
            if validation_session:
                return attr.asdict(validation_session)

        # Try to validate as msisdn
        if self.hs.config.registration.account_threepid_delegate_msisdn:
            # Ask our delegated msisdn identity server
            validation_session = await self.threepid_from_creds(
            return await self.threepid_from_creds(
                self.hs.config.registration.account_threepid_delegate_msisdn,
                threepid_creds,
            )

        return validation_session
        return None

    async def proxy_msisdn_submit_token(
        self, id_server: str, client_secret: str, sid: str, token: str
@@ -145,7 +145,7 @@ class InitialSyncHandler:
        joined_rooms = [r.room_id for r in room_list if r.membership == Membership.JOIN]
        receipt = await self.store.get_linearized_receipts_for_rooms(
            joined_rooms,
            to_key=int(now_token.receipt_key),
            to_key=now_token.receipt_key,
        )

        receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id)

@@ -173,7 +173,7 @@ class InitialSyncHandler:
                d["inviter"] = event.sender

                invite_event = await self.store.get_event(event.event_id)
                d["invite"] = self._event_serializer.serialize_event(
                d["invite"] = await self._event_serializer.serialize_event(
                    invite_event,
                    time_now,
                    config=serializer_options,

@@ -192,8 +192,7 @@ class InitialSyncHandler:
            )
        elif event.membership == Membership.LEAVE:
            room_end_token = RoomStreamToken(
                None,
                event.stream_ordering,
                stream=event.stream_ordering,
            )
            deferred_room_state = run_in_background(
                self._state_storage_controller.get_state_for_events,

@@ -226,7 +225,7 @@ class InitialSyncHandler:

        d["messages"] = {
            "chunk": (
                self._event_serializer.serialize_events(
                await self._event_serializer.serialize_events(
                    messages,
                    time_now=time_now,
                    config=serializer_options,

@@ -236,7 +235,7 @@ class InitialSyncHandler:
            "end": await end_token.to_string(self.store),
        }

        d["state"] = self._event_serializer.serialize_events(
        d["state"] = await self._event_serializer.serialize_events(
            current_state.values(),
            time_now=time_now,
            config=serializer_options,
@@ -388,7 +387,7 @@ class InitialSyncHandler:
            "messages": {
                "chunk": (
                    # Don't bundle aggregations as this is a deprecated API.
                    self._event_serializer.serialize_events(
                    await self._event_serializer.serialize_events(
                        messages, time_now, config=serialize_options
                    )
                ),

@@ -397,7 +396,7 @@ class InitialSyncHandler:
            },
            "state": (
                # Don't bundle aggregations as this is a deprecated API.
                self._event_serializer.serialize_events(
                await self._event_serializer.serialize_events(
                    room_state.values(), time_now, config=serialize_options
                )
            ),

@@ -421,7 +420,7 @@ class InitialSyncHandler:
        time_now = self.clock.time_msec()
        serialize_options = SerializeEventConfig(requester=requester)
        # Don't bundle aggregations as this is a deprecated API.
        state = self._event_serializer.serialize_events(
        state = await self._event_serializer.serialize_events(
            current_state.values(),
            time_now,
            config=serialize_options,

@@ -440,7 +439,7 @@ class InitialSyncHandler:

        async def get_presence() -> List[JsonDict]:
            # If presence is disabled, return an empty list
            if not self.hs.config.server.use_presence:
            if not self.hs.config.server.presence_enabled:
                return []

            states = await presence_handler.get_states(

@@ -498,7 +497,7 @@ class InitialSyncHandler:
            "messages": {
                "chunk": (
                    # Don't bundle aggregations as this is a deprecated API.
                    self._event_serializer.serialize_events(
                    await self._event_serializer.serialize_events(
                        messages, time_now, config=serialize_options
                    )
                ),
@@ -244,7 +244,7 @@ class MessageHandler:
        )
        room_state = room_state_events[membership_event_id]

        events = self._event_serializer.serialize_events(
        events = await self._event_serializer.serialize_events(
            room_state.values(),
            self.clock.time_msec(),
            config=SerializeEventConfig(requester=requester),

@@ -999,7 +999,26 @@ class EventCreationHandler:
            raise ShadowBanError()

        if ratelimit:
            await self.request_ratelimiter.ratelimit(requester, update=False)
            room_id = event_dict["room_id"]
            try:
                room_version = await self.store.get_room_version(room_id)
            except NotFoundError:
                # The room doesn't exist.
                raise AuthError(403, f"User {requester.user} not in room {room_id}")

            if room_version.updated_redaction_rules:
                redacts = event_dict["content"].get("redacts")
            else:
                redacts = event_dict.get("redacts")

            is_admin_redaction = await self.is_admin_redaction(
                event_type=event_dict["type"],
                sender=event_dict["sender"],
                redacts=redacts,
            )
            await self.request_ratelimiter.ratelimit(
                requester, is_admin_redaction=is_admin_redaction, update=False
            )

        # We limit the number of concurrent event sends in a room so that we
        # don't fork the DAG too much. If we don't limit then we can end up in
@@ -1133,7 +1152,6 @@ class EventCreationHandler:
            # in the meantime and context needs to be recomputed, so let's do so.
            if i == max_retries - 1:
                raise e
            pass

        # we know it was persisted, so must have a stream ordering
        assert ev.internal_metadata.stream_ordering

@@ -1509,6 +1527,18 @@ class EventCreationHandler:
                first_event.room_id
            )
            if writer_instance != self._instance_name:
                # Ratelimit before sending to the other event persister, to
                # ensure that we correctly have ratelimits on both the event
                # creators and event persisters.
                if ratelimit:
                    for event, _ in events_and_context:
                        is_admin_redaction = await self.is_admin_redaction(
                            event.type, event.sender, event.redacts
                        )
                        await self.request_ratelimiter.ratelimit(
                            requester, is_admin_redaction=is_admin_redaction
                        )

                try:
                    result = await self.send_events(
                        instance_name=writer_instance,

@@ -1539,6 +1569,7 @@ class EventCreationHandler:
                    # stream_ordering entry manually (as it was persisted on
                    # another worker).
                    event.internal_metadata.stream_ordering = stream_id

                return event

            event = await self.persist_and_notify_client_events(
@@ -1697,21 +1728,9 @@ class EventCreationHandler:
            # can apply different ratelimiting. We do this by simply checking
            # it's not a self-redaction (to avoid having to look up whether the
            # user is actually admin or not).
            is_admin_redaction = False
            if event.type == EventTypes.Redaction:
                assert event.redacts is not None

                original_event = await self.store.get_event(
                    event.redacts,
                    redact_behaviour=EventRedactBehaviour.as_is,
                    get_prev_content=False,
                    allow_rejected=False,
                    allow_none=True,
                )

                is_admin_redaction = bool(
                    original_event and event.sender != original_event.sender
                )
            is_admin_redaction = await self.is_admin_redaction(
                event.type, event.sender, event.redacts
            )

            await self.request_ratelimiter.ratelimit(
                requester, is_admin_redaction=is_admin_redaction
@@ -1931,6 +1950,27 @@ class EventCreationHandler:

        return persisted_events[-1]

    async def is_admin_redaction(
        self, event_type: str, sender: str, redacts: Optional[str]
    ) -> bool:
        """Return whether the event is a redaction made by an admin, and thus
        should use a different ratelimiter.
        """
        if event_type != EventTypes.Redaction:
            return False

        assert redacts is not None

        original_event = await self.store.get_event(
            redacts,
            redact_behaviour=EventRedactBehaviour.as_is,
            get_prev_content=False,
            allow_rejected=False,
            allow_none=True,
        )

        return bool(original_event and sender != original_event.sender)

    async def _maybe_kick_guest_users(
        self, event: EventBase, context: EventContext
    ) -> None:

@@ -2038,7 +2078,6 @@ class EventCreationHandler:
            # in the meantime and context needs to be recomputed, so let's do so.
            if i == max_retries - 1:
                raise e
            pass
            return True
        except AuthError:
            logger.info(
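Editor's note: extracting `is_admin_redaction` lets the event-creation path (@ -999) and the
cross-worker persistence path (@ -1509) share one ratelimit decision. The resulting behaviour,
summarised:

    # Illustrative decision table for is_admin_redaction:
    #   event_type is not m.room.redaction      -> False (normal ratelimit)
    #   redaction, original event not found     -> False
    #   redaction of the sender's own event     -> False (self-redaction)
    #   redaction of someone else's event       -> True  (admin ratelimit)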
@@ -657,7 +657,7 @@ class PaginationHandler:

        chunk = {
            "chunk": (
                self._event_serializer.serialize_events(
                await self._event_serializer.serialize_events(
                    events,
                    time_now,
                    config=serialize_options,

@@ -669,7 +669,7 @@ class PaginationHandler:
        }

        if state:
            chunk["state"] = self._event_serializer.serialize_events(
            chunk["state"] = await self._event_serializer.serialize_events(
                state, time_now, config=serialize_options
            )
@@ -110,6 +110,7 @@ from synapse.replication.http.streams import ReplicationGetStreamUpdates
from synapse.replication.tcp.commands import ClearUserSyncsCommand
from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream
from synapse.storage.databases.main import DataStore
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.streams import EventSource
from synapse.types import (
    JsonDict,

@@ -191,7 +192,8 @@ class BasePresenceHandler(abc.ABC):
        self.state = hs.get_state_handler()
        self.is_mine_id = hs.is_mine_id

        self._presence_enabled = hs.config.server.use_presence
        self._presence_enabled = hs.config.server.presence_enabled
        self._track_presence = hs.config.server.track_presence

        self._federation = None
        if hs.should_send_federation():

@@ -511,7 +513,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
        )

    async def _on_shutdown(self) -> None:
        if self._presence_enabled:
        if self._track_presence:
            self.hs.get_replication_command_handler().send_command(
                ClearUserSyncsCommand(self.instance_id)
            )

@@ -523,7 +525,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
        is_syncing: bool,
        last_sync_ms: int,
    ) -> None:
        if self._presence_enabled:
        if self._track_presence:
            self.hs.get_replication_command_handler().send_user_sync(
                self.instance_id, user_id, device_id, is_syncing, last_sync_ms
            )

@@ -570,7 +572,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
        Called by the sync and events servlets to record that a user has connected to
        this worker and is waiting for some events.
        """
        if not affect_presence or not self._presence_enabled:
        if not affect_presence or not self._track_presence:
            return _NullContextManager()

        # Note that this causes last_active_ts to be incremented which is not
@@ -701,8 +703,8 @@ class WorkerPresenceHandler(BasePresenceHandler):

        user_id = target_user.to_string()

        # If presence is disabled, no-op
        if not self._presence_enabled:
        # If tracking of presence is disabled, no-op
        if not self._track_presence:
            return

        # Proxy request to instance that writes presence

@@ -722,7 +724,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
        with the app.
        """
        # If presence is disabled, no-op
        if not self._presence_enabled:
        if not self._track_presence:
            return

        # Proxy request to instance that writes presence

@@ -759,7 +761,7 @@ class PresenceHandler(BasePresenceHandler):
        ] = {}

        now = self.clock.time_msec()
        if self._presence_enabled:
        if self._track_presence:
            for state in self.user_to_current_state.values():
                # Create a psuedo-device to properly handle time outs. This will
                # be overridden by any "real" devices within SYNC_ONLINE_TIMEOUT.

@@ -830,7 +832,7 @@ class PresenceHandler(BasePresenceHandler):

        self.external_sync_linearizer = Linearizer(name="external_sync_linearizer")

        if self._presence_enabled:
        if self._track_presence:
            # Start a LoopingCall in 30s that fires every 5s.
            # The initial delay is to allow disconnected clients a chance to
            # reconnect before we treat them as offline.

@@ -838,6 +840,9 @@ class PresenceHandler(BasePresenceHandler):
                30, self.clock.looping_call, self._handle_timeouts, 5000
            )

        # Presence information is persisted, whether or not it is being tracked
        # internally.
        if self._presence_enabled:
            self.clock.call_later(
                60,
                self.clock.looping_call,

@@ -853,7 +858,7 @@ class PresenceHandler(BasePresenceHandler):
        )

        # Used to handle sending of presence to newly joined users/servers
        if self._presence_enabled:
        if self._track_presence:
            self.notifier.add_replication_callback(self.notify_new_event)

        # Presence is best effort and quickly heals itself, so lets just always
@ -904,7 +909,9 @@ class PresenceHandler(BasePresenceHandler):
|
|||
)
|
||||
|
||||
async def _update_states(
|
||||
self, new_states: Iterable[UserPresenceState], force_notify: bool = False
|
||||
self,
|
||||
new_states: Iterable[UserPresenceState],
|
||||
force_notify: bool = False,
|
||||
) -> None:
|
||||
"""Updates presence of users. Sets the appropriate timeouts. Pokes
|
||||
the notifier and federation if and only if the changed presence state
|
||||
|
@ -942,7 +949,7 @@ class PresenceHandler(BasePresenceHandler):
|
|||
for new_state in new_states:
|
||||
user_id = new_state.user_id
|
||||
|
||||
# Its fine to not hit the database here, as the only thing not in
|
||||
# It's fine to not hit the database here, as the only thing not in
|
||||
# the current state cache are OFFLINE states, where the only field
|
||||
# of interest is last_active which is safe enough to assume is 0
|
||||
# here.
|
||||
|
@ -956,6 +963,9 @@ class PresenceHandler(BasePresenceHandler):
|
|||
is_mine=self.is_mine_id(user_id),
|
||||
wheel_timer=self.wheel_timer,
|
||||
now=now,
|
||||
# When overriding disabled presence, don't kick off all the
|
||||
# wheel timers.
|
||||
persist=not self._track_presence,
|
||||
)
|
||||
|
||||
if force_notify:
|
||||
|
@@ -1071,7 +1081,7 @@ class PresenceHandler(BasePresenceHandler):
         with the app.
         """
         # If presence is disabled, no-op
-        if not self._presence_enabled:
+        if not self._track_presence:
             return

         user_id = user.to_string()

@@ -1123,7 +1133,7 @@ class PresenceHandler(BasePresenceHandler):
                 client that is being used by a user.
             presence_state: The presence state indicated in the sync request
         """
-        if not affect_presence or not self._presence_enabled:
+        if not affect_presence or not self._track_presence:
             return _NullContextManager()

         curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0)

@@ -1283,7 +1293,7 @@ class PresenceHandler(BasePresenceHandler):

     async def incoming_presence(self, origin: str, content: JsonDict) -> None:
         """Called when we receive a `m.presence` EDU from a remote server."""
-        if not self._presence_enabled:
+        if not self._track_presence:
             return

         now = self.clock.time_msec()

@@ -1358,7 +1368,7 @@ class PresenceHandler(BasePresenceHandler):
             raise SynapseError(400, "Invalid presence state")

         # If presence is disabled, no-op
-        if not self._presence_enabled:
+        if not self._track_presence:
             return

         user_id = target_user.to_string()

@@ -1499,9 +1509,9 @@ class PresenceHandler(BasePresenceHandler):
         # We may get multiple deltas for different rooms, but we want to
         # handle them on a room by room basis, so we batch them up by
         # room.
-        deltas_by_room: Dict[str, List[JsonDict]] = {}
+        deltas_by_room: Dict[str, List[StateDelta]] = {}
         for delta in deltas:
-            deltas_by_room.setdefault(delta["room_id"], []).append(delta)
+            deltas_by_room.setdefault(delta.room_id, []).append(delta)

         for room_id, deltas_for_room in deltas_by_room.items():
             await self._handle_state_delta(room_id, deltas_for_room)

@@ -1513,7 +1523,7 @@ class PresenceHandler(BasePresenceHandler):
             max_pos
         )

-    async def _handle_state_delta(self, room_id: str, deltas: List[JsonDict]) -> None:
+    async def _handle_state_delta(self, room_id: str, deltas: List[StateDelta]) -> None:
         """Process current state deltas for the room to find new joins that need
         to be handled.
         """

@@ -1524,31 +1534,30 @@ class PresenceHandler(BasePresenceHandler):
         newly_joined_users = set()

         for delta in deltas:
-            assert room_id == delta["room_id"]
+            assert room_id == delta.room_id

-            typ = delta["type"]
-            state_key = delta["state_key"]
-            event_id = delta["event_id"]
-            prev_event_id = delta["prev_event_id"]
-
-            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
+            logger.debug(
+                "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
+            )

             # Drop any event that isn't a membership join
-            if typ != EventTypes.Member:
+            if delta.event_type != EventTypes.Member:
                 continue

-            if event_id is None:
+            if delta.event_id is None:
                 # state has been deleted, so this is not a join. We only care about
                 # joins.
                 continue

-            event = await self.store.get_event(event_id, allow_none=True)
+            event = await self.store.get_event(delta.event_id, allow_none=True)
             if not event or event.content.get("membership") != Membership.JOIN:
                 # We only care about joins
                 continue

-            if prev_event_id:
-                prev_event = await self.store.get_event(prev_event_id, allow_none=True)
+            if delta.prev_event_id:
+                prev_event = await self.store.get_event(
+                    delta.prev_event_id, allow_none=True
+                )
                 if (
                     prev_event
                     and prev_event.content.get("membership") == Membership.JOIN

@@ -1556,7 +1565,7 @@ class PresenceHandler(BasePresenceHandler):
                 # Ignore changes to join events.
                 continue

-            newly_joined_users.add(state_key)
+            newly_joined_users.add(delta.state_key)

         if not newly_joined_users:
             # If nobody has joined then there's nothing to do.
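The presence hunks above, and the stats, user-directory, and room-forgetter hunks further down, all stop indexing state deltas as dicts (`delta["type"]`) and read attributes instead (`delta.event_type`). A sketch of the `StateDelta` container those call sites imply; the field list is inferred from the attribute accesses in this diff, and the real class in `synapse.storage.databases.main.state_deltas` may differ:

```python
# Sketch of the StateDelta value object implied by the call sites in this
# diff; fields are inferred from usage, not copied from the real module.
from typing import Optional

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class StateDelta:
    stream_id: int
    room_id: str
    event_type: str
    state_key: str
    event_id: Optional[str]  # None when the state entry was deleted
    prev_event_id: Optional[str]  # None for the first event for this key
```

Typed attribute access turns a typo like `delta["tpye"]` into a static (mypy) error instead of a runtime `KeyError`.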
@@ -2118,6 +2127,7 @@ def handle_update(
     is_mine: bool,
     wheel_timer: WheelTimer,
     now: int,
+    persist: bool,
 ) -> Tuple[UserPresenceState, bool, bool]:
     """Given a presence update:
     1. Add any appropriate timers.

@@ -2129,6 +2139,8 @@ def handle_update(
         is_mine: Whether the user is ours
         wheel_timer
         now: Time now in ms
+        persist: True if this state should persist until another update occurs.
+            Skips insertion into wheel timers.

     Returns:
         3-tuple: `(new_state, persist_and_notify, federation_ping)` where:

@@ -2146,14 +2158,15 @@ def handle_update(
     if is_mine:
         if new_state.state == PresenceState.ONLINE:
             # Idle timer
-            wheel_timer.insert(
-                now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER
-            )
+            if not persist:
+                wheel_timer.insert(
+                    now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER
+                )

             active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY
             new_state = new_state.copy_and_replace(currently_active=active)

-            if active:
+            if active and not persist:
                 wheel_timer.insert(
                     now=now,
                     obj=user_id,

@@ -2162,11 +2175,12 @@ def handle_update(

         if new_state.state != PresenceState.OFFLINE:
             # User has stopped syncing
-            wheel_timer.insert(
-                now=now,
-                obj=user_id,
-                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
-            )
+            if not persist:
+                wheel_timer.insert(
+                    now=now,
+                    obj=user_id,
+                    then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
+                )

             last_federate = new_state.last_federation_update_ts
             if now - last_federate > FEDERATION_PING_INTERVAL:

@@ -2174,7 +2188,7 @@ def handle_update(
             new_state = new_state.copy_and_replace(last_federation_update_ts=now)
             federation_ping = True

-        if new_state.state == PresenceState.BUSY:
+        if new_state.state == PresenceState.BUSY and not persist:
             wheel_timer.insert(
                 now=now,
                 obj=user_id,

@@ -2182,11 +2196,13 @@ def handle_update(
             )

     else:
-        wheel_timer.insert(
-            now=now,
-            obj=user_id,
-            then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT,
-        )
+        # An update for a remote user was received.
+        if not persist:
+            wheel_timer.insert(
+                now=now,
+                obj=user_id,
+                then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT,
+            )

     # Check whether the change was something worth notifying about
     if should_notify(prev_state, new_state, is_mine):
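The new `persist` argument threads untracked presence through `handle_update`: wheel timers exist only to expire presence (idle, sync timeout, federation timeout), so a state that should simply persist skips every timer insertion. A minimal sketch of the recurring guard, using only the `wheel_timer.insert(now=..., obj=..., then=...)` call shape seen in the hunks above:

```python
# Sketch of the recurring guard: timers drive presence expiry, so when the
# state should persist (untracked presence) no timer is armed and the user
# is never flipped back to offline by a timeout.
def maybe_arm_timer(wheel_timer, user_id: str, now: int, then: int, persist: bool) -> None:
    if not persist:
        wheel_timer.insert(now=now, obj=user_id, then=then)
```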
@@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError, UnrecognizedRequestError
 from synapse.push.clientformat import format_push_rules_for_user
 from synapse.storage.push_rule import RuleNotFoundException
 from synapse.synapse_rust.push import get_base_rule_ids
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict, StreamKeyType, UserID

 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -114,7 +114,9 @@ class PushRulesHandler:
             user_id: the user ID the change is for.
         """
         stream_id = self._main_store.get_max_push_rules_stream_id()
-        self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id])
+        self._notifier.on_new_event(
+            StreamKeyType.PUSH_RULES, stream_id, users=[user_id]
+        )

     async def push_rules_for_user(
         self, user: UserID
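Replacing the bare `"push_rules_key"` string with `StreamKeyType.PUSH_RULES` removes a stringly-typed notifier key. A hedged sketch of what such an enum looks like; the member names are taken from usages in this diff (`PUSH_RULES`, `RECEIPT`, `ROOM`), and the real definition in `synapse.types` may differ:

```python
# Sketch of a StreamKeyType-style enum: a typo'd member is an immediate
# AttributeError, while a typo'd string key would silently notify nobody.
from enum import Enum


class StreamKeyType(Enum):
    ROOM = "room_key"
    RECEIPT = "receipt_key"
    PUSH_RULES = "push_rules_key"
```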
@@ -20,6 +20,7 @@ from synapse.streams import EventSource
 from synapse.types import (
     JsonDict,
     JsonMapping,
+    MultiWriterStreamToken,
     ReadReceipt,
     StreamKeyType,
     UserID,

@@ -130,11 +131,10 @@ class ReceiptsHandler:

     async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool:
         """Takes a list of receipts, stores them and informs the notifier."""
-        min_batch_id: Optional[int] = None
-        max_batch_id: Optional[int] = None

+        receipts_persisted: List[ReadReceipt] = []
         for receipt in receipts:
-            res = await self.store.insert_receipt(
+            stream_id = await self.store.insert_receipt(
                 receipt.room_id,
                 receipt.receipt_type,
                 receipt.user_id,

@@ -143,30 +143,26 @@ class ReceiptsHandler:
                 receipt.data,
             )

-            if not res:
-                # res will be None if this receipt is 'old'
+            if stream_id is None:
+                # stream_id will be None if this receipt is 'old'
                 continue

-            stream_id, max_persisted_id = res
+            receipts_persisted.append(receipt)

-            if min_batch_id is None or stream_id < min_batch_id:
-                min_batch_id = stream_id
-            if max_batch_id is None or max_persisted_id > max_batch_id:
-                max_batch_id = max_persisted_id
-
-        # Either both of these should be None or neither.
-        if min_batch_id is None or max_batch_id is None:
+        if not receipts_persisted:
             # no new receipts
             return False

-        affected_room_ids = list({r.room_id for r in receipts})
+        max_batch_id = self.store.get_max_receipt_stream_id()
+
+        affected_room_ids = list({r.room_id for r in receipts_persisted})

         self.notifier.on_new_event(
             StreamKeyType.RECEIPT, max_batch_id, rooms=affected_room_ids
         )
-        # Note that the min here shouldn't be relied upon to be accurate.
         await self.hs.get_pusherpool().on_new_receipts(
-            min_batch_id, max_batch_id, affected_room_ids
+            {r.user_id for r in receipts_persisted}
         )

         return True

@@ -205,7 +201,7 @@ class ReceiptsHandler:
             await self.federation_sender.send_read_receipt(receipt)


-class ReceiptEventSource(EventSource[int, JsonMapping]):
+class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]):
     def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastores().main
         self.config = hs.config

@@ -278,13 +274,12 @@ class ReceiptEventSource(EventSource[int, JsonMapping]):
     async def get_new_events(
         self,
         user: UserID,
-        from_key: int,
+        from_key: MultiWriterStreamToken,
         limit: int,
         room_ids: Iterable[str],
         is_guest: bool,
         explicit_room_id: Optional[str] = None,
-    ) -> Tuple[List[JsonMapping], int]:
-        from_key = int(from_key)
+    ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]:
         to_key = self.get_current_key()

         if from_key == to_key:

@@ -301,8 +296,11 @@ class ReceiptEventSource(EventSource[int, JsonMapping]):
         return events, to_key

     async def get_new_events_as(
-        self, from_key: int, to_key: int, service: ApplicationService
-    ) -> Tuple[List[JsonMapping], int]:
+        self,
+        from_key: MultiWriterStreamToken,
+        to_key: MultiWriterStreamToken,
+        service: ApplicationService,
+    ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]:
         """Returns a set of new read receipt events that an appservice
         may be interested in.

@@ -317,8 +315,6 @@ class ReceiptEventSource(EventSource[int, JsonMapping]):
             appservice may be interested in.
             * The current read receipt stream token.
         """
-        from_key = int(from_key)
-
         if from_key == to_key:
             return [], to_key

@@ -338,5 +334,5 @@ class ReceiptEventSource(EventSource[int, JsonMapping]):

         return events, to_key

-    def get_current_key(self) -> int:
+    def get_current_key(self) -> MultiWriterStreamToken:
         return self.store.get_max_receipt_stream_id()
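Receipt stream positions change type from a plain `int` to `MultiWriterStreamToken`, so that several workers can write receipts concurrently without a single global counter. A sketch of the general shape of such a token (a floor position plus per-writer positions); this is an illustration of the idea, under a hypothetical class name, not the real `synapse.types` implementation:

```python
# Sketch of a multi-writer stream token: `stream` is a position every
# writer is known to have passed; `instance_map` records writers that are
# ahead of it. Illustrative only; the real MultiWriterStreamToken also
# handles serialization and range comparisons.
from typing import Dict

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class SketchMultiWriterToken:
    stream: int
    instance_map: Dict[str, int] = attr.Factory(dict)

    def get_max_stream_pos(self) -> int:
        # the furthest-ahead position of any writer
        return max(self.instance_map.values(), default=self.stream)
```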
@@ -167,7 +167,7 @@ class RelationsHandler:
         now = self._clock.time_msec()
         serialize_options = SerializeEventConfig(requester=requester)
         return_value: JsonDict = {
-            "chunk": self._event_serializer.serialize_events(
+            "chunk": await self._event_serializer.serialize_events(
                 events,
                 now,
                 bundle_aggregations=aggregations,

@@ -177,7 +179,9 @@ class RelationsHandler:
         if include_original_event:
             # Do not bundle aggregations when retrieving the original event because
             # we want the content before relations are applied to it.
-            return_value["original_event"] = self._event_serializer.serialize_event(
+            return_value[
+                "original_event"
+            ] = await self._event_serializer.serialize_event(
                 event,
                 now,
                 bundle_aggregations=None,

@@ -602,7 +604,7 @@ class RelationsHandler:
         )

         now = self._clock.time_msec()
-        serialized_events = self._event_serializer.serialize_events(
+        serialized_events = await self._event_serializer.serialize_events(
             events, now, bundle_aggregations=aggregations
         )
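The event serializer's `serialize_event`/`serialize_events` become coroutines in this diff, so every call site grows an `await` (here, and again in the search hunks below). A minimal sketch of the knock-on effect; `serializer`, `events`, `now`, and `aggregations` stand in for the real objects:

```python
# Sketch: once serialize_events is a coroutine, each caller must itself be
# async and await the result before building its response dict.
async def build_chunk(serializer, events, now, aggregations) -> dict:
    return {
        "chunk": await serializer.serialize_events(
            events, now, bundle_aggregations=aggregations
        )
    }
```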
@@ -261,7 +261,6 @@ class RoomCreationHandler:
                 # in the meantime and context needs to be recomputed, so let's do so.
                 if i == max_retries - 1:
                     raise e
-                pass

         # This is to satisfy mypy and should never happen
         raise PartialStateConflictError()

@@ -1708,7 +1707,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):

         if from_key.topological:
             logger.warning("Stream has topological part!!!! %r", from_key)
-            from_key = RoomStreamToken(None, from_key.stream)
+            from_key = RoomStreamToken(stream=from_key.stream)

         app_service = self.store.get_app_service_by_user_id(user.to_string())
         if app_service:

@@ -1940,9 +1939,10 @@ class RoomShutdownHandler:
         else:
             logger.info("Shutting down room %r", room_id)

-        users = await self.store.get_users_in_room(room_id)
-        for user_id in users:
-            if not self.hs.is_mine_id(user_id):
+        users = await self.store.get_local_users_related_to_room(room_id)
+        for user_id, membership in users:
+            # If the user is not in the room (or is banned), nothing to do.
+            if membership not in (Membership.JOIN, Membership.INVITE, Membership.KNOCK):
                 continue

             logger.info("Kicking %r from %r...", user_id, room_id)
@@ -33,7 +33,8 @@ from synapse.api.errors import (
     RequestSendFailed,
     SynapseError,
 )
-from synapse.types import JsonDict, ThirdPartyInstanceID
+from synapse.storage.databases.main.room import LargestRoomStats
+from synapse.types import JsonDict, JsonMapping, ThirdPartyInstanceID
 from synapse.util.caches.descriptors import _CacheContext, cached
 from synapse.util.caches.response_cache import ResponseCache

@@ -170,26 +171,24 @@ class RoomListHandler:
             ignore_non_federatable=from_federation,
         )

-        def build_room_entry(room: JsonDict) -> JsonDict:
+        def build_room_entry(room: LargestRoomStats) -> JsonDict:
             entry = {
-                "room_id": room["room_id"],
-                "name": room["name"],
-                "topic": room["topic"],
-                "canonical_alias": room["canonical_alias"],
-                "num_joined_members": room["joined_members"],
-                "avatar_url": room["avatar"],
-                "world_readable": room["history_visibility"]
+                "room_id": room.room_id,
+                "name": room.name,
+                "topic": room.topic,
+                "canonical_alias": room.canonical_alias,
+                "num_joined_members": room.joined_members,
+                "avatar_url": room.avatar,
+                "world_readable": room.history_visibility
                 == HistoryVisibility.WORLD_READABLE,
-                "guest_can_join": room["guest_access"] == "can_join",
-                "join_rule": room["join_rules"],
-                "room_type": room["room_type"],
+                "guest_can_join": room.guest_access == "can_join",
+                "join_rule": room.join_rules,
+                "room_type": room.room_type,
             }

             # Filter out Nones – rather omit the field altogether
             return {k: v for k, v in entry.items() if v is not None}

-        results = [build_room_entry(r) for r in results]
-
         response: JsonDict = {}
         num_results = len(results)
         if limit is not None:

@@ -212,33 +211,33 @@ class RoomListHandler:
                 # If there was a token given then we assume that there
                 # must be previous results.
                 response["prev_batch"] = RoomListNextBatch(
-                    last_joined_members=initial_entry["num_joined_members"],
-                    last_room_id=initial_entry["room_id"],
+                    last_joined_members=initial_entry.joined_members,
+                    last_room_id=initial_entry.room_id,
                     direction_is_forward=False,
                 ).to_token()

             if more_to_come:
                 response["next_batch"] = RoomListNextBatch(
-                    last_joined_members=final_entry["num_joined_members"],
-                    last_room_id=final_entry["room_id"],
+                    last_joined_members=final_entry.joined_members,
+                    last_room_id=final_entry.room_id,
                     direction_is_forward=True,
                 ).to_token()
         else:
             if has_batch_token:
                 response["next_batch"] = RoomListNextBatch(
-                    last_joined_members=final_entry["num_joined_members"],
-                    last_room_id=final_entry["room_id"],
+                    last_joined_members=final_entry.joined_members,
+                    last_room_id=final_entry.room_id,
                     direction_is_forward=True,
                 ).to_token()

             if more_to_come:
                 response["prev_batch"] = RoomListNextBatch(
-                    last_joined_members=initial_entry["num_joined_members"],
-                    last_room_id=initial_entry["room_id"],
+                    last_joined_members=initial_entry.joined_members,
+                    last_room_id=initial_entry.room_id,
                     direction_is_forward=False,
                 ).to_token()

-        response["chunk"] = results
+        response["chunk"] = [build_room_entry(r) for r in results]

         response["total_room_count_estimate"] = await self.store.count_public_rooms(
             network_tuple,

@@ -256,7 +255,7 @@ class RoomListHandler:
         cache_context: _CacheContext,
         with_alias: bool = True,
         allow_private: bool = False,
-    ) -> Optional[JsonDict]:
+    ) -> Optional[JsonMapping]:
         """Returns the entry for a room

         Args:
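`build_room_entry` (and the room-summary hunk further down, which also reads `stats.version` and `stats.encryption`) now consumes a typed stats row instead of a dict. A sketch of the `LargestRoomStats` row type implied by those accesses; the field names are inferred from this diff rather than copied from `synapse.storage.databases.main.room`:

```python
# Sketch of the stats row consumed above; fields inferred from the
# attribute accesses in the room-list and room-summary hunks.
from typing import Optional

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class LargestRoomStats:
    room_id: str
    name: Optional[str]
    canonical_alias: Optional[str]
    joined_members: int
    topic: Optional[str]
    avatar: Optional[str]
    history_visibility: Optional[str]
    guest_access: Optional[str]
    join_rules: Optional[str]
    room_type: Optional[str]
```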
@@ -16,7 +16,7 @@ import abc
 import logging
 import random
 from http import HTTPStatus
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple

 from synapse import types
 from synapse.api.constants import (

@@ -44,6 +44,7 @@ from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging import opentracing
 from synapse.metrics import event_processing_positions
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.state_deltas import StateDelta
 from synapse.types import (
     JsonDict,
     Requester,

@@ -382,8 +383,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         and persist a new event for the new membership change.

         Args:
-            requester:
-            target:
+            requester: User requesting the membership change, i.e. the sender of the
+                desired membership event.
+            target: Use whose membership should change, i.e. the state_key of the
+                desired membership event.
             room_id:
             membership:

@@ -415,7 +418,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         Returns:
             Tuple of event ID and stream ordering position
         """
-
         user_id = target.to_string()

         if content is None:

@@ -475,21 +477,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             (EventTypes.Member, user_id), None
         )

-        if event.membership == Membership.JOIN:
-            newly_joined = True
-            if prev_member_event_id:
-                prev_member_event = await self.store.get_event(
-                    prev_member_event_id
-                )
-                newly_joined = prev_member_event.membership != Membership.JOIN
-
-            # Only rate-limit if the user actually joined the room, otherwise we'll end
-            # up blocking profile updates.
-            if newly_joined and ratelimit:
-                await self._join_rate_limiter_local.ratelimit(requester)
-                await self._join_rate_per_room_limiter.ratelimit(
-                    requester, key=room_id, update=False
-                )
         with opentracing.start_active_span("handle_new_client_event"):
             result_event = (
                 await self.event_creation_handler.handle_new_client_event(

@@ -514,7 +501,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     # in the meantime and context needs to be recomputed, so let's do so.
                     if i == max_retries - 1:
                         raise e
-                    pass

         # we know it was persisted, so should have a stream ordering
         assert result_event.internal_metadata.stream_ordering

@@ -618,6 +604,25 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         Raises:
             ShadowBanError if a shadow-banned requester attempts to send an invite.
         """
+        if ratelimit:
+            if action == Membership.JOIN:
+                # Only rate-limit if the user isn't already joined to the room, otherwise
+                # we'll end up blocking profile updates.
+                (
+                    current_membership,
+                    _,
+                ) = await self.store.get_local_current_membership_for_user_in_room(
+                    requester.user.to_string(),
+                    room_id,
+                )
+                if current_membership != Membership.JOIN:
+                    await self._join_rate_limiter_local.ratelimit(requester)
+                    await self._join_rate_per_room_limiter.ratelimit(
+                        requester, key=room_id, update=False
+                    )
+            elif action == Membership.INVITE:
+                await self.ratelimit_invite(requester, room_id, target.to_string())
+
         if action == Membership.INVITE and requester.shadow_banned:
             # We randomly sleep a bit just to annoy the requester.
             await self.clock.sleep(random.randint(1, 10))
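The relocated rate limiting above keys off the user's *current* membership rather than the event about to be sent: in Matrix, profile changes are delivered as fresh `m.room.member` join events, so a join from a user who is already joined is a profile update and must not consume the join limiter. A hedged sketch of that predicate, with a string literal standing in for the `Membership.JOIN` constant:

```python
# Sketch of the predicate behind the moved rate limiting: only a genuine
# transition *into* the room counts as a join; a join event from a user
# whose current membership is already "join" is a profile update
# (display name or avatar) and is deliberately not rate-limited.
def is_actual_join(current_membership: str, action: str) -> bool:
    return action == "join" and current_membership != "join"
```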
@@ -794,8 +799,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

         if effective_membership_state == Membership.INVITE:
             target_id = target.to_string()
-            if ratelimit:
-                await self.ratelimit_invite(requester, room_id, target_id)

             # block any attempts to invite the server notices mxid
             if target_id == self._server_notices_mxid:

@@ -2002,7 +2005,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
                     # in the meantime and context needs to be recomputed, so let's do so.
                     if i == max_retries - 1:
                         raise e
-                    pass

         # we know it was persisted, so must have a stream ordering
         assert result_event.internal_metadata.stream_ordering

@@ -2145,24 +2147,18 @@ class RoomForgetterHandler(StateDeltasHandler):

         await self._store.update_room_forgetter_stream_pos(max_pos)

-    async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None:
+    async def _handle_deltas(self, deltas: List[StateDelta]) -> None:
         """Called with the state deltas to process"""
         for delta in deltas:
-            typ = delta["type"]
-            state_key = delta["state_key"]
-            room_id = delta["room_id"]
-            event_id = delta["event_id"]
-            prev_event_id = delta["prev_event_id"]
-
-            if typ != EventTypes.Member:
+            if delta.event_type != EventTypes.Member:
                 continue

-            if not self._hs.is_mine_id(state_key):
+            if not self._hs.is_mine_id(delta.state_key):
                 continue

             change = await self._get_key_change(
-                prev_event_id,
-                event_id,
+                delta.prev_event_id,
+                delta.event_id,
                 key_name="membership",
                 public_value=Membership.JOIN,
             )

@@ -2171,7 +2167,7 @@ class RoomForgetterHandler(StateDeltasHandler):
             if is_leave:
                 try:
                     await self._room_member_handler.forget(
-                        UserID.from_string(state_key), room_id
+                        UserID.from_string(delta.state_key), delta.room_id
                     )
                 except SynapseError as e:
                     if e.code == 400:
@@ -703,24 +703,24 @@ class RoomSummaryHandler:
         # there should always be an entry
         assert stats is not None, "unable to retrieve stats for %s" % (room_id,)

-        entry = {
-            "room_id": stats["room_id"],
-            "name": stats["name"],
-            "topic": stats["topic"],
-            "canonical_alias": stats["canonical_alias"],
-            "num_joined_members": stats["joined_members"],
-            "avatar_url": stats["avatar"],
-            "join_rule": stats["join_rules"],
+        entry: JsonDict = {
+            "room_id": stats.room_id,
+            "name": stats.name,
+            "topic": stats.topic,
+            "canonical_alias": stats.canonical_alias,
+            "num_joined_members": stats.joined_members,
+            "avatar_url": stats.avatar,
+            "join_rule": stats.join_rules,
             "world_readable": (
-                stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
+                stats.history_visibility == HistoryVisibility.WORLD_READABLE
             ),
-            "guest_can_join": stats["guest_access"] == "can_join",
-            "room_type": stats["room_type"],
+            "guest_can_join": stats.guest_access == "can_join",
+            "room_type": stats.room_type,
         }

         if self._msc3266_enabled:
-            entry["im.nheko.summary.version"] = stats["version"]
-            entry["im.nheko.summary.encryption"] = stats["encryption"]
+            entry["im.nheko.summary.version"] = stats.version
+            entry["im.nheko.summary.encryption"] = stats.encryption

         # Federation requests need to provide additional information so the
         # requested server is able to filter the response appropriately.
@@ -374,13 +374,13 @@ class SearchHandler:
         serialize_options = SerializeEventConfig(requester=requester)

         for context in contexts.values():
-            context["events_before"] = self._event_serializer.serialize_events(
+            context["events_before"] = await self._event_serializer.serialize_events(
                 context["events_before"],
                 time_now,
                 bundle_aggregations=aggregations,
                 config=serialize_options,
             )
-            context["events_after"] = self._event_serializer.serialize_events(
+            context["events_after"] = await self._event_serializer.serialize_events(
                 context["events_after"],
                 time_now,
                 bundle_aggregations=aggregations,

@@ -390,7 +390,7 @@ class SearchHandler:
         results = [
             {
                 "rank": search_result.rank_map[e.event_id],
-                "result": self._event_serializer.serialize_event(
+                "result": await self._event_serializer.serialize_event(
                     e,
                     time_now,
                     bundle_aggregations=aggregations,

@@ -409,7 +409,7 @@ class SearchHandler:

         if state_results:
             rooms_cat_res["state"] = {
-                room_id: self._event_serializer.serialize_events(
+                room_id: await self._event_serializer.serialize_events(
                     state_events, time_now, config=serialize_options
                 )
                 for room_id, state_events in state_results.items()
@@ -1206,10 +1206,7 @@ class SsoHandler:
         # We have no guarantee that all the devices of that session are for the same
         # `user_id`. Hence, we have to iterate over the list of devices and log them out
         # one by one.
-        for device in devices:
-            user_id = device["user_id"]
-            device_id = device["device_id"]
-
+        for user_id, device_id in devices:
             # If the user_id associated with that device/session is not the one we got
             # out of the `sub` claim, skip that device and show log an error.
             if expected_user_id is not None and user_id != expected_user_id:
@@ -27,6 +27,7 @@ from typing import (
 from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.metrics import event_processing_positions
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.state_deltas import StateDelta
 from synapse.types import JsonDict

 if TYPE_CHECKING:

@@ -142,7 +143,7 @@ class StatsHandler:
         self.pos = max_pos

     async def _handle_deltas(
-        self, deltas: Iterable[JsonDict]
+        self, deltas: Iterable[StateDelta]
     ) -> Tuple[Dict[str, CounterType[str]], Dict[str, CounterType[str]]]:
         """Called with the state deltas to process

@@ -157,51 +158,50 @@ class StatsHandler:
         room_to_state_updates: Dict[str, Dict[str, Any]] = {}

         for delta in deltas:
-            typ = delta["type"]
-            state_key = delta["state_key"]
-            room_id = delta["room_id"]
-            event_id = delta["event_id"]
-            stream_id = delta["stream_id"]
-            prev_event_id = delta["prev_event_id"]
-
-            logger.debug("Handling: %r, %r %r, %s", room_id, typ, state_key, event_id)
+            logger.debug(
+                "Handling: %r, %r %r, %s",
+                delta.room_id,
+                delta.event_type,
+                delta.state_key,
+                delta.event_id,
+            )

-            token = await self.store.get_earliest_token_for_stats("room", room_id)
+            token = await self.store.get_earliest_token_for_stats("room", delta.room_id)

             # If the earliest token to begin from is larger than our current
             # stream ID, skip processing this delta.
-            if token is not None and token >= stream_id:
+            if token is not None and token >= delta.stream_id:
                 logger.debug(
                     "Ignoring: %s as earlier than this room's initial ingestion event",
-                    event_id,
+                    delta.event_id,
                 )
                 continue

-            if event_id is None and prev_event_id is None:
+            if delta.event_id is None and delta.prev_event_id is None:
                 logger.error(
                     "event ID is None and so is the previous event ID. stream_id: %s",
-                    stream_id,
+                    delta.stream_id,
                 )
                 continue

             event_content: JsonDict = {}

-            if event_id is not None:
-                event = await self.store.get_event(event_id, allow_none=True)
+            if delta.event_id is not None:
+                event = await self.store.get_event(delta.event_id, allow_none=True)
                 if event:
                     event_content = event.content or {}

             # All the values in this dict are deltas (RELATIVE changes)
-            room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter())
+            room_stats_delta = room_to_stats_deltas.setdefault(delta.room_id, Counter())

-            room_state = room_to_state_updates.setdefault(room_id, {})
+            room_state = room_to_state_updates.setdefault(delta.room_id, {})

-            if prev_event_id is None:
+            if delta.prev_event_id is None:
                 # this state event doesn't overwrite another,
                 # so it is a new effective/current state event
                 room_stats_delta["current_state_events"] += 1

-            if typ == EventTypes.Member:
+            if delta.event_type == EventTypes.Member:
                 # we could use StateDeltasHandler._get_key_change here but it's
                 # a bit inefficient given we're not testing for a specific
                 # result; might as well just grab the prev_membership and

@@ -210,9 +210,9 @@ class StatsHandler:
             # in the absence of a previous event because we do not want to
             # reduce the leave count when a new-to-the-room user joins.
             prev_membership = None
-            if prev_event_id is not None:
+            if delta.prev_event_id is not None:
                 prev_event = await self.store.get_event(
-                    prev_event_id, allow_none=True
+                    delta.prev_event_id, allow_none=True
                 )
                 if prev_event:
                     prev_event_content = prev_event.content

@@ -256,7 +256,7 @@ class StatsHandler:
             else:
                 raise ValueError("%r is not a valid membership" % (membership,))

-            user_id = state_key
+            user_id = delta.state_key
             if self.is_mine_id(user_id):
                 # this accounts for transitions like leave → ban and so on.
                 has_changed_joinedness = (prev_membership == Membership.JOIN) != (

@@ -272,30 +272,30 @@ class StatsHandler:

                 room_stats_delta["local_users_in_room"] += membership_delta

-            elif typ == EventTypes.Create:
+            elif delta.event_type == EventTypes.Create:
                 room_state["is_federatable"] = (
                     event_content.get(EventContentFields.FEDERATE, True) is True
                 )
                 room_type = event_content.get(EventContentFields.ROOM_TYPE)
                 if isinstance(room_type, str):
                     room_state["room_type"] = room_type
-            elif typ == EventTypes.JoinRules:
+            elif delta.event_type == EventTypes.JoinRules:
                 room_state["join_rules"] = event_content.get("join_rule")
-            elif typ == EventTypes.RoomHistoryVisibility:
+            elif delta.event_type == EventTypes.RoomHistoryVisibility:
                 room_state["history_visibility"] = event_content.get(
                     "history_visibility"
                 )
-            elif typ == EventTypes.RoomEncryption:
+            elif delta.event_type == EventTypes.RoomEncryption:
                 room_state["encryption"] = event_content.get("algorithm")
-            elif typ == EventTypes.Name:
+            elif delta.event_type == EventTypes.Name:
                 room_state["name"] = event_content.get("name")
-            elif typ == EventTypes.Topic:
+            elif delta.event_type == EventTypes.Topic:
                 room_state["topic"] = event_content.get("topic")
-            elif typ == EventTypes.RoomAvatar:
+            elif delta.event_type == EventTypes.RoomAvatar:
                 room_state["avatar"] = event_content.get("url")
-            elif typ == EventTypes.CanonicalAlias:
+            elif delta.event_type == EventTypes.CanonicalAlias:
                 room_state["canonical_alias"] = event_content.get("alias")
-            elif typ == EventTypes.GuestAccess:
+            elif delta.event_type == EventTypes.GuestAccess:
                 room_state["guest_access"] = event_content.get(
                     EventContentFields.GUEST_ACCESS
                 )
@@ -40,7 +40,6 @@ from synapse.api.filtering import FilterCollection
 from synapse.api.presence import UserPresenceState
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import EventBase
-from synapse.handlers.device import DELETE_DEVICE_MSGS_TASK_NAME
 from synapse.handlers.relations import BundledAggregations
 from synapse.logging import issue9533_logger
 from synapse.logging.context import current_context

@@ -58,6 +57,7 @@ from synapse.types import (
     DeviceListUpdates,
     JsonDict,
     JsonMapping,
+    MultiWriterStreamToken,
     MutableStateMap,
     Requester,
     RoomStreamToken,

@@ -363,36 +363,15 @@ class SyncHandler:
        # (since we now know that the device has received them)
        if since_token is not None:
            since_stream_id = since_token.to_device_key
            # Fast path: delete a limited number of to-device messages up front.
            # We do this to avoid the overhead of scheduling a task for every
            # sync.
            device_deletion_limit = 100
            deleted = await self.store.delete_messages_for_device(
                sync_config.user.to_string(),
                sync_config.device_id,
                since_stream_id,
                limit=device_deletion_limit,
            )
            logger.debug(
                "Deleted %d to-device messages up to %d", deleted, since_stream_id
            )

            # If we hit the limit, schedule a background task to delete the rest.
            if deleted >= device_deletion_limit:
                await self._task_scheduler.schedule_task(
                    DELETE_DEVICE_MSGS_TASK_NAME,
                    resource_id=sync_config.device_id,
                    params={
                        "user_id": sync_config.user.to_string(),
                        "device_id": sync_config.device_id,
                        "up_to_stream_id": since_stream_id,
                    },
                )
                logger.debug(
                    "Deletion of to-device messages up to %d scheduled",
                    since_stream_id,
                )

        if timeout == 0 or since_token is None or full_state:
            # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.

@@ -499,7 +478,11 @@ class SyncHandler:
             event_copy = {k: v for (k, v) in event.items() if k != "room_id"}
             ephemeral_by_room.setdefault(room_id, []).append(event_copy)

-        receipt_key = since_token.receipt_key if since_token else 0
+        receipt_key = (
+            since_token.receipt_key
+            if since_token
+            else MultiWriterStreamToken(stream=0)
+        )

         receipt_source = self.event_sources.sources.receipt
         receipts, receipt_key = await receipt_source.get_new_events(

@@ -522,12 +505,27 @@ class SyncHandler:
     async def _load_filtered_recents(
         self,
         room_id: str,
+        sync_result_builder: "SyncResultBuilder",
         sync_config: SyncConfig,
-        now_token: StreamToken,
+        upto_token: StreamToken,
         since_token: Optional[StreamToken] = None,
         potential_recents: Optional[List[EventBase]] = None,
         newly_joined_room: bool = False,
     ) -> TimelineBatch:
+        """Create a timeline batch for the room
+
+        Args:
+            room_id
+            sync_result_builder
+            sync_config
+            upto_token: The token up to which we should fetch (more) events.
+                If `potential_results` is non-empty then this is *start* of
+                the the list.
+            since_token
+            potential_recents: If non-empty, the events between the since token
+                and current token to send down to clients.
+            newly_joined_room
+        """
         with Measure(self.clock, "load_filtered_recents"):
             timeline_limit = sync_config.filter_collection.timeline_limit()
             block_all_timeline = (

@@ -543,6 +541,20 @@ class SyncHandler:
             else:
                 limited = False

+            # Check if there is a gap, if so we need to mark this as limited and
+            # recalculate which events to send down.
+            gap_token = await self.store.get_timeline_gaps(
+                room_id,
+                since_token.room_key if since_token else None,
+                sync_result_builder.now_token.room_key,
+            )
+            if gap_token:
+                # There's a gap, so we need to ignore the passed in
+                # `potential_recents`, and reset `upto_token` to match.
+                potential_recents = None
+                upto_token = sync_result_builder.now_token
+                limited = True
+
             log_kv({"limited": limited})

             if potential_recents:
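The gap handling above relies on the client-side contract of the `limited` flag: when a room's timeline batch is limited, its events are not contiguous with what the client already has, so the client must backfill from `prev_batch` (for example via `/messages`) instead of appending blindly. A hedged sketch of that contract from the client's point of view; all names here are illustrative:

```python
# Sketch of the client-side contract the `limited` flag relies on: a
# limited batch means a gap may precede `batch_events`, which the client
# must fetch explicitly before appending.
from typing import Callable, List


def apply_timeline_batch(
    known_events: List[str],
    batch_events: List[str],
    limited: bool,
    prev_batch: str,
    backfill: Callable[[str], List[str]],
) -> List[str]:
    if limited:
        # fill the gap between `known_events` and `batch_events`
        known_events = known_events + backfill(prev_batch)
    return known_events + batch_events
```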
@@ -581,10 +593,10 @@ class SyncHandler:
                     recents = []

             if not limited or block_all_timeline:
-                prev_batch_token = now_token
+                prev_batch_token = upto_token
                 if recents:
                     room_key = recents[0].internal_metadata.before
-                    prev_batch_token = now_token.copy_and_replace(
+                    prev_batch_token = upto_token.copy_and_replace(
                         StreamKeyType.ROOM, room_key
                     )

@@ -595,11 +607,15 @@ class SyncHandler:
             filtering_factor = 2
             load_limit = max(timeline_limit * filtering_factor, 10)
             max_repeat = 5  # Only try a few times per room, otherwise
-            room_key = now_token.room_key
+            room_key = upto_token.room_key
             end_key = room_key

             since_key = None
-            if since_token and not newly_joined_room:
+            if since_token and gap_token:
+                # If there is a gap then we need to only include events after
+                # it.
+                since_key = gap_token
+            elif since_token and not newly_joined_room:
                 since_key = since_token.room_key

             while limited and len(recents) < timeline_limit and max_repeat:

@@ -669,7 +685,7 @@ class SyncHandler:
                 recents = recents[-timeline_limit:]
                 room_key = recents[0].internal_metadata.before

-            prev_batch_token = now_token.copy_and_replace(StreamKeyType.ROOM, room_key)
+            prev_batch_token = upto_token.copy_and_replace(StreamKeyType.ROOM, room_key)

         # Don't bother to bundle aggregations if the timeline is unlimited,
         # as clients will have all the necessary information.

@@ -684,7 +700,9 @@ class SyncHandler:
         return TimelineBatch(
             events=recents,
             prev_batch=prev_batch_token,
-            limited=limited or newly_joined_room,
+            # Also mark as limited if this is a new room or there has been a gap
+            # (to force client to paginate the gap).
+            limited=limited or newly_joined_room or gap_token is not None,
             bundled_aggregations=bundled_aggregations,
         )

@@ -1499,7 +1517,7 @@ class SyncHandler:

         # Presence data is included if the server has it enabled and not filtered out.
         include_presence_data = bool(
-            self.hs_config.server.use_presence
+            self.hs_config.server.presence_enabled
            and not sync_config.filter_collection.blocks_all_presence()
         )
         # Device list updates are sent if a since token is provided.

@@ -2333,7 +2351,7 @@ class SyncHandler:
                 continue

             leave_token = now_token.copy_and_replace(
-                StreamKeyType.ROOM, RoomStreamToken(None, event.stream_ordering)
+                StreamKeyType.ROOM, RoomStreamToken(stream=event.stream_ordering)
             )
             room_entries.append(
                 RoomSyncResultBuilder(

@@ -2419,8 +2437,9 @@ class SyncHandler:

             batch = await self._load_filtered_recents(
                 room_id,
+                sync_result_builder,
                 sync_config,
-                now_token=upto_token,
+                upto_token=upto_token,
                 since_token=since_token,
                 potential_recents=events,
                 newly_joined_room=newly_joined,
@@ -187,9 +187,9 @@ class _BaseThreepidAuthChecker:

         if row:
             threepid = {
-                "medium": row["medium"],
-                "address": row["address"],
-                "validated_at": row["validated_at"],
+                "medium": row.medium,
+                "address": row.address,
+                "validated_at": row.validated_at,
             }

             # Valid threepid returned, delete from the db
@@ -14,7 +14,7 @@

 import logging
 from http import HTTPStatus
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, List, Optional, Set, Tuple

 from twisted.internet.interfaces import IDelayedCall

@@ -23,6 +23,7 @@ from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Memb
 from synapse.api.errors import Codes, SynapseError
 from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.state_deltas import StateDelta
 from synapse.storage.databases.main.user_directory import SearchResult
 from synapse.storage.roommember import ProfileInfo
 from synapse.types import UserID

@@ -247,32 +248,31 @@ class UserDirectoryHandler(StateDeltasHandler):

         await self.store.update_user_directory_stream_pos(max_pos)

-    async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None:
+    async def _handle_deltas(self, deltas: List[StateDelta]) -> None:
         """Called with the state deltas to process"""
         for delta in deltas:
-            typ = delta["type"]
-            state_key = delta["state_key"]
-            room_id = delta["room_id"]
-            event_id: Optional[str] = delta["event_id"]
-            prev_event_id: Optional[str] = delta["prev_event_id"]
-
-            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
+            logger.debug(
+                "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id
+            )

             # For join rule and visibility changes we need to check if the room
             # may have become public or not and add/remove the users in said room
-            if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
+            if delta.event_type in (
+                EventTypes.RoomHistoryVisibility,
+                EventTypes.JoinRules,
+            ):
                 await self._handle_room_publicity_change(
-                    room_id, prev_event_id, event_id, typ
+                    delta.room_id, delta.prev_event_id, delta.event_id, delta.event_type
                 )
-            elif typ == EventTypes.Member:
+            elif delta.event_type == EventTypes.Member:
                 await self._handle_room_membership_event(
-                    room_id,
-                    prev_event_id,
-                    event_id,
-                    state_key,
+                    delta.room_id,
+                    delta.prev_event_id,
+                    delta.event_id,
+                    delta.state_key,
                 )
             else:
-                logger.debug("Ignoring irrelevant type: %r", typ)
+                logger.debug("Ignoring irrelevant type: %r", delta.event_type)

     async def _handle_room_publicity_change(
         self,
@@ -59,7 +59,7 @@ class BasicProxyCredentials(ProxyCredentials):
         a Proxy-Authorization header.
         """
         # Encode as base64 and prepend the authorization type
-        return b"Basic " + base64.encodebytes(self.username_password)
+        return b"Basic " + base64.b64encode(self.username_password)


 @attr.s(auto_attribs=True)
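Why this one-word change matters: `base64.encodebytes` is MIME-oriented, appending a newline and wrapping every 76 bytes, which is illegal inside an HTTP header value; `base64.b64encode` returns the bare encoding. A short demonstration:

```python
# encodebytes() adds a trailing newline (and wraps long input), which
# would corrupt a Proxy-Authorization header; b64encode() does not.
import base64

creds = b"user:pass"
print(base64.encodebytes(creds))  # b'dXNlcjpwYXNz\n'  <- trailing newline
print(base64.b64encode(creds))    # b'dXNlcjpwYXNz'
```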