commit bc72d381b2
Merge branch 'release-v0.16.1' of github.com:matrix-org/synapse

 CHANGES.rst | 43

CHANGES.rst
@@ -1,3 +1,44 @@
+Changes in synapse v0.16.1 (2016-06-20)
+=======================================
+
+Bug fixes:
+
+* Fix assorted bugs in ``/preview_url`` (PR #872)
+* Fix TypeError when setting unicode passwords (PR #873)
+
+
+Performance improvements:
+
+* Turn ``use_frozen_events`` off by default (PR #877)
+* Disable responding with canonical json for federation (PR #878)
+
+
+Changes in synapse v0.16.1-rc1 (2016-06-15)
+===========================================
+
+Features: None
+
+Changes:
+
+* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
+* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
+* Linearize fetching of gaps on incoming events (PR #871)
+
+
+Bug fixes:
+
+* Fix bug where rooms were marked as published by default (PR #857)
+* Fix bug when joining a room with an event with an invalid sender (PR #868)
+* Fix bug where backfilled events were sent down sync streams (PR #869)
+* Fix bug where outgoing connections could wedge indefinitely, causing push
+  notifications to be unreliable (PR #870)
+
+
+Performance improvements:
+
+* Improve ``/publicRooms`` performance (PR #859)
+
+
 Changes in synapse v0.16.0 (2016-06-09)
 =======================================
@@ -28,7 +69,7 @@ Bug fixes:
 * Fix bug where synapse sent malformed transactions to AS's when retrying
   transactions (Commits 310197b, 8437906)

-Performance Improvements:
+Performance improvements:

 * Remove event fetching from DB threads (PR #835)
 * Change the way we cache events (PR #836)

jenkins-dendron-postgres.sh
@@ -73,12 +73,14 @@ git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling b

 ./jenkins/prep_sytest_for_postgres.sh

+mkdir -p var
+
 echo >&2 "Running sytest with PostgreSQL";
 ./jenkins/install_and_run.sh --python $TOX_BIN/python \
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
-    --synchrotron \
     --pusher \
+    --synchrotron \
     --port-base $PORT_BASE

 cd ..

synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.16.0"
+__version__ = "0.16.1"

synapse/app/homeserver.py
@@ -266,10 +266,9 @@ def setup(config_options):
            HomeServer
    """
    try:
-        config = HomeServerConfig.load_config(
+        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
-            generate_section="Homeserver"
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
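Note: the rename reflects a split introduced in synapse/config/_base.py below: load_or_generate_config() can write out a fresh config and keys, while plain load_config() only ever reads existing files. A minimal sketch of the two entry points (file paths illustrative):

    from synapse.config.homeserver import HomeServerConfig

    # main homeserver: may generate config/keys on first run
    config = HomeServerConfig.load_or_generate_config(
        "Synapse Homeserver", ["-c", "homeserver.yaml"],
    )

    # workers: only ever read existing config files
    worker_config = HomeServerConfig.load_config(
        "Synapse pusher", ["-c", "homeserver.yaml", "-c", "pusher.yaml"],
    )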

synapse/app/pusher.py
@@ -18,10 +18,8 @@ import synapse

 from synapse.server import HomeServer
 from synapse.config._base import ConfigError
-from synapse.config.database import DatabaseConfig
-from synapse.config.logger import LoggingConfig
-from synapse.config.emailconfig import EmailConfig
-from synapse.config.key import KeyConfig
+from synapse.config.logger import setup_logging
+from synapse.config.homeserver import HomeServerConfig
 from synapse.http.site import SynapseSite
 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.storage.roommember import RoomMemberStore
@@ -43,98 +41,13 @@ from twisted.web.resource import Resource

 from daemonize import Daemonize

-import gc
 import sys
 import logging
+import gc

 logger = logging.getLogger("synapse.app.pusher")


-class SlaveConfig(DatabaseConfig):
-    def read_config(self, config):
-        self.replication_url = config["replication_url"]
-        self.server_name = config["server_name"]
-        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
-            "use_insecure_ssl_client_just_for_testing_do_not_use", False
-        )
-        self.user_agent_suffix = None
-        self.start_pushers = True
-        self.listeners = config["listeners"]
-        self.soft_file_limit = config.get("soft_file_limit")
-        self.daemonize = config.get("daemonize")
-        self.pid_file = self.abspath(config.get("pid_file"))
-        self.public_baseurl = config["public_baseurl"]
-
-        thresholds = config.get("gc_thresholds", None)
-        if thresholds is not None:
-            try:
-                assert len(thresholds) == 3
-                self.gc_thresholds = (
-                    int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
-                )
-            except:
-                raise ConfigError(
-                    "Value of `gc_threshold` must be a list of three integers if set"
-                )
-        else:
-            self.gc_thresholds = None
-
-        # some things used by the auth handler but not actually used in the
-        # pusher codebase
-        self.bcrypt_rounds = None
-        self.ldap_enabled = None
-        self.ldap_server = None
-        self.ldap_port = None
-        self.ldap_tls = None
-        self.ldap_search_base = None
-        self.ldap_search_property = None
-        self.ldap_email_property = None
-        self.ldap_full_name_property = None
-
-        # We would otherwise try to use the registration shared secret as the
-        # macaroon shared secret if there was no macaroon_shared_secret, but
-        # that means pulling in RegistrationConfig too. We don't need to be
-        # backwards compaitible in the pusher codebase so just make people set
-        # macaroon_shared_secret. We set this to None to prevent it referencing
-        # an undefined key.
-        self.registration_shared_secret = None
-
-    def default_config(self, server_name, **kwargs):
-        pid_file = self.abspath("pusher.pid")
-        return """\
-        # Slave configuration
-
-        # The replication listener on the synapse to talk to.
-        #replication_url: https://localhost:{replication_port}/_synapse/replication
-
-        server_name: "%(server_name)s"
-
-        listeners: []
-        # Enable a ssh manhole listener on the pusher.
-        # - type: manhole
-        #   port: {manhole_port}
-        #   bind_address: 127.0.0.1
-        # Enable a metric listener on the pusher.
-        # - type: http
-        #   port: {metrics_port}
-        #   bind_address: 127.0.0.1
-        #   resources:
-        #     - names: ["metrics"]
-        #       compress: False
-
-        report_stats: False
-
-        daemonize: False
-
-        pid_file: %(pid_file)s
-
-        """ % locals()
-
-
-class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig, KeyConfig):
-    pass
-
-
 class PusherSlaveStore(
     SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
     SlavedAccountDataStore
@@ -199,7 +112,7 @@ class PusherServer(HomeServer):

     def remove_pusher(self, app_id, push_key, user_id):
         http_client = self.get_simple_http_client()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         url = replication_url + "/remove_pushers"
         return http_client.post_json_get_json(url, {
             "remove": [{
@@ -232,8 +145,8 @@ class PusherServer(HomeServer):
         )
         logger.info("Synapse pusher now listening on port %d", port)

-    def start_listening(self):
-        for listener in self.config.listeners:
+    def start_listening(self, listeners):
+        for listener in listeners:
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
@@ -253,7 +166,7 @@ class PusherServer(HomeServer):
     def replicate(self):
         http_client = self.get_simple_http_client()
         store = self.get_datastore()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         pusher_pool = self.get_pusherpool()
         clock = self.get_clock()
@@ -329,19 +242,30 @@ class PusherServer(HomeServer):
             yield sleep(30)


-def setup(config_options):
+def start(config_options):
     try:
-        config = PusherSlaveConfig.load_config(
+        config = HomeServerConfig.load_config(
             "Synapse pusher", config_options
         )
     except ConfigError as e:
         sys.stderr.write("\n" + e.message + "\n")
         sys.exit(1)

-    if not config:
-        sys.exit(0)
+    assert config.worker_app == "synapse.app.pusher"

-    config.setup_logging()
+    setup_logging(config.worker_log_config, config.worker_log_file)
+
+    if config.start_pushers:
+        sys.stderr.write(
+            "\nThe pushers must be disabled in the main synapse process"
+            "\nbefore they can be run in a separate worker."
+            "\nPlease add ``start_pushers: false`` to the main config"
+            "\n"
+        )
+        sys.exit(1)
+
+    # Force the pushers to start since they will be disabled in the main config
+    config.start_pushers = True

     database_engine = create_engine(config.database_config)

@@ -354,11 +278,15 @@ def setup(config_options):
     )

     ps.setup()
-    ps.start_listening()
+    ps.start_listening(config.worker_listeners)

-    change_resource_limit(ps.config.soft_file_limit)
-    if ps.config.gc_thresholds:
-        gc.set_threshold(*ps.config.gc_thresholds)
+    def run():
+        with LoggingContext("run"):
+            logger.info("Running")
+            change_resource_limit(config.soft_file_limit)
+            if config.gc_thresholds:
+                gc.set_threshold(*config.gc_thresholds)
+            reactor.run()

     def start():
         ps.replicate()
@@ -367,30 +295,20 @@ def setup(config_options):

     reactor.callWhenRunning(start)

-    return ps
-
-
-if __name__ == '__main__':
-    with LoggingContext("main"):
-        ps = setup(sys.argv[1:])
-
-        if ps.config.daemonize:
-            def run():
-                with LoggingContext("run"):
-                    change_resource_limit(ps.config.soft_file_limit)
-                    if ps.config.gc_thresholds:
-                        gc.set_threshold(*ps.config.gc_thresholds)
-                    reactor.run()
-
-            daemon = Daemonize(
-                app="synapse-pusher",
-                pid=ps.config.pid_file,
-                action=run,
-                auto_close_fds=False,
-                verbose=True,
-                logger=logger,
-            )
-
-            daemon.start()
-        else:
-            reactor.run()
+    if config.worker_daemonize:
+        daemon = Daemonize(
+            app="synapse-pusher",
+            pid=config.worker_pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        ps = start(sys.argv[1:])
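Note: with this refactor the pusher reads the same HomeServerConfig as the main process, so it is started against the shared config plus a worker config file. A sketch of the invocation implied by the ``__main__`` block above (file names illustrative):

    python -m synapse.app.pusher -c homeserver.yaml -c pusher.yaml

The main process must have ``start_pushers: false`` set first, as enforced by the check in start().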

synapse/app/synchrotron.py
@@ -18,9 +18,8 @@ import synapse

 from synapse.api.constants import EventTypes, PresenceState
 from synapse.config._base import ConfigError
-from synapse.config.database import DatabaseConfig
-from synapse.config.logger import LoggingConfig
-from synapse.config.appservice import AppServiceConfig
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.events import FrozenEvent
 from synapse.handlers.presence import PresenceHandler
 from synapse.http.site import SynapseSite
@@ -63,70 +62,6 @@ import ujson as json
 logger = logging.getLogger("synapse.app.synchrotron")


-class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig):
-    def read_config(self, config):
-        self.replication_url = config["replication_url"]
-        self.server_name = config["server_name"]
-        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
-            "use_insecure_ssl_client_just_for_testing_do_not_use", False
-        )
-        self.user_agent_suffix = None
-        self.listeners = config["listeners"]
-        self.soft_file_limit = config.get("soft_file_limit")
-        self.daemonize = config.get("daemonize")
-        self.pid_file = self.abspath(config.get("pid_file"))
-        self.macaroon_secret_key = config["macaroon_secret_key"]
-        self.expire_access_token = config.get("expire_access_token", False)
-
-        thresholds = config.get("gc_thresholds", None)
-        if thresholds is not None:
-            try:
-                assert len(thresholds) == 3
-                self.gc_thresholds = (
-                    int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
-                )
-            except:
-                raise ConfigError(
-                    "Value of `gc_threshold` must be a list of three integers if set"
-                )
-        else:
-            self.gc_thresholds = None
-
-    def default_config(self, server_name, **kwargs):
-        pid_file = self.abspath("synchroton.pid")
-        return """\
-        # Slave configuration
-
-        # The replication listener on the synapse to talk to.
-        #replication_url: https://localhost:{replication_port}/_synapse/replication
-
-        server_name: "%(server_name)s"
-
-        listeners:
-        # Enable a /sync listener on the synchrontron
-        #- type: http
-        #    port: {http_port}
-        #    bind_address: ""
-        # Enable a ssh manhole listener on the synchrotron
-        # - type: manhole
-        #   port: {manhole_port}
-        #   bind_address: 127.0.0.1
-        # Enable a metric listener on the synchrotron
-        # - type: http
-        #   port: {metrics_port}
-        #   bind_address: 127.0.0.1
-        #   resources:
-        #     - names: ["metrics"]
-        #       compress: False
-
-        report_stats: False
-
-        daemonize: False
-
-        pid_file: %(pid_file)s
-        """ % locals()
-
-
 class SynchrotronSlavedStore(
     SlavedPushRuleStore,
     SlavedEventStore,
@@ -163,7 +98,7 @@ class SynchrotronPresence(object):
         self.http_client = hs.get_simple_http_client()
         self.store = hs.get_datastore()
         self.user_to_num_current_syncs = {}
-        self.syncing_users_url = hs.config.replication_url + "/syncing_users"
+        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
         self.clock = hs.get_clock()

         active_presence = self.store.take_presence_startup_info()
@@ -350,8 +285,8 @@ class SynchrotronServer(HomeServer):
         )
         logger.info("Synapse synchrotron now listening on port %d", port)

-    def start_listening(self):
-        for listener in self.config.listeners:
+    def start_listening(self, listeners):
+        for listener in listeners:
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":
@@ -371,7 +306,7 @@ class SynchrotronServer(HomeServer):
     def replicate(self):
         http_client = self.get_simple_http_client()
         store = self.get_datastore()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         clock = self.get_clock()
         notifier = self.get_notifier()
         presence_handler = self.get_presence_handler()
@@ -470,19 +405,18 @@ class SynchrotronServer(HomeServer):
         return SynchrotronTyping(self)


-def setup(config_options):
+def start(config_options):
     try:
-        config = SynchrotronConfig.load_config(
+        config = HomeServerConfig.load_config(
             "Synapse synchrotron", config_options
         )
     except ConfigError as e:
         sys.stderr.write("\n" + e.message + "\n")
         sys.exit(1)

-    if not config:
-        sys.exit(0)
+    assert config.worker_app == "synapse.app.synchrotron"

-    config.setup_logging()
+    setup_logging(config.worker_log_config, config.worker_log_file)

     database_engine = create_engine(config.database_config)

@@ -496,11 +430,15 @@ def setup(config_options):
     )

     ss.setup()
-    ss.start_listening()
+    ss.start_listening(config.worker_listeners)

-    change_resource_limit(ss.config.soft_file_limit)
-    if ss.config.gc_thresholds:
-        ss.set_threshold(*ss.config.gc_thresholds)
+    def run():
+        with LoggingContext("run"):
+            logger.info("Running")
+            change_resource_limit(config.soft_file_limit)
+            if config.gc_thresholds:
+                gc.set_threshold(*config.gc_thresholds)
+            reactor.run()

     def start():
         ss.get_datastore().start_profiling()
@@ -508,30 +446,20 @@ def setup(config_options):

     reactor.callWhenRunning(start)

-    return ss
-
-
-if __name__ == '__main__':
-    with LoggingContext("main"):
-        ss = setup(sys.argv[1:])
-
-        if ss.config.daemonize:
-            def run():
-                with LoggingContext("run"):
-                    change_resource_limit(ss.config.soft_file_limit)
-                    if ss.config.gc_thresholds:
-                        gc.set_threshold(*ss.config.gc_thresholds)
-                    reactor.run()
-
-            daemon = Daemonize(
-                app="synapse-synchrotron",
-                pid=ss.config.pid_file,
-                action=run,
-                auto_close_fds=False,
-                verbose=True,
-                logger=logger,
-            )
-
-            daemon.start()
-        else:
-            reactor.run()
+    if config.worker_daemonize:
+        daemon = Daemonize(
+            app="synapse-synchrotron",
+            pid=config.worker_pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        start(sys.argv[1:])

synapse/config/_base.py
@@ -157,9 +157,40 @@ class Config(object):
         return default_config, config

     @classmethod
-    def load_config(cls, description, argv, generate_section=None):
-        obj = cls()
+    def load_config(cls, description, argv):
+        config_parser = argparse.ArgumentParser(
+            description=description,
+        )
+        config_parser.add_argument(
+            "-c", "--config-path",
+            action="append",
+            metavar="CONFIG_FILE",
+            help="Specify config file. Can be given multiple times and"
+                 " may specify directories containing *.yaml files."
+        )
+
+        config_parser.add_argument(
+            "--keys-directory",
+            metavar="DIRECTORY",
+            help="Where files such as certs and signing keys are stored when"
+                 " their location is given explicitly in the config."
+                 " Defaults to the directory containing the last config file",
+        )
+
+        config_args = config_parser.parse_args(argv)
+
+        config_files = find_config_files(search_paths=config_args.config_path)
+
+        obj = cls()
+        obj.read_config_files(
+            config_files,
+            keys_directory=config_args.keys_directory,
+            generate_keys=False,
+        )
+        return obj
+
+    @classmethod
+    def load_or_generate_config(cls, description, argv):
         config_parser = argparse.ArgumentParser(add_help=False)
         config_parser.add_argument(
             "-c", "--config-path",
@@ -176,7 +207,7 @@ class Config(object):
         config_parser.add_argument(
             "--report-stats",
             action="store",
-            help="Stuff",
+            help="Whether the generated config reports anonymized usage statistics",
             choices=["yes", "no"]
         )
         config_parser.add_argument(
@@ -197,36 +228,11 @@ class Config(object):
         )
         config_args, remaining_args = config_parser.parse_known_args(argv)

+        config_files = find_config_files(search_paths=config_args.config_path)
+
         generate_keys = config_args.generate_keys

-        config_files = []
-        if config_args.config_path:
-            for config_path in config_args.config_path:
-                if os.path.isdir(config_path):
-                    # We accept specifying directories as config paths, we search
-                    # inside that directory for all files matching *.yaml, and then
-                    # we apply them in *sorted* order.
-                    files = []
-                    for entry in os.listdir(config_path):
-                        entry_path = os.path.join(config_path, entry)
-                        if not os.path.isfile(entry_path):
-                            print (
-                                "Found subdirectory in config directory: %r. IGNORING."
-                            ) % (entry_path, )
-                            continue
-
-                        if not entry.endswith(".yaml"):
-                            print (
-                                "Found file in config directory that does not"
-                                " end in '.yaml': %r. IGNORING."
-                            ) % (entry_path, )
-                            continue
-
-                        files.append(entry_path)
-
-                    config_files.extend(sorted(files))
-                else:
-                    config_files.append(config_path)
+        obj = cls()

         if config_args.generate_config:
             if config_args.report_stats is None:
|
@ -299,28 +305,43 @@ class Config(object):
|
||||||
" -c CONFIG-FILE\""
|
" -c CONFIG-FILE\""
|
||||||
)
|
)
|
||||||
|
|
||||||
if config_args.keys_directory:
|
obj.read_config_files(
|
||||||
config_dir_path = config_args.keys_directory
|
config_files,
|
||||||
else:
|
keys_directory=config_args.keys_directory,
|
||||||
config_dir_path = os.path.dirname(config_args.config_path[-1])
|
generate_keys=generate_keys,
|
||||||
config_dir_path = os.path.abspath(config_dir_path)
|
)
|
||||||
|
|
||||||
|
if generate_keys:
|
||||||
|
return None
|
||||||
|
|
||||||
|
obj.invoke_all("read_arguments", args)
|
||||||
|
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def read_config_files(self, config_files, keys_directory=None,
|
||||||
|
generate_keys=False):
|
||||||
|
if not keys_directory:
|
||||||
|
keys_directory = os.path.dirname(config_files[-1])
|
||||||
|
|
||||||
|
config_dir_path = os.path.abspath(keys_directory)
|
||||||
|
|
||||||
specified_config = {}
|
specified_config = {}
|
||||||
for config_file in config_files:
|
for config_file in config_files:
|
||||||
yaml_config = cls.read_config_file(config_file)
|
yaml_config = self.read_config_file(config_file)
|
||||||
specified_config.update(yaml_config)
|
specified_config.update(yaml_config)
|
||||||
|
|
||||||
if "server_name" not in specified_config:
|
if "server_name" not in specified_config:
|
||||||
raise ConfigError(MISSING_SERVER_NAME)
|
raise ConfigError(MISSING_SERVER_NAME)
|
||||||
|
|
||||||
server_name = specified_config["server_name"]
|
server_name = specified_config["server_name"]
|
||||||
_, config = obj.generate_config(
|
_, config = self.generate_config(
|
||||||
config_dir_path=config_dir_path,
|
config_dir_path=config_dir_path,
|
||||||
server_name=server_name,
|
server_name=server_name,
|
||||||
is_generating_file=False,
|
is_generating_file=False,
|
||||||
)
|
)
|
||||||
config.pop("log_config")
|
config.pop("log_config")
|
||||||
config.update(specified_config)
|
config.update(specified_config)
|
||||||
|
|
||||||
if "report_stats" not in config:
|
if "report_stats" not in config:
|
||||||
raise ConfigError(
|
raise ConfigError(
|
||||||
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
|
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
|
||||||
|
@@ -328,11 +349,51 @@ class Config(object):
             )

         if generate_keys:
-            obj.invoke_all("generate_files", config)
+            self.invoke_all("generate_files", config)
             return

-        obj.invoke_all("read_config", config)
-
-        obj.invoke_all("read_arguments", args)
-
-        return obj
+        self.invoke_all("read_config", config)
+
+
+def find_config_files(search_paths):
+    """Finds config files using a list of search paths. If a path is a file
+    then that file path is added to the list. If a search path is a directory
+    then all the "*.yaml" files in that directory are added to the list in
+    sorted order.
+
+    Args:
+        search_paths(list(str)): A list of paths to search.
+
+    Returns:
+        list(str): A list of file paths.
+    """
+
+    config_files = []
+    if search_paths:
+        for config_path in search_paths:
+            if os.path.isdir(config_path):
+                # We accept specifying directories as config paths, we search
+                # inside that directory for all files matching *.yaml, and then
+                # we apply them in *sorted* order.
+                files = []
+                for entry in os.listdir(config_path):
+                    entry_path = os.path.join(config_path, entry)
+                    if not os.path.isfile(entry_path):
+                        print (
+                            "Found subdirectory in config directory: %r. IGNORING."
+                        ) % (entry_path, )
+                        continue
+
+                    if not entry.endswith(".yaml"):
+                        print (
+                            "Found file in config directory that does not"
+                            " end in '.yaml': %r. IGNORING."
+                        ) % (entry_path, )
+                        continue
+
+                    files.append(entry_path)
+
+                config_files.extend(sorted(files))
+            else:
+                config_files.append(config_path)
+    return config_files
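Note: a quick sketch of how the extracted helper behaves — plain files are kept as given, directories expand to their *.yaml contents in sorted order (paths illustrative):

    from synapse.config._base import find_config_files

    config_files = find_config_files(search_paths=[
        "homeserver.yaml",  # a file: appended as-is
        "conf.d",           # a directory: becomes conf.d/*.yaml, sorted
    ])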

synapse/config/homeserver.py
@@ -32,13 +32,15 @@ from .password import PasswordConfig
 from .jwt import JWTConfig
 from .ldap import LDAPConfig
 from .emailconfig import EmailConfig
+from .workers import WorkerConfig


 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,):
+                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,
+                       WorkerConfig,):
     pass

synapse/config/logger.py
@@ -126,17 +126,21 @@ class LoggingConfig(Config):
         )

     def setup_logging(self):
+        setup_logging(self.log_config, self.log_file, self.verbosity)
+
+
+def setup_logging(log_config=None, log_file=None, verbosity=None):
     log_format = (
         "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
         " - %(message)s"
     )
-    if self.log_config is None:
+    if log_config is None:

         level = logging.INFO
         level_for_storage = logging.INFO
-        if self.verbosity:
+        if verbosity:
             level = logging.DEBUG
-            if self.verbosity > 1:
+            if verbosity > 1:
                 level_for_storage = logging.DEBUG

         # FIXME: we need a logging.WARN for a -q quiet option
@@ -146,10 +150,10 @@ class LoggingConfig(Config):
         logging.getLogger('synapse.storage').setLevel(level_for_storage)

         formatter = logging.Formatter(log_format)
-        if self.log_file:
+        if log_file:
             # TODO: Customisable file size / backup count
             handler = logging.handlers.RotatingFileHandler(
-                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
+                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
             )

             def sighup(signum, stack):
@@ -172,7 +176,7 @@ class LoggingConfig(Config):

         logger.addHandler(handler)
     else:
-        with open(self.log_config, 'r') as f:
+        with open(log_config, 'r') as f:
             logging.config.dictConfig(yaml.load(f))

     observer = PythonLoggingObserver()
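Note: hoisting setup_logging() to module level lets the workers configure logging straight from the worker_* keys without building a LoggingConfig. A sketch using the signature above (file name illustrative):

    from synapse.config.logger import setup_logging

    # log_config=None falls back to the built-in format; verbosity > 0 is DEBUG
    setup_logging(log_config=None, log_file="pusher.log", verbosity=1)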

synapse/config/server.py
@@ -27,7 +27,7 @@ class ServerConfig(Config):
         self.daemonize = config.get("daemonize")
         self.print_pidfile = config.get("print_pidfile")
         self.user_agent_suffix = config.get("user_agent_suffix")
-        self.use_frozen_dicts = config.get("use_frozen_dicts", True)
+        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
         self.public_baseurl = config.get("public_baseurl")
         self.secondary_directory_servers = config.get("secondary_directory_servers", [])
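Note: this is the "turn ``use_frozen_events`` off by default" change from the changelog (PR #877); the actual config key, as read here, is ``use_frozen_dicts``. To keep the previous behaviour one would set, in homeserver.yaml:

    use_frozen_dicts: true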
@@ -38,19 +38,7 @@ class ServerConfig(Config):

         self.listeners = config.get("listeners", [])

-        thresholds = config.get("gc_thresholds", None)
-        if thresholds is not None:
-            try:
-                assert len(thresholds) == 3
-                self.gc_thresholds = (
-                    int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
-                )
-            except:
-                raise ConfigError(
-                    "Value of `gc_threshold` must be a list of three integers if set"
-                )
-        else:
-            self.gc_thresholds = None
+        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

         bind_port = config.get("bind_port")
         if bind_port:
@@ -264,3 +252,20 @@ class ServerConfig(Config):
                             type=int,
                             help="Turn on the twisted telnet manhole"
                                  " service on the given port.")
+
+
+def read_gc_thresholds(thresholds):
+    """Reads the three integer thresholds for garbage collection. Ensures that
+    the thresholds are integers if thresholds are supplied.
+    """
+    if thresholds is None:
+        return None
+    try:
+        assert len(thresholds) == 3
+        return (
+            int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
+        )
+    except:
+        raise ConfigError(
+            "Value of `gc_threshold` must be a list of three integers if set"
+        )
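Note: the extracted helper normalises the YAML value into the tuple gc.set_threshold() expects; it is shared by the main process and the workers. A sketch (the example values are CPython's defaults, shown for illustration):

    # homeserver.yaml
    # gc_thresholds: [700, 10, 10]

    from synapse.config.server import read_gc_thresholds

    read_gc_thresholds([700, 10, 10])  # -> (700, 10, 10)
    read_gc_thresholds(None)           # -> None: leave gc defaults alone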

synapse/config/workers.py (new file)
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 matrix.org
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class WorkerConfig(Config):
+    """The workers are processes run separately to the main synapse process.
+    They have their own pid_file and listener configuration. They use the
+    replication_url to talk to the main synapse process."""
+
+    def read_config(self, config):
+        self.worker_app = config.get("worker_app")
+        self.worker_listeners = config.get("worker_listeners")
+        self.worker_daemonize = config.get("worker_daemonize")
+        self.worker_pid_file = config.get("worker_pid_file")
+        self.worker_log_file = config.get("worker_log_file")
+        self.worker_log_config = config.get("worker_log_config")
+        self.worker_replication_url = config.get("worker_replication_url")
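Note: putting the worker settings behind ``worker_`` keys lets a worker share the main homeserver.yaml and layer its own file on top. A sketch of a pusher worker config using the keys read above (all values illustrative):

    worker_app: synapse.app.pusher
    worker_replication_url: http://localhost:9092/_synapse/replication
    worker_daemonize: true
    worker_pid_file: /var/run/synapse-pusher.pid
    worker_log_file: /var/log/synapse-pusher.log
    worker_listeners:
      - type: manhole
        port: 9712
        bind_address: 127.0.0.1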

synapse/federation/federation_base.py
@@ -31,6 +31,9 @@ logger = logging.getLogger(__name__)


 class FederationBase(object):
+    def __init__(self, hs):
+        pass
+
     @defer.inlineCallbacks
     def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
                                        include_none=False):

synapse/federation/federation_client.py
@@ -52,6 +52,8 @@ sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


 class FederationClient(FederationBase):
+    def __init__(self, hs):
+        super(FederationClient, self).__init__(hs)

     def start_get_pdu_cache(self):
         self._get_pdu_cache = ExpiringCache(

synapse/federation/federation_server.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
 from .federation_base import FederationBase
 from .units import Transaction, Edu

+from synapse.util.async import Linearizer
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
 import synapse.metrics
@@ -44,6 +45,11 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[


 class FederationServer(FederationBase):
+    def __init__(self, hs):
+        super(FederationServer, self).__init__(hs)
+
+        self._room_pdu_linearizer = Linearizer()
+
     def set_handler(self, handler):
         """Sets the handler that the replication layer will use to communicate
         receipt of new PDUs from other home servers. The required methods are
@@ -187,6 +193,9 @@ class FederationServer(FederationBase):
         )

         for event in auth_chain:
+            # We sign these again because there was a bug where we
+            # incorrectly signed things the first time round
+            if self.hs.is_mine_id(event.event_id):
                 event.signatures.update(
                     compute_event_signature(
                         event,
@@ -377,10 +386,20 @@ class FederationServer(FederationBase):
     @log_function
     def on_get_missing_events(self, origin, room_id, earliest_events,
                               latest_events, limit, min_depth):
+        logger.info(
+            "on_get_missing_events: earliest_events: %r, latest_events: %r,"
+            " limit: %d, min_depth: %d",
+            earliest_events, latest_events, limit, min_depth
+        )
         missing_events = yield self.handler.on_get_missing_events(
             origin, room_id, earliest_events, latest_events, limit, min_depth
         )

+        if len(missing_events) < 5:
+            logger.info("Returning %d events: %r", len(missing_events), missing_events)
+        else:
+            logger.info("Returning %d events", len(missing_events))
+
         time_now = self._clock.time_msec()

         defer.returnValue({
@@ -481,6 +500,14 @@ class FederationServer(FederationBase):
             pdu.internal_metadata.outlier = True
         elif min_depth and pdu.depth > min_depth:
             if get_missing and prevs - seen:
+                # If we're missing stuff, ensure we only fetch stuff one
+                # at a time.
+                with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
+                    # We recalculate seen, since it may have changed.
+                    have_seen = yield self.store.have_events(prevs)
+                    seen = set(have_seen.keys())
+
+                    if prevs - seen:
                         latest = yield self.store.get_latest_event_ids_in_room(
                             pdu.room_id
                         )
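Note: this is the "linearize fetching of gaps on incoming events" fix from the changelog (PR #871). The pattern, sketched in isolation (fetch_gap and do_fetch are hypothetical names):

    from twisted.internet import defer
    from synapse.util.async import Linearizer

    linearizer = Linearizer()

    @defer.inlineCallbacks
    def fetch_gap(room_id):
        # only one caller per room_id enters this block at a time; later
        # callers queue up, then re-check what is still missing once inside
        with (yield linearizer.queue(room_id)):
            yield do_fetch(room_id)  # hypothetical fetch of the missing events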
@@ -490,6 +517,11 @@ class FederationServer(FederationBase):
                     latest = set(latest)
                     latest |= seen

+                    logger.info(
+                        "Missing %d events for room %r: %r...",
+                        len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+                    )
+
                     missing_events = yield self.get_missing_events(
                         origin,
                         pdu.room_id,
@@ -517,6 +549,10 @@ class FederationServer(FederationBase):
         prevs = {e_id for e_id, _ in pdu.prev_events}
         seen = set(have_seen.keys())
         if prevs - seen:
+            logger.info(
+                "Still missing %d events for room %r: %r...",
+                len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+            )
             fetch_state = True

         if fetch_state:

synapse/federation/replication.py
@@ -72,5 +72,7 @@ class ReplicationLayer(FederationClient, FederationServer):

         self.hs = hs

+        super(ReplicationLayer, self).__init__(hs)
+
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name

synapse/federation/transport/server.py
@@ -37,7 +37,7 @@ class TransportLayerServer(JsonResource):
         self.hs = hs
         self.clock = hs.get_clock()

-        super(TransportLayerServer, self).__init__(hs)
+        super(TransportLayerServer, self).__init__(hs, canonical_json=False)

         self.authenticator = Authenticator(hs)
         self.ratelimiter = FederationRateLimiter(
@@ -528,15 +528,10 @@ class PublicRoomList(BaseFederationServlet):
     PATH = "/publicRooms"

     @defer.inlineCallbacks
-    def on_GET(self, request):
+    def on_GET(self, origin, content, query):
         data = yield self.room_list_handler.get_local_public_room_list()
         defer.returnValue((200, data))

-    # Avoid doing remote HS authorization checks which are done by default by
-    # BaseFederationServlet.
-    def _wrap(self, code):
-        return code
-

 SERVLET_CLASSES = (
     FederationSendServlet,

synapse/handlers/auth.py
@@ -626,6 +626,6 @@ class AuthHandler(BaseHandler):
             Whether self.hash(password) == stored_hash (bool).
         """
         if stored_hash:
-            return bcrypt.hashpw(password, stored_hash) == stored_hash
+            return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash
         else:
             return False
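Note: this is the unicode-password TypeError fix from the changelog (PR #873). A sketch of the failure mode, assuming the bcrypt library's requirement that the salt/hash argument be bytes:

    import bcrypt

    password = b"s3cret"
    # simulate a hash read back from the database as a unicode string
    stored_hash = bcrypt.hashpw(password, bcrypt.gensalt()).decode('utf-8')

    # bcrypt.hashpw(password, stored_hash)  # TypeError: salt must be bytes
    matches = bcrypt.hashpw(
        password, stored_hash.encode('utf-8')  # the fix: encode before use
    ) == stored_hash.encode('utf-8')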

synapse/handlers/federation.py
@@ -345,6 +345,8 @@ class FederationHandler(BaseHandler):
         )

         missing_auth = required_auth - set(auth_events)
+        if missing_auth:
+            logger.info("Missing auth for backfill: %r", missing_auth)
         results = yield defer.gatherResults(
             [
                 self.replication_layer.get_pdu(
@@ -399,7 +401,7 @@ class FederationHandler(BaseHandler):
             # previous to work out the state.
             # TODO: We can probably do something more clever here.
             yield self._handle_new_event(
-                dest, event
+                dest, event, backfilled=True,
             )

         defer.returnValue(events)
@@ -1016,6 +1018,9 @@ class FederationHandler(BaseHandler):

         res = results.values()
         for event in res:
+            # We sign these again because there was a bug where we
+            # incorrectly signed things the first time round
+            if self.hs.is_mine_id(event.event_id):
                 event.signatures.update(
                     compute_event_signature(
                         event,

synapse/handlers/profile.py
@@ -36,13 +36,6 @@ class ProfileHandler(BaseHandler):
             "profile", self.on_profile_query
         )

-        distributor = hs.get_distributor()
-
-        distributor.observe("registered_user", self.registered_user)
-
-    def registered_user(self, user):
-        return self.store.create_profile(user.localpart)
-
     @defer.inlineCallbacks
     def get_displayname(self, target_user):
         if self.hs.is_mine(target_user):

synapse/handlers/register.py
@@ -23,7 +23,6 @@ from synapse.api.errors import (
 from ._base import BaseHandler
 from synapse.util.async import run_on_reactor
 from synapse.http.client import CaptchaServerHttpClient
-from synapse.util.distributor import registered_user

 import logging
 import urllib
@@ -37,8 +36,6 @@ class RegistrationHandler(BaseHandler):
         super(RegistrationHandler, self).__init__(hs)

         self.auth = hs.get_auth()
-        self.distributor = hs.get_distributor()
-        self.distributor.declare("registered_user")
         self.captcha_client = CaptchaServerHttpClient(hs)

         self._next_generated_user_id = None
@@ -140,9 +137,11 @@ class RegistrationHandler(BaseHandler):
                 password_hash=password_hash,
                 was_guest=was_guest,
                 make_guest=make_guest,
+                create_profile_with_localpart=(
+                    # If the user was a guest then they already have a profile
+                    None if was_guest else user.localpart
+                ),
             )
-
-            yield registered_user(self.distributor, user)
         else:
             # autogen a sequential user ID
             attempts = 0
@@ -160,7 +159,8 @@ class RegistrationHandler(BaseHandler):
                         user_id=user_id,
                         token=token,
                         password_hash=password_hash,
-                        make_guest=make_guest
+                        make_guest=make_guest,
+                        create_profile_with_localpart=user.localpart,
                     )
                 except SynapseError:
                     # if user id is taken, just generate another
@@ -168,7 +168,6 @@ class RegistrationHandler(BaseHandler):
                     user_id = None
                     token = None
                     attempts += 1
-            yield registered_user(self.distributor, user)

         # We used to generate default identicons here, but nowadays
         # we want clients to generate their own as part of their branding
@@ -201,8 +200,8 @@ class RegistrationHandler(BaseHandler):
             token=token,
             password_hash="",
             appservice_id=service_id,
+            create_profile_with_localpart=user.localpart,
         )
-        yield registered_user(self.distributor, user)
         defer.returnValue((user_id, token))

     @defer.inlineCallbacks
@@ -248,9 +247,9 @@ class RegistrationHandler(BaseHandler):
             yield self.store.register(
                 user_id=user_id,
                 token=token,
-                password_hash=None
+                password_hash=None,
+                create_profile_with_localpart=user.localpart,
             )
-            yield registered_user(self.distributor, user)
         except Exception as e:
             yield self.store.add_access_token_to_user(user_id, token)
             # Ignore Registration errors
@@ -388,17 +387,16 @@ class RegistrationHandler(BaseHandler):

         user = UserID(localpart, self.hs.hostname)
         user_id = user.to_string()
-        auth_handler = self.hs.get_handlers().auth_handler
-        token = auth_handler.generate_short_term_login_token(user_id, duration_seconds)
+        token = self.auth_handler().generate_short_term_login_token(
+            user_id, duration_seconds)

         if need_register:
             yield self.store.register(
                 user_id=user_id,
                 token=token,
-                password_hash=None
+                password_hash=None,
+                create_profile_with_localpart=user.localpart,
             )
-
-            yield registered_user(self.distributor, user)
         else:
             yield self.store.user_delete_access_tokens(user_id=user_id)
             yield self.store.add_access_token_to_user(user_id=user_id, token=token)

synapse/handlers/room.py
@@ -20,7 +20,7 @@ from ._base import BaseHandler

 from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
 from synapse.api.constants import (
-    EventTypes, JoinRules, RoomCreationPreset,
+    EventTypes, JoinRules, RoomCreationPreset, Membership,
 )
 from synapse.api.errors import AuthError, StoreError, SynapseError
 from synapse.util import stringutils
@@ -367,14 +367,10 @@ class RoomListHandler(BaseHandler):

         @defer.inlineCallbacks
         def handle_room(room_id):
-            # We pull each bit of state out indvidually to avoid pulling the
-            # full state into memory. Due to how the caching works this should
-            # be fairly quick, even if not originally in the cache.
-            def get_state(etype, state_key):
-                return self.state_handler.get_current_state(room_id, etype, state_key)
+            current_state = yield self.state_handler.get_current_state(room_id)

             # Double check that this is actually a public room.
-            join_rules_event = yield get_state(EventTypes.JoinRules, "")
+            join_rules_event = current_state.get((EventTypes.JoinRules, ""))
             if join_rules_event:
                 join_rule = join_rules_event.content.get("join_rule", None)
                 if join_rule and join_rule != JoinRules.PUBLIC:
@@ -382,47 +378,51 @@ class RoomListHandler(BaseHandler):

             result = {"room_id": room_id}

-            joined_users = yield self.store.get_users_in_room(room_id)
-            if len(joined_users) == 0:
+            num_joined_users = len([
+                1 for _, event in current_state.items()
+                if event.type == EventTypes.Member
+                and event.membership == Membership.JOIN
+            ])
+            if num_joined_users == 0:
                 return

-            result["num_joined_members"] = len(joined_users)
+            result["num_joined_members"] = num_joined_users

             aliases = yield self.store.get_aliases_for_room(room_id)
             if aliases:
                 result["aliases"] = aliases

-            name_event = yield get_state(EventTypes.Name, "")
+            name_event = yield current_state.get((EventTypes.Name, ""))
             if name_event:
                 name = name_event.content.get("name", None)
                 if name:
                     result["name"] = name

-            topic_event = yield get_state(EventTypes.Topic, "")
+            topic_event = current_state.get((EventTypes.Topic, ""))
             if topic_event:
                 topic = topic_event.content.get("topic", None)
                 if topic:
                     result["topic"] = topic

-            canonical_event = yield get_state(EventTypes.CanonicalAlias, "")
+            canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
             if canonical_event:
                 canonical_alias = canonical_event.content.get("alias", None)
                 if canonical_alias:
                     result["canonical_alias"] = canonical_alias

-            visibility_event = yield get_state(EventTypes.RoomHistoryVisibility, "")
+            visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
             visibility = None
             if visibility_event:
                 visibility = visibility_event.content.get("history_visibility", None)
             result["world_readable"] = visibility == "world_readable"

-            guest_event = yield get_state(EventTypes.GuestAccess, "")
+            guest_event = current_state.get((EventTypes.GuestAccess, ""))
             guest = None
             if guest_event:
                 guest = guest_event.content.get("guest_access", None)
             result["guest_can_join"] = guest == "can_join"

-            avatar_event = yield get_state("m.room.avatar", "")
+            avatar_event = current_state.get(("m.room.avatar", ""))
             if avatar_event:
                 avatar_url = avatar_event.content.get("url", None)
                 if avatar_url:
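The room-list refactor above trades one state-store lookup per state key for a single get_current_state call, then derives the joined-member count directly from the returned map instead of a separate get_users_in_room query. A minimal standalone sketch of that counting step, using hypothetical stand-ins for synapse's event objects (the real map is keyed by (event_type, state_key) tuples):

    from collections import namedtuple

    # Hypothetical stand-ins for synapse's event class and constants.
    Event = namedtuple("Event", ["type", "membership"])
    MEMBER, JOIN = "m.room.member", "join"

    def count_joined(current_state):
        # current_state: {(event_type, state_key): event}
        return len([
            1 for _, event in current_state.items()
            if event.type == MEMBER and event.membership == JOIN
        ])

    state = {
        (MEMBER, "@a:example.com"): Event(MEMBER, JOIN),
        (MEMBER, "@b:example.com"): Event(MEMBER, "leave"),
    }
    assert count_joined(state) == 1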
@@ -221,6 +221,9 @@ class TypingHandler(object):

     def get_all_typing_updates(self, last_id, current_id):
         # TODO: Work out a way to do this without scanning the entire state.
+        if last_id == current_id:
+            return []
+
         rows = []
         for room_id, serial in self._room_serials.items():
             if last_id < serial and serial <= current_id:
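The same last_id == current_id short-circuit appears in several storage hunks further down (account data, presence, push rules, tags): when a replication consumer is already caught up, the method returns immediately instead of running a query. A minimal sketch of the pattern, with illustrative in-memory data standing in for the real stream tables:

    def get_all_updates(last_id, current_id, rows_by_stream_id):
        # Nothing can have changed in an empty (last_id, current_id] window.
        if last_id == current_id:
            return []
        return [
            row for stream_id, row in sorted(rows_by_stream_id.items())
            if last_id < stream_id <= current_id
        ]

    assert get_all_updates(5, 5, {6: "a"}) == []
    assert get_all_updates(4, 6, {5: "a", 6: "b", 7: "c"}) == ["a", "b"]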
@@ -24,12 +24,13 @@ from synapse.http.endpoint import SpiderEndpoint

 from canonicaljson import encode_canonical_json

-from twisted.internet import defer, reactor, ssl, protocol
+from twisted.internet import defer, reactor, ssl, protocol, task
 from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
 from twisted.web.client import (
     BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent,
-    readBody, FileBodyProducer, PartialDownloadError,
+    readBody, PartialDownloadError,
 )
+from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer
 from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers
 from twisted.web._newclient import ResponseDone
@@ -468,3 +469,26 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):

     def creatorForNetloc(self, hostname, port):
         return self
+
+
+class FileBodyProducer(TwistedFileBodyProducer):
+    """Workaround for https://twistedmatrix.com/trac/ticket/8473
+
+    We override the pauseProducing and resumeProducing methods in twisted's
+    FileBodyProducer so that they do not raise exceptions if the task has
+    already completed.
+    """
+
+    def pauseProducing(self):
+        try:
+            super(FileBodyProducer, self).pauseProducing()
+        except task.TaskDone:
+            # task has already completed
+            pass
+
+    def resumeProducing(self):
+        try:
+            super(FileBodyProducer, self).resumeProducing()
+        except task.NotPaused:
+            # task was not paused (probably because it had already completed)
+            pass
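The subclass above works around twisted ticket 8473, where pauseProducing/resumeProducing can raise once the producer's cooperative task has already finished. A hedged usage sketch, assuming the FileBodyProducer defined above is in scope and with a purely illustrative URL; it drops in wherever twisted's own producer was used:

    from io import BytesIO

    from twisted.internet import reactor
    from twisted.web.client import Agent
    from twisted.web.http_headers import Headers

    def post_body(data):
        # The wrapped producer behaves like twisted's, minus the spurious
        # exceptions when the underlying cooperative task has completed.
        agent = Agent(reactor)
        producer = FileBodyProducer(BytesIO(data))
        return agent.request(
            b"POST", b"http://example.com/upload",
            Headers({b"Content-Type": [b"application/octet-stream"]}),
            producer,
        )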
@@ -273,16 +273,16 @@ class Mailer(object):

         sender_state_event = room_state[("m.room.member", event.sender)]
         sender_name = name_from_member_event(sender_state_event)
-        sender_avatar_url = None
-        if "avatar_url" in sender_state_event.content:
-            sender_avatar_url = sender_state_event.content["avatar_url"]
+        sender_avatar_url = sender_state_event.content.get("avatar_url")

         # 'hash' for deterministically picking default images: use
         # sender_hash % the number of default images to choose from
         sender_hash = string_ordinal_total(event.sender)

+        msgtype = event.content.get("msgtype")
+
         ret = {
-            "msgtype": event.content["msgtype"],
+            "msgtype": msgtype,
             "is_historical": event.event_id != notif['event_id'],
             "id": event.event_id,
             "ts": event.origin_server_ts,
@@ -291,9 +291,9 @@ class Mailer(object):
             "sender_hash": sender_hash,
         }

-        if event.content["msgtype"] == "m.text":
+        if msgtype == "m.text":
             self.add_text_message_vars(ret, event)
-        elif event.content["msgtype"] == "m.image":
+        elif msgtype == "m.image":
             self.add_image_message_vars(ret, event)

         if "body" in event.content:
@@ -302,16 +302,17 @@ class Mailer(object):
         return ret

     def add_text_message_vars(self, messagevars, event):
-        if "format" in event.content:
-            msgformat = event.content["format"]
-        else:
-            msgformat = None
+        msgformat = event.content.get("format")
         messagevars["format"] = msgformat

-        if msgformat == "org.matrix.custom.html":
-            messagevars["body_text_html"] = safe_markup(event.content["formatted_body"])
-        else:
-            messagevars["body_text_html"] = safe_text(event.content["body"])
+        formatted_body = event.content.get("formatted_body")
+        body = event.content.get("body")
+
+        if msgformat == "org.matrix.custom.html" and formatted_body:
+            messagevars["body_text_html"] = safe_markup(formatted_body)
+        elif body:
+            messagevars["body_text_html"] = safe_text(body)

         return messagevars
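The last three hunks all make the same defensive move: dict.get() instead of subscripting, so an event missing an optional field yields None rather than a KeyError that aborts the whole notification email. For example:

    content = {"msgtype": "m.text", "body": "hi"}  # no "format" key

    # Subscripting raises KeyError on absent optional fields:
    # msgformat = content["format"]

    # .get() degrades gracefully to None instead:
    msgformat = content.get("format")
    formatted_body = content.get("formatted_body")

    assert msgformat is None and formatted_body is None
    assert content.get("body") == "hi"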
@@ -45,7 +45,7 @@ class EventStreamRestServlet(ClientV1RestServlet):
             raise SynapseError(400, "Guest users must specify room_id param")
         if "room_id" in request.args:
             room_id = request.args["room_id"][0]
-        try:
-            handler = self.handlers.event_stream_handler
-            pagin_config = PaginationConfig.from_request(request)
-            timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
+
+        handler = self.handlers.event_stream_handler
+        pagin_config = PaginationConfig.from_request(request)
+        timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
@@ -66,9 +66,6 @@ class EventStreamRestServlet(ClientV1RestServlet):
             room_id=room_id,
             is_guest=is_guest,
         )
-        except:
-            logger.exception("Event stream failed")
-            raise

         defer.returnValue((200, chunk))
@@ -72,8 +72,6 @@ class RoomCreateRestServlet(ClientV1RestServlet):

     def get_room_config(self, request):
         user_supplied_config = parse_json_object_from_request(request)
-        # default visibility
-        user_supplied_config.setdefault("visibility", "public")
         return user_supplied_config

     def on_OPTIONS(self, request):
@@ -279,6 +277,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet):

     @defer.inlineCallbacks
     def on_GET(self, request):
+        try:
+            yield self.auth.get_user_by_req(request)
+        except AuthError:
+            # This endpoint isn't authed, but its useful to know who's hitting
+            # it if they *do* supply an access token
+            pass
+
         handler = self.hs.get_room_list_handler()
         data = yield handler.get_aggregated_public_room_list()
@@ -26,6 +26,7 @@ from .thumbnailer import Thumbnailer

 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.util.stringutils import random_string
+from synapse.api.errors import SynapseError

 from twisted.internet import defer, threads
@@ -134,10 +135,15 @@ class MediaRepository(object):
             request_path = "/".join((
                 "/_matrix/media/v1/download", server_name, media_id,
             ))
-            length, headers = yield self.client.get_file(
-                server_name, request_path, output_stream=f,
-                max_size=self.max_upload_size,
-            )
+            try:
+                length, headers = yield self.client.get_file(
+                    server_name, request_path, output_stream=f,
+                    max_size=self.max_upload_size,
+                )
+            except Exception as e:
+                logger.warn("Failed to fetch remoted media %r", e)
+                raise SynapseError(502, "Failed to fetch remoted media")
+
         media_type = headers["Content-Type"][0]
         time_now_ms = self.clock.time_msec()
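Wrapping get_file in try/except converts any remote-fetch failure into an explicit 502 rather than letting it surface as a generic 500. A condensed sketch of the translation, with a simplified stand-in for synapse.api.errors.SynapseError:

    import logging

    logger = logging.getLogger(__name__)

    class SynapseError(Exception):
        # Simplified stand-in: the real class also carries a Matrix errcode.
        def __init__(self, code, msg):
            super(SynapseError, self).__init__(msg)
            self.code = code

    def fetch_or_502(fetch):
        # Run the supplied zero-argument fetch callable; on any failure,
        # log it and re-raise as a deliberate 502 Bad Gateway.
        try:
            return fetch()
        except Exception as e:
            logger.warning("Failed to fetch remote media %r", e)
            raise SynapseError(502, "Failed to fetch remote media")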
@@ -252,6 +252,7 @@ class PreviewUrlResource(Resource):

         og = {}
         for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
-            og[tag.attrib['property']] = tag.attrib['content']
+            if 'content' in tag.attrib:
+                og[tag.attrib['property']] = tag.attrib['content']

         # TODO: grab article: meta tags too, e.g.:
@@ -279,7 +280,7 @@ class PreviewUrlResource(Resource):
             # TODO: consider inlined CSS styles as well as width & height attribs
             images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
             images = sorted(images, key=lambda i: (
-                -1 * int(i.attrib['width']) * int(i.attrib['height'])
+                -1 * float(i.attrib['width']) * float(i.attrib['height'])
             ))
             if not images:
                 images = tree.xpath("//img[@src]")
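The int()-to-float() switch matters because HTML width/height attributes may carry fractional values such as "120.5", which int() rejects with a ValueError but float() accepts; values the XPath number() filter cannot parse at all (e.g. "100%") never reach the sort. For instance:

    images = [
        {"width": "120.5", "height": "80"},
        {"width": "300", "height": "200"},
    ]
    # int("120.5") would raise ValueError; float() handles it.
    images.sort(key=lambda i: -1 * float(i["width"]) * float(i["height"]))
    assert images[0]["width"] == "300"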
@@ -287,9 +288,9 @@ class PreviewUrlResource(Resource):
                 og['og:image'] = images[0].attrib['src']

             # pre-cache the image for posterity
-            # FIXME: it might be cleaner to use the same flow as the main /preview_url request
-            # itself and benefit from the same caching etc. But for now we just rely on the
-            # caching on the master request to speed things up.
+            # FIXME: it might be cleaner to use the same flow as the main /preview_url
+            # request itself and benefit from the same caching etc. But for now we
+            # just rely on the caching on the master request to speed things up.
             if 'og:image' in og and og['og:image']:
                 image_info = yield self._download_url(
                     self._rebase_url(og['og:image'], media_info['uri']), requester.user
@@ -138,6 +138,9 @@ class AccountDataStore(SQLBaseStore):
             A deferred pair of lists of tuples of stream_id int, user_id string,
             room_id string, type string, and content string.
         """
+        if last_room_id == current_id and last_global_id == current_id:
+            return defer.succeed(([], []))
+
         def get_updated_account_data_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, account_data_type, content"
@@ -118,6 +118,9 @@ class PresenceStore(SQLBaseStore):
         )

     def get_all_presence_updates(self, last_id, current_id):
+        if last_id == current_id:
+            return defer.succeed([])
+
         def get_all_presence_updates_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, state, last_active_ts,"
@@ -421,6 +421,9 @@ class PushRuleStore(SQLBaseStore):

     def get_all_push_rule_updates(self, last_id, current_id, limit):
         """Get all the push rules changes that have happend on the server"""
+        if last_id == current_id:
+            return defer.succeed([])
+
         def get_all_push_rule_updates_txn(txn):
             sql = (
                 "SELECT stream_id, event_stream_ordering, user_id, rule_id,"
@@ -76,7 +76,8 @@ class RegistrationStore(SQLBaseStore):

     @defer.inlineCallbacks
     def register(self, user_id, token, password_hash,
-                 was_guest=False, make_guest=False, appservice_id=None):
+                 was_guest=False, make_guest=False, appservice_id=None,
+                 create_profile_with_localpart=None):
         """Attempts to register an account.

         Args:
@@ -88,6 +89,8 @@ class RegistrationStore(SQLBaseStore):
             make_guest (boolean): True if the the new user should be guest,
                 false to add a regular user account.
             appservice_id (str): The ID of the appservice registering the user.
+            create_profile_with_localpart (str): Optionally create a profile for
+                the given localpart.
         Raises:
             StoreError if the user_id could not be registered.
         """
@@ -99,7 +102,8 @@ class RegistrationStore(SQLBaseStore):
             password_hash,
             was_guest,
             make_guest,
-            appservice_id
+            appservice_id,
+            create_profile_with_localpart,
         )
         self.get_user_by_id.invalidate((user_id,))
         self.is_guest.invalidate((user_id,))
@@ -112,7 +116,8 @@ class RegistrationStore(SQLBaseStore):
         password_hash,
         was_guest,
         make_guest,
-        appservice_id
+        appservice_id,
+        create_profile_with_localpart,
     ):
         now = int(self.clock.time())
@@ -157,6 +162,12 @@ class RegistrationStore(SQLBaseStore):
             (next_id, user_id, token,)
         )

+        if create_profile_with_localpart:
+            txn.execute(
+                "INSERT INTO profiles(user_id) VALUES (?)",
+                (create_profile_with_localpart,)
+            )
+
     @cached()
     def get_user_by_id(self, user_id):
         return self._simple_select_one(
@@ -68,6 +68,9 @@ class TagsStore(SQLBaseStore):
             A deferred list of tuples of stream_id int, user_id string,
             room_id string, tag string and content string.
         """
+        if last_id == current_id:
+            defer.returnValue([])
+
         def get_all_updated_tags_txn(txn):
             sql = (
                 "SELECT stream_id, user_id, room_id"
@@ -22,7 +22,10 @@ Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])


 def get_domain_from_id(string):
-    return string.split(":", 1)[1]
+    try:
+        return string.split(":", 1)[1]
+    except IndexError:
+        raise SynapseError(400, "Invalid ID: %r", string)


 class DomainSpecificString(
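With the guard, an ID missing its domain part now fails fast with a 400 instead of an opaque IndexError from deep inside a handler. A standalone sketch of the behaviour, substituting ValueError for SynapseError:

    def get_domain_from_id(string):
        try:
            return string.split(":", 1)[1]
        except IndexError:
            # synapse raises SynapseError(400, ...) here instead.
            raise ValueError("Invalid ID: %r" % (string,))

    assert get_domain_from_id("@frank:example.com") == "example.com"
    try:
        get_domain_from_id("not-an-id")
    except ValueError:
        pass  # a clean 400-style error rather than an IndexError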
@@ -27,10 +27,6 @@ import logging

 logger = logging.getLogger(__name__)


-def registered_user(distributor, user):
-    return distributor.fire("registered_user", user)
-
-
 def user_left_room(distributor, user, room_id):
     return preserve_context_over_fn(
         distributor.fire,
@@ -30,7 +30,7 @@ class ConfigGenerationTestCase(unittest.TestCase):
         shutil.rmtree(self.dir)

     def test_generate_config_generates_files(self):
-        HomeServerConfig.load_config("", [
+        HomeServerConfig.load_or_generate_config("", [
             "--generate-config",
             "-c", self.file,
             "--report-stats=yes",
@@ -34,6 +34,8 @@ class ConfigLoadingTestCase(unittest.TestCase):
         self.generate_config_and_remove_lines_containing("server_name")
         with self.assertRaises(Exception):
             HomeServerConfig.load_config("", ["-c", self.file])
+        with self.assertRaises(Exception):
+            HomeServerConfig.load_or_generate_config("", ["-c", self.file])

     def test_generates_and_loads_macaroon_secret_key(self):
         self.generate_config()
@@ -54,11 +56,24 @@ class ConfigLoadingTestCase(unittest.TestCase):
                 "was: %r" % (config.macaroon_secret_key,)
             )

+        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+        self.assertTrue(
+            hasattr(config, "macaroon_secret_key"),
+            "Want config to have attr macaroon_secret_key"
+        )
+        if len(config.macaroon_secret_key) < 5:
+            self.fail(
+                "Want macaroon secret key to be string of at least length 5,"
+                "was: %r" % (config.macaroon_secret_key,)
+            )
+
     def test_load_succeeds_if_macaroon_secret_key_missing(self):
         self.generate_config_and_remove_lines_containing("macaroon")
         config1 = HomeServerConfig.load_config("", ["-c", self.file])
         config2 = HomeServerConfig.load_config("", ["-c", self.file])
+        config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
         self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key)
+        self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key)

     def test_disable_registration(self):
         self.generate_config()
@@ -70,14 +85,17 @@ class ConfigLoadingTestCase(unittest.TestCase):
         config = HomeServerConfig.load_config("", ["-c", self.file])
         self.assertFalse(config.enable_registration)

+        config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+        self.assertFalse(config.enable_registration)
+
         # Check that either config value is clobbered by the command line.
-        config = HomeServerConfig.load_config("", [
+        config = HomeServerConfig.load_or_generate_config("", [
             "-c", self.file, "--enable-registration"
         ])
         self.assertTrue(config.enable_registration)

     def generate_config(self):
-        HomeServerConfig.load_config("", [
+        HomeServerConfig.load_or_generate_config("", [
             "--generate-config",
             "-c", self.file,
             "--report-stats=yes",
@@ -41,14 +41,15 @@ class RegistrationTestCase(unittest.TestCase):
             handlers=None,
             http_client=None,
             expire_access_token=True)
+        self.auth_handler = Mock(
+            generate_short_term_login_token=Mock(return_value='secret'))
         self.hs.handlers = RegistrationHandlers(self.hs)
         self.handler = self.hs.get_handlers().registration_handler
         self.hs.get_handlers().profile_handler = Mock()
         self.mock_handler = Mock(spec=[
             "generate_short_term_login_token",
         ])
-        self.hs.get_handlers().auth_handler = self.mock_handler
+        self.hs.get_auth_handler = Mock(return_value=self.auth_handler)

     @defer.inlineCallbacks
     def test_user_is_created_and_logged_in_if_doesnt_exist(self):
@@ -56,8 +57,6 @@ class RegistrationTestCase(unittest.TestCase):
         local_part = "someone"
         display_name = "someone"
         user_id = "@someone:test"
-        mock_token = self.mock_handler.generate_short_term_login_token
-        mock_token.return_value = 'secret'
         result_user_id, result_token = yield self.handler.get_or_create_user(
             local_part, display_name, duration_ms)
         self.assertEquals(result_user_id, user_id)
@@ -75,8 +74,6 @@ class RegistrationTestCase(unittest.TestCase):
         local_part = "frank"
         display_name = "Frank"
         user_id = "@frank:test"
-        mock_token = self.mock_handler.generate_short_term_login_token
-        mock_token.return_value = 'secret'
         result_user_id, result_token = yield self.handler.get_or_create_user(
             local_part, display_name, duration_ms)
         self.assertEquals(result_user_id, user_id)
@@ -54,6 +54,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
     config.trusted_third_party_id_servers = []
     config.room_invite_state_types = []

+    config.use_frozen_dicts = True
     config.database_config = {"name": "sqlite3"}

     if "clock" not in kargs:
tox.ini
@@ -11,7 +11,7 @@ deps =
 setenv =
     PYTHONDONTWRITEBYTECODE = no_byte_code
 commands =
-    /bin/bash -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
+    /bin/sh -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
         {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}"
     {env:DUMP_COVERAGE_COMMAND:coverage report -m}
@@ -26,4 +26,4 @@ skip_install = True
 basepython = python2.7
 deps =
     flake8
-commands = /bin/bash -c "flake8 synapse tests {env:PEP8SUFFIX:}"
+commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}"