Merge remote-tracking branch 'origin/develop' into markjh/worker_config
commit f1f70bf4b5

 CHANGES.rst | 28 +++++++++++++++++++++++++++-
@@ -1,3 +1,29 @@
+Changes in synapse v0.16.1-rc1 (2016-06-15)
+===========================================
+
+Features: None
+
+Changes:
+
+* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
+* 502 on ``/thumbnail`` when we can't connect to remote server (PR #862)
+* Linearize fetching of gaps on incoming events (PR #871)
+
+
+Bug fixes:
+
+* Fix bug where rooms were marked as published by default (PR #857)
+* Fix bug when joining a room with an event with an invalid sender (PR #868)
+* Fix bug where backfilled events were sent down sync streams (PR #869)
+* Fix bug where outgoing connections could wedge indefinitely, causing push
+  notifications to be unreliable (PR #870)
+
+
+Performance improvements:
+
+* Improve ``/publicRooms`` performance (PR #859)
+
+
 Changes in synapse v0.16.0 (2016-06-09)
 =======================================
 
@@ -28,7 +54,7 @@ Bug fixes:
 * Fix bug where synapse sent malformed transactions to AS's when retrying
   transactions (Commits 310197b, 8437906)
 
-Performance Improvements:
+Performance improvements:
 
 * Remove event fetching from DB threads (PR #835)
 * Change the way we cache events (PR #836)
@@ -80,6 +80,7 @@ echo >&2 "Running sytest with PostgreSQL";
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
     --pusher \
+    --synchrotron \
     --port-base $PORT_BASE
 
 cd ..
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.16.0"
+__version__ = "0.16.1-rc1"
@@ -31,6 +31,9 @@ logger = logging.getLogger(__name__)
 
 
 class FederationBase(object):
+    def __init__(self, hs):
+        pass
+
     @defer.inlineCallbacks
     def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
                                        include_none=False):
@@ -52,6 +52,8 @@ sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
 
 
 class FederationClient(FederationBase):
+    def __init__(self, hs):
+        super(FederationClient, self).__init__(hs)
 
     def start_get_pdu_cache(self):
         self._get_pdu_cache = ExpiringCache(
@@ -19,6 +19,7 @@ from twisted.internet import defer
 from .federation_base import FederationBase
 from .units import Transaction, Edu
 
+from synapse.util.async import Linearizer
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
 import synapse.metrics
@@ -44,6 +45,11 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[
 
 
 class FederationServer(FederationBase):
+    def __init__(self, hs):
+        super(FederationServer, self).__init__(hs)
+
+        self._room_pdu_linearizer = Linearizer()
+
     def set_handler(self, handler):
         """Sets the handler that the replication layer will use to communicate
         receipt of new PDUs from other home servers. The required methods are
@@ -491,43 +497,51 @@ class FederationServer(FederationBase):
             pdu.internal_metadata.outlier = True
         elif min_depth and pdu.depth > min_depth:
             if get_missing and prevs - seen:
-                latest = yield self.store.get_latest_event_ids_in_room(
-                    pdu.room_id
-                )
+                # If we're missing stuff, ensure we only fetch stuff one
+                # at a time.
+                with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
+                    # We recalculate seen, since it may have changed.
+                    have_seen = yield self.store.have_events(prevs)
+                    seen = set(have_seen.keys())
 
-                # We add the prev events that we have seen to the latest
-                # list to ensure the remote server doesn't give them to us
-                latest = set(latest)
-                latest |= seen
+                    if prevs - seen:
+                        latest = yield self.store.get_latest_event_ids_in_room(
+                            pdu.room_id
+                        )
 
-                logger.info(
-                    "Missing %d events for room %r: %r...",
-                    len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
-                )
+                        # We add the prev events that we have seen to the latest
+                        # list to ensure the remote server doesn't give them to us
+                        latest = set(latest)
+                        latest |= seen
 
-                missing_events = yield self.get_missing_events(
-                    origin,
-                    pdu.room_id,
-                    earliest_events_ids=list(latest),
-                    latest_events=[pdu],
-                    limit=10,
-                    min_depth=min_depth,
-                )
+                        logger.info(
+                            "Missing %d events for room %r: %r...",
+                            len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+                        )
 
-                # We want to sort these by depth so we process them and
-                # tell clients about them in order.
-                missing_events.sort(key=lambda x: x.depth)
+                        missing_events = yield self.get_missing_events(
+                            origin,
+                            pdu.room_id,
+                            earliest_events_ids=list(latest),
+                            latest_events=[pdu],
+                            limit=10,
+                            min_depth=min_depth,
+                        )
 
-                for e in missing_events:
-                    yield self._handle_new_pdu(
-                        origin,
-                        e,
-                        get_missing=False
-                    )
+                        # We want to sort these by depth so we process them and
+                        # tell clients about them in order.
+                        missing_events.sort(key=lambda x: x.depth)
 
-                have_seen = yield self.store.have_events(
-                    [ev for ev, _ in pdu.prev_events]
-                )
+                        for e in missing_events:
+                            yield self._handle_new_pdu(
+                                origin,
+                                e,
+                                get_missing=False
+                            )
+
+                        have_seen = yield self.store.have_events(
+                            [ev for ev, _ in pdu.prev_events]
+                        )
 
         prevs = {e_id for e_id, _ in pdu.prev_events}
         seen = set(have_seen.keys())
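Note: the `with (yield self._room_pdu_linearizer.queue(pdu.room_id)):` pattern above serializes gap-fetching per room, so a second PDU for the same room waits and can recheck `have_events` before fetching again. A minimal sketch of how such a linearizer can be built on Twisted primitives (an illustrative stand-in, not `synapse.util.async.Linearizer` itself):

    from twisted.internet import defer


    class _Release(object):
        """Context manager that releases the underlying lock on exit."""
        def __init__(self, lock):
            self._lock = lock

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            self._lock.release()


    class ToyLinearizer(object):
        """One DeferredLock per key: callers for the same key run one at a
        time, while callers for different keys proceed in parallel."""
        def __init__(self):
            self._locks = {}

        @defer.inlineCallbacks
        def queue(self, key):
            lock = self._locks.setdefault(key, defer.DeferredLock())
            yield lock.acquire()
            defer.returnValue(_Release(lock))

Usage mirrors the diff: `with (yield linearizer.queue(room_id)): ...`.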
@@ -72,5 +72,7 @@ class ReplicationLayer(FederationClient, FederationServer):
 
         self.hs = hs
 
+        super(ReplicationLayer, self).__init__(hs)
+
     def __str__(self):
         return "<ReplicationLayer(%s)>" % self.server_name
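Note: the new `__init__` methods on FederationBase, FederationClient, FederationServer and ReplicationLayer form a cooperative-init chain. Because `ReplicationLayer(FederationClient, FederationServer)` uses multiple inheritance, the single `super(...).__init__(hs)` call walks the whole MRO, which is why even FederationBase needs a no-op `__init__`. A stripped-down, self-contained model of that shape (not the real classes):

    class FederationBase(object):
        def __init__(self, hs):
            pass                      # end of the chain; object.__init__ takes no args


    class FederationClient(FederationBase):
        def __init__(self, hs):
            # For a ReplicationLayer instance, next in the MRO is FederationServer.
            super(FederationClient, self).__init__(hs)


    class FederationServer(FederationBase):
        def __init__(self, hs):
            super(FederationServer, self).__init__(hs)
            self._room_pdu_linearizer = object()   # stands in for Linearizer()


    class ReplicationLayer(FederationClient, FederationServer):
        def __init__(self, hs):
            self.hs = hs
            super(ReplicationLayer, self).__init__(hs)


    # ['ReplicationLayer', 'FederationClient', 'FederationServer', 'FederationBase', 'object']
    print([cls.__name__ for cls in ReplicationLayer.__mro__])
    layer = ReplicationLayer(hs=None)   # every __init__ in the chain runs exactly once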
@@ -626,6 +626,6 @@ class AuthHandler(BaseHandler):
             Whether self.hash(password) == stored_hash (bool).
         """
         if stored_hash:
-            return bcrypt.hashpw(password, stored_hash) == stored_hash
+            return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash
         else:
             return False
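Note: the `.encode('utf-8')` matters because the `bcrypt` library operates on byte strings and raises `TypeError` when handed a unicode salt/hash, while values read back from the database may arrive as unicode. A small sketch of the failure mode being fixed (assumes the `bcrypt` PyPI package):

    import bcrypt

    stored_hash = bcrypt.hashpw(b"s3kr3t", bcrypt.gensalt())  # bytes, e.g. b"$2a$..."
    from_db = stored_hash.decode('utf-8')                     # DB layers often hand back unicode

    # Passing the unicode value straight to hashpw raises TypeError;
    # encoding it first round-trips cleanly:
    assert bcrypt.hashpw(b"s3kr3t", from_db.encode('utf-8')) == stored_hash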
@@ -345,19 +345,21 @@ class FederationHandler(BaseHandler):
         )
 
         missing_auth = required_auth - set(auth_events)
-        results = yield defer.gatherResults(
-            [
-                self.replication_layer.get_pdu(
-                    [dest],
-                    event_id,
-                    outlier=True,
-                    timeout=10000,
-                )
-                for event_id in missing_auth
-            ],
-            consumeErrors=True
-        ).addErrback(unwrapFirstError)
-        auth_events.update({a.event_id: a for a in results})
+        if missing_auth:
+            logger.info("Missing auth for backfill: %r", missing_auth)
+            results = yield defer.gatherResults(
+                [
+                    self.replication_layer.get_pdu(
+                        [dest],
+                        event_id,
+                        outlier=True,
+                        timeout=10000,
+                    )
+                    for event_id in missing_auth
+                ],
+                consumeErrors=True
+            ).addErrback(unwrapFirstError)
+            auth_events.update({a.event_id: a for a in results})
 
         ev_infos = []
         for a in auth_events.values():
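Note: the `gatherResults(..., consumeErrors=True).addErrback(unwrapFirstError)` idiom above fans the fetches out in parallel and fails fast on the first error (`unwrapFirstError` is Synapse's helper for unwrapping twisted's `FirstError`). A self-contained sketch of the same pattern, with a hypothetical `fetch` callable:

    from twisted.internet import defer


    @defer.inlineCallbacks
    def fetch_all(fetch, event_ids):
        # Run all fetches concurrently. consumeErrors=True stops the "losing"
        # Deferreds from also logging unhandled-error noise; the returned
        # failure is a twisted.internet.defer.FirstError wrapping the first one.
        results = yield defer.gatherResults(
            [fetch(event_id) for event_id in event_ids],
            consumeErrors=True,
        )
        # gatherResults preserves input order, so zip pairs them back up.
        defer.returnValue({event_id: result
                           for event_id, result in zip(event_ids, results)})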
@@ -399,7 +401,7 @@ class FederationHandler(BaseHandler):
             # previous to work out the state.
             # TODO: We can probably do something more clever here.
             yield self._handle_new_event(
-                dest, event
+                dest, event, backfilled=True,
             )
 
         defer.returnValue(events)
@@ -24,12 +24,13 @@ from synapse.http.endpoint import SpiderEndpoint
 
 from canonicaljson import encode_canonical_json
 
-from twisted.internet import defer, reactor, ssl, protocol
+from twisted.internet import defer, reactor, ssl, protocol, task
 from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
 from twisted.web.client import (
     BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent,
-    readBody, FileBodyProducer, PartialDownloadError,
+    readBody, PartialDownloadError,
 )
+from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer
 from twisted.web.http import PotentialDataLoss
 from twisted.web.http_headers import Headers
 from twisted.web._newclient import ResponseDone
@@ -468,3 +469,26 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
 
     def creatorForNetloc(self, hostname, port):
         return self
+
+
+class FileBodyProducer(TwistedFileBodyProducer):
+    """Workaround for https://twistedmatrix.com/trac/ticket/8473
+
+    We override the pauseProducing and resumeProducing methods in twisted's
+    FileBodyProducer so that they do not raise exceptions if the task has
+    already completed.
+    """
+
+    def pauseProducing(self):
+        try:
+            super(FileBodyProducer, self).pauseProducing()
+        except task.TaskDone:
+            # task has already completed
+            pass
+
+    def resumeProducing(self):
+        try:
+            super(FileBodyProducer, self).resumeProducing()
+        except task.NotPaused:
+            # task was not paused (probably because it had already completed)
+            pass
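Note: `FileBodyProducer` is what gets handed to twisted's `Agent` as a request body; the subclass above just keeps a late `pauseProducing`/`resumeProducing` from blowing up once the body has been fully sent. A hedged usage sketch (the URL is a placeholder and the Agent wiring here is generic twisted, not Synapse's exact call site):

    from StringIO import StringIO

    from twisted.internet import reactor
    from twisted.web.client import Agent
    from twisted.web.http_headers import Headers

    agent = Agent(reactor)
    body = FileBodyProducer(StringIO('{"hello": "world"}'))  # the subclass defined above
    d = agent.request(
        "PUT",
        "http://localhost:8008/_matrix/example",             # placeholder URL
        Headers({"Content-Type": ["application/json"]}),
        body,
    )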
@@ -252,7 +252,8 @@ class PreviewUrlResource(Resource):
 
         og = {}
         for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
-            og[tag.attrib['property']] = tag.attrib['content']
+            if 'content' in tag.attrib:
+                og[tag.attrib['property']] = tag.attrib['content']
 
         # TODO: grab article: meta tags too, e.g.:
 
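Note: the new `'content' in tag.attrib` guard exists because lxml's `attrib` mapping raises `KeyError` for missing attributes, and real pages do ship `og:` meta tags without a `content` attribute. A quick sketch (assumes lxml):

    from lxml import etree

    tag = etree.fromstring('<meta property="og:title"/>')
    print(tag.attrib['property'])      # 'og:title'
    print('content' in tag.attrib)     # False
    # tag.attrib['content'] would raise KeyError -- the guard above skips such tags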
@@ -279,7 +280,7 @@
         # TODO: consider inlined CSS styles as well as width & height attribs
         images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
         images = sorted(images, key=lambda i: (
-            -1 * int(i.attrib['width']) * int(i.attrib['height'])
+            -1 * float(i.attrib['width']) * float(i.attrib['height'])
        ))
         if not images:
             images = tree.xpath("//img[@src]")
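Note: switching `int()` to `float()` is defensive; width/height attributes scraped from the wild are not always integer literals, and `int()` rejects decimal strings outright while `float()` still yields a usable sort key:

    print(float("543.5") * float("20"))   # 10870.0 -- works as a sort key
    try:
        int("543.5")
    except ValueError:
        pass                              # int() rejects decimal strings outright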
@@ -287,9 +288,9 @@
             og['og:image'] = images[0].attrib['src']
 
         # pre-cache the image for posterity
-        # FIXME: it might be cleaner to use the same flow as the main /preview_url request
-        # itself and benefit from the same caching etc. But for now we just rely on the
-        # caching on the master request to speed things up.
+        # FIXME: it might be cleaner to use the same flow as the main /preview_url
+        # request itself and benefit from the same caching etc. But for now we
+        # just rely on the caching on the master request to speed things up.
         if 'og:image' in og and og['og:image']:
             image_info = yield self._download_url(
                 self._rebase_url(og['og:image'], media_info['uri']), requester.user
@@ -22,7 +22,10 @@ Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])
 
 
 def get_domain_from_id(string):
-    return string.split(":", 1)[1]
+    try:
+        return string.split(":", 1)[1]
+    except IndexError:
+        raise SynapseError(400, "Invalid ID: %r", string)
 
 
 class DomainSpecificString(
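Note: behaviour sketch for the hardened `get_domain_from_id` (the inputs are illustrative Matrix IDs; `SynapseError` lives in `synapse.api.errors`):

    get_domain_from_id("@user:example.com")      # -> "example.com"
    get_domain_from_id("!room:matrix.org:8448")  # -> "matrix.org:8448" (maxsplit=1 keeps the port)
    get_domain_from_id("not-a-matrix-id")        # raises SynapseError(400, ...) instead of IndexError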