Commit 00db778

Merge tag 'v1.20.0rc5' into develop
Synapse 1.20.0rc5 (2020-09-18)
==============================

In addition to the below, Synapse 1.20.0rc5 also includes the bug fix that was included in 1.19.3.

Features
--------

- Add flags to the `/versions` endpoint for whether new rooms default to using E2EE. ([\#8343](#8343))

Bugfixes
--------

- Fix rate limiting of federation `/send` requests. ([\#8342](#8342))
- Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](#8349))

Internal Changes
----------------

- Blacklist [MSC2753](matrix-org/matrix-spec-proposals#2753) SyTests until it is implemented. ([\#8285](#8285))
2 parents d688b4b + c7e060b commit 00db778

File tree

10 files changed (+128, -28 lines)


CHANGES.md (+33)

@@ -1,3 +1,36 @@
+Synapse 1.20.0rc5 (2020-09-18)
+==============================
+
+In addition to the below, Synapse 1.20.0rc5 also includes the bug fix that was included in 1.19.3.
+
+Features
+--------
+
+- Add flags to the `/versions` endpoint for whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343))
+
+
+Bugfixes
+--------
+
+- Fix rate limiting of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342))
+- Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](https://github.com/matrix-org/synapse/issues/8349))
+
+
+Internal Changes
+----------------
+
+- Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. ([\#8285](https://github.com/matrix-org/synapse/issues/8285))
+
+
+Synapse 1.19.3 (2020-09-18)
+===========================
+
+Bugfixes
+--------
+
+- Partially mitigate bug where newly joined servers couldn't get past events in a room when there is a malformed event. ([\#8350](https://github.com/matrix-org/synapse/issues/8350))
+
+
 Synapse 1.20.0rc4 (2020-09-16)
 ==============================
 

changelog.d/8285.misc (-1)

This file was deleted.

changelog.d/8342.bugfix (-1)

This file was deleted.

debian/changelog (+6)

@@ -4,6 +4,12 @@ matrix-synapse-py3 (1.20.0ubuntu1) UNRELEASED; urgency=medium
 
  -- Dexter Chua <[email protected]>  Wed, 26 Aug 2020 12:41:36 +0000
 
+matrix-synapse-py3 (1.19.3) stable; urgency=medium
+
+  * New synapse release 1.19.3.
+
+ -- Synapse Packaging team <[email protected]>  Fri, 18 Sep 2020 14:59:30 +0100
+
 matrix-synapse-py3 (1.19.2) stable; urgency=medium
 
   * New synapse release 1.19.2.

synapse/__init__.py (+1, -1)

@@ -48,7 +48,7 @@
 except ImportError:
     pass
 
-__version__ = "1.20.0rc4"
+__version__ = "1.20.0rc5"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

synapse/federation/federation_client.py (+3, -5)

@@ -217,11 +217,9 @@ async def backfill(
             for p in transaction_data["pdus"]
         ]
 
-        # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = await make_deferred_yieldable(
-            defer.gatherResults(
-                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True,
-            ).addErrback(unwrapFirstError)
+        # Check signatures and hash of pdus, removing any from the list that fail checks
+        pdus[:] = await self._check_sigs_and_hash_and_fetch(
+            dest, pdus, outlier=True, room_version=room_version
         )
 
         return pdus
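
The switch to `_check_sigs_and_hash_and_fetch` means a PDU whose signature or content hash fails verification is dropped from the backfill response instead of failing the whole batch. A minimal standalone sketch of that filter-rather-than-fail pattern, using hypothetical helper names rather than Synapse's internals:

```python
import asyncio
from typing import Awaitable, Callable, List

# Hypothetical stand-ins for Synapse's event type and per-event checker.
Pdu = dict
CheckFn = Callable[[Pdu], Awaitable[None]]

async def filter_valid_pdus(pdus: List[Pdu], check: CheckFn) -> List[Pdu]:
    """Run the signature/hash check on every PDU concurrently and keep only
    the ones that pass, rather than aborting the whole backfill on the first
    failure."""
    results = await asyncio.gather(
        *(check(p) for p in pdus), return_exceptions=True
    )
    return [
        pdu
        for pdu, result in zip(pdus, results)
        if not isinstance(result, Exception)  # drop events that failed checks
    ]
```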

synapse/handlers/federation.py (+57, -8)

@@ -917,15 +917,26 @@ async def backfill(self, dest, room_id, limit, extremities):
 
         return events
 
-    async def maybe_backfill(self, room_id, current_depth):
+    async def maybe_backfill(
+        self, room_id: str, current_depth: int, limit: int
+    ) -> bool:
         """Checks the database to see if we should backfill before paginating,
         and if so do.
+
+        Args:
+            room_id
+            current_depth: The depth from which we're paginating from. This is
+                used to decide if we should backfill and what extremities to
+                use.
+            limit: The number of events that the pagination request will
+                return. This is used as part of the heuristic to decide if we
+                should back paginate.
         """
         extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
 
         if not extremities:
             logger.debug("Not backfilling as no extremeties found.")
-            return
+            return False
 
         # We only want to paginate if we can actually see the events we'll get,
         # as otherwise we'll just spend a lot of resources to get redacted
@@ -978,16 +989,54 @@ async def maybe_backfill(self, room_id, current_depth):
         sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
         max_depth = sorted_extremeties_tuple[0][1]
 
+        # If we're approaching an extremity we trigger a backfill, otherwise we
+        # no-op.
+        #
+        # We chose twice the limit here as then clients paginating backwards
+        # will send pagination requests that trigger backfill at least twice
+        # using the most recent extremity before it gets removed (see below). We
+        # chose more than one times the limit in case of failure, but choosing a
+        # much larger factor will result in triggering a backfill request much
+        # earlier than necessary.
+        if current_depth - 2 * limit > max_depth:
+            logger.debug(
+                "Not backfilling as we don't need to. %d < %d - 2 * %d",
+                max_depth,
+                current_depth,
+                limit,
+            )
+            return False
+
+        logger.debug(
+            "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
+            room_id,
+            current_depth,
+            max_depth,
+            sorted_extremeties_tuple,
+        )
+
+        # We ignore extremities that have a greater depth than our current depth
+        # as:
+        #    1. we don't really care about getting events that have happened
+        #       before our current position; and
+        #    2. we have likely previously tried and failed to backfill from that
+        #       extremity, so to avoid getting "stuck" requesting the same
+        #       backfill repeatedly we drop those extremities.
+        filtered_sorted_extremeties_tuple = [
+            t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
+        ]
+
+        # However, we need to check that the filtered extremities are non-empty.
+        # If they are empty then either we can a) bail or b) still attempt to
+        # backill. We opt to try backfilling anyway just in case we do get
+        # relevant events.
+        if filtered_sorted_extremeties_tuple:
+            sorted_extremeties_tuple = filtered_sorted_extremeties_tuple
+
         # We don't want to specify too many extremities as it causes the backfill
         # request URI to be too long.
         extremities = dict(sorted_extremeties_tuple[:5])
 
-        if current_depth > max_depth:
-            logger.debug(
-                "Not backfilling as we don't need to. %d < %d", max_depth, current_depth
-            )
-            return
-
         # Now we need to decide which hosts to hit first.
 
         # First we try hosts that are already in the room
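
The heuristic added above only triggers backfill when the client's pagination position is within twice the requested page size of the deepest known backwards extremity; otherwise the request is a no-op. A small self-contained sketch of that arithmetic, with made-up numbers purely for illustration:

```python
def should_backfill(current_depth: int, limit: int, max_depth: int) -> bool:
    """Mirrors the check in maybe_backfill(): back-paginate only when we are
    within 2 * limit events of the deepest known backwards extremity."""
    return current_depth - 2 * limit <= max_depth

# Paginating backwards at depth 1000 with a page size of 50:
assert should_backfill(1000, 50, max_depth=950)        # close to the extremity -> backfill
assert not should_backfill(1000, 50, max_depth=850)    # plenty of local history -> no-op
```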

synapse/handlers/pagination.py (+4, -4)

@@ -358,9 +358,9 @@ async def get_messages(
                 # if we're going backwards, we might need to backfill. This
                 # requires that we have a topo token.
                 if room_token.topological:
-                    max_topo = room_token.topological
+                    curr_topo = room_token.topological
                 else:
-                    max_topo = await self.store.get_max_topological_token(
+                    curr_topo = await self.store.get_current_topological_token(
                         room_id, room_token.stream
                     )
 
@@ -379,13 +379,13 @@ async def get_messages(
                     leave_token = RoomStreamToken.parse(leave_token_str)
                     assert leave_token.topological is not None
 
-                    if leave_token.topological < max_topo:
+                    if leave_token.topological < curr_topo:
                         from_token = from_token.copy_and_replace(
                             "room_key", leave_token
                         )
 
                 await self.hs.get_handlers().federation_handler.maybe_backfill(
-                    room_id, max_topo
+                    room_id, curr_topo, limit=source_config.limit,
                 )
 
             to_room_key = None
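
The `limit` now forwarded to `maybe_backfill` is simply the page size of the client's `/messages` request. A hedged example of such a request against the standard client-server API, where the homeserver URL, room ID, access token, and `from` token are placeholders:

```python
import requests

HOMESERVER = "https://matrix.example.org"   # placeholder homeserver
ROOM_ID = "!someroom:example.org"           # placeholder room ID
ACCESS_TOKEN = "<access token>"             # placeholder token

# A backwards pagination request: dir=b walks into history, and `limit` is the
# page size that get_messages() receives as source_config.limit and now passes
# through to maybe_backfill().
resp = requests.get(
    f"{HOMESERVER}/_matrix/client/r0/rooms/{ROOM_ID}/messages",
    params={"dir": "b", "limit": 50, "from": "t123-456"},  # placeholder topo token
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
next_from = resp.json().get("end")  # pass as `from` on the next page
```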

synapse/rest/client/versions.py (+19)

@@ -19,6 +19,7 @@
 import logging
 import re
 
+from synapse.api.constants import RoomCreationPreset
 from synapse.http.servlet import RestServlet
 
 logger = logging.getLogger(__name__)
@@ -31,6 +32,20 @@ def __init__(self, hs):
         super().__init__()
         self.config = hs.config
 
+        # Calculate these once since they shouldn't change after start-up.
+        self.e2ee_forced_public = (
+            RoomCreationPreset.PUBLIC_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+        self.e2ee_forced_private = (
+            RoomCreationPreset.PRIVATE_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+        self.e2ee_forced_trusted_private = (
+            RoomCreationPreset.TRUSTED_PRIVATE_CHAT
+            in self.config.encryption_enabled_by_default_for_room_presets
+        )
+
     def on_GET(self, request):
         return (
             200,
@@ -62,6 +77,10 @@ def on_GET(self, request):
                     "org.matrix.msc2432": True,
                     # Implements additional endpoints as described in MSC2666
                     "uk.half-shot.msc2666": True,
+                    # Whether new rooms will be set to encrypted or not (based on presets).
+                    "io.element.e2ee_forced.public": self.e2ee_forced_public,
+                    "io.element.e2ee_forced.private": self.e2ee_forced_private,
+                    "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
                 },
             },
         )
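
Since `/versions` is unauthenticated, clients can probe the new flags with a plain GET. A small sketch (the homeserver URL is a placeholder); each flag is true only when the corresponding `RoomCreationPreset` appears in the server's `encryption_enabled_by_default_for_room_presets` config, as computed in `__init__` above:

```python
import requests

HOMESERVER = "https://matrix.example.org"  # placeholder homeserver

features = (
    requests.get(f"{HOMESERVER}/_matrix/client/versions")
    .json()
    .get("unstable_features", {})
)

# Whether newly created rooms default to E2EE, per creation preset.
for key in (
    "io.element.e2ee_forced.public",
    "io.element.e2ee_forced.private",
    "io.element.e2ee_forced.trusted_private",
):
    print(key, features.get(key, False))
```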

synapse/storage/databases/main/stream.py (+5, -8)

@@ -640,23 +640,20 @@ async def get_topological_token_for_event(self, event_id: str) -> str:
         )
         return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"])
 
-    async def get_max_topological_token(self, room_id: str, stream_key: int) -> int:
-        """Get the max topological token in a room before the given stream
+    async def get_current_topological_token(self, room_id: str, stream_key: int) -> int:
+        """Gets the topological token in a room after or at the given stream
         ordering.
 
         Args:
             room_id
             stream_key
-
-        Returns:
-            The maximum topological token.
         """
         sql = (
-            "SELECT coalesce(max(topological_ordering), 0) FROM events"
-            " WHERE room_id = ? AND stream_ordering < ?"
+            "SELECT coalesce(MIN(topological_ordering), 0) FROM events"
+            " WHERE room_id = ? AND stream_ordering >= ?"
         )
         row = await self.db_pool.execute(
-            "get_max_topological_token", None, sql, room_id, stream_key
+            "get_current_topological_token", None, sql, room_id, stream_key
         )
         return row[0][0] if row else 0
 
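
The query change flips the semantics: instead of the largest topological ordering strictly before the stream position, the store now returns the smallest topological ordering at or after it, so pagination anchors on the current position rather than the event just before it. A toy sqlite3 demonstration (the table is a simplified stand-in, not Synapse's real `events` schema):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE events (room_id TEXT, topological_ordering INT, stream_ordering INT)"
)
conn.executemany(
    "INSERT INTO events VALUES (?, ?, ?)",
    [("!r:example.org", t, s) for t, s in [(1, 10), (2, 11), (3, 12), (4, 13)]],
)

stream_key = 12

# Old query: max topological ordering strictly before the stream position.
old = conn.execute(
    "SELECT coalesce(max(topological_ordering), 0) FROM events"
    " WHERE room_id = ? AND stream_ordering < ?",
    ("!r:example.org", stream_key),
).fetchone()[0]

# New query: topological ordering at or after the stream position.
new = conn.execute(
    "SELECT coalesce(MIN(topological_ordering), 0) FROM events"
    " WHERE room_id = ? AND stream_ordering >= ?",
    ("!r:example.org", stream_key),
).fetchone()[0]

print(old, new)  # 2 3 -- the new form points at the current position, not the one before it
```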
