Mirror of https://github.com/element-hq/synapse.git (synced 2025-03-14 09:45:51 +00:00)

Merge branch 'release-v1.124' into develop

commit 77261301d2
6 changed files with 61 additions and 18 deletions

@@ -1,3 +1,12 @@
+# Synapse 1.124.0rc3 (2025-02-07)
+
+### Bugfixes
+
+- Fix regression in performance of sending events due to superfluous reads and locks. Introduced in v1.124.0rc1. ([\#18141](https://github.com/element-hq/synapse/issues/18141))
+
+
+
+
 # Synapse 1.124.0rc2 (2025-02-05)
 
 ### Bugfixes

debian/changelog (vendored): 6 lines changed

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.124.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.124.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 07 Feb 2025 13:42:55 +0000
+
 matrix-synapse-py3 (1.124.0~rc2) stable; urgency=medium
 
   * New Synapse release 1.124.0rc2.

@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.124.0rc2"
+version = "1.124.0rc3"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"

@@ -359,6 +359,28 @@ class StateHandler:
            await_full_state=False,
        )

+        # Ensure we still have the state groups we're relying on, and bump
+        # their usage time to avoid them being deleted from under us.
+        if entry.state_group:
+            missing_state_group = await self._state_deletion_store.check_state_groups_and_bump_deletion(
+                {entry.state_group}
+            )
+            if missing_state_group:
+                raise Exception(f"Missing state group: {entry.state_group}")
+        elif entry.prev_group:
+            # We only rely on the prev group when persisting the event if we
+            # don't have an `entry.state_group`.
+            missing_state_group = await self._state_deletion_store.check_state_groups_and_bump_deletion(
+                {entry.prev_group}
+            )
+
+            if missing_state_group:
+                # If we're missing the prev group then we can just clear the
+                # entries, and rely on `entry._state` (which must exist if
+                # `entry.state_group` is None)
+                entry.prev_group = None
+                entry.delta_ids = None
+
        state_group_before_event_prev_group = entry.prev_group
        deltas_to_state_group_before_event = entry.delta_ids
        state_ids_before_event = None

@@ -519,16 +541,6 @@ class StateHandler:
            state_group_id
        )

-        if prev_group:
-            # Ensure that we still have the prev group, and ensure we don't
-            # delete it while we're persisting the event.
-            missing_state_group = await self._state_deletion_store.check_state_groups_and_bump_deletion(
-                {prev_group}
-            )
-            if missing_state_group:
-                prev_group = None
-                delta_ids = None
-
        return _StateCacheEntry(
            state=None,
            state_group=state_group_id,
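
The two StateHandler hunks above add and remove the same kind of guard, so the pattern is worth spelling out once: before an event is persisted, the handler checks that the state groups its cache entry relies on still exist (the check also bumps their pending-deletion timer), fails hard if the entry's own state group is gone, and otherwise clears prev_group/delta_ids and falls back to the entry's full state map. A condensed sketch of that flow, assuming `entry` and the deletion store behave as in the diff; the wrapper function itself is illustrative, not Synapse's exact code.

# Condensed sketch of the guard shown in the handler hunks above.
# `entry` is assumed to expose state_group / prev_group / delta_ids, and
# `state_deletion_store` to expose check_state_groups_and_bump_deletion,
# as in the diff; the wrapper function is hypothetical.
async def guard_state_groups(entry, state_deletion_store) -> None:
    if entry.state_group:
        missing = await state_deletion_store.check_state_groups_and_bump_deletion(
            {entry.state_group}
        )
        if missing:
            # Without its own state group the cached entry cannot be used.
            raise Exception(f"Missing state group: {entry.state_group}")
    elif entry.prev_group:
        missing = await state_deletion_store.check_state_groups_and_bump_deletion(
            {entry.prev_group}
        )
        if missing:
            # Fall back to the full state map held on the entry itself.
            entry.prev_group = None
            entry.delta_ids = None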

@@ -123,12 +123,28 @@ class StateDeletionDataStore:
            "check_state_groups_and_bump_deletion",
            self._check_state_groups_and_bump_deletion_txn,
            state_groups,
+            # We don't need to lock if we're just doing a quick check, as the
+            # lock doesn't prevent any races here.
+            lock=False,
        )

    def _check_state_groups_and_bump_deletion_txn(
-        self, txn: LoggingTransaction, state_groups: AbstractSet[int]
+        self, txn: LoggingTransaction, state_groups: AbstractSet[int], lock: bool = True
    ) -> Collection[int]:
-        existing_state_groups = self._get_existing_groups_with_lock(txn, state_groups)
+        """Checks to make sure that the state groups haven't been deleted, and
+        if they're pending deletion we delay it (allowing time for any event
+        that will use them to finish persisting).
+
+        The `lock` flag sets if we should lock the `state_group` rows we're
+        checking, which we should do when storing new groups.
+
+        Returns:
+            The state groups that are missing, if any.
+        """
+
+        existing_state_groups = self._get_existing_groups_with_lock(
+            txn, state_groups, lock=lock
+        )

        self._bump_deletion_txn(txn, existing_state_groups)
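
For orientation: after this change the public method above always performs the quick, lock-free variant of the check (it passes lock=False), while the default lock=True on the transaction helper is intended for the path that stores new groups, per the docstring. A minimal usage sketch of the lock-free path follows; `store` and the wrapper function are assumptions, and only check_state_groups_and_bump_deletion and its missing-IDs return value come from the diff.

# Minimal sketch; `store` is assumed to be an initialised StateDeletionDataStore.
async def groups_still_usable(store, group_ids: set) -> bool:
    # Lock-free quick check: confirms the groups still exist and delays any
    # pending deletion, without taking row-level locks.
    missing = await store.check_state_groups_and_bump_deletion(group_ids)
    return not missing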

@@ -188,18 +204,18 @@ class StateDeletionDataStore:
        )

    def _get_existing_groups_with_lock(
-        self, txn: LoggingTransaction, state_groups: Collection[int]
+        self, txn: LoggingTransaction, state_groups: Collection[int], lock: bool = True
    ) -> AbstractSet[int]:
        """Return which of the given state groups are in the database, and locks
        those rows with `KEY SHARE` to ensure they don't get concurrently
-        deleted."""
+        deleted (if `lock` is true)."""
        clause, args = make_in_list_sql_clause(self.db_pool.engine, "id", state_groups)

        sql = f"""
            SELECT id FROM state_groups
            WHERE {clause}
        """
-        if isinstance(self.db_pool.engine, PostgresEngine):
+        if lock and isinstance(self.db_pool.engine, PostgresEngine):
            # On postgres we add a row level lock to the rows to ensure that we
            # conflict with any concurrent DELETEs. `FOR KEY SHARE` lock will
            # not conflict with other read
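
The guard above only takes the row-level lock when `lock` is true and the engine is PostgreSQL, because FOR KEY SHARE makes the selected rows conflict with a concurrent DELETE while still allowing ordinary reads. A self-contained illustration of the same idea follows; this is not Synapse's helper, and the function name, boolean flags, and placeholder style are assumptions.

# Illustrative only: build an existence query that appends a row-level lock
# only when the caller needs one, mirroring the
# `lock and isinstance(self.db_pool.engine, PostgresEngine)` guard above.
def build_existence_query(group_ids, lock, on_postgres):
    placeholders = ", ".join("?" for _ in group_ids)  # placeholder style depends on the driver
    sql = f"SELECT id FROM state_groups WHERE id IN ({placeholders})"
    if lock and on_postgres:
        # FOR KEY SHARE blocks a concurrent DELETE of these rows for the rest
        # of the transaction, but does not block plain reads, so the cheap
        # check path can skip it entirely.
        sql += " FOR KEY SHARE"
    return sql, list(group_ids)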

@@ -742,7 +742,7 @@ class RoomsCreateTestCase(RoomBase):
        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
        self.assertTrue("room_id" in channel.json_body)
        assert channel.resource_usage is not None
-        self.assertEqual(36, channel.resource_usage.db_txn_count)
+        self.assertEqual(35, channel.resource_usage.db_txn_count)

    def test_post_room_initial_state(self) -> None:
        # POST with initial_state config key, expect new room id

@@ -755,7 +755,7 @@ class RoomsCreateTestCase(RoomBase):
        self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
        self.assertTrue("room_id" in channel.json_body)
        assert channel.resource_usage is not None
-        self.assertEqual(38, channel.resource_usage.db_txn_count)
+        self.assertEqual(37, channel.resource_usage.db_txn_count)

    def test_post_room_visibility_key(self) -> None:
        # POST with visibility config key, expect new room id