This commit is contained in:
Andrew Ferrazzutti 2025-03-13 20:55:07 +01:00 committed by GitHub
commit 18b455d7f5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 191 additions and 91 deletions

1
changelog.d/18208.docker Normal file
View file

@ -0,0 +1 @@
Use a [`distroless`](https://github.com/GoogleContainerTools/distroless) base runtime image. This is a breaking change to downstream images that build on the Synapse image.

View file

@ -21,6 +21,7 @@
# in `poetry export` in the past.
ARG DEBIAN_VERSION=bookworm
ARG DEBIAN_VERSION_NUMERIC=12
ARG PYTHON_VERSION=3.12
ARG POETRY_VERSION=1.8.3
@ -134,9 +135,7 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
apt-get install -y --no-install-recommends rsync && \
apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends \
curl \
gosu \
libjpeg62-turbo \
libpq5 \
@ -144,18 +143,28 @@ RUN \
xmlsec1 \
libjemalloc2 \
libicu \
# for health checks (slimmer than curl)
wget \
# dependencies of debian's python3.12-minimal
zlib1g \
libexpat1 \
# libraries used by python's lib-dynload
libbz2-1.0 \
libcrypt1 \
libgdbm6 \
libncursesw6 \
libnsl2 \
libreadline8 \
libsqlite3-0 \
libuuid1 \
| grep '^\w' > /tmp/pkg-list && \
for arch in arm64 amd64; do \
mkdir -p /tmp/debs-${arch} && \
cd /tmp/debs-${arch} && \
apt-get download $(sed "s/$/:${arch}/" /tmp/pkg-list); \
apt-get -o APT::Architecture="${arch}" download $(cat /tmp/pkg-list); \
done
# Extract the debs for each architecture
# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the
# libraries to the right place, else the `COPY` won't work.
# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is
# already present in the runtime image.
RUN \
for arch in arm64 amd64; do \
mkdir -p /install-${arch}/var/lib/dpkg/status.d/ && \
@ -165,16 +174,28 @@ RUN \
dpkg --ctrl-tarfile $deb | tar -Ox ./control > /install-${arch}/var/lib/dpkg/status.d/${package_name}; \
dpkg --extract $deb /install-${arch}; \
done; \
rsync -avr /install-${arch}/lib/ /install-${arch}/usr/lib; \
rm -rf /install-${arch}/lib /install-${arch}/lib64; \
done
###
### Stage 3: runtime
### Stage 3: python
###
FROM docker.io/library/python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION}
FROM docker.io/library/python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION} AS python
# Make a directory with just the Python binaries & symlinks to be copied into the final image
RUN \
PYBIN=/usr/local/pybin && \
mkdir ${PYBIN} && \
cp -a /usr/local/bin/python* ${PYBIN}/ && \
rm ${PYBIN}/python*-config
###
### Stage 4: runtime
###
FROM gcr.io/distroless/base-debian${DEBIAN_VERSION_NUMERIC}
ARG TARGETARCH
@ -184,7 +205,12 @@ LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git
LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later'
COPY --from=runtime-deps /install-${TARGETARCH} /
# Needed for start.py's config generation scripts
COPY --from=runtime-deps /usr/bin/chown /usr/bin/chown
COPY --from=builder /install /usr/local
# Copy the entire directory to preserve symlinks
COPY --from=python /usr/local/pybin /usr/local/bin
COPY --from=python /usr/local/lib /usr/local/lib
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
@ -192,5 +218,6 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
ENTRYPOINT ["/start.py"]
COPY ./docker/healthcheck.py /healthcheck.py
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1
CMD ["/healthcheck.py"]

View file

@ -2,50 +2,58 @@
ARG SYNAPSE_VERSION=latest
ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
ARG DEBIAN_VERSION=bookworm
ARG PYTHON_VERSION=3.12
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
FROM docker.io/library/debian:bookworm-slim AS deps_base
FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
# This silences a warning as uv isn't able to do hardlinks between its cache
# (mounted as --mount=type=cache) and the target directory.
ENV UV_LINK_MODE=copy
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
redis-server nginx-light
apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends \
redis-server \
nginx-light \
mawk \
| grep '^\w' > /tmp/pkg-list && \
mkdir -p /tmp/debs && \
cd /tmp/debs && \
apt-get download $(cat /tmp/pkg-list)
# Similarly, a base to copy the redis server from.
#
# The redis docker image has fewer dynamic libraries than the debian package,
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc.
FROM docker.io/library/redis:7-bookworm AS redis_base
RUN \
mkdir -p /install/var/lib/dpkg/status.d/ && \
for deb in /tmp/debs/*.deb; do \
package_name=$(dpkg-deb -I ${deb} | awk '/^ Package: .*$/ {print $2}'); \
echo "Extracting: ${package_name}"; \
dpkg --ctrl-tarfile $deb | tar -Ox ./control > /install/var/lib/dpkg/status.d/${package_name}; \
dpkg --extract $deb /install; \
done;
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --prefix="/install/usr/local" supervisor~=4.2
# now build the final image, based on the the regular Synapse docker image
FROM $FROM
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
RUN --mount=type=cache,target=/root/.cache/pip \
pip install supervisor~=4.2
RUN mkdir -p /etc/supervisor/conf.d
# Copy over redis and nginx
COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
COPY --from=deps_base /usr/sbin/nginx /usr/sbin
COPY --from=deps_base /usr/share/nginx /usr/share/nginx
COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/lib/nginx
# Copy over redis, nginx and supervisor
COPY --from=deps_base /install /
RUN \
--mount=type=bind,from=deps_base,source=/bin,target=/bin \
--mount=type=bind,from=deps_base,source=/sbin,target=/sbin \
mkdir -p /etc/supervisor/conf.d && \
useradd --system --user-group --uid 33 --home-dir /var/www --shell /usr/sbin/nologin www-data && \
chown www-data /var/lib/nginx && \
# have nginx log to stderr/out
RUN ln -sf /dev/stdout /var/log/nginx/access.log
RUN ln -sf /dev/stderr /var/log/nginx/error.log
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stderr /var/log/nginx/error.log
# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
@ -63,5 +71,6 @@ FROM $FROM
# Replace the healthcheck with one which checks *all* the workers. The script
# is generated by configure_workers_and_start.py.
RUN ["python", "-c", "import os; os.unlink('/healthcheck.py')"]
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh
CMD ["/healthcheck.py"]

View file

@ -9,20 +9,28 @@
ARG SYNAPSE_VERSION=latest
# This is an intermediate image, to be built locally (not pulled from a registry).
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
ARG DEBIAN_VERSION=bookworm
FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres
FROM $FROM
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
# This trick only works because we use a postgres image based on the same debian
# version as Synapse's docker image (so the versions of the shared libraries
# match).
# sh is needed by initdb & start_for_complement.sh. Copy it now to get access to shell form instructions
COPY --from=postgres /bin/sh /bin/sh
# Choose useradd over adduser since the latter requires perl
RUN --mount=type=bind,from=postgres,source=/sbin/useradd,target=/sbin/useradd \
useradd --system --user-group --uid 999 --home-dir /var/lib/postgresql --shell /usr/sbin/nologin postgres
COPY --from=postgres /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres /usr/share/postgresql /usr/share/postgresql
COPY --from=postgres --chown=postgres /var/lib/postgresql /var/lib/postgresql
COPY --from=postgres --chown=postgres /var/run/postgresql /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
@ -38,7 +46,7 @@ RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single
# tweaks to get Synapse ready for testing.
# To do this, we copy the old template out of the way and then include it
# with Jinja2.
RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2
RUN ["python", "-c", "import os; os.rename('/conf/shared.yaml.j2', '/conf/shared-orig.yaml.j2')"]
COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2
WORKDIR /data
@ -47,6 +55,9 @@ COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
# Copy the entrypoint
COPY conf/start_for_complement.sh /
# Copy utilities used by the entrypoint
COPY --from=postgres /bin/openssl /bin/date /bin/
COPY --from=postgres /etc/ssl /etc/ssl
# Expose nginx's listener ports
EXPOSE 8008 8448
@ -55,4 +66,4 @@ ENTRYPOINT ["/start_for_complement.sh"]
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh
CMD ["/healthcheck.py"]

View file

@ -1,16 +1,16 @@
#!/bin/bash
#!/bin/sh
#
# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
set -e
echo "Complement Synapse launcher"
echo " Args: $@"
echo " Args: $*"
echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"
function log {
log () {
d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
echo "$d $@"
echo "$d $*"
}
# Set the server name of the homeserver
@ -44,13 +44,13 @@ case "$SYNAPSE_COMPLEMENT_DATABASE" in
esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
if [ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]; then
# Specify the workers to test with
# Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
# utilizing WORKERS=1 for backwards compatibility.
# -n True if the length of string is non-zero.
# -z True if the length of string is zero.
if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
if [ -z "$SYNAPSE_WORKER_TYPES" ]; then
export SYNAPSE_WORKER_TYPES="\
event_persister:2, \
background_worker, \
@ -82,8 +82,8 @@ else
fi
if [[ -n "$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" ]]; then
if [[ -n "$SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER" ]]; then
if [ -n "$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" ]; then
if [ -n "$SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER" ]; then
export SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR="1"
else
export SYNAPSE_ASYNC_IO_REACTOR="1"
@ -103,12 +103,11 @@ fi
# Note that both the key and certificate are in PEM format (not DER).
# First generate a configuration file to set up a Subject Alternative Name.
cat > /conf/server.tls.conf <<EOF
echo "\
.include /etc/ssl/openssl.cnf
[SAN]
subjectAltName=DNS:${SERVER_NAME}
EOF
subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf
# Generate an RSA key
openssl genrsa -out /conf/server.tls.key 2048
@ -123,12 +122,14 @@ openssl x509 -req -in /conf/server.tls.csr \
-out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN
# Assert that we have a Subject Alternative Name in the certificate.
# (grep will exit with 1 here if there isn't a SAN in the certificate.)
openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:
case $(openssl x509 -in /conf/server.tls.crt -noout -text) in
*DNS:*) ;;
*) exit 1
esac
export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key
# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py
exec /configure_workers_and_start.py "$@"

View file

@ -0,0 +1,10 @@
#!/usr/local/bin/python
# This healthcheck script is designed to return OK when every
# host involved returns OK
import subprocess
import sys

try:
    # Probe each worker's health endpoint. `--spider` makes wget check
    # reachability without downloading the response body.
    for healthcheck_url in {{ healthcheck_urls }}:
        subprocess.check_call(
            ["wget", "--quiet", "--tries=1", "--spider", healthcheck_url]
        )
except subprocess.CalledProcessError:
    # Exit 1 on any failed probe: 1 is the only "unhealthy" status code
    # Docker's HEALTHCHECK recognises (2 is reserved).
    sys.exit(1)

View file

@ -1,6 +0,0 @@
#!/bin/sh
# This healthcheck script is designed to return OK when every
# host involved returns OK
{%- for healthcheck_url in healthcheck_urls %}
curl -fSs {{ healthcheck_url }} || exit 1
{%- endfor %}

View file

@ -20,9 +20,9 @@ autorestart=true
[program:redis]
{% if using_unix_sockets %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
command=/usr/local/bin/prefix-log /usr/bin/redis-server --unixsocket /tmp/redis.sock
{% else %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
command=/usr/local/bin/prefix-log /usr/bin/redis-server
{% endif %}
priority=1
stdout_logfile=/dev/stdout

View file

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/local/bin/python
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
@ -376,9 +376,11 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
#
# We use append mode in case the files have already been written to by something else
# (for instance, as part of the instructions in a dockerfile).
exists = os.path.isfile(dst)
with open(dst, "a") as outfile:
# In case the existing file doesn't end with a newline
outfile.write("\n")
if exists:
outfile.write("\n")
outfile.write(rendered)
@ -602,9 +604,9 @@ def generate_base_homeserver_config() -> None:
Raises: CalledProcessError if calling start.py returned a non-zero exit code.
"""
# start.py already does this for us, so just call that.
# note that this script is copied in in the official, monolith dockerfile
# note that this script is copied in the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
def parse_worker_types(
@ -994,10 +996,11 @@ def generate_worker_files(
# healthcheck config
convert(
"/conf/healthcheck.sh.j2",
"/healthcheck.sh",
"/conf/healthcheck.py.j2",
"/healthcheck.py",
healthcheck_urls=healthcheck_urls,
)
os.chmod("/healthcheck.py", 0o755)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"

10
docker/healthcheck.py Executable file
View file

@ -0,0 +1,10 @@
#!/usr/local/bin/python
# Health probe for the monolith image: succeed (exit 0) iff Synapse's
# /health endpoint answers. `--spider` checks reachability without
# downloading the body; exit code 1 marks the container unhealthy.
import subprocess
import sys

status = subprocess.call(
    ["wget", "--quiet", "--tries=1", "--spider", "http://localhost:8008/health"]
)
if status != 0:
    sys.exit(1)

View file

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/local/bin/python
#
# Prefixes all lines on stdout and stderr with the process name (as determined by
# the SUPERVISOR_PROCESS_NAME env var, which is automatically set by Supervisor).
@ -6,10 +6,30 @@
# Usage:
# prefix-log command [args...]
#
import os
import sys
# '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on
# stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce
# confusion due to interleaving of the different processes.
exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1)
exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2)
exec "$@"
# Build the mawk command that prefixes every line with the supervisor process
# name. `fflush()` forces a flush per line so output isn't batched.
# NOTE(review): the nested single quotes inside this f-string are only valid
# syntax on Python >= 3.12 — confirm the image always ships 3.12+.
prefixer = ['mawk', '-W', 'interactive', f'{{print "{os.environ['SUPERVISOR_PROCESS_NAME']} | "$0; fflush() }}']

# Pipe for stdout: a forked child runs mawk reading the pipe's read end,
# writing the prefixed lines to its (inherited) stdout.
r_out, w_out = os.pipe()
if os.fork() == 0:
    # Child: close the unused write end, wire the read end to stdin, exec mawk.
    os.close(w_out)
    os.dup2(r_out, sys.stdin.fileno())
    os.execvp(prefixer[0], prefixer)
# Parent: the read end now belongs to the child only.
os.close(r_out)

# Second pipe/child for stderr: same idea, but the child redirects its own
# stdout onto stderr so the prefixed lines come out on stderr.
r_err, w_err = os.pipe()
if os.fork() == 0:
    # Child: close both write ends it inherited (w_out from the parent too),
    # read from the stderr pipe, emit on stderr.
    os.close(w_out)
    os.close(w_err)
    os.dup2(r_err, sys.stdin.fileno())
    os.dup2(sys.stderr.fileno(), sys.stdout.fileno())
    os.execvp(prefixer[0], prefixer)
os.close(r_err)

# Parent: point our stdout/stderr at the pipes' write ends, then replace
# ourselves with the wrapped command so it keeps supervisor's PID.
os.dup2(w_out, sys.stdout.fileno())
os.dup2(w_err, sys.stderr.fileno())
os.execvp(sys.argv[1], sys.argv[1:])

View file

@ -181,24 +181,38 @@ if [ -z "$skip_docker_build" ]; then
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
# Tag local builds with a dummy registry so that later builds
# may reference them instead of pulling from a remote registry
LOCAL_REGISTRY=localhost:5000
SYNAPSE_TAG=matrixdotorg/synapse
$CONTAINER_RUNTIME build \
-t "$SYNAPSE_TAG" \
-t "$LOCAL_REGISTRY/$SYNAPSE_TAG" \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
echo_if_github "::endgroup::"
# Build the workers docker image (from the base Synapse image we just built).
SYNAPSE_WORKERS_TAG=matrixdotorg/synapse-workers
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
$CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
$CONTAINER_RUNTIME build \
-t "$SYNAPSE_WORKERS_TAG" \
--build-arg FROM="$LOCAL_REGISTRY/$SYNAPSE_TAG" \
-f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
COMPLEMENT_SYNAPSE_TAG=complement-synapse
echo_if_github "::group::Build Docker image: complement/Dockerfile"
$CONTAINER_RUNTIME build -t complement-synapse \
$CONTAINER_RUNTIME build \
-t "$COMPLEMENT_SYNAPSE_TAG" \
`# This is the tag we end up pushing to the registry (see` \
`# .github/workflows/push_complement_image.yml) so let's just label it now` \
`# so people can reference it by the same name locally.` \
-t ghcr.io/element-hq/synapse/complement-synapse \
-t "ghcr.io/element-hq/synapse/$COMPLEMENT_SYNAPSE_TAG" \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"