Mirror of https://github.com/element-hq/dendrite.git (synced 2025-03-14 14:15:35 +00:00)

Merge branch 'main' into dependabot/bundler/docs/rexml-3.3.9

Commit 2f9f371848: 71 changed files with 1589 additions and 555 deletions

.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changes)

@@ -3,6 +3,6 @@
 <!-- Please read https://matrix-org.github.io/dendrite/development/contributing before submitting your pull request -->

 * [ ] I have added Go unit tests or [Complement integration tests](https://github.com/matrix-org/complement) for this PR _or_ I have justified why this PR doesn't need tests
-* [ ] Pull request includes a [sign off below using a legally identifiable name](https://matrix-org.github.io/dendrite/development/contributing#sign-off) _or_ I have already signed off privately
+* [ ] Pull request includes a [sign off below](https://element-hq.github.io/dendrite/development/contributing#sign-off) _or_ I have already signed off privately

 Signed-off-by: `Your Name <your@email.example.org>`

.github/dependabot.yaml (vendored, new file, 13 lines)

version: 2
updates:
  - package-ecosystem: gomod
    directory: /
    schedule:
      interval: weekly
    labels:
      - "dependencies"
      - "go"
  - package-ecosystem: "github-actions"
    directory: /
    schedule:
      interval: weekly

.github/workflows/dendrite.yml (vendored, 30 changes)

@@ -31,13 +31,13 @@ jobs:
       - uses: actions/checkout@v4

       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
           cache: true

       - name: Install Node
-        uses: actions/setup-node@v2
+        uses: actions/setup-node@v4
         with:
           node-version: 14

@@ -70,11 +70,11 @@
       - name: Install libolm
         run: sudo apt-get install libolm-dev libolm3
       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v6

   # run go test with different go versions
   test:
@@ -106,7 +106,7 @@
       - name: Install libolm
         run: sudo apt-get install libolm-dev libolm3
       - name: Setup go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
       - uses: actions/cache@v4
@@ -143,7 +143,7 @@
     steps:
       - uses: actions/checkout@v4
       - name: Setup go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
       - uses: actions/cache@v4
@@ -176,7 +176,7 @@
     steps:
       - uses: actions/checkout@v4
       - name: Setup Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
       - uses: actions/cache@v4
@@ -239,7 +239,7 @@
       - name: Install libolm
         run: sudo apt-get install libolm-dev libolm3
       - name: Setup go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
       - name: Set up gotestfmt
@@ -262,7 +262,7 @@
           POSTGRES_PASSWORD: postgres
           POSTGRES_DB: dendrite
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v4
+        uses: codecov/codecov-action@v5
         with:
           flags: unittests
           fail_ci_if_error: true
@@ -277,7 +277,7 @@
     steps:
       - uses: actions/checkout@v4
       - name: Setup go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
           cache: true
@@ -294,9 +294,9 @@
       - name: Build upgrade-tests
         run: go build ./cmd/dendrite-upgrade-tests
       - name: Test upgrade (PostgreSQL)
-        run: ./dendrite-upgrade-tests --head .
+        run: ./dendrite-upgrade-tests -repository=matrix-org/dendrite --head .
       - name: Test upgrade (SQLite)
-        run: ./dendrite-upgrade-tests --sqlite --head .
+        run: ./dendrite-upgrade-tests --sqlite -repository=matrix-org/dendrite --head .

   # run database upgrade tests, skipping over one version
   upgrade_test_direct:
@@ -307,7 +307,7 @@
     steps:
       - uses: actions/checkout@v4
       - name: Setup go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
           cache: true
@@ -324,9 +324,9 @@
       - name: Build upgrade-tests
         run: go build ./cmd/dendrite-upgrade-tests
       - name: Test upgrade (PostgreSQL)
-        run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head .
+        run: ./dendrite-upgrade-tests -direct -from HEAD-2 -repository=matrix-org/dendrite --head .
       - name: Test upgrade (SQLite)
-        run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head .
+        run: ./dendrite-upgrade-tests --sqlite -direct -from HEAD-2 -repository=matrix-org/dendrite --head .

   # run Sytest in different variations
   sytest:

.github/workflows/docker.yml (vendored, 54 changes)

@@ -48,13 +48,27 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}

+      # Build until the "build" stage, this then can be used by other steps.
+      - name: Build "build" image
+        if: github.ref_name == 'main' || github.event_name == 'release'
+        id: docker_build_cache
+        uses: docker/build-push-action@v6
+        with:
+          target: build
+          cache-from: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache
+          cache-to: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache,mode=max
+          context: .
+          platforms: ${{ env.PLATFORMS }}
+          push: true
+          tags: |
+            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:binaries
+
       - name: Build main monolith image
         if: github.ref_name == 'main'
         id: docker_build_monolith
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
           cache-from: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache
           cache-to: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache,mode=max
           context: .
           platforms: ${{ env.PLATFORMS }}
           push: true
@@ -65,10 +79,8 @@
       - name: Build release monolith image
         if: github.event_name == 'release' # Only for GitHub releases
         id: docker_build_monolith_release
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
           context: .
           platforms: ${{ env.PLATFORMS }}
           push: true
@@ -86,13 +98,14 @@
           output: "trivy-results.sarif"

       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@v2
+        uses: github/codeql-action/upload-sarif@v3
         with:
           sarif_file: "trivy-results.sarif"

   demo-pinecone:
     name: Pinecone demo image
     runs-on: ubuntu-latest
+    needs: monolith
     permissions:
       contents: read
       packages: write
@@ -122,10 +135,9 @@
       - name: Build main Pinecone demo image
         if: github.ref_name == 'main'
         id: docker_build_demo_pinecone
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache
           context: .
           file: ./build/docker/Dockerfile.demo-pinecone
           platforms: ${{ env.PLATFORMS }}
@@ -137,23 +149,23 @@
       - name: Build release Pinecone demo image
         if: github.event_name == 'release' # Only for GitHub releases
         id: docker_build_demo_pinecone_release
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache
           context: .
           file: ./build/docker/Dockerfile.demo-pinecone
           platforms: ${{ env.PLATFORMS }}
           push: true
           tags: |
-            ${{ env.DOCKER_NAMESPACE }}/dendrite-demo-yggdrasil:latest
-            ${{ env.DOCKER_NAMESPACE }}/dendrite-demo-yggdrasil:${{ env.RELEASE_VERSION }}
-            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-yggdrasil:latest
-            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-yggdrasil:${{ env.RELEASE_VERSION }}
+            ${{ env.DOCKER_NAMESPACE }}/dendrite-demo-pinecone:latest
+            ${{ env.DOCKER_NAMESPACE }}/dendrite-demo-pinecone:${{ env.RELEASE_VERSION }}
+            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-pinecone:latest
+            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-pinecone:${{ env.RELEASE_VERSION }}

   demo-yggdrasil:
     name: Yggdrasil demo image
     runs-on: ubuntu-latest
+    needs: monolith
     permissions:
       contents: read
       packages: write
@@ -183,10 +195,9 @@
       - name: Build main Yggdrasil demo image
         if: github.ref_name == 'main'
         id: docker_build_demo_yggdrasil
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache
           context: .
           file: ./build/docker/Dockerfile.demo-yggdrasil
           platforms: ${{ env.PLATFORMS }}
@@ -198,10 +209,9 @@
       - name: Build release Yggdrasil demo image
         if: github.event_name == 'release' # Only for GitHub releases
         id: docker_build_demo_yggdrasil_release
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache
           context: .
           file: ./build/docker/Dockerfile.demo-yggdrasil
           platforms: ${{ env.PLATFORMS }}

.github/workflows/gh-pages.yml (vendored, 6 changes)

@@ -30,14 +30,14 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
       - name: Setup Pages
-        uses: actions/configure-pages@v2
+        uses: actions/configure-pages@v5
       - name: Build with Jekyll
         uses: actions/jekyll-build-pages@v1
         with:
           source: ./docs
           destination: ./_site
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@v3

   # Deployment job
   deploy:
@@ -49,4 +49,4 @@
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v1
+        uses: actions/deploy-pages@v4

.github/workflows/helm.yml (vendored, 4 changes)

@@ -27,12 +27,12 @@ jobs:
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

       - name: Install Helm
-        uses: azure/setup-helm@v3
+        uses: azure/setup-helm@v4
         with:
           version: v3.10.0

       - name: Run chart-releaser
-        uses: helm/chart-releaser-action@v1.6.0
+        uses: helm/chart-releaser-action@v1.7.0
         env:
           CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
         with:

.github/workflows/k8s.yml (vendored, 12 changes)

@@ -20,14 +20,14 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - uses: azure/setup-helm@v3
+      - uses: azure/setup-helm@v4
         with:
           version: v3.10.0
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
         with:
           python-version: 3.11
           check-latest: true
-      - uses: helm/chart-testing-action@v2.3.1
+      - uses: helm/chart-testing-action@v2.7.0
       - name: Get changed status
         id: list-changed
         run: |
@@ -53,16 +53,16 @@
           fetch-depth: 0
           ref: ${{ inputs.checkoutCommit }}
       - name: Install Kubernetes tools
-        uses: yokawasa/action-setup-kube-tools@v0.8.2
+        uses: yokawasa/action-setup-kube-tools@v0.11.2
         with:
           setup-tools: |
             helmv3
           helm: "3.10.3"
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
         with:
           python-version: "3.10"
       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.3.1
+        uses: helm/chart-testing-action@v2.7.0
       - name: Create k3d cluster
         uses: nolar/setup-k3d-k3s@v1
         with:

.github/workflows/schedules.yaml (vendored, 16 changes)

@@ -98,7 +98,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version: 'stable'
           cache: true
@@ -110,7 +110,7 @@
           grep -Ev 'relayapi|setup/mscs|api_trace' sytest.cov > final.cov
           go tool covdata func -i="$(find Sytest* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v4
+        uses: codecov/codecov-action@v5
         with:
           files: ./final.cov
           flags: sytest
@@ -222,7 +222,7 @@
     steps:
       - uses: actions/checkout@v4
       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version: 'stable'
           cache: true
@@ -234,7 +234,7 @@
           grep -Ev 'relayapi|setup/mscs|api_trace' complement.cov > final.cov
           go tool covdata func -i="$(find Complement* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v4
+        uses: codecov/codecov-action@v5
         with:
           files: ./final.cov
           flags: complement
@@ -254,7 +254,7 @@
       - uses: actions/checkout@v4
         with:
           repository: matrix-org/matrix-react-sdk
-      - uses: actions/setup-node@v3
+      - uses: actions/setup-node@v4
         with:
           cache: 'yarn'
       - name: Fetch layered build
@@ -272,7 +272,7 @@
         run: |
           sed -i '/HOMESERVER/c\ HOMESERVER: "dendrite",' cypress.config.ts
       - name: "Run cypress tests"
-        uses: cypress-io/github-action@v4.1.1
+        uses: cypress-io/github-action@v6.7.10
         with:
           browser: chrome
           start: npx serve -p 8080 ./element-web/webapp
@@ -294,7 +294,7 @@
       - uses: actions/checkout@v4
         with:
           repository: matrix-org/matrix-react-sdk
-      - uses: actions/setup-node@v3
+      - uses: actions/setup-node@v4
         with:
           cache: 'yarn'
       - name: Fetch layered build
@@ -312,7 +312,7 @@
         run: |
           sed -i '/HOMESERVER/c\ HOMESERVER: "dendritePinecone",' cypress.config.ts
       - name: "Run cypress tests"
-        uses: cypress-io/github-action@v4.1.1
+        uses: cypress-io/github-action@v6.7.10
         with:
           browser: chrome
           start: npx serve -p 8080 ./element-web/webapp

CHANGES.md (43 changes)

@@ -1,5 +1,48 @@
 # Changelog

+## Dendrite 0.14.1 (2025-01-16)
+
+### ⚠ Important
+
+This is a security release, [gomatrixserverlib](https://github.com/matrix-org/gomatrixserverlib) was vulnerable to
+server-side request forgery, serving content from a private network it can access, under certain conditions.
+
+Upgrading to this version is **highly** recommended.
+
+### Security
+
+- Support for blocking access to certain networks, fixing [CVE-2024-52594](https://www.cve.org/CVERecord?id=CVE-2024-52594) and
+  [GHSA-4ff6-858j-r822](https://github.com/matrix-org/gomatrixserverlib/security/advisories/GHSA-4ff6-858j-r822)
+
+### Fixes
+
+- Speed-up loading server ACLs on startup, this is mostly noticeable on larger instances with many rooms.
+
+## Dendrite 0.14.0 (2024-12-18)
+
+This is the first release after forking matrix-org/dendrite, this repository is now licensed under AGPLv3.0.
+
+Upgrading to this version is **highly** recommended, as it fixes several long-standing bugs which could lead to state resets.
+It also improves performance and memory usage.
+
+### Features
+
+- The required Go version to build Dendrite is now 1.22
+- Support for listening and connecting to I2P and Onion services was added (contributed by [eyedeekay](https://github.com/eyedeekay))
+- Add via parameter on join room requests as per [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/MSC4156) (contributed by [Johennes](https://github.com/Johennes))
+- Support for fallback keys has been added (contributed by [neilalexander](https://github.com/neilalexander))
+- Dendrite now supports [MSC4225](https://github.com/matrix-org/matrix-spec-proposals/pull/4225)
+- Updated dependencies
+  - Internal NATS Server has been updated from v2.10.20 to v2.10.23 (contributed by [neilalexander](https://github.com/neilalexander))
+  - gomatrixserverlib has been updated, which includes several performance improvements
+
+### Fixes
+
+- Correctly respond to `OPTIONS` requests on authed media endpoints (contributed by [arenekosreal](https://github.com/arenekosreal))
+- A long-standing bug which could lead to state resets has been fixed (contributed by [neilalexander](https://github.com/neilalexander))
+  - Note: While state resets should happen less frequently, they are still part of the Matrix protocol, so they are not entirely fixed.
+  - Also, rooms which have been utterly broken may take some time to reconcile, it may be worth to leave, purge and rejoin such rooms.
+
 ## Dendrite 0.13.8 (2024-09-13)

 ### Features

build/docker/Dockerfile.demo-pinecone

@@ -1,29 +1,18 @@
-FROM docker.io/golang:1.22-alpine AS base
+#syntax=docker/dockerfile:1.2

-#
-# Needs to be separate from the main Dockerfile for OpenShift,
-# as --target is not supported there.
-#
-
-RUN apk --update --no-cache add bash build-base
-
-WORKDIR /build
-
-COPY . /build
-
-RUN mkdir -p bin
-RUN go build -trimpath -o bin/ ./cmd/dendrite-demo-pinecone
-RUN go build -trimpath -o bin/ ./cmd/create-account
-RUN go build -trimpath -o bin/ ./cmd/generate-keys
+FROM --platform=${BUILDPLATFORM} ghcr.io/element-hq/dendrite-monolith:binaries AS build

 FROM alpine:latest
 RUN apk --update --no-cache add curl
 LABEL org.opencontainers.image.title="Dendrite (Pinecone demo)"
 LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
 LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
-LABEL org.opencontainers.image.licenses="Apache-2.0"
+LABEL org.opencontainers.image.licenses="AGPL-3.0-only OR LicenseRef-Element-Commercial"

-COPY --from=base /build/bin/* /usr/bin/
+COPY --from=build /out/create-account /usr/bin/create-account
+COPY --from=build /out/generate-config /usr/bin/generate-config
+COPY --from=build /out/generate-keys /usr/bin/generate-keys
+COPY --from=build /out/dendrite-demo-pinecone /usr/bin/dendrite-demo-pinecone

 VOLUME /etc/dendrite
 WORKDIR /etc/dendrite

build/docker/Dockerfile.demo-yggdrasil

@@ -1,28 +1,17 @@
-FROM docker.io/golang:1.22 AS base
+#syntax=docker/dockerfile:1.2

-#
-# Needs to be separate from the main Dockerfile for OpenShift,
-# as --target is not supported there.
-#
-
-RUN apk --update --no-cache add bash build-base
-
-WORKDIR /build
-
-COPY . /build
-
-RUN mkdir -p bin
-RUN go build -trimpath -o bin/ ./cmd/dendrite-demo-yggdrasil
-RUN go build -trimpath -o bin/ ./cmd/create-account
-RUN go build -trimpath -o bin/ ./cmd/generate-keys
+FROM --platform=${BUILDPLATFORM} ghcr.io/element-hq/dendrite-monolith:binaries AS build

 FROM alpine:latest
 LABEL org.opencontainers.image.title="Dendrite (Yggdrasil demo)"
 LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
 LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
-LABEL org.opencontainers.image.licenses="Apache-2.0"
+LABEL org.opencontainers.image.licenses="AGPL-3.0-only OR LicenseRef-Element-Commercial"

-COPY --from=base /build/bin/* /usr/bin/
+COPY --from=build /out/create-account /usr/bin/create-account
+COPY --from=build /out/generate-config /usr/bin/generate-config
+COPY --from=build /out/generate-keys /usr/bin/generate-keys
+COPY --from=build /out/dendrite-demo-yggdrasil /usr/bin/dendrite-demo-yggdrasil

 VOLUME /etc/dendrite
 WORKDIR /etc/dendrite

@@ -12,7 +12,7 @@ The `Dockerfile` is a multistage file which can build Dendrite. From the root of
 repository, run:

 ```
-docker build . -t matrixdotorg/dendrite-monolith
+docker build -t ghcr.io/element-hq/dendrite-monolith:latest .
 ```

 ## Compose file
@@ -36,7 +36,7 @@ To generate keys:
 ```
 docker run --rm --entrypoint="" \
   -v $(pwd):/mnt \
-  matrixdotorg/dendrite-monolith:latest \
+  ghcr.io/element-hq/dendrite-monolith:latest \
   /usr/bin/generate-keys \
   -private-key /mnt/matrix_key.pem \
   -tls-cert /mnt/server.crt \

|
@ -23,7 +23,7 @@ services:
|
|||
|
||||
monolith:
|
||||
hostname: monolith
|
||||
image: matrixdotorg/dendrite-monolith:latest
|
||||
image: ghcr.io/element-hq/dendrite-monolith:latest
|
||||
ports:
|
||||
- 8008:8008
|
||||
- 8448:8448
|
||||
|
|
|
@@ -6,6 +6,9 @@ TAG=${1:-latest}

 echo "Building tag '${TAG}'"

-docker build . --target monolith -t matrixdotorg/dendrite-monolith:${TAG}
-docker build . --target demo-pinecone -t matrixdotorg/dendrite-demo-pinecone:${TAG}
-docker build . --target demo-yggdrasil -t matrixdotorg/dendrite-demo-yggdrasil:${TAG}
+docker build -t ghcr.io/element-hq/dendrite-monolith:binaries --target build .
+
+docker build -t ghcr.io/element-hq/dendrite-monolith:${TAG} .
+
+docker build -t ghcr.io/element-hq/dendrite-demo-yggdrasil:${TAG} -f build/docker/Dockerfile.demo-yggdrasil .
+docker build -t ghcr.io/element-hq/dendrite-demo-pinecone:${TAG} -f build/docker/Dockerfile.demo-pinecone .

@@ -4,4 +4,4 @@ TAG=${1:-latest}

 echo "Pulling tag '${TAG}'"

-docker pull matrixdotorg/dendrite-monolith:${TAG}
+docker pull ghcr.io/element-hq/dendrite-monolith:${TAG}

@@ -4,4 +4,4 @@ TAG=${1:-latest}

 echo "Pushing tag '${TAG}'"

-docker push matrixdotorg/dendrite-monolith:${TAG}
+docker push ghcr.io/element-hq/dendrite-monolith:${TAG}

@@ -36,9 +36,16 @@ func JoinRoomByIDOrAlias(
         Content: map[string]interface{}{},
     }

-    // Check to see if any ?server_name= query parameters were
-    // given in the request.
-    if serverNames, ok := req.URL.Query()["server_name"]; ok {
+    // Check to see if any ?via= or ?server_name= query parameters
+    // were given in the request.
+    if serverNames, ok := req.URL.Query()["via"]; ok {
+        for _, serverName := range serverNames {
+            joinReq.ServerNames = append(
+                joinReq.ServerNames,
+                spec.ServerName(serverName),
+            )
+        }
+    } else if serverNames, ok := req.URL.Query()["server_name"]; ok {
         for _, serverName := range serverNames {
             joinReq.ServerNames = append(
                 joinReq.ServerNames,

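For illustration only (not part of the commit), a minimal standalone sketch of the precedence the changed handler applies: `?via=` values (MSC4156) are preferred, and the legacy `?server_name=` values are used only as a fallback. It relies solely on the standard library's `net/url`; the join path shown is the usual client-server join endpoint.

```go
package main

import (
	"fmt"
	"net/url"
)

// serverNamesFromQuery mirrors the precedence in the hunk above: prefer the
// MSC4156 ?via= parameter and fall back to the older ?server_name= parameter.
func serverNamesFromQuery(q url.Values) []string {
	if via, ok := q["via"]; ok {
		return via
	}
	return q["server_name"]
}

func main() {
	u, _ := url.Parse("/_matrix/client/v3/join/%21abc%3Aexample.org?via=one.example&server_name=two.example")
	fmt.Println(serverNamesFromQuery(u.Query())) // [one.example]

	u, _ = url.Parse("/_matrix/client/v3/join/%21abc%3Aexample.org?server_name=two.example")
	fmt.Println(serverNamesFromQuery(u.Query())) // [two.example]
}
```
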
|
@ -7,7 +7,12 @@
|
|||
package routing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/gomatrixserverlib/fclient"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/element-hq/dendrite/clientapi/auth"
|
||||
"github.com/element-hq/dendrite/clientapi/auth/authtypes"
|
||||
|
@ -23,10 +28,15 @@ type crossSigningRequest struct {
|
|||
Auth newPasswordAuth `json:"auth"`
|
||||
}
|
||||
|
||||
type UploadKeysAPI interface {
|
||||
QueryKeys(ctx context.Context, req *api.QueryKeysRequest, res *api.QueryKeysResponse)
|
||||
api.UploadDeviceKeysAPI
|
||||
}
|
||||
|
||||
func UploadCrossSigningDeviceKeys(
|
||||
req *http.Request, userInteractiveAuth *auth.UserInteractive,
|
||||
keyserverAPI api.ClientKeyAPI, device *api.Device,
|
||||
accountAPI api.ClientUserAPI, cfg *config.ClientAPI,
|
||||
req *http.Request,
|
||||
keyserverAPI UploadKeysAPI, device *api.Device,
|
||||
accountAPI auth.GetAccountByPassword, cfg *config.ClientAPI,
|
||||
) util.JSONResponse {
|
||||
uploadReq := &crossSigningRequest{}
|
||||
uploadRes := &api.PerformUploadDeviceKeysResponse{}
|
||||
|
@ -35,32 +45,59 @@ func UploadCrossSigningDeviceKeys(
|
|||
if resErr != nil {
|
||||
return *resErr
|
||||
}
|
||||
sessionID := uploadReq.Auth.Session
|
||||
if sessionID == "" {
|
||||
sessionID = util.RandomString(sessionIDLength)
|
||||
}
|
||||
if uploadReq.Auth.Type != authtypes.LoginTypePassword {
|
||||
|
||||
// Query existing keys to determine if UIA is required
|
||||
keyResp := api.QueryKeysResponse{}
|
||||
keyserverAPI.QueryKeys(req.Context(), &api.QueryKeysRequest{
|
||||
UserID: device.UserID,
|
||||
UserToDevices: map[string][]string{device.UserID: {device.ID}},
|
||||
Timeout: time.Second * 10,
|
||||
}, &keyResp)
|
||||
|
||||
if keyResp.Error != nil {
|
||||
logrus.WithError(keyResp.Error).Error("Failed to query keys")
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusUnauthorized,
|
||||
JSON: newUserInteractiveResponse(
|
||||
sessionID,
|
||||
[]authtypes.Flow{
|
||||
{
|
||||
Stages: []authtypes.LoginType{authtypes.LoginTypePassword},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
),
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: spec.Unknown(keyResp.Error.Error()),
|
||||
}
|
||||
}
|
||||
typePassword := auth.LoginTypePassword{
|
||||
GetAccountByPassword: accountAPI.QueryAccountByPassword,
|
||||
Config: cfg,
|
||||
|
||||
existingMasterKey, hasMasterKey := keyResp.MasterKeys[device.UserID]
|
||||
requireUIA := false
|
||||
if hasMasterKey {
|
||||
// If we have a master key, check if any of the existing keys differ. If they do,
|
||||
// we need to re-authenticate the user.
|
||||
requireUIA = keysDiffer(existingMasterKey, keyResp, uploadReq, device.UserID)
|
||||
}
|
||||
if _, authErr := typePassword.Login(req.Context(), &uploadReq.Auth.PasswordRequest); authErr != nil {
|
||||
return *authErr
|
||||
|
||||
if requireUIA {
|
||||
sessionID := uploadReq.Auth.Session
|
||||
if sessionID == "" {
|
||||
sessionID = util.RandomString(sessionIDLength)
|
||||
}
|
||||
if uploadReq.Auth.Type != authtypes.LoginTypePassword {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusUnauthorized,
|
||||
JSON: newUserInteractiveResponse(
|
||||
sessionID,
|
||||
[]authtypes.Flow{
|
||||
{
|
||||
Stages: []authtypes.LoginType{authtypes.LoginTypePassword},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
),
|
||||
}
|
||||
}
|
||||
typePassword := auth.LoginTypePassword{
|
||||
GetAccountByPassword: accountAPI,
|
||||
Config: cfg,
|
||||
}
|
||||
if _, authErr := typePassword.Login(req.Context(), &uploadReq.Auth.PasswordRequest); authErr != nil {
|
||||
return *authErr
|
||||
}
|
||||
sessions.addCompletedSessionStage(sessionID, authtypes.LoginTypePassword)
|
||||
}
|
||||
sessions.addCompletedSessionStage(sessionID, authtypes.LoginTypePassword)
|
||||
|
||||
uploadReq.UserID = device.UserID
|
||||
keyserverAPI.PerformUploadDeviceKeys(req.Context(), &uploadReq.PerformUploadDeviceKeysRequest, uploadRes)
|
||||
|
@ -96,6 +133,21 @@ func UploadCrossSigningDeviceKeys(
|
|||
}
|
||||
}
|
||||
|
||||
func keysDiffer(existingMasterKey fclient.CrossSigningKey, keyResp api.QueryKeysResponse, uploadReq *crossSigningRequest, userID string) bool {
|
||||
masterKeyEqual := existingMasterKey.Equal(&uploadReq.MasterKey)
|
||||
if !masterKeyEqual {
|
||||
return true
|
||||
}
|
||||
existingSelfSigningKey := keyResp.SelfSigningKeys[userID]
|
||||
selfSigningEqual := existingSelfSigningKey.Equal(&uploadReq.SelfSigningKey)
|
||||
if !selfSigningEqual {
|
||||
return true
|
||||
}
|
||||
existingUserSigningKey := keyResp.UserSigningKeys[userID]
|
||||
userSigningEqual := existingUserSigningKey.Equal(&uploadReq.UserSigningKey)
|
||||
return !userSigningEqual
|
||||
}
|
||||
|
||||
func UploadCrossSigningDeviceSignatures(req *http.Request, keyserverAPI api.ClientKeyAPI, device *api.Device) util.JSONResponse {
|
||||
uploadReq := &api.PerformUploadDeviceSignaturesRequest{}
|
||||
uploadRes := &api.PerformUploadDeviceSignaturesResponse{}
|
||||
|
|
clientapi/routing/key_crosssigning_test.go (new file, 316 lines)

package routing

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"

    "github.com/element-hq/dendrite/setup/config"
    "github.com/element-hq/dendrite/test"
    "github.com/element-hq/dendrite/test/testrig"
    "github.com/element-hq/dendrite/userapi/api"
    "github.com/matrix-org/gomatrixserverlib"
    "github.com/matrix-org/gomatrixserverlib/fclient"
    "github.com/matrix-org/gomatrixserverlib/spec"
)

type mockKeyAPI struct {
    t             *testing.T
    userResponses map[string]api.QueryKeysResponse
}

func (m mockKeyAPI) QueryKeys(ctx context.Context, req *api.QueryKeysRequest, res *api.QueryKeysResponse) {
    res.MasterKeys = m.userResponses[req.UserID].MasterKeys
    res.SelfSigningKeys = m.userResponses[req.UserID].SelfSigningKeys
    res.UserSigningKeys = m.userResponses[req.UserID].UserSigningKeys
    if m.t != nil {
        m.t.Logf("QueryKeys: %+v => %+v", req, res)
    }
}

func (m mockKeyAPI) PerformUploadDeviceKeys(ctx context.Context, req *api.PerformUploadDeviceKeysRequest, res *api.PerformUploadDeviceKeysResponse) {
    // Just a dummy upload which always succeeds
}

func getAccountByPassword(ctx context.Context, req *api.QueryAccountByPasswordRequest, res *api.QueryAccountByPasswordResponse) error {
    res.Exists = true
    res.Account = &api.Account{UserID: fmt.Sprintf("@%s:%s", req.Localpart, req.ServerName)}
    return nil
}

// Tests that if there is no existing master key for the user, the request is allowed
func Test_UploadCrossSigningDeviceKeys_ValidRequest(t *testing.T) {
    req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(`{
    "master_key": {"user_id": "@user:example.com", "usage": ["master"], "keys": {"ed25519:1": "key1"}},
    "self_signing_key": {"user_id": "@user:example.com", "usage": ["self_signing"], "keys": {"ed25519:2": "key2"}},
    "user_signing_key": {"user_id": "@user:example.com", "usage": ["user_signing"], "keys": {"ed25519:3": "key3"}}
}`))
    req.Header.Set("Content-Type", "application/json")

    keyserverAPI := &mockKeyAPI{
        userResponses: map[string]api.QueryKeysResponse{
            "@user:example.com": {},
        },
    }
    device := &api.Device{UserID: "@user:example.com", ID: "device"}
    cfg := &config.ClientAPI{}

    res := UploadCrossSigningDeviceKeys(req, keyserverAPI, device, getAccountByPassword, cfg)
    if res.Code != http.StatusOK {
        t.Fatalf("expected status %d, got %d", http.StatusOK, res.Code)
    }
}

// Require UIA if there is an existing master key and there is no auth provided.
func Test_UploadCrossSigningDeviceKeys_Unauthorised(t *testing.T) {
    userID := "@user:example.com"

    // Note that there is no auth field.
    request := fclient.CrossSigningKeys{
        MasterKey: fclient.CrossSigningKey{
            Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key1")},
            Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeMaster},
            UserID: userID,
        },
        SelfSigningKey: fclient.CrossSigningKey{
            Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key2")},
            Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeSelfSigning},
            UserID: userID,
        },
        UserSigningKey: fclient.CrossSigningKey{
            Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key3")},
            Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeUserSigning},
            UserID: userID,
        },
    }

    b := bytes.Buffer{}
    m := json.NewEncoder(&b)
    err := m.Encode(request)
    if err != nil {
        t.Fatal(err)
    }

    req := httptest.NewRequest(http.MethodPost, "/", &b)
    req.Header.Set("Content-Type", "application/json")

    keyserverAPI := &mockKeyAPI{
        t: t,
        userResponses: map[string]api.QueryKeysResponse{
            "@user:example.com": {
                MasterKeys: map[string]fclient.CrossSigningKey{
                    "@user:example.com": {UserID: "@user:example.com", Usage: []fclient.CrossSigningKeyPurpose{"master"}, Keys: map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key1")}},
                },
                SelfSigningKeys: nil,
                UserSigningKeys: nil,
            },
        },
    }
    device := &api.Device{UserID: "@user:example.com", ID: "device"}
    cfg := &config.ClientAPI{}

    res := UploadCrossSigningDeviceKeys(req, keyserverAPI, device, getAccountByPassword, cfg)
    if res.Code != http.StatusUnauthorized {
        t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, res.Code)
    }
}

// Invalid JSON is rejected
func Test_UploadCrossSigningDeviceKeys_InvalidJSON(t *testing.T) {
    req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(`{
    "auth": {"type": "m.login.password", "session": "session", "user": "user", "password": "password"},
    "master_key": {"user_id": "@user:example.com", "usage": ["master"], "keys": {"ed25519:1": "key1"}},
    "self_signing_key": {"user_id": "@user:example.com", "usage": ["self_signing"], "keys": {"ed25519:2": "key2"}},
    "user_signing_key": {"user_id": "@user:example.com", "usage": ["user_signing"], "keys": {"ed25519:3": "key3"}
}`)) // Missing closing brace
    req.Header.Set("Content-Type", "application/json")

    keyserverAPI := &mockKeyAPI{}
    device := &api.Device{UserID: "@user:example.com", ID: "device"}
    cfg := &config.ClientAPI{}

    res := UploadCrossSigningDeviceKeys(req, keyserverAPI, device, getAccountByPassword, cfg)
    if res.Code != http.StatusBadRequest {
        t.Fatalf("expected status %d, got %d", http.StatusBadRequest, res.Code)
    }
}

// Require UIA if an existing master key is present and the keys differ.
func Test_UploadCrossSigningDeviceKeys_ExistingKeysMismatch(t *testing.T) {
    // Again, no auth provided
    req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(`{
    "master_key": {"user_id": "@user:example.com", "usage": ["master"], "keys": {"ed25519:1": "key1"}},
    "self_signing_key": {"user_id": "@user:example.com", "usage": ["self_signing"], "keys": {"ed25519:2": "key2"}},
    "user_signing_key": {"user_id": "@user:example.com", "usage": ["user_signing"], "keys": {"ed25519:3": "key3"}}
}`))
    req.Header.Set("Content-Type", "application/json")

    keyserverAPI := &mockKeyAPI{
        userResponses: map[string]api.QueryKeysResponse{
            "@user:example.com": {
                MasterKeys: map[string]fclient.CrossSigningKey{
                    "@user:example.com": {UserID: "@user:example.com", Usage: []fclient.CrossSigningKeyPurpose{"master"}, Keys: map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("different_key")}},
                },
            },
        },
    }
    device := &api.Device{UserID: "@user:example.com", ID: "device"}

    cfg, _, _ := testrig.CreateConfig(t, test.DBTypeSQLite)
    cfg.Global.ServerName = "example.com"

    res := UploadCrossSigningDeviceKeys(req, keyserverAPI, device, getAccountByPassword, &cfg.ClientAPI)
    if res.Code != http.StatusUnauthorized {
        t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, res.Code)
    }
}

func Test_KeysDiffer_MasterKeyMismatch(t *testing.T) {
    existingMasterKey := fclient.CrossSigningKey{
        UserID: "@user:example.com",
        Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeMaster},
        Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("existing_key")},
    }
    keyResp := api.QueryKeysResponse{}
    uploadReq := &crossSigningRequest{
        PerformUploadDeviceKeysRequest: api.PerformUploadDeviceKeysRequest{
            CrossSigningKeys: fclient.CrossSigningKeys{
                MasterKey: fclient.CrossSigningKey{
                    UserID: "@user:example.com",
                    Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeMaster},
                    Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("new_key")},
                },
            },
        },
    }
    userID := "@user:example.com"

    result := keysDiffer(existingMasterKey, keyResp, uploadReq, userID)
    if !result {
        t.Fatalf("expected keys to differ, but they did not")
    }
}

func Test_KeysDiffer_SelfSigningKeyMismatch(t *testing.T) {
    existingMasterKey := fclient.CrossSigningKey{
        UserID: "@user:example.com",
        Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeMaster},
        Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key")},
    }
    keyResp := api.QueryKeysResponse{
        SelfSigningKeys: map[string]fclient.CrossSigningKey{
            "@user:example.com": {
                UserID: "@user:example.com",
                Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeSelfSigning},
                Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:2": spec.Base64Bytes("existing_key")},
            },
        },
    }
    uploadReq := &crossSigningRequest{
        PerformUploadDeviceKeysRequest: api.PerformUploadDeviceKeysRequest{
            CrossSigningKeys: fclient.CrossSigningKeys{
                SelfSigningKey: fclient.CrossSigningKey{
                    UserID: "@user:example.com",
                    Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeSelfSigning},
                    Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:2": spec.Base64Bytes("new_key")},
                },
            },
        },
    }
    userID := "@user:example.com"

    result := keysDiffer(existingMasterKey, keyResp, uploadReq, userID)
    if !result {
        t.Fatalf("expected keys to differ, but they did not")
    }
}

func Test_KeysDiffer_UserSigningKeyMismatch(t *testing.T) {
    existingMasterKey := fclient.CrossSigningKey{
        UserID: "@user:example.com",
        Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeMaster},
        Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key")},
    }
    keyResp := api.QueryKeysResponse{
        UserSigningKeys: map[string]fclient.CrossSigningKey{
            "@user:example.com": {
                UserID: "@user:example.com",
                Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeUserSigning},
                Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:3": spec.Base64Bytes("existing_key")},
            },
        },
    }
    uploadReq := &crossSigningRequest{
        PerformUploadDeviceKeysRequest: api.PerformUploadDeviceKeysRequest{
            CrossSigningKeys: fclient.CrossSigningKeys{
                UserSigningKey: fclient.CrossSigningKey{
                    UserID: "@user:example.com",
                    Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeUserSigning},
                    Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:3": spec.Base64Bytes("new_key")},
                },
            },
        },
    }
    userID := "@user:example.com"

    result := keysDiffer(existingMasterKey, keyResp, uploadReq, userID)
    if !result {
        t.Fatalf("expected keys to differ, but they did not")
    }
}

func Test_KeysDiffer_AllKeysMatch(t *testing.T) {
    existingMasterKey := fclient.CrossSigningKey{
        UserID: "@user:example.com",
        Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeMaster},
        Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key")},
    }
    keyResp := api.QueryKeysResponse{
        SelfSigningKeys: map[string]fclient.CrossSigningKey{
            "@user:example.com": {
                UserID: "@user:example.com",
                Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeSelfSigning},
                Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:2": spec.Base64Bytes("key")},
            },
        },
        UserSigningKeys: map[string]fclient.CrossSigningKey{
            "@user:example.com": {
                UserID: "@user:example.com",
                Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeUserSigning},
                Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:3": spec.Base64Bytes("key")},
            },
        },
    }
    uploadReq := &crossSigningRequest{
        PerformUploadDeviceKeysRequest: api.PerformUploadDeviceKeysRequest{
            CrossSigningKeys: fclient.CrossSigningKeys{
                MasterKey: fclient.CrossSigningKey{
                    UserID: "@user:example.com",
                    Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeMaster},
                    Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:1": spec.Base64Bytes("key")},
                },
                SelfSigningKey: fclient.CrossSigningKey{
                    UserID: "@user:example.com",
                    Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeSelfSigning},
                    Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:2": spec.Base64Bytes("key")},
                },
                UserSigningKey: fclient.CrossSigningKey{
                    UserID: "@user:example.com",
                    Usage:  []fclient.CrossSigningKeyPurpose{fclient.CrossSigningKeyPurposeUserSigning},
                    Keys:   map[gomatrixserverlib.KeyID]spec.Base64Bytes{"ed25519:3": spec.Base64Bytes("key")},
                },
            },
        },
    }
    userID := "@user:example.com"

    result := keysDiffer(existingMasterKey, keyResp, uploadReq, userID)
    if result {
        t.Fatalf("expected keys to match, but they did not")
    }
}

@@ -1441,7 +1441,7 @@ func Setup(
     // Cross-signing device keys

     postDeviceSigningKeys := httputil.MakeAuthAPI("post_device_signing_keys", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
-        return UploadCrossSigningDeviceKeys(req, userInteractiveAuth, userAPI, device, userAPI, cfg)
+        return UploadCrossSigningDeviceKeys(req, userAPI, device, userAPI.QueryAccountByPassword, cfg)
     })

     postDeviceSigningSignatures := httputil.MakeAuthAPI("post_device_signing_signatures", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {

@@ -414,8 +414,14 @@ func generateSendEvent(
     for i := range queryRes.StateEvents {
         stateEvents[i] = queryRes.StateEvents[i].PDU
     }
-    provider := gomatrixserverlib.NewAuthEvents(gomatrixserverlib.ToPDUs(stateEvents))
-    if err = gomatrixserverlib.Allowed(e.PDU, &provider, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
+    provider, err := gomatrixserverlib.NewAuthEvents(gomatrixserverlib.ToPDUs(stateEvents))
+    if err != nil {
+        return nil, &util.JSONResponse{
+            Code: http.StatusForbidden,
+            JSON: spec.Forbidden(err.Error()),
+        }
+    }
+    if err = gomatrixserverlib.Allowed(e.PDU, provider, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
         return rsAPI.QueryUserIDForSender(ctx, *validRoomID, senderID)
     }); err != nil {
         return nil, &util.JSONResponse{

@@ -13,9 +13,9 @@ import (
     "net/http"
     "strings"

+    "github.com/coder/websocket"
     "github.com/element-hq/dendrite/setup/config"
     "github.com/matrix-org/gomatrixserverlib/fclient"
-    "nhooyr.io/websocket"

     pineconeRouter "github.com/matrix-org/pinecone/router"
     pineconeSessions "github.com/matrix-org/pinecone/sessions"

@@ -138,6 +138,7 @@ func (p *P2PMonolith) SetupDendrite(
     rsAPI.SetFederationAPI(fsAPI, keyRing)

     userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation, enableMetrics, fsAPI.IsBlacklistedOrBackingOff)
+    rsAPI.SetUserAPI(userAPI)

     asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)

@@ -39,6 +39,7 @@ var (
     flagDockerHost       = flag.String("docker-host", "localhost", "The hostname of the docker client. 'localhost' if running locally, 'host.docker.internal' if running in Docker.")
     flagDirect           = flag.Bool("direct", false, "If a direct upgrade from the defined FROM version to TO should be done")
     flagSqlite           = flag.Bool("sqlite", false, "Test SQLite instead of PostgreSQL")
+    flagRepository       = flag.String("repository", "element-hq/dendrite", "The base repository to use when running upgrade tests.")
     alphaNumerics        = regexp.MustCompile("[^a-zA-Z0-9]+")
 )

@@ -187,7 +188,7 @@ func downloadArchive(cli *http.Client, tmpDir, archiveURL string, dockerfile []b
 }

 // buildDendrite builds Dendrite on the branchOrTagName given. Returns the image ID or an error
-func buildDendrite(httpClient *http.Client, dockerClient *client.Client, tmpDir string, branchOrTagName, binary string) (string, error) {
+func buildDendrite(httpClient *http.Client, dockerClient *client.Client, tmpDir string, branchOrTagName, binary, repository string) (string, error) {
     var tarball *bytes.Buffer
     var err error
     // If a custom HEAD location is given, use that, else pull from github. Mostly useful for CI
@@ -210,7 +211,7 @@
         log.Printf("%s: Downloading version %s to %s\n", branchOrTagName, branchOrTagName, tmpDir)
         // pull an archive, this contains a top-level directory which screws with the build context
         // which we need to fix up post download
-        u := fmt.Sprintf("https://github.com/element-hq/dendrite/archive/%s.tar.gz", branchOrTagName)
+        u := fmt.Sprintf("https://github.com/%s/archive/%s.tar.gz", repository, branchOrTagName)
         tarball, err = downloadArchive(httpClient, tmpDir, u, dockerfile())
         if err != nil {
             return "", fmt.Errorf("failed to download archive %s: %w", u, err)
@@ -254,8 +255,8 @@
     return imageID, nil
 }

-func getAndSortVersionsFromGithub(httpClient *http.Client) (semVers []*semver.Version, err error) {
-    u := "https://api.github.com/repos/element-hq/dendrite/tags"
+func getAndSortVersionsFromGithub(httpClient *http.Client, repository string) (semVers []*semver.Version, err error) {
+    u := fmt.Sprintf("https://api.github.com/repos/%s/tags", repository)

     var res *http.Response
     for i := 0; i < 3; i++ {
@@ -290,8 +291,8 @@
     return semVers, nil
 }

-func calculateVersions(cli *http.Client, from, to string, direct bool) []*semver.Version {
-    semvers, err := getAndSortVersionsFromGithub(cli)
+func calculateVersions(cli *http.Client, from, to, repository string, direct bool) []*semver.Version {
+    semvers, err := getAndSortVersionsFromGithub(cli, repository)
     if err != nil {
         log.Fatalf("failed to collect semvers from github: %s", err)
     }
@@ -348,7 +349,7 @@
     return semvers
 }

-func buildDendriteImages(httpClient *http.Client, dockerClient *client.Client, baseTempDir string, concurrency int, versions []*semver.Version) map[string]string {
+func buildDendriteImages(httpClient *http.Client, dockerClient *client.Client, baseTempDir, repository string, concurrency int, versions []*semver.Version) map[string]string {
     // concurrently build all versions, this can be done in any order. The mutex protects the map
     branchToImageID := make(map[string]string)
     var mu sync.Mutex
@@ -368,7 +369,7 @@
             branchName, binary := versionToBranchAndBinary(version)
             log.Printf("Building version %s with binary %s", branchName, binary)
             tmpDir := baseTempDir + alphaNumerics.ReplaceAllString(branchName, "")
-            imgID, err := buildDendrite(httpClient, dockerClient, tmpDir, branchName, binary)
+            imgID, err := buildDendrite(httpClient, dockerClient, tmpDir, branchName, binary, repository)
             if err != nil {
                 log.Fatalf("%s: failed to build dendrite image: %s", version, err)
             }
@@ -583,10 +584,10 @@ func main() {
         os.Exit(1)
     }
     cleanup(dockerClient)
-    versions := calculateVersions(httpClient, *flagFrom, *flagTo, *flagDirect)
+    versions := calculateVersions(httpClient, *flagFrom, *flagTo, *flagRepository, *flagDirect)
     log.Printf("Testing dendrite versions: %v\n", versions)

-    branchToImageID := buildDendriteImages(httpClient, dockerClient, *flagTempDir, *flagBuildConcurrency, versions)
+    branchToImageID := buildDendriteImages(httpClient, dockerClient, *flagTempDir, *flagRepository, *flagBuildConcurrency, versions)

     // make a shared postgres volume
     volume, err := dockerClient.VolumeCreate(context.Background(), volume.CreateOptions{

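For illustration only, a small standalone sketch (not from the commit) of the effect of the new `-repository` flag: both download URLs used by the upgrade tests are now derived from the same repository value instead of a hard-coded one. The version string used here is just an example.

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	// Same default as the new flagRepository in the hunks above.
	repository := flag.String("repository", "element-hq/dendrite", "The base repository to use when running upgrade tests.")
	flag.Parse()

	branchOrTagName := "v0.14.1" // example tag
	// Source archive for a branch or tag, as downloaded by buildDendrite.
	archiveURL := fmt.Sprintf("https://github.com/%s/archive/%s.tar.gz", *repository, branchOrTagName)
	// Tag listing used by getAndSortVersionsFromGithub to pick upgrade steps.
	tagsURL := fmt.Sprintf("https://api.github.com/repos/%s/tags", *repository)

	fmt.Println(archiveURL)
	fmt.Println(tagsURL)
}
```
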
@@ -94,6 +94,8 @@ func main() {
         dnsCache = fclient.NewDNSCache(
             cfg.Global.DNSCache.CacheSize,
             cfg.Global.DNSCache.CacheLifetime,
+            cfg.FederationAPI.AllowNetworkCIDRs,
+            cfg.FederationAPI.DenyNetworkCIDRs,
         )
         logrus.Infof(
             "DNS cache enabled (size %d, lifetime %s)",

@@ -71,6 +71,10 @@ func main() {
     cfg.ClientAPI.RateLimiting.Enabled = false
     cfg.FederationAPI.DisableTLSValidation = false
     cfg.FederationAPI.DisableHTTPKeepalives = true
+    // Allow allow networks when running in CI, as otherwise connections
+    // to other servers might be blocked when running Complement/Sytest.
+    cfg.FederationAPI.DenyNetworkCIDRs = []string{}
+    cfg.FederationAPI.AllowNetworkCIDRs = []string{}
     // don't hit matrix.org when running tests!!!
     cfg.FederationAPI.KeyPerspectives = config.KeyPerspectives{}
     cfg.MediaAPI.BasePath = config.Path(filepath.Join(*dirPath, "media"))

@@ -70,6 +70,8 @@ func main() {
         dnsCache = fclient.NewDNSCache(
             cfg.Global.DNSCache.CacheSize,
             cfg.Global.DNSCache.CacheLifetime,
+            cfg.FederationAPI.AllowNetworkCIDRs,
+            cfg.FederationAPI.DenyNetworkCIDRs,
         )
         logrus.Infof(
             "DNS cache enabled (size %d, lifetime %s)",

@@ -65,6 +65,8 @@ func main() {
         dnsCache = fclient.NewDNSCache(
             cfg.Global.DNSCache.CacheSize,
             cfg.Global.DNSCache.CacheLifetime,
+            cfg.FederationAPI.AllowNetworkCIDRs,
+            cfg.FederationAPI.DenyNetworkCIDRs,
         )
         logrus.Infof(
             "DNS cache enabled (size %d, lifetime %s)",

@@ -254,6 +254,24 @@ federation_api:
   # last resort.
   prefer_direct_fetch: false

+  # deny_networks and allow_networks are the CIDR ranges used to prevent requests
+  # from accessing private IPs. If your system has specific IPs it should never
+  # contact, add them here with CIDR notation.
+  #
+  # The deny list is checked before the allow list.
+  deny_networks:
+    - "127.0.0.1/8"
+    - "10.0.0.0/8"
+    - "172.16.0.0/12"
+    - "192.168.0.0/16"
+    - "100.64.0.0/10"
+    - "169.254.0.0/16"
+    - "::1/128"
+    - "fe80::/64"
+    - "fc00::/7"
+  allow_networks:
+    - "0.0.0.0/0" # "Everything". The deny list will help limit this.
+
 # Configuration for the Media API.
 media_api:
   # Storage path for uploaded media. May be relative or absolute.

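The sample configuration above only states the policy ("the deny list is checked before the allow list"). As a rough illustration of that ordering, here is a self-contained Go sketch; this is an assumption about behaviour for explanatory purposes, not Dendrite's or gomatrixserverlib's actual implementation.

```go
package main

import (
	"fmt"
	"net"
)

// allowedIP applies the deny list first, then the allow list, mirroring the
// ordering described in the sample configuration above.
func allowedIP(ip net.IP, denyCIDRs, allowCIDRs []string) bool {
	inAny := func(cidrs []string) bool {
		for _, c := range cidrs {
			if _, network, err := net.ParseCIDR(c); err == nil && network.Contains(ip) {
				return true
			}
		}
		return false
	}
	if inAny(denyCIDRs) {
		return false // denied networks win, regardless of the allow list
	}
	return inAny(allowCIDRs)
}

func main() {
	deny := []string{"127.0.0.0/8", "10.0.0.0/8", "192.168.0.0/16", "fc00::/7"}
	allow := []string{"0.0.0.0/0"} // "everything"; the deny list limits this

	fmt.Println(allowedIP(net.ParseIP("10.1.2.3"), deny, allow))      // false: private range is denied
	fmt.Println(allowedIP(net.ParseIP("93.184.216.34"), deny, allow)) // true: public address passes
}
```
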
@@ -34,27 +34,49 @@ The following items are unlikely to be accepted into a main Dendrite release for

 ## Sign off

-We require that everyone who contributes to the project signs off their contributions
-in accordance with the [Developer Certificate of Origin](https://github.com/matrix-org/matrix-spec/blob/main/CONTRIBUTING.rst#sign-off).
-In effect, this means adding a statement to your pull requests or commit messages
-along the lines of:
+We ask that everybody who contributes to this project signs off their contributions, as explained below.
+
+We follow a simple 'inbound=outbound' model for contributions: the act of submitting an 'inbound' contribution means that the contributor agrees to license their contribution under the same terms as the project's overall 'outbound' license - in our case, this is Apache Software License v2 (see [LICENSE](../..//LICENSE)).
+
+In order to have a concrete record that your contribution is intentional and you agree to license it under the same terms as the project's license, we've adopted the same lightweight approach used by the [Linux Kernel](https://www.kernel.org/doc/html/latest/process/submitting-patches.html), [Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other projects: the [Developer Certificate of Origin](https://developercertificate.org/) (DCO). This is a simple declaration that you wrote the contribution or otherwise have the right to contribute it to Matrix:

 ```
-Signed-off-by: Full Name <email address>
+Developer Certificate of Origin
+Version 1.1
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+Developer's Certificate of Origin 1.1
+By making a contribution to this project, I certify that:
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
 ```

-Please note that we can only accept contributions under a legally identifiable name,
-such as your name as it appears on government-issued documentation or common-law names
-(claimed by legitimate usage or repute). We cannot accept sign-offs from a pseudonym or
-alias and cannot accept anonymous contributions.
+Unfortunately we can't accept contributions without a sign-off.
+If you agree to this for your contribution, then all that's needed is to include the line in your commit or pull request comment:

-If you would prefer to sign off privately instead (so as to not reveal your full
-name on a public pull request), you can do so by emailing a sign-off declaration
-and a link to your pull request directly to the [Matrix.org Foundation](https://matrix.org/foundation/)
-at `dco@matrix.org`. Once a private sign-off has been made, you will not be required
-to do so for future contributions.
+```
+Signed-off-by: Your Name <your@email.example.org>
+```
+
+Git allows you to add this signoff automatically when using the `-s` flag to `git commit`, which uses the name and email set in your `user.name` and `user.email` git configs.

 ## Getting up and running

@@ -24,10 +24,10 @@ First we'll generate private key, which is used to sign events, the following wi
mkdir -p ./config
docker run --rm --entrypoint="/usr/bin/generate-keys" \
-v $(pwd)/config:/mnt \
matrixdotorg/dendrite-monolith:latest \
ghcr.io/element-hq/dendrite-monolith:latest \
-private-key /mnt/matrix_key.pem

# Windows equivalent: docker run --rm --entrypoint="/usr/bin/generate-keys" -v %cd%/config:/mnt matrixdotorg/dendrite-monolith:latest -private-key /mnt/matrix_key.pem
# Windows equivalent: docker run --rm --entrypoint="/usr/bin/generate-keys" -v %cd%/config:/mnt ghcr.io/element-hq/dendrite-monolith:latest -private-key /mnt/matrix_key.pem
```
(**NOTE**: This only needs to be executed **once**, as you otherwise overwrite the key)

@@ -41,13 +41,13 @@ to the docker-compose file (`services.postgres.environment` values):
mkdir -p ./config
docker run --rm --entrypoint="/bin/sh" \
-v $(pwd)/config:/mnt \
matrixdotorg/dendrite-monolith:latest \
ghcr.io/element-hq/dendrite-monolith:latest \
-c "/usr/bin/generate-config \
-dir /var/dendrite/ \
-db postgres://dendrite:itsasecret@postgres/dendrite?sslmode=disable \
-server YourDomainHere > /mnt/dendrite.yaml"

# Windows equivalent: docker run --rm --entrypoint="/bin/sh" -v %cd%/config:/mnt matrixdotorg/dendrite-monolith:latest -c "/usr/bin/generate-config -dir /var/dendrite/ -db postgres://dendrite:itsasecret@postgres/dendrite?sslmode=disable -server YourDomainHere > /mnt/dendrite.yaml"
# Windows equivalent: docker run --rm --entrypoint="/bin/sh" -v %cd%/config:/mnt ghcr.io/element-hq/dendrite-monolith:latest -c "/usr/bin/generate-config -dir /var/dendrite/ -db postgres://dendrite:itsasecret@postgres/dendrite?sslmode=disable -server YourDomainHere > /mnt/dendrite.yaml"
```

You can then change `config/dendrite.yaml` to your liking.

@@ -344,7 +344,7 @@ func buildMembershipEvent(
protoEvent.Depth = queryRes.Depth
protoEvent.PrevEvents = queryRes.LatestEvents

authEvents := gomatrixserverlib.NewAuthEvents(nil)
authEvents, _ := gomatrixserverlib.NewAuthEvents(nil)

for i := range queryRes.StateEvents {
err = authEvents.AddEvent(queryRes.StateEvents[i].PDU)

@@ -357,7 +357,7 @@ func buildMembershipEvent(
return nil, err
}

refs, err := eventsNeeded.AuthEventReferences(&authEvents)
refs, err := eventsNeeded.AuthEventReferences(authEvents)
if err != nil {
return nil, err
}

@@ -421,7 +421,7 @@ func sendToRemoteServer(
// found. Returning an error isn't necessary in this case as the event will be
// rejected by gomatrixserverlib.
func fillDisplayName(
builder *gomatrixserverlib.ProtoEvent, authEvents gomatrixserverlib.AuthEvents,
builder *gomatrixserverlib.ProtoEvent, authEvents *gomatrixserverlib.AuthEvents,
) error {
var content gomatrixserverlib.MemberContent
if err := json.Unmarshal(builder.Content, &content); err != nil {
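The three hunks above track the same gomatrixserverlib API change: `NewAuthEvents` now returns an error alongside the `*AuthEvents` value, so call sites pass it on without the extra `&` (for example to `AuthEventReferences`), and helpers such as `fillDisplayName` now take a `*gomatrixserverlib.AuthEvents`. The sketch below condenses that call pattern; it uses only the calls visible in the diff, and the wrapper function, its parameter types, and the choice to propagate the error that the hunk discards with `_` are assumptions for illustration rather than Dendrite's actual code.

```go
package example

import "github.com/matrix-org/gomatrixserverlib"

// buildAuthEventRefs mirrors the updated call pattern from the hunks above.
// The parameter types are assumptions; only the method calls themselves are
// taken from the diff.
func buildAuthEventRefs(
	eventsNeeded gomatrixserverlib.StateNeeded,
	stateEvents []gomatrixserverlib.PDU,
) error {
	// NewAuthEvents now also returns an error; the hunk discards it with `_`,
	// but it can be propagated instead.
	authEvents, err := gomatrixserverlib.NewAuthEvents(nil)
	if err != nil {
		return err
	}
	for _, ev := range stateEvents {
		if err := authEvents.AddEvent(ev); err != nil {
			return err
		}
	}
	// authEvents is already a pointer, so it is passed through directly
	// (previously the call site took &authEvents here).
	_, err = eventsNeeded.AuthEventReferences(authEvents)
	return err
}
```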

99
go.mod
@ -5,12 +5,13 @@ require (
|
|||
github.com/DATA-DOG/go-sqlmock v1.5.0
|
||||
github.com/MFAshby/stdemuxerhook v1.0.0
|
||||
github.com/Masterminds/semver/v3 v3.1.1
|
||||
github.com/blevesearch/bleve/v2 v2.4.0
|
||||
github.com/blevesearch/bleve/v2 v2.4.4
|
||||
github.com/codeclysm/extract v2.2.0+incompatible
|
||||
github.com/coder/websocket v1.8.12
|
||||
github.com/cretz/bine v0.2.0
|
||||
github.com/dgraph-io/ristretto v0.1.1
|
||||
github.com/docker/docker v25.0.6+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/dgraph-io/ristretto v0.2.0
|
||||
github.com/docker/docker v26.1.5+incompatible
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/eyedeekay/goSam v0.32.54
|
||||
github.com/eyedeekay/onramp v0.33.8
|
||||
github.com/getsentry/sentry-go v0.14.0
|
||||
|
@ -18,61 +19,60 @@ require (
|
|||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/kardianos/minwinsvc v1.0.2
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e
|
||||
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
|
||||
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20240910190622-2c764912ce93
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20250116181547-c4f1e01eab0d
|
||||
github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7
|
||||
github.com/matrix-org/util v0.0.0-20221111132719-399730281e66
|
||||
github.com/mattn/go-sqlite3 v1.14.22
|
||||
github.com/nats-io/nats-server/v2 v2.10.20
|
||||
github.com/nats-io/nats.go v1.36.0
|
||||
github.com/mattn/go-sqlite3 v1.14.24
|
||||
github.com/nats-io/nats-server/v2 v2.10.25
|
||||
github.com/nats-io/nats.go v1.38.0
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/tidwall/gjson v1.18.0
|
||||
github.com/tidwall/sjson v1.2.5
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible
|
||||
github.com/uber/jaeger-lib v2.4.1+incompatible
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.5.6
|
||||
github.com/yggdrasil-network/yggquic v0.0.0-20240802104827-b4e97a928967
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.5.12
|
||||
github.com/yggdrasil-network/yggquic v0.0.0-20241212194307-0d495106021f
|
||||
go.uber.org/atomic v1.11.0
|
||||
golang.org/x/crypto v0.28.0
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
|
||||
golang.org/x/crypto v0.32.0
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
golang.org/x/image v0.18.0
|
||||
golang.org/x/mobile v0.0.0-20240520174638-fa72addaaa1b
|
||||
golang.org/x/sync v0.9.0
|
||||
golang.org/x/term v0.25.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/term v0.28.0
|
||||
gopkg.in/h2non/bimg.v1 v1.1.9
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gotest.tools/v3 v3.4.0
|
||||
gotest.tools/v3 v3.5.1
|
||||
maunium.net/go/mautrix v0.15.1
|
||||
modernc.org/sqlite v1.29.5
|
||||
nhooyr.io/websocket v1.8.7
|
||||
modernc.org/sqlite v1.34.5
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Arceliar/ironwood v0.0.0-20240529054413-b8e59574e2b2 // indirect
|
||||
github.com/Arceliar/ironwood v0.0.0-20241213013129-743fe2fccbd3 // indirect
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/RoaringBitmap/roaring v1.2.3 // indirect
|
||||
github.com/RoaringBitmap/roaring v1.9.3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/bits-and-blooms/bloom/v3 v3.7.0 // indirect
|
||||
github.com/blevesearch/bleve_index_api v1.1.6 // indirect
|
||||
github.com/blevesearch/bleve_index_api v1.1.12 // indirect
|
||||
github.com/blevesearch/geo v0.1.20 // indirect
|
||||
github.com/blevesearch/go-faiss v1.0.13 // indirect
|
||||
github.com/blevesearch/go-faiss v1.0.24 // indirect
|
||||
github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
|
||||
github.com/blevesearch/gtreap v0.1.1 // indirect
|
||||
github.com/blevesearch/mmap-go v1.0.4 // indirect
|
||||
github.com/blevesearch/scorch_segment_api/v2 v2.2.9 // indirect
|
||||
github.com/blevesearch/scorch_segment_api/v2 v2.2.16 // indirect
|
||||
github.com/blevesearch/segment v0.9.1 // indirect
|
||||
github.com/blevesearch/snowballstem v0.9.0 // indirect
|
||||
github.com/blevesearch/upsidedown_store_api v1.0.2 // indirect
|
||||
|
@ -81,9 +81,9 @@ require (
|
|||
github.com/blevesearch/zapx/v12 v12.3.10 // indirect
|
||||
github.com/blevesearch/zapx/v13 v13.3.10 // indirect
|
||||
github.com/blevesearch/zapx/v14 v14.3.10 // indirect
|
||||
github.com/blevesearch/zapx/v15 v15.3.13 // indirect
|
||||
github.com/blevesearch/zapx/v16 v16.0.12 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/blevesearch/zapx/v15 v15.3.16 // indirect
|
||||
github.com/blevesearch/zapx/v16 v16.1.9-0.20241217210638-a0519e7caf3b // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
|
@ -97,41 +97,42 @@ require (
|
|||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/pprof v0.0.0-20230808223545-4887780b67fb // indirect
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect
|
||||
github.com/h2non/filetype v1.1.3 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hjson/hjson-go/v4 v4.4.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/juju/errors v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/highwayhash v1.0.3 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/mschoch/smat v0.2.0 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.5.8 // indirect
|
||||
github.com/nats-io/nkeys v0.4.7 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.7.3 // indirect
|
||||
github.com/nats-io/nkeys v0.4.9 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.11.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/quic-go/quic-go v0.45.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/quic-go/quic-go v0.48.2 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/rs/zerolog v1.29.1 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
go.etcd.io/bbolt v1.3.7 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel v1.32.0 // indirect
|
||||
|
@ -140,22 +141,20 @@ require (
|
|||
go.opentelemetry.io/otel/sdk v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.32.0 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
golang.org/x/mod v0.18.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/sys v0.27.0 // indirect
|
||||
golang.org/x/text v0.20.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
golang.org/x/tools v0.22.0 // indirect
|
||||
golang.org/x/mod v0.19.0 // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/sys v0.29.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.23.0 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
gopkg.in/macaroon.v2 v2.1.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
maunium.net/go/maulogger/v2 v2.4.1 // indirect
|
||||
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
|
||||
modernc.org/libc v1.41.0 // indirect
|
||||
modernc.org/libc v1.55.3 // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.7.2 // indirect
|
||||
modernc.org/strutil v1.2.0 // indirect
|
||||
modernc.org/token v1.1.0 // indirect
|
||||
modernc.org/memory v1.8.0 // indirect
|
||||
nhooyr.io/websocket v1.8.7 // indirect
|
||||
)
|
||||
|
||||
go 1.22
|
||||
|
|
253
go.sum
@ -1,6 +1,6 @@
|
|||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Arceliar/ironwood v0.0.0-20240529054413-b8e59574e2b2 h1:SBdYBKeXYUUFef5wi2CMhYmXFVGiYaRpTvbki0Bu+JQ=
|
||||
github.com/Arceliar/ironwood v0.0.0-20240529054413-b8e59574e2b2/go.mod h1:6WP4799FX0OuWdENGQAh+0RXp9FLh0y7NZ7tM9cJyXk=
|
||||
github.com/Arceliar/ironwood v0.0.0-20241213013129-743fe2fccbd3 h1:d8N0z+udAnbU5PdjpLSNPTWlqeU/nnYsQ42B6+879aw=
|
||||
github.com/Arceliar/ironwood v0.0.0-20241213013129-743fe2fccbd3/go.mod h1:SrrElc3FFMpYCODSr11jWbLFeOM8WsY+DbDY/l2AXF0=
|
||||
github.com/Arceliar/phony v0.0.0-20220903101357-530938a4b13d h1:UK9fsWbWqwIQkMCz1CP+v5pGbsGoWAw6g4AyvMpm1EM=
|
||||
github.com/Arceliar/phony v0.0.0-20220903101357-530938a4b13d/go.mod h1:BCnxhRf47C/dy/e/D2pmB8NkB3dQVIrkD98b220rx5Q=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
|
@ -16,33 +16,33 @@ github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030I
|
|||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY=
|
||||
github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
|
||||
github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
|
||||
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
|
||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bloom/v3 v3.7.0 h1:VfknkqV4xI+PsaDIsoHueyxVDZrfvMn56jeWUzvzdls=
|
||||
github.com/bits-and-blooms/bloom/v3 v3.7.0/go.mod h1:VKlUSvp0lFIYqxJjzdnSsZEw4iHb1kOL2tfHTgyJBHg=
|
||||
github.com/blevesearch/bleve/v2 v2.4.0 h1:2xyg+Wv60CFHYccXc+moGxbL+8QKT/dZK09AewHgKsg=
|
||||
github.com/blevesearch/bleve/v2 v2.4.0/go.mod h1:IhQHoFAbHgWKYavb9rQgQEJJVMuY99cKdQ0wPpst2aY=
|
||||
github.com/blevesearch/bleve_index_api v1.1.6 h1:orkqDFCBuNU2oHW9hN2YEJmet+TE9orml3FCGbl1cKk=
|
||||
github.com/blevesearch/bleve_index_api v1.1.6/go.mod h1:PbcwjIcRmjhGbkS/lJCpfgVSMROV6TRubGGAODaK1W8=
|
||||
github.com/blevesearch/bleve/v2 v2.4.4 h1:RwwLGjUm54SwyyykbrZs4vc1qjzYic4ZnAnY9TwNl60=
|
||||
github.com/blevesearch/bleve/v2 v2.4.4/go.mod h1:fa2Eo6DP7JR+dMFpQe+WiZXINKSunh7WBtlDGbolKXk=
|
||||
github.com/blevesearch/bleve_index_api v1.1.12 h1:P4bw9/G/5rulOF7SJ9l4FsDoo7UFJ+5kexNy1RXfegY=
|
||||
github.com/blevesearch/bleve_index_api v1.1.12/go.mod h1:PbcwjIcRmjhGbkS/lJCpfgVSMROV6TRubGGAODaK1W8=
|
||||
github.com/blevesearch/geo v0.1.20 h1:paaSpu2Ewh/tn5DKn/FB5SzvH0EWupxHEIwbCk/QPqM=
|
||||
github.com/blevesearch/geo v0.1.20/go.mod h1:DVG2QjwHNMFmjo+ZgzrIq2sfCh6rIHzy9d9d0B59I6w=
|
||||
github.com/blevesearch/go-faiss v1.0.13 h1:zfFs7ZYD0NqXVSY37j0JZjZT1BhE9AE4peJfcx/NB4A=
|
||||
github.com/blevesearch/go-faiss v1.0.13/go.mod h1:jrxHrbl42X/RnDPI+wBoZU8joxxuRwedrxqswQ3xfU8=
|
||||
github.com/blevesearch/go-faiss v1.0.24 h1:K79IvKjoKHdi7FdiXEsAhxpMuns0x4fM0BO93bW5jLI=
|
||||
github.com/blevesearch/go-faiss v1.0.24/go.mod h1:OMGQwOaRRYxrmeNdMrXJPvVx8gBnvE5RYrr0BahNnkk=
|
||||
github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
|
||||
github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
|
||||
github.com/blevesearch/gtreap v0.1.1 h1:2JWigFrzDMR+42WGIN/V2p0cUvn4UP3C4Q5nmaZGW8Y=
|
||||
github.com/blevesearch/gtreap v0.1.1/go.mod h1:QaQyDRAT51sotthUWAH4Sj08awFSSWzgYICSZ3w0tYk=
|
||||
github.com/blevesearch/mmap-go v1.0.4 h1:OVhDhT5B/M1HNPpYPBKIEJaD0F3Si+CrEKULGCDPWmc=
|
||||
github.com/blevesearch/mmap-go v1.0.4/go.mod h1:EWmEAOmdAS9z/pi/+Toxu99DnsbhG1TIxUoRmJw/pSs=
|
||||
github.com/blevesearch/scorch_segment_api/v2 v2.2.9 h1:3nBaSBRFokjE4FtPW3eUDgcAu3KphBg1GP07zy/6Uyk=
|
||||
github.com/blevesearch/scorch_segment_api/v2 v2.2.9/go.mod h1:ckbeb7knyOOvAdZinn/ASbB7EA3HoagnJkmEV3J7+sg=
|
||||
github.com/blevesearch/scorch_segment_api/v2 v2.2.16 h1:uGvKVvG7zvSxCwcm4/ehBa9cCEuZVE+/zvrSl57QUVY=
|
||||
github.com/blevesearch/scorch_segment_api/v2 v2.2.16/go.mod h1:VF5oHVbIFTu+znY1v30GjSpT5+9YFs9dV2hjvuh34F0=
|
||||
github.com/blevesearch/segment v0.9.1 h1:+dThDy+Lvgj5JMxhmOVlgFfkUtZV2kw49xax4+jTfSU=
|
||||
github.com/blevesearch/segment v0.9.1/go.mod h1:zN21iLm7+GnBHWTao9I+Au/7MBiL8pPFtJBJTsk6kQw=
|
||||
github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
|
||||
|
@ -59,17 +59,18 @@ github.com/blevesearch/zapx/v13 v13.3.10 h1:0KY9tuxg06rXxOZHg3DwPJBjniSlqEgVpxIq
|
|||
github.com/blevesearch/zapx/v13 v13.3.10/go.mod h1:w2wjSDQ/WBVeEIvP0fvMJZAzDwqwIEzVPnCPrz93yAk=
|
||||
github.com/blevesearch/zapx/v14 v14.3.10 h1:SG6xlsL+W6YjhX5N3aEiL/2tcWh3DO75Bnz77pSwwKU=
|
||||
github.com/blevesearch/zapx/v14 v14.3.10/go.mod h1:qqyuR0u230jN1yMmE4FIAuCxmahRQEOehF78m6oTgns=
|
||||
github.com/blevesearch/zapx/v15 v15.3.13 h1:6EkfaZiPlAxqXz0neniq35my6S48QI94W/wyhnpDHHQ=
|
||||
github.com/blevesearch/zapx/v15 v15.3.13/go.mod h1:Turk/TNRKj9es7ZpKK95PS7f6D44Y7fAFy8F4LXQtGg=
|
||||
github.com/blevesearch/zapx/v16 v16.0.12 h1:Uccxvjmn+hQ6ywQP+wIiTpdq9LnAviGoryJOmGwAo/I=
|
||||
github.com/blevesearch/zapx/v16 v16.0.12/go.mod h1:MYnOshRfSm4C4drxx1LGRI+MVFByykJ2anDY1fxdk9Q=
|
||||
github.com/blevesearch/zapx/v15 v15.3.16 h1:Ct3rv7FUJPfPk99TI/OofdC+Kpb4IdyfdMH48sb+FmE=
|
||||
github.com/blevesearch/zapx/v15 v15.3.16/go.mod h1:Turk/TNRKj9es7ZpKK95PS7f6D44Y7fAFy8F4LXQtGg=
|
||||
github.com/blevesearch/zapx/v16 v16.1.9-0.20241217210638-a0519e7caf3b h1:ju9Az5YgrzCeK3M1QwvZIpxYhChkXp7/L0RhDYsxXoE=
|
||||
github.com/blevesearch/zapx/v16 v16.1.9-0.20241217210638-a0519e7caf3b/go.mod h1:BlrYNpOu4BvVRslmIG+rLtKhmjIaRhIbG8sb9scGTwI=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/codeclysm/extract v2.2.0+incompatible h1:q3wyckoA30bhUSiwdQezMqVhwd8+WGE64/GL//LtUhI=
|
||||
github.com/codeclysm/extract v2.2.0+incompatible/go.mod h1:2nhFMPHiU9At61hz+12bfrlpXSUrOnK+wR+KlGO4Uks=
|
||||
github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
|
||||
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
|
@ -79,19 +80,18 @@ github.com/cretz/bine v0.2.0/go.mod h1:WU4o9QR9wWp8AVKtTM1XD5vUHkEqnf2vVSo6dBqbe
|
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
|
||||
github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
|
||||
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg=
|
||||
github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g=
|
||||
github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/eyedeekay/goSam v0.32.54 h1:Uq1F9rePGi5aiHZ8J8ZC0HRpf4hvTUR+PJvmcCBpmWU=
|
||||
|
@ -149,12 +149,15 @@ github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4
|
|||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
|
||||
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||
github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
|
||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
|
||||
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
|
||||
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
|
@ -163,9 +166,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
|
|||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
|
@ -182,23 +182,21 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20230808223545-4887780b67fb h1:oqpb3Cwpc7EOml5PVGMYbSGmwNui2R7i8IW83gs4W0c=
|
||||
github.com/google/pprof v0.0.0-20230808223545-4887780b67fb/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
|
||||
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
|
||||
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hjson/hjson-go/v4 v4.4.0 h1:D/NPvqOCH6/eisTb5/ztuIS8GUvmpHaLOcNk1Bjr298=
|
||||
github.com/hjson/hjson-go/v4 v4.4.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
|
@ -212,8 +210,8 @@ github.com/kardianos/minwinsvc v1.0.2/go.mod h1:LUZNYhNmxujx2tR7FbdxqYJ9XDDoCd3M
|
|||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
|
@ -221,6 +219,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
|
@ -232,8 +232,8 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw
|
|||
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1:e+cg2q7C7yE5QnAXgzo512tgFh1RbQLC0+jozuegKgo=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 h1:kHKxCOLcHH8r4Fzarl4+Y3K5hjothkVW5z7T1dUM11U=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20240910190622-2c764912ce93 h1:FbyZ/xkeBVYHi2xfwAVaNmDhP+4HNbt9e6ucOR+jvBk=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20240910190622-2c764912ce93/go.mod h1:HZGsVJ3bUE+DkZtufkH9H0mlsvbhEGK5CpX0Zlavylg=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20250116181547-c4f1e01eab0d h1:c3Dkci0GDH/6cGGt8zGIiJMP+UOdtX0DPY6dxiJvtZM=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20250116181547-c4f1e01eab0d/go.mod h1:qil34SWn6VB6gO5312rzziCUcZtgROPjrLE+4ly/0os=
|
||||
github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7 h1:6t8kJr8i1/1I5nNttw6nn1ryQJgzVlBmSGgPiiaTdw4=
|
||||
github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7/go.mod h1:ReWMS/LoVnOiRAdq9sNUC2NZnd1mZkMNB52QhpTRWjg=
|
||||
github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 h1:6z4KxomXSIGWqhHcfzExgkH3Z3UkIXry4ibJS4Aqz2Y=
|
||||
|
@ -244,14 +244,16 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
|||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
|
||||
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
|
||||
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -264,14 +266,16 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
|||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
|
||||
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
|
||||
github.com/nats-io/jwt/v2 v2.5.8 h1:uvdSzwWiEGWGXf+0Q+70qv6AQdvcvxrv9hPM0RiPamE=
|
||||
github.com/nats-io/jwt/v2 v2.5.8/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
|
||||
github.com/nats-io/nats-server/v2 v2.10.20 h1:CXDTYNHeBiAKBTAIP2gjpgbWap2GhATnTLgP8etyvEI=
|
||||
github.com/nats-io/nats-server/v2 v2.10.20/go.mod h1:hgcPnoUtMfxz1qVOvLZGurVypQ+Cg6GXVXjG53iHk+M=
|
||||
github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
|
||||
github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
|
||||
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
|
||||
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
|
||||
github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
|
||||
github.com/nats-io/nats-server/v2 v2.10.25 h1:J0GWLDDXo5HId7ti/lTmBfs+lzhmu8RPkoKl0eSCqwc=
|
||||
github.com/nats-io/nats-server/v2 v2.10.25/go.mod h1:/YYYQO7cuoOBt+A7/8cVjuhWTaTUEAlZbJT+3sMAfFU=
|
||||
github.com/nats-io/nats.go v1.38.0 h1:A7P+g7Wjp4/NWqDOOP/K6hfhr54DvdDQUznt5JFg9XA=
|
||||
github.com/nats-io/nats.go v1.38.0/go.mod h1:IGUM++TwokGnXPs82/wCuiHS02/aKrdYUQkU8If6yjw=
|
||||
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
|
||||
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
|
@ -300,16 +304,16 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY=
|
||||
github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
|
||||
github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
|
@ -329,8 +333,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
|
|||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
|
@ -352,13 +356,14 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM
|
|||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
|
||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.5.6 h1:thh5YQYXQgkhkSO6v2D9Ya9fLHXfY38VfsCTZTIbIeI=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.5.6/go.mod h1:WAqMZ4e1QSf/EKbzfD77XXTSAIRO/0nWKCVpHsKLg40=
|
||||
github.com/yggdrasil-network/yggquic v0.0.0-20240802104827-b4e97a928967 h1:IxtZy4a4ZFYc1OiEv1VUc8u4Xl1WF6986wfu1DbY/SI=
|
||||
github.com/yggdrasil-network/yggquic v0.0.0-20240802104827-b4e97a928967/go.mod h1:RVLAuYojgYebPO/fJwWRSVlzKLXbZzZpWAStnBwiSsk=
|
||||
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
||||
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.5.12 h1:SaQ8d59JP+uFy+nOWXTx1ETM5r2uCfe1Gt/d+IodHJw=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.5.12/go.mod h1:u4DU6dpTfWmVs8r0WjW1T3UpGyeUh9vRrS8zngvncwM=
|
||||
github.com/yggdrasil-network/yggquic v0.0.0-20241212194307-0d495106021f h1:nqinj7N9gyDNKvSAoQK8OTg1RnEE5Bu/01oaC1TMT1o=
|
||||
github.com/yggdrasil-network/yggquic v0.0.0-20241212194307-0d495106021f/go.mod h1:TVCKOUWiXR9cAqr3eDpKvXkVkTph38xwk0wjcvfrtKI=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
|
@ -387,18 +392,15 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
|
||||
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
|
@ -410,29 +412,21 @@ golang.org/x/mobile v0.0.0-20240520174638-fa72addaaa1b/go.mod h1:EiXZlVfUTaAyySF
|
|||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
|
||||
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -440,47 +434,29 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
|
||||
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
|
||||
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
|
||||
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
|
||||
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -488,11 +464,8 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -528,24 +501,32 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
|
||||
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
maunium.net/go/maulogger/v2 v2.4.1 h1:N7zSdd0mZkB2m2JtFUsiGTQQAdP0YeFWT7YMc80yAL8=
|
||||
maunium.net/go/maulogger/v2 v2.4.1/go.mod h1:omPuYwYBILeVQobz8uO3XC8DIRuEb5rXYlQSuqrbCho=
|
||||
maunium.net/go/mautrix v0.15.1 h1:pmCtMjYRpd83+2UL+KTRFYQo5to0373yulimvLK+1k0=
|
||||
maunium.net/go/mautrix v0.15.1/go.mod h1:icQIrvz2NldkRLTuzSGzmaeuMUmw+fzO7UVycPeauN8=
|
||||
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
|
||||
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
|
||||
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
|
||||
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
|
||||
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
|
||||
modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk=
|
||||
modernc.org/libc v1.41.0/go.mod h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY=
|
||||
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
|
||||
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
|
||||
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
|
||||
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
|
||||
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
|
||||
modernc.org/sqlite v1.29.5 h1:8l/SQKAjDtZFo9lkJLdk8g9JEOeYRG4/ghStDCCTiTE=
|
||||
modernc.org/sqlite v1.29.5/go.mod h1:S02dvcmm7TnTRvGhv8IGYyLnIt7AS2KPaB1F/71p75U=
|
||||
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
|
||||
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
|
||||
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
|
||||
modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
|
||||
modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
|
|
|
@ -1,7 +1,7 @@
apiVersion: v2
name: dendrite
version: "0.14.6"
appVersion: "0.13.8"
version: "0.15.1"
appVersion: "0.14.1"
description: Dendrite Matrix Homeserver
type: application
icon: https://avatars.githubusercontent.com/u/8418310?s=48&v=4

@ -1,7 +1,7 @@

# dendrite

Dendrite Matrix Homeserver

Status: **NOT PRODUCTION READY**

@ -59,11 +59,15 @@ Create a folder `appservices` and place your configurations in there. The confi
| persistence.search.existingClaim | string | `""` | Use an existing volume claim for the fulltext search index |
| persistence.search.capacity | string | `"1Gi"` | PVC Storage Request for the search volume |
| persistence.search.storageClass | string | `nil` | The storage class to use for volume claims. Defaults to persistence.storageClass If defined, storageClassName: <storageClass> If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) |
| extraArgs | list | `[]` | Add additional arguments to the dendrite command |
| extraVolumes | list | `[]` | Add additional volumes to the Dendrite Pod |
| extraVolumeMounts | list | `[]` | Configure additional volume mount points in the Dendrite Pod |
| strategy.type | string | `"Recreate"` | Strategy to use for rolling updates (e.g. Recreate, RollingUpdate) If you are using ReadWriteOnce volumes, you should probably use Recreate |
| strategy.rollingUpdate.maxUnavailable | string | `"25%"` | Maximum number of pods that can be unavailable during the update process |
| strategy.rollingUpdate.maxSurge | string | `"25%"` | Maximum number of pods that can be scheduled above the desired number of pods |
| nodeSelector | object | `{}` | Node selector configuration |
| tolerations | object | `{}` | Tolerations configuration |
| affinity | object | `{}` | Affinity configuration |
| dendrite_config.version | int | `2` | |
| dendrite_config.global.server_name | string | `""` | **REQUIRED** Servername for this Dendrite deployment. |
| dendrite_config.global.private_key | string | `"/etc/dendrite/secrets/signing.key"` | The private key to use. (**NOTE**: This is overridden in Helm) |

@ -190,5 +194,3 @@ grafana:
```
PS: The label `release=kube-prometheus-stack` is set up by the Helm chart of the Prometheus Operator. For Grafana dashboards it may be necessary to enable scanning in the correct namespaces (or ALL) via `sidecar.dashboards.searchNamespace` in the [Grafana Helm chart](https://artifacthub.io/packages/helm/grafana/grafana) (which is part of the Prometheus Operator stack, so `grafana.sidecar.dashboards.searchNamespace`).

----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

@ -123,7 +123,7 @@ func addPrevEventsToEvent(
|
|||
|
||||
builder.Depth = queryRes.Depth
|
||||
|
||||
authEvents := gomatrixserverlib.NewAuthEvents(nil)
|
||||
authEvents, _ := gomatrixserverlib.NewAuthEvents(nil)
|
||||
|
||||
for i := range queryRes.StateEvents {
|
||||
err := authEvents.AddEvent(queryRes.StateEvents[i].PDU)
|
||||
|
@ -132,7 +132,7 @@ func addPrevEventsToEvent(
|
|||
}
|
||||
}
|
||||
|
||||
refs, err := eventsNeeded.AuthEventReferences(&authEvents)
|
||||
refs, err := eventsNeeded.AuthEventReferences(authEvents)
|
||||
if err != nil {
|
||||
return fmt.Errorf("eventsNeeded.AuthEventReferences: %w", err)
|
||||
}
|
||||
|
|
|
@ -216,13 +216,17 @@ func (t *TxnReq) processEDUs(ctx context.Context) {
|
|||
util.GetLogger(ctx).WithError(err).Debug("Failed to unmarshal typing event")
|
||||
continue
|
||||
}
|
||||
if _, serverName, err := gomatrixserverlib.SplitID('@', typingPayload.UserID); err != nil {
|
||||
_, serverName, err := gomatrixserverlib.SplitID('@', typingPayload.UserID)
|
||||
if err != nil {
|
||||
continue
|
||||
} else if serverName == t.ourServerName {
|
||||
continue
|
||||
} else if serverName != t.Origin {
|
||||
continue
|
||||
}
|
||||
if api.IsServerBannedFromRoom(ctx, t.rsAPI, typingPayload.RoomID, serverName) {
|
||||
continue
|
||||
}
|
||||
if err := t.producer.SendTyping(ctx, typingPayload.UserID, typingPayload.RoomID, typingPayload.Typing, 30*1000); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("Failed to send typing event to JetStream")
|
||||
}
|
||||
|
@ -278,6 +282,9 @@ func (t *TxnReq) processEDUs(ctx context.Context) {
|
|||
util.GetLogger(ctx).Debugf("Dropping receipt event where sender domain (%q) doesn't match origin (%q)", domain, t.Origin)
|
||||
continue
|
||||
}
|
||||
if api.IsServerBannedFromRoom(ctx, t.rsAPI, roomID, domain) {
|
||||
continue
|
||||
}
|
||||
if err := t.processReceiptEvent(ctx, userID, roomID, "m.read", mread.Data.TS, mread.EventIDs); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
|
||||
"sender": t.Origin,
|
||||
|
|
|
@ -17,8 +17,8 @@ var build string

const (
	VersionMajor = 0
	VersionMinor = 13
	VersionPatch = 8
	VersionMinor = 14
	VersionPatch = 1
	VersionTag   = "" // example: "rc1"

	gitRevLen = 7 // 7 matches the displayed characters on github.com

@ -14,9 +14,9 @@ import (
|
|||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/element-hq/dendrite/roomserver/storage/tables"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/gomatrixserverlib/spec"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -27,9 +27,8 @@ type ServerACLDatabase interface {
|
|||
// RoomsWithACLs returns all room IDs for rooms with ACLs
|
||||
RoomsWithACLs(ctx context.Context) ([]string, error)
|
||||
|
||||
// GetBulkStateContent returns all state events which match a given room ID and a given state key tuple. Both must be satisfied for a match.
|
||||
// If a tuple has the StateKey of '*' and allowWildcards=true then all state events with the EventType should be returned.
|
||||
GetBulkStateContent(ctx context.Context, roomIDs []string, tuples []gomatrixserverlib.StateKeyTuple, allowWildcards bool) ([]tables.StrippedEvent, error)
|
||||
// GetBulkStateACLs returns all server ACLs for the given rooms.
|
||||
GetBulkStateACLs(ctx context.Context, roomIDs []string) ([]tables.StrippedEvent, error)
|
||||
}
|
||||
|
||||
type ServerACLs struct {
|
||||
|
@ -40,6 +39,16 @@ type ServerACLs struct {
|
|||
}
|
||||
|
||||
func NewServerACLs(db ServerACLDatabase) *ServerACLs {
|
||||
// Add some logging, as this can take a while on larger instances.
|
||||
logrus.Infof("Loading server ACLs...")
|
||||
start := time.Now()
|
||||
aclCount := 0
|
||||
defer func() {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"acls": aclCount,
|
||||
}).Info("Finished loading server ACLs")
|
||||
}()
|
||||
ctx := context.TODO()
|
||||
acls := &ServerACLs{
|
||||
acls: make(map[string]*serverACL),
|
||||
|
@ -48,20 +57,25 @@ func NewServerACLs(db ServerACLDatabase) *ServerACLs {
|
|||
aclRegexCache: make(map[string]**regexp.Regexp, 100),
|
||||
}
|
||||
|
||||
// Look up all of the rooms that the current state server knows about.
|
||||
// Look up all rooms with ACLs.
|
||||
rooms, err := db.RoomsWithACLs(ctx)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Fatalf("Failed to get known rooms")
|
||||
}
|
||||
// For each room, let's see if we have a server ACL state event. If we
|
||||
// do then we'll process it into memory so that we have the regexes to
|
||||
// hand.
|
||||
|
||||
events, err := db.GetBulkStateContent(ctx, rooms, []gomatrixserverlib.StateKeyTuple{{EventType: MRoomServerACL, StateKey: ""}}, false)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Errorf("Failed to get server ACLs for all rooms: %q", err)
|
||||
// No rooms with ACLs, don't bother hitting the DB again.
|
||||
if len(rooms) == 0 {
|
||||
return acls
|
||||
}
|
||||
|
||||
// Get ACLs for the required rooms, bail if we are unable to get them.
|
||||
events, err := db.GetBulkStateACLs(ctx, rooms)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Fatal("Failed to get server ACLs for all rooms")
|
||||
}
|
||||
|
||||
aclCount = len(events)
|
||||
|
||||
for _, event := range events {
|
||||
acls.OnServerACLUpdate(event)
|
||||
}
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/element-hq/dendrite/roomserver/storage/tables"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/gomatrixserverlib/spec"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
@ -108,11 +107,11 @@ var (
|
|||
|
||||
type dummyACLDB struct{}
|
||||
|
||||
func (d dummyACLDB) RoomsWithACLs(ctx context.Context) ([]string, error) {
|
||||
func (d dummyACLDB) RoomsWithACLs(_ context.Context) ([]string, error) {
|
||||
return []string{"1", "2"}, nil
|
||||
}
|
||||
|
||||
func (d dummyACLDB) GetBulkStateContent(ctx context.Context, roomIDs []string, tuples []gomatrixserverlib.StateKeyTuple, allowWildcards bool) ([]tables.StrippedEvent, error) {
|
||||
func (d dummyACLDB) GetBulkStateACLs(_ context.Context, _ []string) ([]tables.StrippedEvent, error) {
|
||||
return []tables.StrippedEvent{
|
||||
{
|
||||
RoomID: "1",
|
||||
|
|
|
@ -205,9 +205,27 @@ func (r *Inputer) processRoomEvent(
|
|||
}
|
||||
}
|
||||
|
||||
// Check that the auth events of the event are known.
|
||||
// If they aren't then we will ask the federation API for them.
|
||||
authEvents, _ := gomatrixserverlib.NewAuthEvents(nil)
|
||||
knownEvents := map[string]*types.Event{}
|
||||
if err = r.fetchAuthEvents(ctx, logger, roomInfo, virtualHost, headered, authEvents, knownEvents, serverRes.ServerNames); err != nil {
|
||||
return fmt.Errorf("r.fetchAuthEvents: %w", err)
|
||||
}
|
||||
|
||||
isRejected := false
|
||||
var rejectionErr error
|
||||
|
||||
// Check if the event is allowed by its auth events. If it isn't then
|
||||
// we consider the event to be "rejected" — it will still be persisted.
|
||||
if err = gomatrixserverlib.Allowed(event, authEvents, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
return r.Queryer.QueryUserIDForSender(ctx, roomID, senderID)
|
||||
}); err != nil {
|
||||
isRejected = true
|
||||
rejectionErr = err
|
||||
logger.WithError(rejectionErr).Warnf("Event %s not allowed by auth events", event.EventID())
|
||||
}
|
||||
|
||||
// At this point we are checking whether we know all of the prev events, and
|
||||
// if we know the state before the prev events. This is necessary before we
|
||||
// try to do `calculateAndSetState` on the event later, otherwise it will fail
|
||||
|
@ -283,24 +301,6 @@ func (r *Inputer) processRoomEvent(
|
|||
}
|
||||
}
|
||||
|
||||
// Check that the auth events of the event are known.
|
||||
// If they aren't then we will ask the federation API for them.
|
||||
authEvents := gomatrixserverlib.NewAuthEvents(nil)
|
||||
knownEvents := map[string]*types.Event{}
|
||||
if err = r.fetchAuthEvents(ctx, logger, roomInfo, virtualHost, headered, &authEvents, knownEvents, serverRes.ServerNames); err != nil {
|
||||
return fmt.Errorf("r.fetchAuthEvents: %w", err)
|
||||
}
|
||||
|
||||
// Check if the event is allowed by its auth events. If it isn't then
|
||||
// we consider the event to be "rejected" — it will still be persisted.
|
||||
if err = gomatrixserverlib.Allowed(event, &authEvents, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
return r.Queryer.QueryUserIDForSender(ctx, roomID, senderID)
|
||||
}); err != nil {
|
||||
isRejected = true
|
||||
rejectionErr = err
|
||||
logger.WithError(rejectionErr).Warnf("Event %s not allowed by auth events", event.EventID())
|
||||
}
|
||||
|
||||
// Accumulate the auth event NIDs.
|
||||
authEventIDs := event.AuthEventIDs()
|
||||
authEventNIDs := make([]types.EventNID, 0, len(authEventIDs))
|
||||
|
@ -323,7 +323,7 @@ func (r *Inputer) processRoomEvent(
|
|||
)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
} else if !knownEvents[authEventID].Rejected {
|
||||
authEventNIDs = append(authEventNIDs, knownEvents[authEventID].EventNID)
|
||||
}
|
||||
}
|
||||
|
@ -584,7 +584,8 @@ func (r *Inputer) processStateBefore(
|
|||
case input.HasState:
|
||||
// If we're overriding the state then we need to go and retrieve
|
||||
// them from the database. It's a hard error if they are missing.
|
||||
stateEvents, err := r.DB.EventsFromIDs(ctx, roomInfo, input.StateEventIDs)
|
||||
var stateEvents []types.Event
|
||||
stateEvents, err = r.DB.EventsFromIDs(ctx, roomInfo, input.StateEventIDs)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("r.DB.EventsFromIDs: %w", err)
|
||||
}
|
||||
|
@ -620,7 +621,7 @@ func (r *Inputer) processStateBefore(
|
|||
StateToFetch: tuplesNeeded,
|
||||
}
|
||||
stateBeforeRes := &api.QueryStateAfterEventsResponse{}
|
||||
if err := r.Queryer.QueryStateAfterEvents(ctx, stateBeforeReq, stateBeforeRes); err != nil {
|
||||
if err = r.Queryer.QueryStateAfterEvents(ctx, stateBeforeReq, stateBeforeRes); err != nil {
|
||||
return "", nil, fmt.Errorf("r.Queryer.QueryStateAfterEvents: %w", err)
|
||||
}
|
||||
switch {
|
||||
|
@ -640,10 +641,15 @@ func (r *Inputer) processStateBefore(
|
|||
// At this point, stateBeforeEvent should be populated either by
|
||||
// the supplied state in the input request, or from the prev events.
|
||||
// Check whether the event is allowed or not.
|
||||
stateBeforeAuth := gomatrixserverlib.NewAuthEvents(
|
||||
var stateBeforeAuth *gomatrixserverlib.AuthEvents
|
||||
stateBeforeAuth, err = gomatrixserverlib.NewAuthEvents(
|
||||
gomatrixserverlib.ToPDUs(stateBeforeEvent),
|
||||
)
|
||||
if rejectionErr = gomatrixserverlib.Allowed(event, &stateBeforeAuth, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
if err != nil {
|
||||
rejectionErr = fmt.Errorf("NewAuthEvents failed: %w", err)
|
||||
return
|
||||
}
|
||||
if rejectionErr = gomatrixserverlib.Allowed(event, stateBeforeAuth, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
return r.Queryer.QueryUserIDForSender(ctx, roomID, senderID)
|
||||
}); rejectionErr != nil {
|
||||
rejectionErr = fmt.Errorf("Allowed() failed for stateBeforeEvent: %w", rejectionErr)
|
||||
|
@ -698,15 +704,14 @@ func (r *Inputer) fetchAuthEvents(
|
|||
}
|
||||
ev := authEvents[0]
|
||||
|
||||
isRejected := false
|
||||
if roomInfo != nil {
|
||||
isRejected, err = r.DB.IsEventRejected(ctx, roomInfo.RoomNID, ev.EventID())
|
||||
ev.Rejected, err = r.DB.IsEventRejected(ctx, roomInfo.RoomNID, ev.EventID())
|
||||
if err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
return fmt.Errorf("r.DB.IsEventRejected failed: %w", err)
|
||||
}
|
||||
}
|
||||
known[authEventID] = &ev // don't take the pointer of the iterated event
|
||||
if !isRejected {
|
||||
if !ev.Rejected {
|
||||
if err = auth.AddEvent(ev.PDU); err != nil {
|
||||
return fmt.Errorf("auth.AddEvent: %w", err)
|
||||
}
|
||||
|
@ -738,8 +743,13 @@ func (r *Inputer) fetchAuthEvents(
|
|||
return fmt.Errorf("no servers provided event auth for event ID %q, tried servers %v", event.EventID(), servers)
|
||||
}
|
||||
|
||||
// Start with a clean state and see if we can auth with what the remote
|
||||
// server told us. Otherwise earlier topologically sorted events could
|
||||
// fail to be authed by more recent referenced ones.
|
||||
auth.Clear()
|
||||
|
||||
// Reuse these to reduce allocations.
|
||||
authEventNIDs := make([]types.EventNID, 0, 5)
|
||||
_authEventNIDs := [5]types.EventNID{}
|
||||
isRejected := false
|
||||
nextAuthEvent:
|
||||
for _, authEvent := range gomatrixserverlib.ReverseTopologicalOrdering(
|
||||
|
@ -749,7 +759,11 @@ nextAuthEvent:
|
|||
// If we already know about this event from the database then we don't
|
||||
// need to store it again or do anything further with it, so just skip
|
||||
// over it rather than wasting cycles.
|
||||
if ev, ok := known[authEvent.EventID()]; ok && ev != nil {
|
||||
if ev, ok := known[authEvent.EventID()]; ok && ev != nil && !ev.Rejected {
|
||||
// Need to add to the auth set for the next event being processed.
|
||||
if err := auth.AddEvent(authEvent); err != nil {
|
||||
return fmt.Errorf("auth.AddEvent: %w", err)
|
||||
}
|
||||
continue nextAuthEvent
|
||||
}
|
||||
|
||||
|
@ -764,11 +778,11 @@ nextAuthEvent:
|
|||
|
||||
// In order to store the new auth event, we need to know its auth chain
|
||||
// as NIDs for the `auth_event_nids` column. Let's see if we can find those.
|
||||
authEventNIDs = authEventNIDs[:0]
|
||||
authEventNIDs := _authEventNIDs[:0]
|
||||
for _, eventID := range authEvent.AuthEventIDs() {
|
||||
knownEvent, ok := known[eventID]
|
||||
if !ok {
|
||||
continue nextAuthEvent
|
||||
return fmt.Errorf("auth event ID %s not known but should be", eventID)
|
||||
}
|
||||
authEventNIDs = append(authEventNIDs, knownEvent.EventNID)
|
||||
}
|
||||
|
@ -815,6 +829,7 @@ nextAuthEvent:
|
|||
// Now we know about this event, it was stored and the signatures were OK.
|
||||
known[authEvent.EventID()] = &types.Event{
|
||||
EventNID: eventNID,
|
||||
Rejected: isRejected,
|
||||
PDU: authEvent,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,7 +50,7 @@ func Test_EventAuth(t *testing.T) {
|
|||
}, test.WithStateKey(bob.ID), test.WithAuthIDs(authEventIDs))
|
||||
|
||||
// Add the auth events to the allower
|
||||
allower := gomatrixserverlib.NewAuthEvents(nil)
|
||||
allower, _ := gomatrixserverlib.NewAuthEvents(nil)
|
||||
for _, a := range authEvents {
|
||||
if err := allower.AddEvent(a); err != nil {
|
||||
t.Fatalf("allower.AddEvent failed: %v", err)
|
||||
|
@ -58,7 +58,7 @@ func Test_EventAuth(t *testing.T) {
|
|||
}
|
||||
|
||||
// Finally check that the event is NOT allowed
|
||||
if err := gomatrixserverlib.Allowed(ev.PDU, &allower, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
if err := gomatrixserverlib.Allowed(ev.PDU, allower, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
return spec.NewUserID(string(senderID), true)
|
||||
}); err == nil {
|
||||
t.Fatalf("event should not be allowed, but it was")
|
||||
|
|
|
@ -961,14 +961,14 @@ serverLoop:
|
|||
}
|
||||
|
||||
func checkAllowedByState(e gomatrixserverlib.PDU, stateEvents []gomatrixserverlib.PDU, userIDForSender spec.UserIDForSender) error {
|
||||
authUsingState := gomatrixserverlib.NewAuthEvents(nil)
|
||||
authUsingState, _ := gomatrixserverlib.NewAuthEvents(nil)
|
||||
for i := range stateEvents {
|
||||
err := authUsingState.AddEvent(stateEvents[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return gomatrixserverlib.Allowed(e, &authUsingState, userIDForSender)
|
||||
return gomatrixserverlib.Allowed(e, authUsingState, userIDForSender)
|
||||
}
|
||||
|
||||
func (t *missingStateReq) hadEvent(eventID string) {
|
||||
|
|
|
@ -343,7 +343,7 @@ func (c *Creator) PerformCreateRoom(ctx context.Context, userID spec.UserID, roo
|
|||
// TODO: 3pid invite events
|
||||
|
||||
var builtEvents []*types.HeaderedEvent
|
||||
authEvents := gomatrixserverlib.NewAuthEvents(nil)
|
||||
authEvents, _ := gomatrixserverlib.NewAuthEvents(nil)
|
||||
if err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("rsapi.QuerySenderIDForUser failed")
|
||||
return "", &util.JSONResponse{
|
||||
|
@ -373,7 +373,7 @@ func (c *Creator) PerformCreateRoom(ctx context.Context, userID spec.UserID, roo
|
|||
builder.PrevEvents = []string{builtEvents[i-1].EventID()}
|
||||
}
|
||||
var ev gomatrixserverlib.PDU
|
||||
if err = builder.AddAuthEvents(&authEvents); err != nil {
|
||||
if err = builder.AddAuthEvents(authEvents); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("AddAuthEvents failed")
|
||||
return "", &util.JSONResponse{
|
||||
Code: http.StatusInternalServerError,
|
||||
|
@ -389,7 +389,7 @@ func (c *Creator) PerformCreateRoom(ctx context.Context, userID spec.UserID, roo
|
|||
}
|
||||
}
|
||||
|
||||
if err = gomatrixserverlib.Allowed(ev, &authEvents, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
if err = gomatrixserverlib.Allowed(ev, authEvents, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
return c.RSAPI.QueryUserIDForSender(ctx, roomID, senderID)
|
||||
}); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("gomatrixserverlib.Allowed failed")
|
||||
|
|
|
@ -470,7 +470,7 @@ func (r *Upgrader) generateInitialEvents(ctx context.Context, oldRoom *api.Query
|
|||
func (r *Upgrader) sendInitialEvents(ctx context.Context, evTime time.Time, senderID spec.SenderID, userDomain spec.ServerName, newRoomID string, newVersion gomatrixserverlib.RoomVersion, eventsToMake []gomatrixserverlib.FledglingEvent) error {
|
||||
var err error
|
||||
var builtEvents []*types.HeaderedEvent
|
||||
authEvents := gomatrixserverlib.NewAuthEvents(nil)
|
||||
authEvents, _ := gomatrixserverlib.NewAuthEvents(nil)
|
||||
for i, e := range eventsToMake {
|
||||
depth := i + 1 // depth starts at 1
|
||||
|
||||
|
@ -495,7 +495,7 @@ func (r *Upgrader) sendInitialEvents(ctx context.Context, evTime time.Time, send
|
|||
return err
|
||||
}
|
||||
builder := verImpl.NewEventBuilderFromProtoEvent(&proto)
|
||||
if err = builder.AddAuthEvents(&authEvents); err != nil {
|
||||
if err = builder.AddAuthEvents(authEvents); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -506,7 +506,7 @@ func (r *Upgrader) sendInitialEvents(ctx context.Context, evTime time.Time, send
|
|||
|
||||
}
|
||||
|
||||
if err = gomatrixserverlib.Allowed(event, &authEvents, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
if err = gomatrixserverlib.Allowed(event, authEvents, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
return r.URSAPI.QueryUserIDForSender(ctx, roomID, senderID)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("Failed to auth new %q event: %w", builder.Type, err)
|
||||
|
@ -586,8 +586,11 @@ func (r *Upgrader) makeHeaderedEvent(ctx context.Context, evTime time.Time, send
|
|||
for i := range queryRes.StateEvents {
|
||||
stateEvents[i] = queryRes.StateEvents[i].PDU
|
||||
}
|
||||
provider := gomatrixserverlib.NewAuthEvents(stateEvents)
|
||||
if err = gomatrixserverlib.Allowed(headeredEvent.PDU, &provider, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
provider, err := gomatrixserverlib.NewAuthEvents(stateEvents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = gomatrixserverlib.Allowed(headeredEvent.PDU, provider, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
|
||||
return r.URSAPI.QueryUserIDForSender(ctx, roomID, senderID)
|
||||
}); err != nil {
|
||||
return nil, api.ErrNotAllowed{Err: fmt.Errorf("failed to auth new %q event: %w", proto.Type, err)} // TODO: Is this error string comprehensible to the client?
|
||||
|
|
|
@ -187,6 +187,8 @@ type Database interface {

	// RoomsWithACLs returns all room IDs for rooms with ACLs
	RoomsWithACLs(ctx context.Context) ([]string, error)
	// GetBulkStateACLs returns all server ACLs for the given rooms.
	GetBulkStateACLs(ctx context.Context, roomIDs []string) ([]tables.StrippedEvent, error)
	QueryAdminEventReports(ctx context.Context, from uint64, limit uint64, backwards bool, userID string, roomID string) ([]api.QueryAdminEventReportsResponse, int64, error)
	QueryAdminEventReport(ctx context.Context, reportID uint64) (api.QueryAdminEventReportResponse, error)
	AdminDeleteEventReport(ctx context.Context, reportID uint64) error

@ -1437,6 +1437,63 @@ func (d *Database) GetRoomsByMembership(ctx context.Context, userID spec.UserID,
|
|||
return roomIDs, nil
|
||||
}
|
||||
|
||||
// GetBulkStateACLs is a lighter weight form of GetBulkStateContent, which only returns ACL state events.
|
||||
func (d *Database) GetBulkStateACLs(ctx context.Context, roomIDs []string) ([]tables.StrippedEvent, error) {
|
||||
tuples := []gomatrixserverlib.StateKeyTuple{{EventType: "m.room.server_acl", StateKey: ""}}
|
||||
|
||||
var eventNIDs []types.EventNID
|
||||
eventNIDToVer := make(map[types.EventNID]gomatrixserverlib.RoomVersion)
|
||||
// TODO: This feels like this is going to be really slow...
|
||||
for _, roomID := range roomIDs {
|
||||
roomInfo, err2 := d.roomInfo(ctx, nil, roomID)
|
||||
if err2 != nil {
|
||||
return nil, fmt.Errorf("GetBulkStateACLs: failed to load room info for room %s : %w", roomID, err2)
|
||||
}
|
||||
// for unknown rooms or rooms which we don't have the current state, skip them.
|
||||
if roomInfo == nil || roomInfo.IsStub() {
|
||||
continue
|
||||
}
|
||||
// No querier needed, as we don't actually do state resolution
|
||||
stateRes := state.NewStateResolution(d, roomInfo, nil)
|
||||
entries, err2 := stateRes.LoadStateAtSnapshotForStringTuples(ctx, roomInfo.StateSnapshotNID(), tuples)
|
||||
if err2 != nil {
|
||||
return nil, fmt.Errorf("GetBulkStateACLs: failed to load state for room %s : %w", roomID, err2)
|
||||
}
|
||||
for _, entry := range entries {
|
||||
eventNIDs = append(eventNIDs, entry.EventNID)
|
||||
eventNIDToVer[entry.EventNID] = roomInfo.RoomVersion
|
||||
}
|
||||
}
|
||||
eventIDs, err := d.EventsTable.BulkSelectEventID(ctx, nil, eventNIDs)
|
||||
if err != nil {
|
||||
eventIDs = map[types.EventNID]string{}
|
||||
}
|
||||
events, err := d.EventJSONTable.BulkSelectEventJSON(ctx, nil, eventNIDs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetBulkStateACLs: failed to load event JSON for event nids: %w", err)
|
||||
}
|
||||
result := make([]tables.StrippedEvent, len(events))
|
||||
for i := range events {
|
||||
roomVer := eventNIDToVer[events[i].EventNID]
|
||||
verImpl, err := gomatrixserverlib.GetRoomVersion(roomVer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ev, err := verImpl.NewEventFromTrustedJSONWithEventID(eventIDs[events[i].EventNID], events[i].EventJSON, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetBulkStateACLs: failed to load event JSON for event NID %v : %w", events[i].EventNID, err)
|
||||
}
|
||||
result[i] = tables.StrippedEvent{
|
||||
EventType: ev.Type(),
|
||||
RoomID: ev.RoomID().String(),
|
||||
StateKey: *ev.StateKey(),
|
||||
ContentValue: tables.ExtractContentValue(&types.HeaderedEvent{PDU: ev}),
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetBulkStateContent returns all state events which match a given room ID and a given state key tuple. Both must be satisfied for a match.
|
||||
// If a tuple has the StateKey of '*' and allowWildcards=true then all state events with the EventType should be returned.
|
||||
func (d *Database) GetBulkStateContent(ctx context.Context, roomIDs []string, tuples []gomatrixserverlib.StateKeyTuple, allowWildcards bool) ([]tables.StrippedEvent, error) {
|
||||
|
@ -1487,6 +1544,9 @@ func (d *Database) GetBulkStateContent(ctx context.Context, roomIDs []string, tu
|
|||
if roomInfo == nil || roomInfo.IsStub() {
|
||||
continue
|
||||
}
|
||||
// TODO: This is inefficient as we're loading the _entire_ state, but only care about a subset of it.
|
||||
// This is why GetBulkStateACLs exists. LoadStateAtSnapshotForStringTuples only loads the state we care about,
|
||||
// but is unfortunately not able to load wildcard state keys.
|
||||
entries, err2 := d.loadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID())
|
||||
if err2 != nil {
|
||||
return nil, fmt.Errorf("GetBulkStateContent: failed to load state for room %s : %w", roomID, err2)
|
||||
|
|
|
@ -228,6 +228,7 @@ func (s StateAtEventAndReferences) EventIDs() string {
// It is when performing bulk event lookup in the database.
type Event struct {
	EventNID EventNID
	Rejected bool
	gomatrixserverlib.PDU
}

@ -82,6 +82,7 @@ func CreateFederationClient(cfg *config.Dendrite, dnsCache *fclient.DNSCache) fc
		fclient.WithSkipVerify(cfg.FederationAPI.DisableTLSValidation),
		fclient.WithKeepAlives(!cfg.FederationAPI.DisableHTTPKeepalives),
		fclient.WithUserAgent(fmt.Sprintf("Dendrite/%s", internal.VersionString())),
		fclient.WithAllowDenyNetworks(cfg.FederationAPI.AllowNetworkCIDRs, cfg.FederationAPI.DenyNetworkCIDRs),
	}
	if cfg.Global.DNSCache.Enabled {
		opts = append(opts, fclient.WithDNSCache(dnsCache))

@ -46,6 +46,10 @@ type FederationAPI struct {

	// Should we prefer direct key fetches over perspective ones?
	PreferDirectFetch bool `yaml:"prefer_direct_fetch"`

	// Deny/Allow lists used for restricting request scopes.
	DenyNetworkCIDRs  []string `yaml:"deny_networks"`
	AllowNetworkCIDRs []string `yaml:"allow_networks"`
}

func (c *FederationAPI) Defaults(opts DefaultOpts) {

@ -53,6 +57,20 @@ func (c *FederationAPI) Defaults(opts DefaultOpts) {
	c.P2PFederationRetriesUntilAssumedOffline = 1
	c.DisableTLSValidation = false
	c.DisableHTTPKeepalives = false
	c.DenyNetworkCIDRs = []string{
		"127.0.0.1/8",
		"10.0.0.0/8",
		"172.16.0.0/12",
		"192.168.0.0/16",
		"100.64.0.0/10",
		"169.254.0.0/16",
		"::1/128",
		"fe80::/64",
		"fc00::/7",
	}
	c.AllowNetworkCIDRs = []string{
		"0.0.0.0/0",
	}
	if opts.Generate {
		c.KeyPerspectives = KeyPerspectives{
			{

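The new `deny_networks`/`allow_networks` defaults above block the usual loopback, private, CGNAT and link-local ranges while allowing everything else. As a rough illustration of what such a pair of CIDR lists expresses (this is not Dendrite's or gomatrixserverlib's implementation, and it assumes deny entries take precedence over allow entries), a minimal standalone sketch:

```go
package main

import (
	"fmt"
	"net"
)

// allowedDestination reports whether ip is permitted by an allow list and a
// deny list of CIDRs, with deny taking precedence. Illustration only; the
// real enforcement lives inside fclient.WithAllowDenyNetworks.
func allowedDestination(ip net.IP, allow, deny []string) bool {
	contains := func(cidrs []string) bool {
		for _, c := range cidrs {
			if _, ipnet, err := net.ParseCIDR(c); err == nil && ipnet.Contains(ip) {
				return true
			}
		}
		return false
	}
	if contains(deny) {
		return false
	}
	return contains(allow)
}

func main() {
	deny := []string{"10.0.0.0/8", "192.168.0.0/16", "169.254.0.0/16"}
	allow := []string{"0.0.0.0/0"}
	fmt.Println(allowedDestination(net.ParseIP("192.168.1.10"), allow, deny))  // false: matches a denied range
	fmt.Println(allowedDestination(net.ParseIP("93.184.216.34"), allow, deny)) // true: only matches the allow list
}
```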
@ -15,6 +15,8 @@ type JetStream struct {
	// The prefix to use for stream names for this homeserver - really only
	// useful if running more than one Dendrite on the same NATS deployment.
	TopicPrefix string `yaml:"topic_prefix"`
	// The JetStream domain, if needed.
	JetStreamDomain string `yaml:"js_domain"`
	// Keep all storage in memory. This is mostly useful for unit tests.
	InMemory bool `yaml:"in_memory"`
	// Disable logging. This is mostly useful for unit tests.

@ -22,7 +22,7 @@ func JetStreamConsumer(
|
|||
f func(ctx context.Context, msgs []*nats.Msg) bool,
|
||||
opts ...nats.SubOpt,
|
||||
) error {
|
||||
defer func() {
|
||||
defer func(durable string) {
|
||||
// If there are existing consumers from before they were pull
|
||||
// consumers, we need to clean up the old push consumers. However,
|
||||
// in order to not affect the interest-based policies, we need to
|
||||
|
@ -33,86 +33,93 @@ func JetStreamConsumer(
|
|||
logrus.WithContext(ctx).Warnf("Failed to clean up old consumer %q", durable)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}(durable)
|
||||
|
||||
name := durable + "Pull"
|
||||
sub, err := js.PullSubscribe(subj, name, opts...)
|
||||
durable = durable + "Pull"
|
||||
sub, err := js.PullSubscribe(subj, durable, opts...)
|
||||
if err != nil {
|
||||
sentry.CaptureException(err)
|
||||
return fmt.Errorf("nats.SubscribeSync: %w", err)
|
||||
logrus.WithContext(ctx).WithError(err).Warnf("Failed to configure durable %q", durable)
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
// If the parent context has given up then there's no point in
|
||||
// carrying on doing anything, so stop the listener.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if err := sub.Unsubscribe(); err != nil {
|
||||
logrus.WithContext(ctx).Warnf("Failed to unsubscribe %q", durable)
|
||||
}
|
||||
return
|
||||
default:
|
||||
}
|
||||
// The context behaviour here is surprising — we supply a context
|
||||
// so that we can interrupt the fetch if we want, but NATS will still
|
||||
// enforce its own deadline (roughly 5 seconds by default). Therefore
|
||||
// it is our responsibility to check whether our context expired or
|
||||
// not when a context error is returned. Footguns. Footguns everywhere.
|
||||
msgs, err := sub.Fetch(batch, nats.Context(ctx))
|
||||
if err != nil {
|
||||
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||
// Work out whether it was the JetStream context that expired
|
||||
// or whether it was our supplied context.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// The supplied context expired, so we want to stop the
|
||||
// consumer altogether.
|
||||
return
|
||||
default:
|
||||
// The JetStream context expired, so the fetch probably
|
||||
// just timed out and we should try again.
|
||||
continue
|
||||
}
|
||||
} else if errors.Is(err, nats.ErrConsumerDeleted) {
|
||||
// The consumer was deleted so stop.
|
||||
go jetStreamConsumerWorker(ctx, sub, subj, batch, f)
|
||||
return nil
|
||||
}
|
||||
|
||||
func jetStreamConsumerWorker(
|
||||
ctx context.Context, sub *nats.Subscription, subj string, batch int,
|
||||
f func(ctx context.Context, msgs []*nats.Msg) bool,
|
||||
) {
|
||||
for {
|
||||
// If the parent context has given up then there's no point in
|
||||
// carrying on doing anything, so stop the listener.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
// The context behaviour here is surprising — we supply a context
|
||||
// so that we can interrupt the fetch if we want, but NATS will still
|
||||
// enforce its own deadline (roughly 5 seconds by default). Therefore
|
||||
// it is our responsibility to check whether our context expired or
|
||||
// not when a context error is returned. Footguns. Footguns everywhere.
|
||||
msgs, err := sub.Fetch(batch, nats.Context(ctx))
|
||||
if err != nil {
|
||||
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||
// Work out whether it was the JetStream context that expired
|
||||
// or whether it was our supplied context.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// The supplied context expired, so we want to stop the
|
||||
// consumer altogether.
|
||||
return
|
||||
} else {
|
||||
// Unfortunately, there's no ErrServerShutdown or similar, so we need to compare the string
|
||||
if err.Error() == "nats: Server Shutdown" {
|
||||
logrus.WithContext(ctx).Warn("nats server shutting down")
|
||||
return
|
||||
}
|
||||
// Something else went wrong, so we'll panic.
|
||||
sentry.CaptureException(err)
|
||||
logrus.WithContext(ctx).WithField("subject", subj).Fatal(err)
|
||||
}
|
||||
}
|
||||
if len(msgs) < 1 {
|
||||
continue
|
||||
}
|
||||
for _, msg := range msgs {
|
||||
if err = msg.InProgress(nats.Context(ctx)); err != nil {
|
||||
logrus.WithContext(ctx).WithField("subject", subj).Warn(fmt.Errorf("msg.InProgress: %w", err))
|
||||
sentry.CaptureException(err)
|
||||
default:
|
||||
// The JetStream context expired, so the fetch probably
|
||||
// just timed out and we should try again.
|
||||
continue
|
||||
}
|
||||
}
|
||||
if f(ctx, msgs) {
|
||||
for _, msg := range msgs {
|
||||
if err = msg.AckSync(nats.Context(ctx)); err != nil {
|
||||
logrus.WithContext(ctx).WithField("subject", subj).Warn(fmt.Errorf("msg.AckSync: %w", err))
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
}
|
||||
} else if errors.Is(err, nats.ErrTimeout) {
|
||||
// Pull request was invalidated, try again.
|
||||
continue
|
||||
} else if errors.Is(err, nats.ErrConsumerLeadershipChanged) {
|
||||
// Leadership changed so pending pull requests became invalidated,
|
||||
// just try again.
|
||||
continue
|
||||
} else if err.Error() == "nats: Server Shutdown" {
|
||||
// The server is shutting down, but we'll rely on reconnect
|
||||
// behaviour to try and either connect us to another node (if
|
||||
// clustered) or to reconnect when the server comes back up.
|
||||
continue
|
||||
} else {
|
||||
for _, msg := range msgs {
|
||||
if err = msg.Nak(nats.Context(ctx)); err != nil {
|
||||
logrus.WithContext(ctx).WithField("subject", subj).Warn(fmt.Errorf("msg.Nak: %w", err))
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
// Something else went wrong.
|
||||
logrus.WithContext(ctx).WithField("subject", subj).WithError(err).Warn("Error on pull subscriber fetch")
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(msgs) < 1 {
|
||||
continue
|
||||
}
|
||||
for _, msg := range msgs {
|
||||
if err = msg.InProgress(nats.Context(ctx)); err != nil {
|
||||
logrus.WithContext(ctx).WithField("subject", subj).Warn(fmt.Errorf("msg.InProgress: %w", err))
|
||||
sentry.CaptureException(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if f(ctx, msgs) {
|
||||
for _, msg := range msgs {
|
||||
if err = msg.AckSync(nats.Context(ctx)); err != nil {
|
||||
logrus.WithContext(ctx).WithField("subject", subj).Warn(fmt.Errorf("msg.AckSync: %w", err))
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, msg := range msgs {
|
||||
if err = msg.Nak(nats.Context(ctx)); err != nil {
|
||||
logrus.WithContext(ctx).WithField("subject", subj).Warn(fmt.Errorf("msg.Nak: %w", err))
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
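The refactored pull consumer above acks the whole batch with `AckSync` when the supplied callback returns true, and naks it for redelivery otherwise. A minimal sketch of the kind of handler that gets passed in as `f`; the `process` helper is a hypothetical stand-in for the real per-message work:

```go
package example

import (
	"context"

	"github.com/nats-io/nats.go"
	"github.com/sirupsen/logrus"
)

// process is a placeholder for whatever work a consumer does per message.
func process(ctx context.Context, data []byte) error { return nil }

// handler has the shape expected for the `f` callback: return true to ack
// the whole batch, false to nak it so the messages are redelivered later.
func handler(ctx context.Context, msgs []*nats.Msg) bool {
	for _, msg := range msgs {
		if err := process(ctx, msg.Data); err != nil {
			logrus.WithContext(ctx).WithError(err).Warn("failed to process message; NAKing the batch")
			return false
		}
	}
	return true
}
```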
@ -8,7 +8,6 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/element-hq/dendrite/setup/config"
|
||||
|
@ -36,17 +35,20 @@ func DeleteAllStreams(js natsclient.JetStreamContext, cfg *config.JetStream) {
|
|||
func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetStream) (natsclient.JetStreamContext, *natsclient.Conn) {
|
||||
natsLock.Lock()
|
||||
defer natsLock.Unlock()
|
||||
// check if we need an in-process NATS Server
|
||||
if len(cfg.Addresses) != 0 {
|
||||
// reuse existing connections
|
||||
if s.nc != nil {
|
||||
return s.js, s.nc
|
||||
}
|
||||
var err error
|
||||
|
||||
// If an existing connection exists, return it.
|
||||
if s.nc != nil && s.js != nil {
|
||||
return s.js, s.nc
|
||||
}
|
||||
|
||||
// For connecting to an external NATS server.
|
||||
if len(cfg.Addresses) > 0 {
|
||||
s.js, s.nc = setupNATS(process, cfg, nil)
|
||||
return s.js, s.nc
|
||||
}
|
||||
if s.Server == nil {
|
||||
var err error
|
||||
|
||||
if len(cfg.Addresses) == 0 && s.Server == nil {
|
||||
opts := &natsserver.Options{
|
||||
ServerName: "monolith",
|
||||
DontListen: true,
|
||||
|
@ -58,8 +60,7 @@ func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetS
|
|||
NoLog: cfg.NoLog,
|
||||
SyncAlways: true,
|
||||
}
|
||||
s.Server, err = natsserver.NewServer(opts)
|
||||
if err != nil {
|
||||
if s.Server, err = natsserver.NewServer(opts); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !cfg.NoLog {
|
||||
|
@ -75,29 +76,42 @@ func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetS
|
|||
s.WaitForShutdown()
|
||||
process.ComponentFinished()
|
||||
}()
|
||||
if !s.ReadyForConnections(time.Second * 60) {
|
||||
logrus.Fatalln("NATS did not start in time")
|
||||
}
|
||||
}
|
||||
if !s.ReadyForConnections(time.Second * 60) {
|
||||
logrus.Fatalln("NATS did not start in time")
|
||||
}
|
||||
// reuse existing connections
|
||||
if s.nc != nil {
|
||||
return s.js, s.nc
|
||||
}
|
||||
nc, err := natsclient.Connect("", natsclient.InProcessServer(s))
|
||||
if err != nil {
|
||||
|
||||
// No existing process connection, create a new one.
|
||||
if s.nc, err = natsclient.Connect("", natsclient.InProcessServer(s.Server)); err != nil {
|
||||
logrus.Fatalln("Failed to create NATS client")
|
||||
}
|
||||
js, _ := setupNATS(process, cfg, nc)
|
||||
s.js = js
|
||||
s.nc = nc
|
||||
return js, nc
|
||||
s.js, s.nc = setupNATS(process, cfg, s.nc)
|
||||
return s.js, s.nc
|
||||
}
|
||||
|
||||
// nolint:gocyclo
|
||||
func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStreamContext, *natsclient.Conn) {
|
||||
jsOpts := []natsclient.JSOpt{}
|
||||
if cfg.JetStreamDomain != "" {
|
||||
jsOpts = append(jsOpts, natsclient.Domain(cfg.JetStreamDomain))
|
||||
}
|
||||
|
||||
if nc == nil {
|
||||
var err error
|
||||
opts := []natsclient.Option{}
|
||||
opts := []natsclient.Option{
|
||||
natsclient.Name("Dendrite"),
|
||||
natsclient.MaxReconnects(-1), // Try forever
|
||||
natsclient.ReconnectJitter(time.Second, time.Second),
|
||||
natsclient.ReconnectWait(time.Second * 10),
|
||||
natsclient.ReconnectHandler(func(c *natsclient.Conn) {
|
||||
js, jerr := c.JetStream(jsOpts...)
|
||||
if jerr != nil {
|
||||
logrus.WithError(jerr).Panic("Unable to get JetStream context in reconnect handler")
|
||||
return
|
||||
}
|
||||
checkAndConfigureStreams(process, cfg, js)
|
||||
}),
|
||||
}
|
||||
if cfg.DisableTLSValidation {
|
||||
opts = append(opts, natsclient.Secure(&tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
|
@ -113,15 +127,19 @@ func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsc
|
|||
}
|
||||
}
|
||||
|
||||
s, err := nc.JetStream()
|
||||
js, err := nc.JetStream(jsOpts...)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("Unable to get JetStream context")
|
||||
return nil, nil
|
||||
}
|
||||
checkAndConfigureStreams(process, cfg, js)
|
||||
return js, nc
|
||||
}
|
||||
|
||||
func checkAndConfigureStreams(process *process.ProcessContext, cfg *config.JetStream, js natsclient.JetStreamContext) {
|
||||
for _, stream := range streams { // streams are defined in streams.go
|
||||
name := cfg.Prefixed(stream.Name)
|
||||
info, err := s.StreamInfo(name)
|
||||
info, err := js.StreamInfo(name)
|
||||
if err != nil && err != natsclient.ErrStreamNotFound {
|
||||
logrus.WithError(err).Fatal("Unable to get stream info")
|
||||
}
|
||||
|
@ -153,11 +171,11 @@ func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsc
|
|||
case info.Config.MaxAge != stream.MaxAge:
|
||||
// Try updating the stream first, as many things can be updated
|
||||
// non-destructively.
|
||||
if info, err = s.UpdateStream(stream); err != nil {
|
||||
if info, err = js.UpdateStream(stream); err != nil {
|
||||
logrus.WithError(err).Warnf("Unable to update stream %q, recreating...", name)
|
||||
// We failed to update the stream, this is a last attempt to get
|
||||
// things working but may result in data loss.
|
||||
if err = s.DeleteStream(name); err != nil {
|
||||
if err = js.DeleteStream(name); err != nil {
|
||||
logrus.WithError(err).Fatalf("Unable to delete stream %q", name)
|
||||
}
|
||||
info = nil
|
||||
|
@ -176,7 +194,7 @@ func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsc
|
|||
namespaced := *stream
|
||||
namespaced.Name = name
|
||||
namespaced.Subjects = subjects
|
||||
if _, err = s.AddStream(&namespaced); err != nil {
|
||||
if _, err = js.AddStream(&namespaced); err != nil {
|
||||
logger := logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"stream": namespaced.Name,
|
||||
"subjects": namespaced.Subjects,
|
||||
|
@ -193,10 +211,9 @@ func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsc
|
|||
// we can't recover anything that was queued on the disk but we
|
||||
// will still be able to start and run hopefully in the meantime.
|
||||
logger.WithError(err).Error("Unable to add stream")
|
||||
sentry.CaptureException(fmt.Errorf("Unable to add stream %q: %w", namespaced.Name, err))
|
||||
|
||||
namespaced.Storage = natsclient.MemoryStorage
|
||||
if _, err = s.AddStream(&namespaced); err != nil {
|
||||
if _, err = js.AddStream(&namespaced); err != nil {
|
||||
// We tried to add the stream in-memory instead but something
|
||||
// went wrong. That's an unrecoverable situation so we will
|
||||
// give up at this point.
|
||||
|
@ -208,7 +225,6 @@ func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsc
|
|||
// disk will be left alone, but our ability to recover from a
|
||||
// future crash will be limited. Yell about it.
|
||||
err := fmt.Errorf("Stream %q is running in-memory; this may be due to data corruption in the JetStream storage directory", namespaced.Name)
|
||||
sentry.CaptureException(err)
|
||||
process.Degraded(err)
|
||||
}
|
||||
}
|
||||
|
@ -229,15 +245,13 @@ func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsc
|
|||
streamName := cfg.Matrix.JetStream.Prefixed(stream)
|
||||
for _, consumer := range consumers {
|
||||
consumerName := cfg.Matrix.JetStream.Prefixed(consumer) + "Pull"
|
||||
consumerInfo, err := s.ConsumerInfo(streamName, consumerName)
|
||||
consumerInfo, err := js.ConsumerInfo(streamName, consumerName)
|
||||
if err != nil || consumerInfo == nil {
|
||||
continue
|
||||
}
|
||||
if err = s.DeleteConsumer(streamName, consumerName); err != nil {
|
||||
if err = js.DeleteConsumer(streamName, consumerName); err != nil {
|
||||
logrus.WithError(err).Errorf("Unable to clean up old consumer %q for stream %q", consumer, stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return s, nc
|
||||
}
|
||||
|
|
|
@ -35,6 +35,7 @@ func DeviceOTKCounts(ctx context.Context, keyAPI api.SyncKeyAPI, userID, deviceI
		return queryRes.Error
	}
	res.DeviceListsOTKCount = queryRes.Count.KeyCount
	res.DeviceListsUnusedFallbackAlgorithms = queryRes.UnusedFallbackAlgorithms
	return nil
}

@ -350,13 +350,14 @@ type ToDeviceResponse struct {

// Response represents a /sync API response. See https://matrix.org/docs/spec/client_server/r0.2.0.html#get-matrix-client-r0-sync
type Response struct {
	NextBatch           StreamingToken    `json:"next_batch"`
	AccountData         *ClientEvents     `json:"account_data,omitempty"`
	Presence            *ClientEvents     `json:"presence,omitempty"`
	Rooms               *RoomsResponse    `json:"rooms,omitempty"`
	ToDevice            *ToDeviceResponse `json:"to_device,omitempty"`
	DeviceLists         *DeviceLists      `json:"device_lists,omitempty"`
	DeviceListsOTKCount map[string]int    `json:"device_one_time_keys_count,omitempty"`
	NextBatch                           StreamingToken    `json:"next_batch"`
	AccountData                         *ClientEvents     `json:"account_data,omitempty"`
	Presence                            *ClientEvents     `json:"presence,omitempty"`
	Rooms                               *RoomsResponse    `json:"rooms,omitempty"`
	ToDevice                            *ToDeviceResponse `json:"to_device,omitempty"`
	DeviceLists                         *DeviceLists      `json:"device_lists,omitempty"`
	DeviceListsOTKCount                 map[string]int    `json:"device_one_time_keys_count,omitempty"`
	DeviceListsUnusedFallbackAlgorithms []string          `json:"device_unused_fallback_key_types"`
}

func (r Response) MarshalJSON() ([]byte, error) {

@ -419,6 +420,7 @@ func NewResponse() *Response {
	res.DeviceLists = &DeviceLists{}
	res.ToDevice = &ToDeviceResponse{}
	res.DeviceListsOTKCount = map[string]int{}
	res.DeviceListsUnusedFallbackAlgorithms = []string{}

	return &res
}

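On the wire, the effect of the new field is an extra `device_unused_fallback_key_types` array in the /sync body. A small self-contained sketch using a local mirror struct (only these two fields and example values; the real `Response` carries many more fields and a custom `MarshalJSON`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// localResponse mirrors just the two key-count fields for illustration.
type localResponse struct {
	DeviceListsOTKCount                 map[string]int `json:"device_one_time_keys_count,omitempty"`
	DeviceListsUnusedFallbackAlgorithms []string       `json:"device_unused_fallback_key_types"`
}

func main() {
	res := localResponse{
		DeviceListsOTKCount:                 map[string]int{"signed_curve25519": 50},
		DeviceListsUnusedFallbackAlgorithms: []string{"signed_curve25519"},
	}
	b, _ := json.Marshal(res)
	fmt.Println(string(b))
	// {"device_one_time_keys_count":{"signed_curve25519":50},"device_unused_fallback_key_types":["signed_curve25519"]}
}
```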
@ -17,4 +17,8 @@ If a device list update goes missing, the server resyncs on the next one
Leaves are present in non-gapped incremental syncs

# We don't have any state to calculate m.room.guest_access when accepting invites
Guest users can accept invites to private rooms over federation
Guest users can accept invites to private rooms over federation

# Tests Synapse specific behavior
/state returns M_NOT_FOUND for an outlier
/state_ids returns M_NOT_FOUND for an outlier

@ -774,8 +774,6 @@ Remote user can backfill in a room with version 10
Can reject invites over federation for rooms with version 10
Can receive redactions from regular users over federation in room version 10
New federated private chats get full presence information (SYN-115)
/state returns M_NOT_FOUND for an outlier
/state_ids returns M_NOT_FOUND for an outlier
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
Invited user can reject invite for empty room
Invited user can reject local invite after originator leaves

@ -793,4 +791,6 @@ remote user can join room with version 11
User can invite remote user to room with version 11
Remote user can backfill in a room with version 11
Can reject invites over federation for rooms with version 11
Can receive redactions from regular users over federation in room version 11
Can receive redactions from regular users over federation in room version 11
Can upload self-signing keys
uploading signed devices gets propagated over federation

11
test/room.go
|
@ -43,7 +43,7 @@ type Room struct {
|
|||
visibility gomatrixserverlib.HistoryVisibility
|
||||
creator *User
|
||||
|
||||
authEvents gomatrixserverlib.AuthEvents
|
||||
authEvents *gomatrixserverlib.AuthEvents
|
||||
currentState map[string]*rstypes.HeaderedEvent
|
||||
events []*rstypes.HeaderedEvent
|
||||
}
|
||||
|
@ -55,10 +55,11 @@ func NewRoom(t *testing.T, creator *User, modifiers ...roomModifier) *Room {
|
|||
if creator.srvName == "" {
|
||||
t.Fatalf("NewRoom: creator doesn't belong to a server: %+v", *creator)
|
||||
}
|
||||
authEvents, _ := gomatrixserverlib.NewAuthEvents(nil)
|
||||
r := &Room{
|
||||
ID: fmt.Sprintf("!%d:%s", counter, creator.srvName),
|
||||
creator: creator,
|
||||
authEvents: gomatrixserverlib.NewAuthEvents(nil),
|
||||
authEvents: authEvents,
|
||||
preset: PresetPublicChat,
|
||||
Version: gomatrixserverlib.RoomVersionV9,
|
||||
currentState: make(map[string]*rstypes.HeaderedEvent),
|
||||
|
@ -73,7 +74,7 @@ func NewRoom(t *testing.T, creator *User, modifiers ...roomModifier) *Room {
|
|||
|
||||
func (r *Room) MustGetAuthEventRefsForEvent(t *testing.T, needed gomatrixserverlib.StateNeeded) []string {
|
||||
t.Helper()
|
||||
a, err := needed.AuthEventReferences(&r.authEvents)
|
||||
a, err := needed.AuthEventReferences(r.authEvents)
|
||||
if err != nil {
|
||||
t.Fatalf("MustGetAuthEvents: %v", err)
|
||||
}
|
||||
|
@ -175,7 +176,7 @@ func (r *Room) CreateEvent(t *testing.T, creator *User, eventType string, conten
|
|||
builder.PrevEvents = []string{r.events[len(r.events)-1].EventID()}
|
||||
}
|
||||
|
||||
err = builder.AddAuthEvents(&r.authEvents)
|
||||
err = builder.AddAuthEvents(r.authEvents)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateEvent[%s]: failed to AuthEventReferences: %s", eventType, err)
|
||||
}
|
||||
|
@ -191,7 +192,7 @@ func (r *Room) CreateEvent(t *testing.T, creator *User, eventType string, conten
|
|||
if err != nil {
|
||||
t.Fatalf("CreateEvent[%s]: failed to build event: %s", eventType, err)
|
||||
}
|
||||
if err = gomatrixserverlib.Allowed(ev, &r.authEvents, UserIDForSender); err != nil {
|
||||
if err = gomatrixserverlib.Allowed(ev, r.authEvents, UserIDForSender); err != nil {
|
||||
t.Fatalf("CreateEvent[%s]: failed to verify event was allowed: %s", eventType, err)
|
||||
}
|
||||
headeredEvent := &rstypes.HeaderedEvent{PDU: ev}
|
||||
|
|
|
@ -788,12 +788,30 @@ type OneTimeKeysCount struct {
	KeyCount map[string]int
}

// FallbackKeys represents a set of fallback keys for a single device
// https://matrix.org/docs/spec/client_server/r0.6.1#post-matrix-client-r0-keys-upload
type FallbackKeys struct {
	// The user who owns this device
	UserID string
	// The device ID of this device
	DeviceID string
	// A map of algorithm:key_id => key JSON
	KeyJSON map[string]json.RawMessage
}

// Split a key in KeyJSON into algorithm and key ID
func (k *FallbackKeys) Split(keyIDWithAlgo string) (algo string, keyID string) {
	segments := strings.Split(keyIDWithAlgo, ":")
	return segments[0], segments[1]
}

// PerformUploadKeysRequest is the request to PerformUploadKeys
type PerformUploadKeysRequest struct {
	UserID      string // Required - User performing the request
	DeviceID    string // Optional - Device performing the request, for fetching OTK count
	DeviceKeys  []DeviceKeys
	OneTimeKeys []OneTimeKeys
	UserID       string // Required - User performing the request
	DeviceID     string // Optional - Device performing the request, for fetching OTK count
	DeviceKeys   []DeviceKeys
	OneTimeKeys  []OneTimeKeys
	FallbackKeys []FallbackKeys
	// OnlyDisplayNameUpdates should be `true` if ALL the DeviceKeys are present to update
	// the display name for their respective device, and NOT to modify the keys. The key
	// itself doesn't change but it's easier to pretend upload new keys and reuse the same code paths.

@ -810,8 +828,9 @@ type PerformUploadKeysResponse struct {
|
|||
// A fatal error when processing e.g database failures
|
||||
Error *KeyError
|
||||
// A map of user_id -> device_id -> Error for tracking failures.
|
||||
KeyErrors map[string]map[string]*KeyError
|
||||
OneTimeKeyCounts []OneTimeKeysCount
|
||||
KeyErrors map[string]map[string]*KeyError
|
||||
OneTimeKeyCounts []OneTimeKeysCount
|
||||
FallbackKeysUnusedAlgorithms []string
|
||||
}
|
||||
|
||||
// PerformDeleteKeysRequest asks the keyserver to forget about certain
|
||||
|
@ -917,8 +936,9 @@ type QueryOneTimeKeysRequest struct {
|
|||
|
||||
type QueryOneTimeKeysResponse struct {
|
||||
// OTK key counts, in the extended /sync form described by https://matrix.org/docs/spec/client_server/r0.6.1#id84
|
||||
Count OneTimeKeysCount
|
||||
Error *KeyError
|
||||
Count OneTimeKeysCount
|
||||
UnusedFallbackAlgorithms []string
|
||||
Error *KeyError
|
||||
}
|
||||
|
||||
type QueryDeviceMessagesRequest struct {
|
||||
|
|
|
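The new FallbackKeys type keys its KeyJSON map by "algorithm:key_id", and the Split helper above simply cuts that identifier at the colon. A standalone illustration of the convention, not Dendrite code: parseKeyID is a hypothetical name, and unlike Split it reports malformed identifiers instead of indexing past the end of the slice.

package main

import (
    "fmt"
    "strings"
)

// parseKeyID splits an "algorithm:key_id" identifier such as
// "signed_curve25519:AAAAHg" into its two halves. It returns ok=false
// for malformed input rather than panicking.
func parseKeyID(keyIDWithAlgo string) (algo, keyID string, ok bool) {
    parts := strings.SplitN(keyIDWithAlgo, ":", 2)
    if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
        return "", "", false
    }
    return parts[0], parts[1], true
}

func main() {
    for _, id := range []string{"signed_curve25519:AAAAHg", "curve25519:KEY1", "malformed"} {
        algo, keyID, ok := parseKeyID(id)
        fmt.Println(id, "->", algo, keyID, ok)
    }
}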
@@ -44,14 +44,22 @@ func (a *UserInternalAPI) PerformUploadKeys(ctx context.Context, req *api.Perfor
     if len(req.DeviceKeys) > 0 {
         a.uploadLocalDeviceKeys(ctx, req, res)
     }
-    if len(req.OneTimeKeys) > 0 {
-        a.uploadOneTimeKeys(ctx, req, res)
+    if len(req.OneTimeKeys) > 0 || len(req.FallbackKeys) > 0 {
+        a.uploadOneTimeAndFallbackKeys(ctx, req, res)
     }
     otks, err := a.KeyDatabase.OneTimeKeysCount(ctx, req.UserID, req.DeviceID)
     if err != nil {
         return err
     }
+    algos, err := a.KeyDatabase.UnusedFallbackKeyAlgorithms(ctx, req.UserID, req.DeviceID)
+    if err != nil {
+        res.Error = &api.KeyError{
+            Err: fmt.Sprintf("Failed to query unused fallback algorithms: %s", err),
+        }
+        return nil
+    }
     res.OneTimeKeyCounts = []api.OneTimeKeysCount{*otks}
+    res.FallbackKeysUnusedAlgorithms = algos
     return nil
 }

@@ -169,7 +177,15 @@ func (a *UserInternalAPI) QueryOneTimeKeys(ctx context.Context, req *api.QueryOn
         }
         return nil
     }
+    algos, err := a.KeyDatabase.UnusedFallbackKeyAlgorithms(ctx, req.UserID, req.DeviceID)
+    if err != nil {
+        res.Error = &api.KeyError{
+            Err: fmt.Sprintf("Failed to query unused fallback algorithms: %s", err),
+        }
+        return nil
+    }
     res.Count = *count
+    res.UnusedFallbackAlgorithms = algos
     return nil
 }

@@ -507,6 +523,9 @@ func (a *UserInternalAPI) queryRemoteKeysOnServer(
     for userID := range userIDsForAllDevices {
         err := a.Updater.ManualUpdate(context.Background(), spec.ServerName(serverName), userID)
         if err != nil {
+            if errors.Is(err, context.Canceled) {
+                return
+            }
             logrus.WithFields(logrus.Fields{
                 logrus.ErrorKey: err,
                 "user_id":       userID,
@@ -520,6 +539,9 @@ func (a *UserInternalAPI) queryRemoteKeysOnServer(
         // user so the fact that we're populating all devices here isn't a problem so long as we have devices.
         err = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, respMu, userID, nil)
         if err != nil {
+            if errors.Is(err, context.Canceled) {
+                return
+            }
             logrus.WithFields(logrus.Fields{
                 logrus.ErrorKey: err,
                 "user_id":       userID,
@@ -715,7 +737,7 @@ func (a *UserInternalAPI) uploadLocalDeviceKeys(ctx context.Context, req *api.Pe
     }
 }

-func (a *UserInternalAPI) uploadOneTimeKeys(ctx context.Context, req *api.PerformUploadKeysRequest, res *api.PerformUploadKeysResponse) {
+func (a *UserInternalAPI) uploadOneTimeAndFallbackKeys(ctx context.Context, req *api.PerformUploadKeysRequest, res *api.PerformUploadKeysResponse) {
     if req.UserID == "" {
         res.Error = &api.KeyError{
             Err: "user ID missing",
@@ -768,7 +790,32 @@ func (a *UserInternalAPI) uploadOneTimeKeys(ctx context.Context, req *api.Perfor
         // collect counts
         res.OneTimeKeyCounts = append(res.OneTimeKeyCounts, *counts)
     }
+
+    if len(req.FallbackKeys) > 0 {
+        if err := a.KeyDatabase.DeleteFallbackKeys(ctx, req.UserID, req.DeviceID); err != nil {
+            res.KeyError(req.UserID, req.DeviceID, &api.KeyError{
+                Err: fmt.Sprintf("%s device %s : failed to clear fallback keys: %s", req.UserID, req.DeviceID, err.Error()),
+            })
+            return
+        }
+        for _, key := range req.FallbackKeys {
+            // grab existing keys based on (user/device/algorithm/key ID)
+            keyIDsWithAlgorithms := make([]string, len(key.KeyJSON))
+            i := 0
+            for keyIDWithAlgo := range key.KeyJSON {
+                keyIDsWithAlgorithms[i] = keyIDWithAlgo
+                i++
+            }
+            unused, err := a.KeyDatabase.StoreFallbackKeys(ctx, key)
+            if err != nil {
+                res.KeyError(req.UserID, req.DeviceID, &api.KeyError{
+                    Err: fmt.Sprintf("%s device %s : failed to store fallback keys: %s", req.UserID, req.DeviceID, err.Error()),
+                })
+                continue
+            }
+            // collect counts
+            res.FallbackKeysUnusedAlgorithms = unused
+        }
+    }
 }

 func emitDeviceKeyChanges(producer KeyChangeProducer, existing, new []api.DeviceMessage, onlyUpdateDisplayName bool) error {
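The upload path above clears any existing fallback keys for the device and then stores the new ones, reporting back which algorithms still have an unused fallback key. A standalone, in-memory sketch of that bookkeeping, using hypothetical names (fallbackStore, store, unusedAlgorithms) rather than Dendrite's API, to make the clobber-per-algorithm and unused-flag semantics concrete:

package main

import (
    "fmt"
    "sort"
)

type fallbackKey struct {
    keyID string
    json  string
    used  bool
}

// fallbackStore keeps at most one fallback key per (user, device, algorithm),
// mirroring the unique constraint in the tables added by this change.
type fallbackStore map[string]map[string]*fallbackKey // "user|device" -> algorithm -> key

func (s fallbackStore) store(userDevice, algo, keyID, keyJSON string) {
    if s[userDevice] == nil {
        s[userDevice] = map[string]*fallbackKey{}
    }
    // Re-uploading clobbers the previous key and resets the used flag.
    s[userDevice][algo] = &fallbackKey{keyID: keyID, json: keyJSON}
}

func (s fallbackStore) unusedAlgorithms(userDevice string) []string {
    var algos []string
    for algo, k := range s[userDevice] {
        if !k.used {
            algos = append(algos, algo)
        }
    }
    sort.Strings(algos)
    return algos
}

func main() {
    s := fallbackStore{}
    s.store("@alice:example.org|DEV1", "signed_curve25519", "AAAAHg", `{"key":"v1"}`)
    fmt.Println(s.unusedAlgorithms("@alice:example.org|DEV1")) // [signed_curve25519]

    // A claim is meant to mark the key used but keep it around...
    s["@alice:example.org|DEV1"]["signed_curve25519"].used = true
    fmt.Println(s.unusedAlgorithms("@alice:example.org|DEV1")) // []

    // ...and uploading a replacement makes the algorithm unused again.
    s.store("@alice:example.org|DEV1", "signed_curve25519", "AAAAHw", `{"key":"v2"}`)
    fmt.Println(s.unusedAlgorithms("@alice:example.org|DEV1")) // [signed_curve25519]
}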
@@ -167,6 +167,15 @@ type KeyDatabase interface {
     // OneTimeKeysCount returns a count of all OTKs for this device.
     OneTimeKeysCount(ctx context.Context, userID, deviceID string) (*api.OneTimeKeysCount, error)

+    // StoreFallbackKeys persists the given fallback keys.
+    StoreFallbackKeys(ctx context.Context, keys api.FallbackKeys) ([]string, error)
+
+    // UnusedFallbackKeyAlgorithms returns unused fallback algorithms for this user/device.
+    UnusedFallbackKeyAlgorithms(ctx context.Context, userID, deviceID string) ([]string, error)
+
+    // DeleteFallbackKeys deletes all fallback keys for the user.
+    DeleteFallbackKeys(ctx context.Context, userID, deviceID string) error
+
     // DeviceKeysJSON populates the KeyJSON for the given keys. If any proided `keys` have a `KeyJSON` or `StreamID` already then it will be replaced.
     DeviceKeysJSON(ctx context.Context, keys []api.DeviceMessage) error
userapi/storage/postgres/fallback_keys_table.go (new file, 134 lines)

@@ -0,0 +1,134 @@
// Copyright 2024 New Vector Ltd.
// Copyright 2017 Vector Creations Ltd
//
// SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
// Please see LICENSE files in the repository root for full details.

package postgres

import (
    "context"
    "database/sql"
    "encoding/json"
    "time"

    "github.com/element-hq/dendrite/internal"
    "github.com/element-hq/dendrite/internal/sqlutil"
    "github.com/element-hq/dendrite/userapi/api"
    "github.com/element-hq/dendrite/userapi/storage/tables"
)

var fallbackKeysSchema = `
-- Stores one-time public keys for users
CREATE TABLE IF NOT EXISTS keyserver_fallback_keys (
    user_id TEXT NOT NULL,
    device_id TEXT NOT NULL,
    key_id TEXT NOT NULL,
    algorithm TEXT NOT NULL,
    ts_added_secs BIGINT NOT NULL,
    key_json TEXT NOT NULL,
    used BOOLEAN NOT NULL,
    -- Clobber based on tuple of user/device/algorithm.
    CONSTRAINT keyserver_fallback_keys_unique UNIQUE (user_id, device_id, algorithm)
);

CREATE INDEX IF NOT EXISTS keyserver_fallback_keys_idx ON keyserver_fallback_keys (user_id, device_id);
`

const upsertFallbackKeysSQL = "" +
    "INSERT INTO keyserver_fallback_keys (user_id, device_id, key_id, algorithm, ts_added_secs, key_json, used)" +
    " VALUES ($1, $2, $3, $4, $5, $6, false)" +
    " ON CONFLICT ON CONSTRAINT keyserver_fallback_keys_unique" +
    " DO UPDATE SET key_id = $3, key_json = $6, used = false"

const selectFallbackUnusedAlgorithmsSQL = "" +
    "SELECT algorithm FROM keyserver_fallback_keys WHERE user_id = $1 AND device_id = $2 AND used = false"

const selectFallbackKeysByAlgorithmSQL = "" +
    "SELECT key_id, key_json FROM keyserver_fallback_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 ORDER BY used ASC LIMIT 1"

const deleteFallbackKeysSQL = "" +
    "DELETE FROM keyserver_fallback_keys WHERE user_id = $1 AND device_id = $2"

const updateFallbackKeyUsedSQL = "" +
    "UPDATE keyserver_fallback_keys SET used=true WHERE user_id = $1 AND device_id = $2 AND key_id = $3 AND algorithm = $4"

type fallbackKeysStatements struct {
    db                         *sql.DB
    upsertKeysStmt             *sql.Stmt
    selectUnusedAlgorithmsStmt *sql.Stmt
    selectKeyByAlgorithmStmt   *sql.Stmt
    deleteFallbackKeysStmt     *sql.Stmt
    updateFallbackKeyUsedStmt  *sql.Stmt
}

func NewPostgresFallbackKeysTable(db *sql.DB) (tables.FallbackKeys, error) {
    s := &fallbackKeysStatements{
        db: db,
    }
    _, err := db.Exec(fallbackKeysSchema)
    if err != nil {
        return nil, err
    }
    return s, sqlutil.StatementList{
        {&s.upsertKeysStmt, upsertFallbackKeysSQL},
        {&s.selectUnusedAlgorithmsStmt, selectFallbackUnusedAlgorithmsSQL},
        {&s.selectKeyByAlgorithmStmt, selectFallbackKeysByAlgorithmSQL},
        {&s.deleteFallbackKeysStmt, deleteFallbackKeysSQL},
        {&s.updateFallbackKeyUsedStmt, updateFallbackKeyUsedSQL},
    }.Prepare(db)
}

func (s *fallbackKeysStatements) SelectUnusedFallbackKeyAlgorithms(ctx context.Context, userID, deviceID string) ([]string, error) {
    rows, err := s.selectUnusedAlgorithmsStmt.QueryContext(ctx, userID, deviceID)
    if err != nil {
        return nil, err
    }
    defer internal.CloseAndLogIfError(ctx, rows, "selectKeysCountStmt: rows.close() failed")
    algos := []string{}
    for rows.Next() {
        var algorithm string
        if err = rows.Scan(&algorithm); err != nil {
            return nil, err
        }
        algos = append(algos, algorithm)
    }
    return algos, rows.Err()
}

func (s *fallbackKeysStatements) InsertFallbackKeys(ctx context.Context, txn *sql.Tx, keys api.FallbackKeys) ([]string, error) {
    now := time.Now().Unix()
    for keyIDWithAlgo, keyJSON := range keys.KeyJSON {
        algo, keyID := keys.Split(keyIDWithAlgo)
        _, err := sqlutil.TxStmt(txn, s.upsertKeysStmt).ExecContext(
            ctx, keys.UserID, keys.DeviceID, keyID, algo, now, string(keyJSON),
        )
        if err != nil {
            return nil, err
        }
    }
    return s.SelectUnusedFallbackKeyAlgorithms(ctx, keys.UserID, keys.DeviceID)
}

func (s *fallbackKeysStatements) DeleteFallbackKeys(ctx context.Context, txn *sql.Tx, userID, deviceID string) error {
    _, err := sqlutil.TxStmt(txn, s.deleteFallbackKeysStmt).ExecContext(ctx, userID, deviceID)
    return err
}

func (s *fallbackKeysStatements) SelectAndUpdateFallbackKey(
    ctx context.Context, txn *sql.Tx, userID, deviceID, algorithm string,
) (map[string]json.RawMessage, error) {
    var keyID string
    var keyJSON string
    err := sqlutil.TxStmtContext(ctx, txn, s.selectKeyByAlgorithmStmt).QueryRowContext(ctx, userID, deviceID, algorithm).Scan(&keyID, &keyJSON)
    if err != nil {
        if err == sql.ErrNoRows {
            return nil, nil
        }
        return nil, err
    }
    _, err = sqlutil.TxStmtContext(ctx, txn, s.updateFallbackKeyUsedStmt).ExecContext(ctx, userID, deviceID, algorithm, keyID)
    return map[string]json.RawMessage{
        algorithm + ":" + keyID: json.RawMessage(keyJSON),
    }, err
}
@@ -53,7 +53,7 @@ const deleteOneTimeKeySQL = "" +
     "DELETE FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 AND key_id = $4"

 const selectKeyByAlgorithmSQL = "" +
-    "SELECT key_id, key_json FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 LIMIT 1"
+    "SELECT key_id, key_json FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 ORDER BY ts_added_secs ASC LIMIT 1"

 const deleteOneTimeKeysSQL = "" +
     "DELETE FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2"
@@ -141,6 +141,10 @@ func NewKeyDatabase(conMan *sqlutil.Connections, dbProperties *config.DatabaseOp
     if err != nil {
         return nil, err
     }
+    fk, err := NewPostgresFallbackKeysTable(db)
+    if err != nil {
+        return nil, err
+    }
     dk, err := NewPostgresDeviceKeysTable(db)
     if err != nil {
         return nil, err
@@ -164,6 +168,7 @@ func NewKeyDatabase(conMan *sqlutil.Connections, dbProperties *config.DatabaseOp

     return &shared.KeyDatabase{
         OneTimeKeysTable:      otk,
+        FallbackKeysTable:     fk,
         DeviceKeysTable:       dk,
         KeyChangesTable:       kc,
         StaleDeviceListsTable: sdl,
@@ -57,6 +57,7 @@ type Database struct {

 type KeyDatabase struct {
     OneTimeKeysTable      tables.OneTimeKeys
+    FallbackKeysTable     tables.FallbackKeys
     DeviceKeysTable       tables.DeviceKeys
     KeyChangesTable       tables.KeyChanges
     StaleDeviceListsTable tables.StaleDeviceLists
@@ -937,6 +938,22 @@ func (d *KeyDatabase) OneTimeKeysCount(ctx context.Context, userID, deviceID str
     return d.OneTimeKeysTable.CountOneTimeKeys(ctx, userID, deviceID)
 }

+func (d *KeyDatabase) StoreFallbackKeys(ctx context.Context, keys api.FallbackKeys) (unused []string, err error) {
+    _ = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
+        unused, err = d.FallbackKeysTable.InsertFallbackKeys(ctx, txn, keys)
+        return err
+    })
+    return
+}
+
+func (d *KeyDatabase) DeleteFallbackKeys(ctx context.Context, userID, deviceID string) error {
+    return d.FallbackKeysTable.DeleteFallbackKeys(ctx, nil, userID, deviceID)
+}
+
+func (d *KeyDatabase) UnusedFallbackKeyAlgorithms(ctx context.Context, userID, deviceID string) ([]string, error) {
+    return d.FallbackKeysTable.SelectUnusedFallbackKeyAlgorithms(ctx, userID, deviceID)
+}
+
 func (d *KeyDatabase) DeviceKeysJSON(ctx context.Context, keys []api.DeviceMessage) error {
     return d.DeviceKeysTable.SelectDeviceKeysJSON(ctx, keys)
 }
@@ -999,6 +1016,12 @@ func (d *KeyDatabase) ClaimKeys(ctx context.Context, userToDeviceToAlgorithm map
             if err != nil {
                 return err
             }
+            if len(keyJSON) == 0 {
+                keyJSON, err = d.FallbackKeysTable.SelectAndUpdateFallbackKey(ctx, txn, userID, deviceID, algo)
+                if err != nil {
+                    return err
+                }
+            }
             if keyJSON != nil {
                 result = append(result, api.OneTimeKeys{
                     UserID: userID,
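The ClaimKeys change gives one-time keys priority: the fallback key is only consulted when the one-time-key pool for that algorithm is empty, and it is marked used rather than deleted, so it can keep serving claims until the device uploads a replacement. A minimal standalone sketch of that precedence, with hypothetical types; it is not the Dendrite implementation, which goes through the two tables shown above:

package main

import "fmt"

type keyStore struct {
    oneTimeKeys []string // each claimable exactly once, oldest first
    fallbackKey string   // claimable repeatedly until replaced
}

// claim prefers a one-time key and removes it from the pool; otherwise it
// hands out the fallback key without removing it.
func (s *keyStore) claim() (key string, fromFallback bool, ok bool) {
    if len(s.oneTimeKeys) > 0 {
        key = s.oneTimeKeys[0]
        s.oneTimeKeys = s.oneTimeKeys[1:]
        return key, false, true
    }
    if s.fallbackKey != "" {
        return s.fallbackKey, true, true
    }
    return "", false, false
}

func main() {
    s := &keyStore{oneTimeKeys: []string{"otk1"}, fallbackKey: "fallback1"}
    for i := 0; i < 3; i++ {
        key, fromFallback, ok := s.claim()
        fmt.Println(key, fromFallback, ok)
    }
    // Output:
    // otk1 false true
    // fallback1 true true
    // fallback1 true true
}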
userapi/storage/sqlite3/fallback_keys_table.go (new file, 132 lines)

@@ -0,0 +1,132 @@
// Copyright 2024 New Vector Ltd.
// Copyright 2017 Vector Creations Ltd
//
// SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
// Please see LICENSE files in the repository root for full details.

package sqlite3

import (
    "context"
    "database/sql"
    "encoding/json"
    "time"

    "github.com/element-hq/dendrite/internal"
    "github.com/element-hq/dendrite/internal/sqlutil"
    "github.com/element-hq/dendrite/userapi/api"
    "github.com/element-hq/dendrite/userapi/storage/tables"
)

var fallbackKeysSchema = `
-- Stores one-time public keys for users
CREATE TABLE IF NOT EXISTS keyserver_fallback_keys (
    user_id TEXT NOT NULL,
    device_id TEXT NOT NULL,
    key_id TEXT NOT NULL,
    algorithm TEXT NOT NULL,
    ts_added_secs BIGINT NOT NULL,
    key_json TEXT NOT NULL,
    used BOOLEAN NOT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS keyserver_fallback_keys_unique_idx ON keyserver_fallback_keys(user_id, device_id, algorithm);
CREATE INDEX IF NOT EXISTS keyserver_fallback_keys_idx ON keyserver_fallback_keys (user_id, device_id);
`

const upsertFallbackKeysSQL = "" +
    "INSERT INTO keyserver_fallback_keys (user_id, device_id, key_id, algorithm, ts_added_secs, key_json, used)" +
    " VALUES ($1, $2, $3, $4, $5, $6, false)" +
    " ON CONFLICT (user_id, device_id, algorithm)" +
    " DO UPDATE SET key_id = $3, key_json = $6, used = false"

const selectFallbackUnusedAlgorithmsSQL = "" +
    "SELECT algorithm FROM keyserver_fallback_keys WHERE user_id = $1 AND device_id = $2 AND used = false"

const selectFallbackKeysByAlgorithmSQL = "" +
    "SELECT key_id, key_json FROM keyserver_fallback_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 ORDER BY used ASC LIMIT 1"

const deleteFallbackKeysSQL = "" +
    "DELETE FROM keyserver_fallback_keys WHERE user_id = $1 AND device_id = $2"

const updateFallbackKeyUsedSQL = "" +
    "UPDATE keyserver_fallback_keys SET used=true WHERE user_id = $1 AND device_id = $2 AND key_id = $3 AND algorithm = $4"

type fallbackKeysStatements struct {
    db                         *sql.DB
    upsertKeysStmt             *sql.Stmt
    selectUnusedAlgorithmsStmt *sql.Stmt
    selectKeyByAlgorithmStmt   *sql.Stmt
    deleteFallbackKeysStmt     *sql.Stmt
    updateFallbackKeyUsedStmt  *sql.Stmt
}

func NewSqliteFallbackKeysTable(db *sql.DB) (tables.FallbackKeys, error) {
    s := &fallbackKeysStatements{
        db: db,
    }
    _, err := db.Exec(fallbackKeysSchema)
    if err != nil {
        return nil, err
    }
    return s, sqlutil.StatementList{
        {&s.upsertKeysStmt, upsertFallbackKeysSQL},
        {&s.selectUnusedAlgorithmsStmt, selectFallbackUnusedAlgorithmsSQL},
        {&s.selectKeyByAlgorithmStmt, selectFallbackKeysByAlgorithmSQL},
        {&s.deleteFallbackKeysStmt, deleteFallbackKeysSQL},
        {&s.updateFallbackKeyUsedStmt, updateFallbackKeyUsedSQL},
    }.Prepare(db)
}

func (s *fallbackKeysStatements) SelectUnusedFallbackKeyAlgorithms(ctx context.Context, userID, deviceID string) ([]string, error) {
    rows, err := s.selectUnusedAlgorithmsStmt.QueryContext(ctx, userID, deviceID)
    if err != nil {
        return nil, err
    }
    defer internal.CloseAndLogIfError(ctx, rows, "selectKeysCountStmt: rows.close() failed")
    algos := []string{}
    for rows.Next() {
        var algorithm string
        if err = rows.Scan(&algorithm); err != nil {
            return nil, err
        }
        algos = append(algos, algorithm)
    }
    return algos, rows.Err()
}

func (s *fallbackKeysStatements) InsertFallbackKeys(ctx context.Context, txn *sql.Tx, keys api.FallbackKeys) ([]string, error) {
    now := time.Now().Unix()
    for keyIDWithAlgo, keyJSON := range keys.KeyJSON {
        algo, keyID := keys.Split(keyIDWithAlgo)
        _, err := sqlutil.TxStmt(txn, s.upsertKeysStmt).ExecContext(
            ctx, keys.UserID, keys.DeviceID, keyID, algo, now, string(keyJSON),
        )
        if err != nil {
            return nil, err
        }
    }
    return s.SelectUnusedFallbackKeyAlgorithms(ctx, keys.UserID, keys.DeviceID)
}

func (s *fallbackKeysStatements) DeleteFallbackKeys(ctx context.Context, txn *sql.Tx, userID, deviceID string) error {
    _, err := sqlutil.TxStmt(txn, s.deleteFallbackKeysStmt).ExecContext(ctx, userID, deviceID)
    return err
}

func (s *fallbackKeysStatements) SelectAndUpdateFallbackKey(
    ctx context.Context, txn *sql.Tx, userID, deviceID, algorithm string,
) (map[string]json.RawMessage, error) {
    var keyID string
    var keyJSON string
    err := sqlutil.TxStmtContext(ctx, txn, s.selectKeyByAlgorithmStmt).QueryRowContext(ctx, userID, deviceID, algorithm).Scan(&keyID, &keyJSON)
    if err != nil {
        if err == sql.ErrNoRows {
            return nil, nil
        }
        return nil, err
    }
    _, err = sqlutil.TxStmtContext(ctx, txn, s.updateFallbackKeyUsedStmt).ExecContext(ctx, userID, deviceID, algorithm, keyID)
    return map[string]json.RawMessage{
        algorithm + ":" + keyID: json.RawMessage(keyJSON),
    }, err
}
@@ -52,7 +52,7 @@ const deleteOneTimeKeySQL = "" +
     "DELETE FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 AND key_id = $4"

 const selectKeyByAlgorithmSQL = "" +
-    "SELECT key_id, key_json FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 LIMIT 1"
+    "SELECT key_id, key_json FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2 AND algorithm = $3 ORDER BY ts_added_secs ASC LIMIT 1"

 const deleteOneTimeKeysSQL = "" +
     "DELETE FROM keyserver_one_time_keys WHERE user_id = $1 AND device_id = $2"
@@ -138,6 +138,10 @@ func NewKeyDatabase(conMan *sqlutil.Connections, dbProperties *config.DatabaseOp
     if err != nil {
         return nil, err
     }
+    fk, err := NewSqliteFallbackKeysTable(db)
+    if err != nil {
+        return nil, err
+    }
     dk, err := NewSqliteDeviceKeysTable(db)
     if err != nil {
         return nil, err
@@ -161,6 +165,7 @@ func NewKeyDatabase(conMan *sqlutil.Connections, dbProperties *config.DatabaseOp

     return &shared.KeyDatabase{
         OneTimeKeysTable:      otk,
+        FallbackKeysTable:     fk,
         DeviceKeysTable:       dk,
         KeyChangesTable:       kc,
         StaleDeviceListsTable: sdl,
@@ -809,3 +809,42 @@ func TestOneTimeKeys(t *testing.T) {
         }
     })
 }
+
+func TestFallbackKeys(t *testing.T) {
+    test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
+        db, clean := mustCreateKeyDatabase(t, dbType)
+        defer clean()
+        userID := "@alice:localhost"
+        deviceID := "alice_device"
+        fk := api.FallbackKeys{
+            UserID:   userID,
+            DeviceID: deviceID,
+            KeyJSON:  map[string]json.RawMessage{"curve25519:KEY1": []byte(`{"key":"v1"}`)},
+        }
+
+        _, err := db.StoreFallbackKeys(ctx, fk)
+        MustNotError(t, err)
+
+        unused, err := db.UnusedFallbackKeyAlgorithms(ctx, userID, deviceID)
+        MustNotError(t, err)
+        if c := len(unused); c != 1 {
+            t.Fatalf("Expected 1 unused key algorithm, got %d", c)
+        }
+        if unused[0] != "curve25519" {
+            t.Fatalf("Expected unused key algorithm to be 'curve25519', got '%s'", unused[0])
+        }
+
+        // No other one-time keys have been uploaded so we expect to get the fallback key instead.
+        claimed, err := db.ClaimKeys(ctx, map[string]map[string]string{userID: {deviceID: "curve25519"}})
+        MustNotError(t, err)
+
+        switch {
+        case claimed[0].UserID != fk.UserID:
+            t.Fatalf("Claimed user ID ID doesn't match, got %q, want %q", claimed[0].UserID, fk.DeviceID)
+        case claimed[0].DeviceID != fk.DeviceID:
+            t.Fatalf("Claimed device ID doesn't match, got %q, want %q", claimed[0].DeviceID, fk.DeviceID)
+        case claimed[0].KeyJSON["curve25519:KEY1"] == nil:
+            t.Fatalf("Claimed key JSON for curve25519:KEY1 not found")
+        }
+    })
+}
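A possible follow-up test, sketched against the helpers visible in the test above (ctx, mustCreateKeyDatabase, MustNotError, test.WithAllDatabases and the api types); it is not part of this change. It checks that the same fallback key keeps being returned by repeated claims, and that uploading a replacement clobbers it so later claims hand out the new key.

func TestFallbackKeyReuseSketch(t *testing.T) {
    test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
        db, clean := mustCreateKeyDatabase(t, dbType)
        defer clean()
        userID := "@bob:localhost"
        deviceID := "bob_device"

        upload := func(keyID string) {
            _, err := db.StoreFallbackKeys(ctx, api.FallbackKeys{
                UserID:   userID,
                DeviceID: deviceID,
                KeyJSON:  map[string]json.RawMessage{"curve25519:" + keyID: []byte(`{"key":"v"}`)},
            })
            MustNotError(t, err)
        }
        claim := func() api.OneTimeKeys {
            claimed, err := db.ClaimKeys(ctx, map[string]map[string]string{userID: {deviceID: "curve25519"}})
            MustNotError(t, err)
            if len(claimed) != 1 {
                t.Fatalf("expected exactly one claimed key, got %d", len(claimed))
            }
            return claimed[0]
        }

        upload("KEY1")
        // With no one-time keys uploaded, every claim falls through to the
        // fallback key, which is not deleted on use.
        for i := 0; i < 2; i++ {
            if claim().KeyJSON["curve25519:KEY1"] == nil {
                t.Fatalf("claim %d: expected fallback key KEY1", i)
            }
        }

        // Uploading a replacement clobbers the stored fallback key, so
        // subsequent claims hand out the new one.
        upload("KEY2")
        if claim().KeyJSON["curve25519:KEY2"] == nil {
            t.Fatalf("expected fallback key KEY2 after re-upload")
        }
    })
}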
@@ -170,6 +170,13 @@ type DeviceKeys interface {
     DeleteAllDeviceKeys(ctx context.Context, txn *sql.Tx, userID string) error
 }

+type FallbackKeys interface {
+    SelectUnusedFallbackKeyAlgorithms(ctx context.Context, userID, deviceID string) ([]string, error)
+    InsertFallbackKeys(ctx context.Context, txn *sql.Tx, keys api.FallbackKeys) ([]string, error)
+    DeleteFallbackKeys(ctx context.Context, txn *sql.Tx, userID, deviceID string) error
+    SelectAndUpdateFallbackKey(ctx context.Context, txn *sql.Tx, userID, deviceID, algorithm string) (map[string]json.RawMessage, error)
+}
+
 type KeyChanges interface {
     InsertKeyChange(ctx context.Context, userID string) (int64, error)
     // SelectKeyChanges returns the set (de-duplicated) of users who have changed their keys between the two offsets.