Compare commits
64 commits
renovate/n
...
main
Author | SHA1 | Date
---|---|---
| e3ab1aa2b4 |
| 0d482532ec |
| e98a9b1a50 |
| 1ee4292d92 |
| 23a088ce4c |
| e8277b9860 |
| 48b5690269 |
| 94bd090bcd |
| decb20dab6 |
| d9e392cba9 |
| 841a690499 |
| 0242447c99 |
| c1b951135a |
| 677c1ac165 |
| 33ad79e646 |
| b4fd4e7d68 |
| e053d68597 |
| 33ebe1a0b2 |
| a86ed9b3c3 |
| 2ab069fa3e |
| 9126c56449 |
| a011ba5dca |
| e2f58b8aaf |
| d0276233a6 |
| 7ec613d27f |
| 89b659c41b |
| 07c08e1eef |
| c37c22eb4f |
| 0163a18f4d |
| c94e0e0163 |
| 86f1a88ba9 |
| 5bf9d9bffc |
| 8239f04ca2 |
| d22e1e81b1 |
| 91fd97d189 |
| 75f57f954e |
| ff115f79aa |
| 69dcc3e1af |
| 62bf2c6729 |
| bb536a1ce7 |
| 18aadbbc76 |
| 70289eb3f8 |
| 8dc2fa4c79 |
| c34a561992 |
| 890f74d0ae |
| a5ffaac57c |
| 9b6236d614 |
| df0a046b2b |
| de0d93c40b |
| 828da0f181 |
| 7e4086e434 |
| 879b4ba285 |
| f5177175db |
| 990ac0bc5f |
| e87df865af |
| c0be054d74 |
| f2d1506438 |
| b76ff4d7d4 |
| e378209fbb |
| 49947ffe86 |
| 729542fa95 |
| acd7c766b5 |
| 01c3e803dc |
| db5a5bd72d |
56 changed files with 502 additions and 983 deletions
22  .forgejo/workflows/build-deployer.yaml  Normal file
@ -0,0 +1,22 @@
on:
  push:
    paths:
      - containers/deployer/**
      - .forgejo/workflows/build-deployer.yaml
jobs:
  build-deployer:
    runs-on: docker
    container:
      image: library/docker:dind
    steps:
      - run: apk add --no-cache nodejs git
      - name: login to container registry
        run: echo "${{ secrets.DEPLOY_TOKEN }}" | docker login --username ${{ secrets.DEPLOY_USER }} --password-stdin git.janky.solutions
      - name: build container image
        uses: docker/build-push-action@v6
        with:
          file: Containerfile
          context: "{{defaultContext}}:containers/deployer"
          tags: git.janky.solutions/jankysolutions/infra/deployer:latest
          platforms: linux/amd64
          push: ${{ github.ref == 'refs/heads/main' }}
@ -18,4 +18,4 @@ jobs:
          context: FreeTakServer
          tags: git.janky.solutions/jankysolutions/infra/freetakserver:latest
          platforms: linux/amd64
-         push: true
+         push: ${{ github.ref == 'refs/heads/main' }}
@ -19,4 +19,4 @@ jobs:
          context: "{{defaultContext}}:containers/keycloak"
          tags: git.janky.solutions/jankysolutions/infra/keycloak:latest
          platforms: linux/amd64
-         push: true
+         push: ${{ github.ref == 'refs/heads/main' }}
@ -19,4 +19,4 @@ jobs:
          context: "{{defaultContext}}:containers/openbao-csi-provider"
          tags: git.janky.solutions/jankysolutions/infra/openbao-csi-provider:latest
          platforms: linux/amd64
-         push: true
+         push: ${{ github.ref == 'refs/heads/main' }}
@ -19,4 +19,4 @@ jobs:
          context: "{{defaultContext}}:containers/openbao"
          tags: git.janky.solutions/jankysolutions/infra/openbao:latest
          platforms: linux/amd64
-         push: true
+         push: ${{ github.ref == 'refs/heads/main' }}
@ -18,4 +18,4 @@ jobs:
          context: pethublocal
          tags: git.janky.solutions/jankysolutions/infra/pethublocal:latest
          platforms: linux/amd64
-         push: true
+         push: ${{ github.ref == 'refs/heads/main' }}
@ -18,4 +18,4 @@ jobs:
          file: containers/synapse/Containerfile
          tags: git.janky.solutions/jankysolutions/infra/synapse:latest
          platforms: linux/amd64
-         push: true
+         push: ${{ github.ref == 'refs/heads/main' }}
@ -19,4 +19,4 @@ jobs:
          context: "{{defaultContext}}:containers/traefik-forward-auth"
          tags: git.janky.solutions/jankysolutions/infra/traefik-forward-auth:latest
          platforms: linux/amd64
-         push: true
+         push: ${{ github.ref == 'refs/heads/main' }}
37  .forgejo/workflows/k8s-diff-and-deploy.yaml  Normal file
@ -0,0 +1,37 @@
on:
  push:
    paths:
      - k8s/**
      - .forgejo/workflows/k8s-diff-and-deploy.yaml
jobs:
  diff-and-deploy:
    runs-on: ubuntu-latest
    container:
      image: git.janky.solutions/jankysolutions/infra/deployer:latest
    steps:
      - uses: actions/checkout@v4
      - name: kubectl diff and deploy
        run: |
          set -euo pipefail
          echo "${{ secrets.KUBERNETES_CLIENT_CONFIG }}" > ~/.kube/config

          for component in k8s/*; do
            if [ ! -d "${component}" ]; then
              continue
            fi

            touch "${component}/secrets.yaml"

            echo "👀 $ kubectl diff -k ${component}"
            kubectl diff -k "${component}" || echo

            if [[ "${GITHUB_REF_NAME}" == "main" ]]; then
              echo "🚀 $ kubectl apply -k ${component}"
              if [[ "${component}" == "k8s/operators" ]]; then
                kubectl apply -k "${component}" --server-side
              else
                kubectl apply -k "${component}"
              fi
              echo
            fi
          done
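The job above loops over every kustomize component under k8s/, prints a `kubectl diff` for each one, and only applies on pushes to main (using server-side apply for k8s/operators). A rough local equivalent of that loop, sketched in Python — this script is not part of the repo, and it assumes kubectl and a working ~/.kube/config are already in place:

```python
#!/usr/bin/env python3
# Hypothetical local harness mirroring the workflow's diff/apply loop.
import os
import pathlib
import subprocess

APPLY = os.environ.get("GITHUB_REF_NAME") == "main"  # like the workflow, only apply on main

for component in sorted(pathlib.Path("k8s").iterdir()):
    if not component.is_dir():
        continue

    # the kustomizations reference secrets.yaml, which is not committed
    (component / "secrets.yaml").touch()

    print(f"$ kubectl diff -k {component}")
    subprocess.run(["kubectl", "diff", "-k", str(component)])  # exit code 1 just means there are diffs

    if APPLY:
        cmd = ["kubectl", "apply", "-k", str(component)]
        if component.name == "operators":
            cmd.append("--server-side")  # the workflow special-cases k8s/operators with server-side apply
        print(f"$ {' '.join(cmd)}")
        subprocess.run(cmd, check=True)
```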
3  containers/deployer/Containerfile  Normal file
@ -0,0 +1,3 @@
FROM library/alpine:3.20
RUN apk add --no-cache nodejs git bash helm kubectl
RUN mkdir -p ~/.kube
@ -1,4 +1,6 @@
-FROM matrixdotorg/synapse:v1.117.0
+FROM matrixdotorg/synapse:v1.119.0
 RUN pip install boto3 humanize tqdm
-RUN curl -Lo /usr/local/lib/python3.11/site-packages/s3_storage_provider.py https://github.com/matrix-org/synapse-s3-storage-provider/raw/v1.4.0/s3_storage_provider.py
+# there is probably a better way to figure out where the site packages are
+# this used to be hard-coded to /usr/local/lib/python3.11/site-packages, but then synapse updated its minor Python version and it broke
+RUN curl -Lo $(python -c 'import sys; print([x for x in sys.path if "site-packages" in x][0])')/s3_storage_provider.py https://github.com/matrix-org/synapse-s3-storage-provider/raw/v1.4.0/s3_storage_provider.py
 RUN curl -L https://github.com/matrix-org/synapse-s3-storage-provider/raw/main/scripts/s3_media_upload | sed "s#/usr/bin/env python#/usr/local/bin/python#" > /usr/local/bin/s3_media_upload && chmod +x /usr/local/bin/s3_media_upload
@ -7,5 +7,5 @@ helmCharts:
    enabled: false # default, bitwarden-sdk-server doesn't work with vaultwarden (https://github.com/external-secrets/bitwarden-sdk-server/issues/18)
  namespace: external-secrets
  releaseName: external-secrets
- version: 0.10.4
+ version: 0.10.5
  repo: https://charts.external-secrets.io
@ -26,13 +26,6 @@ monitoring:
      ansible_host: 10.5.1.251
      home_network: true
-
-   matrix.home.finn.io:
-     ansible_host: 10.5.1.34
-     home_network: true
-     logs:
-       jobs:
-         synapse: /var/log/matrix-synapse/homeserver.log

    minio.home.finn.io:
      ansible_host: 10.5.1.250
      home_network: true
@ -55,6 +48,15 @@ monitoring:
        jobs:
          minecraft: /var/minecraft/logs/*.log

+   freepbx:
+     ansible_host: 10.5.1.169
+     home_network: true
+     logs:
+       jobs:
+         apache2: /var/log/apache2/*.log
+         redis: /var/log/redis/*.log
+         asterisk: /var/log/asterisk/*.log
+
  authentik:
    hosts:
      authentik.home.finn.io:
20  k8s/external-account-rbac/infra-deployer.yaml  Normal file
@ -0,0 +1,20 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: infra-deployer
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: infra-deployer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: infra-deployer
subjects:
  - kind: User
    name: infra-deployer
5  k8s/external-account-rbac/kustomization.yaml  Normal file
@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - infra-deployer.yaml
  - matrix-bridge-meshtastic-deployer.yaml
@ -0,0 +1,22 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: matrix-bridge-meshtastic-deployer
  namespace: meshtastic
rules:
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: matrix-bridge-meshtastic-deployer
  namespace: meshtastic
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: matrix-bridge-meshtastic-deployer
subjects:
  - kind: User
    name: matrix-bridge-meshtastic-deployer
62  k8s/forgejo/forgejo-secret-sync.yaml  Normal file
@ -0,0 +1,62 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: forgejo-secret-sync
spec:
  schedule: "0 0 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: secret-sync
              image: library/python:3
              command:
                - bash
                - -c
                - pip install requests && python /code/forgejo-secret-sync.py
              env:
                - name: REPO_MAPPINGS
                  value: |
                    [
                      {"k8s_name": "infra-deployer", "owner": "JankySolutions", "repo": "infra"}
                    ]
              envFrom:
                - secretRef:
                    name: forgejo-secret-sync
              volumeMounts:
                - name: code
                  mountPath: /code
                - name: host-tls
                  mountPath: /var/lib/rancher/k3s/server/tls
          restartPolicy: OnFailure
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: node-role.kubernetes.io/control-plane
                        operator: In
                        values: ["true"]
          volumes:
            - name: code
              configMap:
                name: forgejo-secret-sync
            - name: host-tls
              hostPath:
                path: /var/lib/rancher/k3s/server/tls
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: forgejo-secret-sync
spec:
  secretStoreRef:
    kind: SecretStore
    name: openbao
  target:
    name: forgejo-secret-sync
    creationPolicy: Owner
  dataFrom:
    - extract:
        key: forgejo/default/secret-sync
86  k8s/forgejo/forgejo-secret-sync/forgejo-secret-sync.py  Normal file
@ -0,0 +1,86 @@
#!/usr/bin/env python3
import subprocess
import logging
import base64
import os
import sys
import requests
import json

logging.basicConfig(level=logging.DEBUG)

with open("/var/lib/rancher/k3s/server/tls/server-ca.crt") as f:
    ca = base64.b64encode(f.read().encode()).decode()

forgejo_token = os.getenv("FORGEJO_TOKEN")


def run(cmd: list[str], stdin=None) -> bytes:
    logging.debug("executing %s", cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    out = p.communicate(stdin)
    if p.returncode != 0:
        logging.critical("%s exited with code %s", cmd, p.returncode)
        sys.exit(1)
    return out[0]


def update_cert(k8s_name: str, owner: str, repo: str):
    key = run(["openssl", "genrsa", "4096"])
    req = run(
        ["openssl", "req", "-key", "/dev/stdin", "-new", "-nodes", "-subj", f"/CN={k8s_name}"], stdin=key
    )
    cert = run(
        [
            "openssl",
            "x509",
            "-req",
            "-CA",
            "/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt",
            "-CAkey",
            "/var/lib/rancher/k3s/server/tls/client-ca.key",
            "-CAcreateserial",
            "-days",
            "10",
        ],
        stdin=req,
    )

    keyb64 = base64.b64encode(key).decode()
    certb64 = base64.b64encode(cert).decode()

    kubeconfig = f"""
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {ca}
    server: https://10.5.1.110:6443
  name: default
contexts:
- context:
    cluster: default
    user: default
  name: default
current-context: default
kind: Config
preferences: {"{}"}
users:
- name: default
  user:
    client-certificate-data: {certb64}
    client-key-data: {keyb64}
"""
    logging.info(f"updating secret for {owner}/{repo}")
    requests.put(
        f"https://git.janky.solutions/api/v1/repos/{owner}/{repo}/actions/secrets/KUBERNETES_CLIENT_CONFIG",
        data=json.dumps(
            {"data": kubeconfig},
        ),
        headers={
            "Authorization": f"token {forgejo_token}",
            "Content-Type": "application/json",
        },
    ).raise_for_status()


for entry in json.loads(os.getenv("REPO_MAPPINGS")):
    update_cert(**entry)
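forgejo-secret-sync.py mints a short-lived (10-day) client certificate signed by the k3s client CA, with the CN set to the Kubernetes User that the RBAC manifests above bind, wraps it in a kubeconfig, and PUTs it into the repository's Forgejo Actions secrets. A hypothetical dry-run helper for checking the REPO_MAPPINGS value before letting the CronJob touch the CA or the Forgejo API — the file name and output here are illustrative only, not part of the repo:

```python
#!/usr/bin/env python3
# Hypothetical dry-run helper: validates REPO_MAPPINGS without generating
# certificates or calling the Forgejo API.
import json
import os

mappings = json.loads(os.environ.get("REPO_MAPPINGS", "[]"))
for entry in mappings:
    # the real script passes these straight to update_cert(**entry)
    missing = {"k8s_name", "owner", "repo"} - entry.keys()
    if missing:
        raise SystemExit(f"entry {entry!r} is missing keys: {sorted(missing)}")
    print(f"would issue a cert with CN={entry['k8s_name']} and update "
          f"secret KUBERNETES_CLIENT_CONFIG on {entry['owner']}/{entry['repo']}")
```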
@ -5,8 +5,10 @@ resources:
   - namespace.yaml
   - config.yaml
   - ingress.yaml
+  - forgejo-secret-sync.yaml
   - services.yaml
   - statefulset.yaml
+  - secret-store.yaml
   - secrets.yaml
   - renovatebot.yaml
 configMapGenerator:
@ -16,3 +18,6 @@ configMapGenerator:
   - name: renovate-config
     files:
       - renovate/config.js
+  - name: forgejo-secret-sync
+    files:
+      - forgejo-secret-sync/forgejo-secret-sync.py
@ -11,7 +11,7 @@ spec:
    spec:
      containers:
        - name: renovate
-         image: ghcr.io/renovatebot/renovate:38.124-full
+         image: ghcr.io/renovatebot/renovate:39
          env:
            - name: RENOVATE_CONFIG_FILE
              value: /etc/renovate/config.js
16  k8s/forgejo/secret-store.yaml  Normal file
@ -0,0 +1,16 @@
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
  name: openbao
spec:
  provider:
    vault:
      server: http://openbao.openbao:8200
      path: static-secrets
      version: v2
      auth:
        kubernetes:
          mountPath: kubernetes
          role: kubernetes-default
          serviceAccountRef:
            name: default
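This SecretStore tells external-secrets to log in to OpenBao with the Kubernetes auth method, using the namespace's default ServiceAccount, and to read from the static-secrets KV v2 mount. For reference, a rough sketch of that same login flow using the hvac client — the server URL, role, and mount names come from the manifest above; everything else is illustrative and not something this repo runs:

```python
# Hypothetical sketch of the login the SecretStore performs, using hvac.
import hvac

# In-cluster pods find their ServiceAccount token at this well-known path.
with open("/var/run/secrets/kubernetes.io/serviceaccount/token") as f:
    jwt = f.read()

client = hvac.Client(url="http://openbao.openbao:8200")
client.auth.kubernetes.login(role="kubernetes-default", jwt=jwt, mount_point="kubernetes")

# KV v2 read from the "static-secrets" mount, mirroring the ExternalSecret's dataFrom.extract.
secret = client.secrets.kv.v2.read_secret_version(
    path="forgejo/default/secret-sync", mount_point="static-secrets"
)
print(sorted(secret["data"]["data"].keys()))
```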
@ -14,7 +14,7 @@ spec:
        app: forgejo
    spec:
      containers:
-       - image: codeberg.org/forgejo/forgejo:9.0.0
+       - image: codeberg.org/forgejo/forgejo:9.0.2
          imagePullPolicy: Always
          name: forgejo
          resources: {}
@ -46,8 +46,8 @@ spec:
        groups:
          - count: 1
            usb:
-             - vendor: "1A86"
-               product: "55D4"
+             - vendor: "10C4"
+               product: "EA60"
        name: generic-device-plugin
        resources:
          requests:
@ -30,7 +30,7 @@ spec:
    spec:
      containers:
        - name: mysql
-         image: mysql:8
+         image: docker.io/library/mysql:8.4.3
          envFrom:
            - secretRef:
                name: mysql
@ -6,7 +6,7 @@ spec:
  spiloFSGroup: 103 # https://github.com/zalando/postgres-operator/issues/988
  teamId: keycloak
  volume:
-   size: 10Gi
+   size: 20Gi
  numberOfInstances: 2
  users:
    superuser:
@ -1,17 +0,0 @@
-id: telegram
-as_token: SECRET_TELEGRAM_AS_TOKEN
-hs_token: SECRET_TELEGRAM_HS_TOKEN
-namespaces:
-  users:
-    - exclusive: true
-      regex: '@telegram_.*:janky\.solutions'
-    - exclusive: true
-      regex: '@telegrambot:janky\.solutions'
-  aliases:
-    - exclusive: true
-      regex: \#telegram_.*:janky\.solutions
-url: http://bridge-telegram:29317
-sender_localpart: SECRET_TELEGRAM_SENDER_LOCALPART
-rate_limited: false
-de.sorunome.msc2409.push_ephemeral: true
-push_ephemeral: true
@ -60,7 +60,7 @@ spec:
            - secretRef:
                name: bridge-facebook
      containers:
-       - image: dock.mau.dev/mautrix/meta:v0.4.1
+       - image: dock.mau.dev/mautrix/meta:v0.4.2
          name: bridge-facebook
          resources: {}
          command: ["/usr/bin/mautrix-meta", "-c", "/data/config.yaml", "--no-update"]
@ -70,8 +70,6 @@ spec:
          volumeMounts:
            - name: storage
              mountPath: /data
-           - name: config
-             mountPath: /config
      volumes:
        - name: config
          configMap:
@ -170,8 +168,8 @@ data:
        async_transactions: false

        # Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
-       as_token: "AS_TOKEN"
-       hs_token: "HS_TOKEN"
+       as_token: "SECRET_FACEBOOK_AS_TOKEN"
+       hs_token: "SECRET_FACEBOOK_HS_TOKEN"

        meta:
        # Which service is this bridge for? Available options:
@ -57,7 +57,7 @@ spec:
            - secretRef:
                name: bridge-signal
      containers:
-       - image: dock.mau.dev/mautrix/signal:v0.7.2
+       - image: dock.mau.dev/mautrix/signal:v0.7.3
          name: bridge-signal
          resources: {}
          ports:
@ -162,8 +162,8 @@ data:
        async_transactions: false

        # Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
-       as_token: "AS_TOKEN"
-       hs_token: "HS_TOKEN"
+       as_token: "SECRET_SIGNAL_AS_TOKEN"
+       hs_token: "SECRET_SIGNAL_HS_TOKEN"

        # Prometheus config.
        metrics:
@ -1,752 +0,0 @@
|
|||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: bridge-telegram
|
||||
namespace: matrix
|
||||
spec:
|
||||
rules:
|
||||
- host: bridge-telegram.matrix.k8s
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: bridge-telegram
|
||||
port:
|
||||
name: bridge
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: bridge-telegram-public
|
||||
namespace: matrix
|
||||
spec:
|
||||
rules:
|
||||
- host: telegram-bridge.k8s.home.finn.io
|
||||
http:
|
||||
paths:
|
||||
- path: /public
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: bridge-telegram
|
||||
port:
|
||||
name: bridge
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bridge-telegram
|
||||
namespace: matrix
|
||||
spec:
|
||||
publishNotReadyAddresses: true
|
||||
ports:
|
||||
- name: bridge
|
||||
port: 29317
|
||||
selector:
|
||||
app: bridge-telegram
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: bridge-telegram
|
||||
namespace: matrix
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: bridge-telegram
|
||||
serviceName: bridge-telegram
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: bridge-telegram
|
||||
spec:
|
||||
initContainers:
|
||||
- name: initialize-secrets
|
||||
image: docker.io/library/python
|
||||
command: ["python", "/init/initialize-secrets.py", "config.yaml"]
|
||||
volumeMounts:
|
||||
- name: init
|
||||
mountPath: /init
|
||||
- name: storage
|
||||
mountPath: /data
|
||||
- name: config
|
||||
mountPath: /config
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: bridge-telegram
|
||||
containers:
|
||||
- image: dock.mau.dev/mautrix/telegram:v0.15.2
|
||||
name: bridge-telegram
|
||||
resources: {}
|
||||
command: ["python3", "-m", "mautrix_telegram", "-c", "/data/config.yaml"]
|
||||
ports:
|
||||
- name: bridge
|
||||
containerPort: 29317
|
||||
volumeMounts:
|
||||
- name: storage
|
||||
mountPath: /data
|
||||
- name: config
|
||||
mountPath: /config
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: bridge-telegram
|
||||
- name: init
|
||||
configMap:
|
||||
name: secrets-init
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: storage
|
||||
spec:
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: bridge-telegram
|
||||
namespace: matrix
|
||||
data:
|
||||
config.yaml: |
|
||||
# Homeserver details
|
||||
homeserver:
|
||||
# The address that this appservice can use to connect to the homeserver.
|
||||
address: https://matrix.janky.solutions
|
||||
# The domain of the homeserver (for MXIDs, etc).
|
||||
domain: janky.solutions
|
||||
# Whether or not to verify the SSL certificate of the homeserver.
|
||||
# Only applies if address starts with https://
|
||||
verify_ssl: true
|
||||
# What software is the homeserver running?
|
||||
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
|
||||
software: standard
|
||||
# Number of retries for all HTTP requests if the homeserver isn't reachable.
|
||||
http_retry_count: 4
|
||||
# The URL to push real-time bridge status to.
|
||||
# If set, the bridge will make POST requests to this URL whenever a user's Telegram connection state changes.
|
||||
# The bridge will use the appservice as_token to authorize requests.
|
||||
status_endpoint: null
|
||||
# Endpoint for reporting per-message status.
|
||||
message_send_checkpoint_endpoint: null
|
||||
# Whether asynchronous uploads via MSC2246 should be enabled for media.
|
||||
# Requires a media repo that supports MSC2246.
|
||||
async_media: false
|
||||
# Application service host/registration related details
|
||||
# Changing these values requires regeneration of the registration.
|
||||
appservice:
|
||||
# The address that the homeserver can use to connect to this appservice.
|
||||
address: http://bridge-telegram.matrix.k8s:80
|
||||
# When using https:// the TLS certificate and key files for the address.
|
||||
tls_cert: false
|
||||
tls_key: false
|
||||
# The hostname and port where this appservice should listen.
|
||||
hostname: 0.0.0.0
|
||||
port: 29317
|
||||
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
|
||||
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
|
||||
max_body_size: 1
|
||||
# The full URI to the database. SQLite and Postgres are supported.
|
||||
# Format examples:
|
||||
# SQLite: sqlite:filename.db
|
||||
# Postgres: postgres://username:password@hostname/dbname
|
||||
database: sqlite:/data/telegram.db
|
||||
# Additional arguments for asyncpg.create_pool() or sqlite3.connect()
|
||||
# https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.pool.create_pool
|
||||
# https://docs.python.org/3/library/sqlite3.html#sqlite3.connect
|
||||
# For sqlite, min_size is used as the connection thread pool size and max_size is ignored.
|
||||
# Additionally, SQLite supports init_commands as an array of SQL queries to run on connect (e.g. to set PRAGMAs).
|
||||
database_opts:
|
||||
min_size: 1
|
||||
max_size: 10
|
||||
# Public part of web server for out-of-Matrix interaction with the bridge.
|
||||
# Used for things like login if the user wants to make sure the 2FA password isn't stored in
|
||||
# the HS database.
|
||||
public:
|
||||
# Whether or not the public-facing endpoints should be enabled.
|
||||
enabled: true
|
||||
# The prefix to use in the public-facing endpoints.
|
||||
prefix: /public
|
||||
# The base URL where the public-facing endpoints are available. The prefix is not added
|
||||
# implicitly.
|
||||
external: https://telegram-bridge.k8s.home.finn.io/public
|
||||
# Provisioning API part of the web server for automated portal creation and fetching information.
|
||||
# Used by things like mautrix-manager (https://github.com/tulir/mautrix-manager).
|
||||
provisioning:
|
||||
# Whether or not the provisioning API should be enabled.
|
||||
enabled: true
|
||||
# The prefix to use in the provisioning API endpoints.
|
||||
prefix: /_matrix/provision
|
||||
# The shared secret to authorize users of the API.
|
||||
# Set to "generate" to generate and save a new token.
|
||||
shared_secret: generate
|
||||
# The unique ID of this appservice.
|
||||
id: telegram
|
||||
# Username of the appservice bot.
|
||||
bot_username: telegrambot
|
||||
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
|
||||
# to leave display name/avatar as-is.
|
||||
bot_displayname: Telegram bridge bot
|
||||
bot_avatar: mxc://maunium.net/tJCRmUyJDsgRNgqhOgoiHWbX
|
||||
# Whether or not to receive ephemeral events via appservice transactions.
|
||||
# Requires MSC2409 support (i.e. Synapse 1.22+).
|
||||
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
|
||||
ephemeral_events: true
|
||||
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
|
||||
as_token: "AS_TOKEN"
|
||||
hs_token: "HS_TOKEN"
|
||||
# Prometheus telemetry config. Requires prometheus-client to be installed.
|
||||
metrics:
|
||||
enabled: true
|
||||
listen_port: 8000
|
||||
# Manhole config.
|
||||
manhole:
|
||||
# Whether or not opening the manhole is allowed.
|
||||
enabled: false
|
||||
# The path for the unix socket.
|
||||
path: /var/tmp/mautrix-telegram.manhole
|
||||
# The list of UIDs who can be added to the whitelist.
|
||||
# If empty, any UIDs can be specified in the open-manhole command.
|
||||
whitelist:
|
||||
- 0
|
||||
# Bridge config
|
||||
bridge:
|
||||
# Localpart template of MXIDs for Telegram users.
|
||||
# {userid} is replaced with the user ID of the Telegram user.
|
||||
username_template: "telegram_{userid}"
|
||||
# Localpart template of room aliases for Telegram portal rooms.
|
||||
# {groupname} is replaced with the name part of the public channel/group invite link ( https://t.me/{} )
|
||||
alias_template: "telegram_{groupname}"
|
||||
# Displayname template for Telegram users.
|
||||
# {displayname} is replaced with the display name of the Telegram user.
|
||||
displayname_template: "{displayname} (Telegram)"
|
||||
# Set the preferred order of user identifiers which to use in the Matrix puppet display name.
|
||||
# In the (hopefully unlikely) scenario that none of the given keys are found, the numeric user
|
||||
# ID is used.
|
||||
#
|
||||
# If the bridge is working properly, a phone number or an username should always be known, but
|
||||
# the other one can very well be empty.
|
||||
#
|
||||
# Valid keys:
|
||||
# "full name" (First and/or last name)
|
||||
# "full name reversed" (Last and/or first name)
|
||||
# "first name"
|
||||
# "last name"
|
||||
# "username"
|
||||
# "phone number"
|
||||
displayname_preference:
|
||||
- full name
|
||||
- username
|
||||
- phone number
|
||||
# Maximum length of displayname
|
||||
displayname_max_length: 100
|
||||
# Remove avatars from Telegram ghost users when removed on Telegram. This is disabled by default
|
||||
# as there's no way to determine whether an avatar is removed or just hidden from some users. If
|
||||
# you're on a single-user instance, this should be safe to enable.
|
||||
allow_avatar_remove: false
|
||||
# Should contact names and profile pictures be allowed?
|
||||
# This is only safe to enable on single-user instances.
|
||||
allow_contact_info: false
|
||||
# Maximum number of members to sync per portal when starting up. Other members will be
|
||||
# synced when they send messages. The maximum is 10000, after which the Telegram server
|
||||
# will not send any more members.
|
||||
# -1 means no limit (which means it's limited to 10000 by the server)
|
||||
max_initial_member_sync: 100
|
||||
# Maximum number of participants in chats to bridge. Only applies when the portal is being created.
|
||||
# If there are more members when trying to create a room, the room creation will be cancelled.
|
||||
# -1 means no limit (which means all chats can be bridged)
|
||||
max_member_count: -1
|
||||
# Whether or not to sync the member list in channels.
|
||||
# If no channel admins have logged into the bridge, the bridge won't be able to sync the member
|
||||
# list regardless of this setting.
|
||||
sync_channel_members: false
|
||||
# Whether or not to skip deleted members when syncing members.
|
||||
skip_deleted_members: true
|
||||
# Whether or not to automatically synchronize contacts and chats of Matrix users logged into
|
||||
# their Telegram account at startup.
|
||||
startup_sync: false
|
||||
# Number of most recently active dialogs to check when syncing chats.
|
||||
# Set to 0 to remove limit.
|
||||
sync_update_limit: 0
|
||||
# Number of most recently active dialogs to create portals for when syncing chats.
|
||||
# Set to 0 to remove limit.
|
||||
sync_create_limit: 15
|
||||
# Should all chats be scheduled to be created later?
|
||||
# This is best used in combination with MSC2716 infinite backfill.
|
||||
sync_deferred_create_all: false
|
||||
# Whether or not to sync and create portals for direct chats at startup.
|
||||
sync_direct_chats: false
|
||||
# The maximum number of simultaneous Telegram deletions to handle.
|
||||
# A large number of simultaneous redactions could put strain on your homeserver.
|
||||
max_telegram_delete: 10
|
||||
# Whether or not to automatically sync the Matrix room state (mostly unpuppeted displaynames)
|
||||
# at startup and when creating a bridge.
|
||||
sync_matrix_state: true
|
||||
# Allow logging in within Matrix. If false, users can only log in using login-qr or the
|
||||
# out-of-Matrix login website (see appservice.public config section)
|
||||
allow_matrix_login: true
|
||||
# Whether or not to make portals of publicly joinable channels/supergroups publicly joinable on Matrix.
|
||||
public_portals: false
|
||||
# Whether or not to use /sync to get presence, read receipts and typing notifications
|
||||
# when double puppeting is enabled
|
||||
sync_with_custom_puppets: false
|
||||
# Whether or not to update the m.direct account data event when double puppeting is enabled.
|
||||
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
|
||||
# and is therefore prone to race conditions.
|
||||
sync_direct_chat_list: false
|
||||
# Servers to always allow double puppeting from
|
||||
double_puppet_server_map:
|
||||
example.com: https://example.com
|
||||
# Allow using double puppeting from any server with a valid client .well-known file.
|
||||
double_puppet_allow_discovery: false
|
||||
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
|
||||
#
|
||||
# If set, custom puppets will be enabled automatically for local users
|
||||
# instead of users having to find an access token and run `login-matrix`
|
||||
# manually.
|
||||
# If using this for other servers than the bridge's server,
|
||||
# you must also set the URL in the double_puppet_server_map.
|
||||
login_shared_secret_map:
|
||||
example.com: foobar
|
||||
# Set to false to disable link previews in messages sent to Telegram.
|
||||
telegram_link_preview: true
|
||||
# Whether or not the !tg join command should do a HTTP request
|
||||
# to resolve redirects in invite links.
|
||||
invite_link_resolve: false
|
||||
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
|
||||
# This is currently not supported in most clients.
|
||||
caption_in_message: false
|
||||
# Maximum size of image in megabytes before sending to Telegram as a document.
|
||||
image_as_file_size: 10
|
||||
# Maximum number of pixels in an image before sending to Telegram as a document. Defaults to 4096x4096 = 16777216.
|
||||
image_as_file_pixels: 16777216
|
||||
# Maximum size of Telegram documents before linking to Telegrm instead of bridge
|
||||
# to Matrix media.
|
||||
document_as_link_size:
|
||||
channel:
|
||||
bot:
|
||||
# Enable experimental parallel file transfer, which makes uploads/downloads much faster by
|
||||
# streaming from/to Matrix and using many connections for Telegram.
|
||||
# Note that generating HQ thumbnails for videos is not possible with streamed transfers.
|
||||
# This option uses internal Telethon implementation details and may break with minor updates.
|
||||
parallel_file_transfer: false
|
||||
# Whether or not created rooms should have federation enabled.
|
||||
# If false, created portal rooms will never be federated.
|
||||
federate_rooms: true
|
||||
# Should the bridge send all unicode reactions as custom emoji reactions to Telegram?
|
||||
# By default, the bridge only uses custom emojis for unicode emojis that aren't allowed in reactions.
|
||||
always_custom_emoji_reaction: false
|
||||
# Settings for converting animated stickers.
|
||||
animated_sticker:
|
||||
# Format to which animated stickers should be converted.
|
||||
# disable - No conversion, send as-is (gzipped lottie)
|
||||
# png - converts to non-animated png (fastest),
|
||||
# gif - converts to animated gif
|
||||
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
|
||||
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
|
||||
target: gif
|
||||
# Should video stickers be converted to the specified format as well?
|
||||
convert_from_webm: false
|
||||
# Arguments for converter. All converters take width and height.
|
||||
args:
|
||||
width: 256
|
||||
height: 256
|
||||
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
|
||||
# Settings for converting animated emoji.
|
||||
# Same as animated_sticker, but webm is not supported as the target
|
||||
# (because inline images can only contain images, not videos).
|
||||
animated_emoji:
|
||||
target: webp
|
||||
args:
|
||||
width: 64
|
||||
height: 64
|
||||
fps: 25
|
||||
# End-to-bridge encryption support options.
|
||||
#
|
||||
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
|
||||
encryption:
|
||||
# Allow encryption, work in group chat rooms with e2ee enabled
|
||||
allow: false
|
||||
# Default to encryption, force-enable encryption in all portals the bridge creates
|
||||
# This will cause the bridge bot to be in private chats for the encryption to work properly.
|
||||
default: false
|
||||
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
|
||||
appservice: false
|
||||
# Require encryption, drop any unencrypted messages.
|
||||
require: false
|
||||
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
|
||||
# You must use a client that supports requesting keys from other users to use this feature.
|
||||
allow_key_sharing: false
|
||||
# Options for deleting megolm sessions from the bridge.
|
||||
delete_keys:
|
||||
# Beeper-specific: delete outbound sessions when hungryserv confirms
|
||||
# that the user has uploaded the key to key backup.
|
||||
delete_outbound_on_ack: false
|
||||
# Don't store outbound sessions in the inbound table.
|
||||
dont_store_outbound: false
|
||||
# Ratchet megolm sessions forward after decrypting messages.
|
||||
ratchet_on_decrypt: false
|
||||
# Delete fully used keys (index >= max_messages) after decrypting messages.
|
||||
delete_fully_used_on_decrypt: false
|
||||
# Delete previous megolm sessions from same device when receiving a new one.
|
||||
delete_prev_on_new_session: false
|
||||
# Delete megolm sessions received from a device when the device is deleted.
|
||||
delete_on_device_delete: false
|
||||
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
|
||||
periodically_delete_expired: false
|
||||
# Delete inbound megolm sessions that don't have the received_at field used for
|
||||
# automatic ratcheting and expired session deletion. This is meant as a migration
|
||||
# to delete old keys prior to the bridge update.
|
||||
delete_outdated_inbound: false
|
||||
# What level of device verification should be required from users?
|
||||
#
|
||||
# Valid levels:
|
||||
# unverified - Send keys to all device in the room.
|
||||
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
|
||||
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
|
||||
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
|
||||
# Note that creating user signatures from the bridge bot is not currently possible.
|
||||
# verified - Require manual per-device verification
|
||||
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
|
||||
verification_levels:
|
||||
# Minimum level for which the bridge should send keys to when bridging messages from Telegram to Matrix.
|
||||
receive: unverified
|
||||
# Minimum level that the bridge should accept for incoming Matrix messages.
|
||||
send: unverified
|
||||
# Minimum level that the bridge should require for accepting key requests.
|
||||
share: cross-signed-tofu
|
||||
# Options for Megolm room key rotation. These options allow you to
|
||||
# configure the m.room.encryption event content. See:
|
||||
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
|
||||
# more information about that event.
|
||||
rotation:
|
||||
# Enable custom Megolm room key rotation settings. Note that these
|
||||
# settings will only apply to rooms created after this option is
|
||||
# set.
|
||||
enable_custom: false
|
||||
# The maximum number of milliseconds a session should be used
|
||||
# before changing it. The Matrix spec recommends 604800000 (a week)
|
||||
# as the default.
|
||||
milliseconds: 604800000
|
||||
# The maximum number of messages that should be sent with a given a
|
||||
# session before changing it. The Matrix spec recommends 100 as the
|
||||
# default.
|
||||
messages: 100
|
||||
# Disable rotating keys when a user's devices change?
|
||||
# You should not enable this option unless you understand all the implications.
|
||||
disable_device_change_key_rotation: false
|
||||
# Whether to explicitly set the avatar and room name for private chat portal rooms.
|
||||
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
|
||||
# If set to `always`, all DM rooms will have explicit names and avatars set.
|
||||
# If set to `never`, DM rooms will never have names and avatars set.
|
||||
private_chat_portal_meta: default
|
||||
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
|
||||
# but they're being phased out and will be completely removed in the future.
|
||||
disable_reply_fallbacks: false
|
||||
# Should cross-chat replies from Telegram be bridged? Most servers and clients don't support this.
|
||||
cross_room_replies: false
|
||||
# Whether or not the bridge should send a read receipt from the bridge bot when a message has
|
||||
# been sent to Telegram.
|
||||
delivery_receipts: false
|
||||
# Whether or not delivery errors should be reported as messages in the Matrix room.
|
||||
delivery_error_reports: false
|
||||
# Should errors in incoming message handling send a message to the Matrix room?
|
||||
incoming_bridge_error_reports: false
|
||||
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
|
||||
message_status_events: false
|
||||
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
|
||||
# This field will automatically be changed back to false after it,
|
||||
# except if the config file is not writable.
|
||||
resend_bridge_info: false
|
||||
# When using double puppeting, should muted chats be muted in Matrix?
|
||||
mute_bridging: false
|
||||
# When using double puppeting, should pinned chats be moved to a specific tag in Matrix?
|
||||
# The favorites tag is `m.favourite`.
|
||||
pinned_tag: null
|
||||
# Same as above for archived chats, the low priority tag is `m.lowpriority`.
|
||||
archive_tag: null
|
||||
# Whether or not mute status and tags should only be bridged when the portal room is created.
|
||||
tag_only_on_create: true
|
||||
# Should leaving the room on Matrix make the user leave on Telegram?
|
||||
bridge_matrix_leave: true
|
||||
# Should the user be kicked out of all portals when logging out of the bridge?
|
||||
kick_on_logout: true
|
||||
# Should the "* user joined Telegram" notice always be marked as read automatically?
|
||||
always_read_joined_telegram_notice: true
|
||||
# Should the bridge auto-create a group chat on Telegram when a ghost is invited to a room?
|
||||
# Requires the user to have sufficient power level and double puppeting enabled.
|
||||
create_group_on_invite: true
|
||||
# Settings for backfilling messages from Telegram.
|
||||
backfill:
|
||||
# Allow backfilling at all?
|
||||
enable: true
|
||||
# Whether or not to enable backfilling in normal groups.
|
||||
# Normal groups have numerous technical problems in Telegram, and backfilling normal groups
|
||||
# will likely cause problems if there are multiple Matrix users in the group.
|
||||
normal_groups: false
|
||||
# If a backfilled chat is older than this number of hours, mark it as read even if it's unread on Telegram.
|
||||
# Set to -1 to let any chat be unread.
|
||||
unread_hours_threshold: 720
|
||||
# Forward backfilling limits.
|
||||
#
|
||||
# Using a negative initial limit is not recommended, as it would try to backfill everything in a single batch.
|
||||
forward_limits:
|
||||
# Number of messages to backfill immediately after creating a portal.
|
||||
initial:
|
||||
user: 50
|
||||
normal_group: 100
|
||||
supergroup: 10
|
||||
channel: 10
|
||||
# Number of messages to backfill when syncing chats.
|
||||
sync:
|
||||
user: 100
|
||||
normal_group: 100
|
||||
supergroup: 100
|
||||
channel: 100
|
||||
# Timeout for forward backfills in seconds. If you have a high limit, you'll have to increase this too.
|
||||
forward_timeout: 900
|
||||
# Settings for incremental backfill of history. These only apply to Beeper, as upstream abandoned MSC2716.
|
||||
incremental:
|
||||
# Maximum number of messages to backfill per batch.
|
||||
messages_per_batch: 100
|
||||
# The number of seconds to wait after backfilling the batch of messages.
|
||||
post_batch_delay: 20
|
||||
# The maximum number of batches to backfill per portal, split by the chat type.
|
||||
# If set to -1, all messages in the chat will eventually be backfilled.
|
||||
max_batches:
|
||||
# Direct chats
|
||||
user: -1
|
||||
# Normal groups. Note that the normal_groups option above must be enabled
|
||||
# for these to be backfilled.
|
||||
normal_group: -1
|
||||
# Supergroups
|
||||
supergroup: 10
|
||||
# Broadcast channels
|
||||
channel: -1
|
||||
# Overrides for base power levels.
|
||||
initial_power_level_overrides:
|
||||
user: {}
|
||||
group: {}
|
||||
# Whether to bridge Telegram bot messages as m.notices or m.texts.
|
||||
bot_messages_as_notices: true
|
||||
bridge_notices:
|
||||
# Whether or not Matrix bot messages (type m.notice) should be bridged.
|
||||
default: false
|
||||
# List of user IDs for whom the previous flag is flipped.
|
||||
# e.g. if bridge_notices.default is false, notices from other users will not be bridged, but
|
||||
# notices from users listed here will be bridged.
|
||||
exceptions: []
|
||||
# An array of possible values for the $distinguisher variable in message formats.
|
||||
# Each user gets one of the values here, based on a hash of their user ID.
|
||||
# If the array is empty, the $distinguisher variable will also be empty.
|
||||
relay_user_distinguishers: ["\U0001F7E6", "\U0001F7E3", "\U0001F7E9", "⭕️", "\U0001F536", "⬛️", "\U0001F535", "\U0001F7E2"]
|
||||
# The formats to use when sending messages to Telegram via the relay bot.
|
||||
# Text msgtypes (m.text, m.notice and m.emote) support HTML, media msgtypes don't.
|
||||
#
|
||||
# Available variables:
|
||||
# $sender_displayname - The display name of the sender (e.g. Example User)
|
||||
# $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser)
|
||||
# $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com)
|
||||
# $distinguisher - A random string from the options in the relay_user_distinguishers array.
|
||||
# $message - The message content
|
||||
message_formats:
|
||||
m.text: "$distinguisher <b>$sender_displayname</b>: $message"
|
||||
m.notice: "$distinguisher <b>$sender_displayname</b>: $message"
|
||||
m.emote: "* $distinguisher <b>$sender_displayname</b> $message"
|
||||
m.file: "$distinguisher <b>$sender_displayname</b> sent a file: $message"
|
||||
m.image: "$distinguisher <b>$sender_displayname</b> sent an image: $message"
|
||||
m.audio: "$distinguisher <b>$sender_displayname</b> sent an audio file: $message"
|
||||
m.video: "$distinguisher <b>$sender_displayname</b> sent a video: $message"
|
||||
m.location: "$distinguisher <b>$sender_displayname</b> sent a location: $message"
|
||||
# Telegram doesn't have built-in emotes, this field specifies how m.emote's from authenticated
|
||||
# users are sent to telegram. All fields in message_formats are supported. Additionally, the
|
||||
# Telegram user info is available in the following variables:
|
||||
# $displayname - Telegram displayname
|
||||
# $username - Telegram username (may not exist)
|
||||
# $mention - Telegram @username or displayname mention (depending on which exists)
|
||||
emote_format: "* $mention $formatted_body"
|
||||
# The formats to use when sending state events to Telegram via the relay bot.
|
||||
#
|
||||
# Variables from `message_formats` that have the `sender_` prefix are available without the prefix.
|
||||
# In name_change events, `$prev_displayname` is the previous displayname.
|
||||
#
|
||||
# Set format to an empty string to disable the messages for that event.
|
||||
state_event_formats:
|
||||
join: "$distinguisher <b>$displayname</b> joined the room."
|
||||
leave: "$distinguisher <b>$displayname</b> left the room."
|
||||
name_change: "$distinguisher <b>$prev_displayname</b> changed their name to $distinguisher <b>$displayname</b>"
|
||||
# Filter rooms that can/can't be bridged. Can also be managed using the `filter` and
|
||||
# `filter-mode` management commands.
|
||||
#
|
||||
# An empty blacklist will essentially disable the filter.
|
||||
filter:
|
||||
# Filter mode to use. Either "blacklist" or "whitelist".
|
||||
# If the mode is "blacklist", the listed chats will never be bridged.
|
||||
# If the mode is "whitelist", only the listed chats can be bridged.
|
||||
mode: blacklist
|
||||
# The list of group/channel IDs to filter.
|
||||
list: []
|
||||
# How to handle direct chats:
|
||||
# If users is "null", direct chats will follow the previous settings.
|
||||
# If users is "true", direct chats will always be bridged.
|
||||
# If users is "false", direct chats will never be bridged.
|
||||
users: true
|
||||
# The prefix for commands. Only required in non-management rooms.
|
||||
command_prefix: "!tg"
|
||||
# Messages sent upon joining a management room.
|
||||
# Markdown is supported. The defaults are listed below.
|
||||
management_room_text:
|
||||
# Sent when joining a room.
|
||||
welcome: "Hello, I'm a Telegram bridge bot."
|
||||
# Sent when joining a management room and the user is already logged in.
|
||||
welcome_connected: "Use `help` for help."
|
||||
# Sent when joining a management room and the user is not logged in.
|
||||
welcome_unconnected: "Use `help` for help or `login` to log in."
|
||||
# Optional extra text sent when joining a management room.
|
||||
additional_help: ""
|
||||
# Send each message separately (for readability in some clients)
|
||||
management_room_multiple_messages: false
|
||||
# Permissions for using the bridge.
|
||||
# Permitted values:
|
||||
# relaybot - Only use the bridge via the relaybot, no access to commands.
|
||||
# user - Relaybot level + access to commands to create bridges.
|
||||
# puppeting - User level + logging in with a Telegram account.
|
||||
# full - Full access to use the bridge, i.e. previous levels + Matrix login.
|
||||
# admin - Full access to use the bridge and some extra administration commands.
|
||||
# Permitted keys:
|
||||
# * - All Matrix users
|
||||
# domain - All users on that homeserver
|
||||
# mxid - Specific user
|
||||
permissions:
|
||||
"*": "relaybot"
|
||||
"janky.solutions": "full"
|
||||
"@finn:janky.solutions": "admin"
|
||||
# Options related to the message relay Telegram bot.
|
||||
relaybot:
|
||||
private_chat:
|
||||
# List of users to invite to the portal when someone starts a private chat with the bot.
|
||||
# If empty, private chats with the bot won't create a portal.
|
||||
invite: []
|
||||
# Whether or not to bridge state change messages in relaybot private chats.
|
||||
state_changes: true
|
||||
# When private_chat_invite is empty, this message is sent to users /starting the
|
||||
# relaybot. Telegram's "markdown" is supported.
|
||||
message: This is a Matrix bridge relaybot and does not support direct chats
|
||||
# List of users to invite to all group chat portals created by the bridge.
|
||||
group_chat_invite: []
|
||||
# Whether or not the relaybot should not bridge events in unbridged group chats.
|
||||
# If false, portals will be created when the relaybot receives messages, just like normal
|
||||
# users. This behavior is usually not desirable, as it interferes with manually bridging
|
||||
# the chat to another room.
|
||||
ignore_unbridged_group_chat: true
|
||||
# Whether or not to allow creating portals from Telegram.
|
||||
authless_portals: true
|
||||
# Whether or not to allow Telegram group admins to use the bot commands.
|
||||
whitelist_group_admins: true
|
||||
# Whether or not to ignore incoming events sent by the relay bot.
|
||||
ignore_own_incoming_events: true
|
||||
# List of usernames/user IDs who are also allowed to use the bot commands.
|
||||
whitelist:
|
||||
- myusername
|
||||
- 12345678
|
||||
# Telegram config
|
||||
telegram:
|
||||
# Get your own API keys at https://my.telegram.org/apps
|
||||
api_id: TG_API_ID
|
||||
api_hash: TG_API_HASH
|
||||
# (Optional) Create your own bot at https://t.me/BotFather
|
||||
bot_token: disabled
|
||||
# Should the bridge request missed updates from Telegram when restarting?
|
||||
catch_up: true
|
||||
# Should incoming updates be handled sequentially to make sure order is preserved on Matrix?
|
||||
sequential_updates: true
|
||||
exit_on_update_error: false
|
||||
# Interval to force refresh the connection (full reconnect). 0 disables it.
|
||||
force_refresh_interval_seconds: 0
|
||||
# Telethon connection options.
|
||||
connection:
|
||||
# The timeout in seconds to be used when connecting.
|
||||
timeout: 120
|
||||
# How many times the reconnection should retry, either on the initial connection or when
|
||||
# Telegram disconnects us. May be set to a negative or null value for infinite retries, but
|
||||
# this is not recommended, since the program can get stuck in an infinite loop.
|
||||
retries: 5
|
||||
# The delay in seconds to sleep between automatic reconnections.
|
||||
retry_delay: 1
|
||||
# The threshold below which the library should automatically sleep on flood wait errors
|
||||
# (inclusive). For instance, if a FloodWaitError for 17s occurs and flood_sleep_threshold
|
||||
# is 20s, the library will sleep automatically. If the error was for 21s, it would raise
|
||||
# the error instead. Values larger than a day (86400) will be changed to a day.
|
||||
flood_sleep_threshold: 60
|
||||
# How many times a request should be retried. Request are retried when Telegram is having
|
||||
# internal issues, when there is a FloodWaitError less than flood_sleep_threshold, or when
|
||||
# there's a migrate error. May take a negative or null value for infinite retries, but this
|
||||
# is not recommended, since some requests can always trigger a call fail (such as searching
|
||||
# for messages).
|
||||
request_retries: 5
|
||||
# Use IPv6 for Telethon connection
|
||||
use_ipv6: false
|
||||
# Device info sent to Telegram.
|
||||
device_info:
|
||||
# "auto" = OS name+version.
|
||||
device_model: mautrix-telegram
|
||||
# "auto" = Telethon version.
|
||||
system_version: auto
|
||||
# "auto" = mautrix-telegram version.
|
||||
app_version: auto
|
||||
lang_code: en
|
||||
system_lang_code: en
|
||||
# Custom server to connect to.
|
||||
server:
|
||||
# Set to true to use these server settings. If false, will automatically
|
||||
# use production server assigned by Telegram. Set to false in production.
|
||||
enabled: false
|
||||
# The DC ID to connect to.
|
||||
dc: 2
|
||||
# The IP to connect to.
|
||||
ip: 149.154.167.40
|
||||
# The port to connect to. 443 may not work, 80 is better and both are equally secure.
|
||||
port: 80
|
||||
# Telethon proxy configuration.
|
||||
# You must install PySocks from pip for proxies to work.
|
||||
proxy:
|
||||
# Allowed types: disabled, socks4, socks5, http, mtproxy
|
||||
type: disabled
|
||||
# Proxy IP address and port.
|
||||
address: 127.0.0.1
|
||||
port: 1080
|
||||
# Whether or not to perform DNS resolving remotely. Only for socks/http proxies.
|
||||
rdns: true
|
||||
# Proxy authentication (optional). Put MTProxy secret in password field.
|
||||
username: ""
|
||||
password: ""
|
||||
# Python logging configuration.
|
||||
#
|
||||
# See section 16.7.2 of the Python documentation for more info:
|
||||
# https://docs.python.org/3.6/library/logging.config.html#configuration-dictionary-schema
|
||||
logging:
|
||||
version: 1
|
||||
formatters:
|
||||
colored:
|
||||
(): mautrix_telegram.util.ColorFormatter
|
||||
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
|
||||
normal:
|
||||
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
|
||||
handlers:
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: colored
|
||||
loggers:
|
||||
mau:
|
||||
level: INFO
|
||||
telethon:
|
||||
level: INFO
|
||||
aiohttp:
|
||||
level: INFO
|
||||
root:
|
||||
level: DEBUG
|
||||
handlers: [console]
|
|
@ -24,6 +24,9 @@ form_secret: "SECRET_form_secret"
    signing_key_path: "/secrets/janky.solutions.signing.key"
    trusted_key_servers:
      - server_name: "matrix.org"
+   auto_join_rooms:
+     - "#jankysolutions:janky.solutions"
+     - "#general:janky.solutions"
    public_baseurl: https://matrix.janky.solutions
    ip_range_whitelist: [10.5.1.245,10.5.1.1]
    # oidc_providers:
@ -42,7 +45,6 @@ password_config:
      enabled: false
    app_service_config_files:
      - /data/facebook.yaml
-     - /data/telegram.yaml
      - /data/signal.yaml
    media_storage_providers:
      - module: s3_storage_provider.S3StorageProviderBackend
@ -37,7 +37,7 @@ spec:
            - secretRef:
                name: synapse-janky-bot
      containers:
-       - image: matrixdotorg/synapse:v1.117.0
+       - image: matrixdotorg/synapse:v1.119.0
          name: synapse
          resources: {}
          volumeMounts:
@ -38,7 +38,7 @@ spec:
                name: synapse-janky-solutions
        - name: initialize-bridge-secrets
          image: docker.io/library/python:3
-         command: ["python", "/init/initialize-secrets.py", "facebook.yaml", "telegram.yaml", "signal.yaml"]
+         command: ["python", "/init/initialize-secrets.py", "facebook.yaml", "signal.yaml"]
          volumeMounts:
            - name: init
              mountPath: /init
@ -64,8 +64,6 @@ spec:
              mountPath: /config
            - name: secrets
              mountPath: /secrets
-           - name: appservices
-             mountPath: /appservices
          env:
            - name: SYNAPSE_SERVER_NAME
              value: matrix.janky.solutions
@ -3,9 +3,8 @@ kind: Kustomization
 namespace: matrix
 resources:
   - namespace.yaml
-  - bridge-facebook.yaml
+  # - bridge-facebook.yaml
   - bridge-signal.yaml
-  - bridge-telegram.yaml
   - janky.bot-homeserver.yaml
   - janky.solutions-homeserver.yaml
   - secrets.yaml
@ -22,7 +21,6 @@ configMapGenerator:
   - name: appservices-janky-solutions
     files:
       - appservices-janky.solutions/facebook.yaml
-      - appservices-janky.solutions/telegram.yaml
       - appservices-janky.solutions/signal.yaml
   - name: mas-janky-solutions
     files:
@ -82,8 +82,8 @@ upstream_oauth2:
            action: suggest
            template: "{{ user.name }}"
          email:
-           action: suggest
-           template: "{{ user.email }}"
+           action: require
+           template: "{{ user.name }}@janky.solutions"
            set_email_verification: always
        account:
-         email_change_allowed: false
+         email_change_allowed: true
@@ -3,8 +3,9 @@ kind: Kustomization
namespace: meshtastic
resources:
  - namespace.yaml
  - matrix-meshtastic-bridge.yaml
secretGenerator:
  - name: matrix-meshtastic-bridge
  - matrix-bridge-meshtastic.yaml
  - secrets.yaml
configMapGenerator:
  - name: matrix-bridge-meshtastic
    files:
      - matrix-meshtastic-bridge/config.json
      - matrix-bridge-meshtastic/config.json

44
k8s/meshtastic/matrix-bridge-meshtastic.yaml
Normal file
@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: matrix-bridge-meshtastic
spec:
  selector:
    matchLabels:
      app: matrix-bridge-meshtastic
  serviceName: matrix-bridge-meshtastic
  replicas: 1
  template:
    metadata:
      labels:
        app: matrix-bridge-meshtastic
    spec:
      containers:
        - name: matrix-bridge-meshtastic
          image: git.janky.solutions/finn/matrix-bridge-meshtastic:sha-fae2a30
          securityContext:
            privileged: true
          env:
            - name: MATRIX_BRIDGE_MESHTASTIC_CONFIG
              value: /config/config.json
          envFrom:
            - secretRef:
                name: matrix-bridge-meshtastic
          volumeMounts:
            - name: config
              mountPath: /config
            - name: data
              mountPath: /data
      volumes:
        - name: config
          configMap:
            name: matrix-bridge-meshtastic
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        storageClassName: longhorn
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
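The StatefulSet above sets serviceName: matrix-bridge-meshtastic, which refers to a headless Service that is not part of this diff. A minimal sketch of such a Service, assuming the meshtastic namespace from the kustomization and that no ports need to be published:

apiVersion: v1
kind: Service
metadata:
  name: matrix-bridge-meshtastic
  namespace: meshtastic
spec:
  clusterIP: None   # headless; only provides stable DNS for the StatefulSet pod
  selector:
    app: matrix-bridge-meshtastic
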
11
k8s/meshtastic/matrix-bridge-meshtastic/config.json
Normal file
@@ -0,0 +1,11 @@
{
  "matrix": {
    "user": "@meshtastic-test:janky.bot",
    "room": "!VRoqFXTXJCHdTDdilP:janky.solutions",
    "state": "/data/matrix.db"
  },
  "meshtastic": {
    "address": "10.5.0.214"
  },
  "db": "/data/bridge.db"
}

@@ -1,56 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: matrix-meshtastic-bridge
spec:
  selector:
    matchLabels:
      app: matrix-meshtastic-bridge
  serviceName: matrix-meshtastic-bridge
  replicas: 1
  template:
    metadata:
      labels:
        app: matrix-meshtastic-bridge
    spec:
      tolerations:
        - key: rtlsdr
          value: "true"
          effect: NoSchedule
      containers:
        - name: matrix-meshtastic-bridge
          image: git.janky.solutions/finn/matrix-meshtastic-bridge:sha-008f7cd
          securityContext:
            privileged: true
          env:
            - name: RUST_LOG
              value: debug
            - name: CONFIG_PATH
              value: /config/config.json
          resources:
            limits:
              janky.solutions/meshtastic: "1"
          volumeMounts:
            - name: config
              mountPath: /config
            - name: ttyacm0
              mountPath: /dev/ttyACM0
            - name: data
              mountPath: /data
      volumes:
        - name: config
          secret:
            secretName: matrix-meshtastic-bridge
        - name: ttyacm0
          hostPath:
            type: CharDevice
            path: /dev/ttyACM0
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        storageClassName: longhorn
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi

@@ -1,12 +0,0 @@
{
  "matrix": {
    "username": "@meshtastic-test:janky.bot",
    "room": "!VRoqFXTXJCHdTDdilP:janky.solutions",
    "user": "@finn:janky.solutions",
    "state": "/data/matrix.db"
  },
  "meshtastic": {
    "device": "/dev/ttyACM0"
  },
  "db": "sqlite:///data/bridge.db"
}

@@ -16,7 +16,7 @@ spec:
    spec:
      containers:
        - name: miniflux
          image: docker.io/miniflux/miniflux:2.2.1
          image: docker.io/miniflux/miniflux:2.2.3
          imagePullPolicy: Always
          resources: {}
          envFrom:

@@ -64,7 +64,7 @@ spec:
    - metadata:
        name: storage
      spec:
        storageClassName: longhorn
        # storageClassName: longhorn
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:

@@ -27,7 +27,7 @@ spec:
        app: mysql
    spec:
      containers:
        - image: docker.io/library/mysql:9
        - image: docker.io/library/mysql:5.7
          name: mysql
          resources: {}
          ports:
@@ -50,7 +50,7 @@ spec:
    - metadata:
        name: storage
      spec:
        storageClassName: longhorn
        # storageClassName: longhorn
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:

@@ -9,9 +9,6 @@ resources:
  - thanos.yaml
  - alerts-longhorn.yaml
  - matrix-alertmanager-receiver.yaml
images:
  - name: quay.io/thanos/thanos
    newTag: v0.36.1
secretGenerator:
  - name: additional-scrape-configs
    options:

@@ -31,7 +31,7 @@ spec:
            name: matrix-alertmanager-receiver
      containers:
        - name: matrix-alertmanager-receiver
          image: docker.io/metio/matrix-alertmanager-receiver:2024.10.16
          image: docker.io/metio/matrix-alertmanager-receiver:2024.11.20
          args: ["--config-path", "/config/config.yaml"]
          resources:
            limits:

@@ -30,12 +30,9 @@
    static_configs:
      - targets:
          - ubnt:9001 # mongod-exporter
          - ubnt:9130 # unifi-exporter
          - rpi4-build:8080
          - ci-runner-0:8080
          - ci-runner-1:8080
          - ci-runner-2:8080
          - ci-runner-3:8080
          - docker:9170 # docker hub prometheus exporter
          - jellyfin:8096 # jellyfin
          - signald:9595 # signald on signald
@@ -67,8 +64,8 @@
          - forgejo-runner-1:9080
          - forgejo-runner-2:9080
          - forgejo-runner-3:9080
          - forgejo-runner-4:9080
          - monitoring-0:9080
          - freepbx:9080
- job_name: node-exporter
  static_configs:
    - targets:
@@ -82,8 +79,6 @@
        - signald:9100
        - ci-runner-0:9100
        - ci-runner-1:9100
        - ci-runner-2:9100
        - ci-runner-3:9100
        - media-ingest:9100
        - mc:9100
        - http:9100
@@ -91,27 +86,26 @@
        - mx1.janky.email:9100
        - dns:9100
        - hypervisor-d:9100
        - livingroom-tv:9100
        - mobile-proxy:9100
        - monitoring-0:9100
        - forgejo-runner-0:9100
        - forgejo-runner-1:9100
        - forgejo-runner-2:9100
        - forgejo-runner-3:9100
        - forgejo-runner-4:9100
        - freepbx:9100
- job_name: minio
  authorization:
    credentials_file: /etc/prometheus/secrets/scrape-secrets/minio.token
  metrics_path: /minio/v2/metrics/cluster
  static_configs:
    - targets: ['minio:9000']
- job_name: 'home-assistant'
  metrics_path: /api/prometheus
  authorization:
    credentials_file: /etc/prometheus/secrets/scrape-secrets/home-assistant.token
  static_configs:
    - targets:
        - home-assistant:8123
# - job_name: 'home-assistant'
#   metrics_path: /api/prometheus
#   authorization:
#     credentials_file: /etc/prometheus/secrets/scrape-secrets/home-assistant.token
#   static_configs:
#     - targets:
#         - home-assistant:8123
- job_name: forgejo
  authorization:
    credentials_file: /etc/prometheus/secrets/scrape-secrets/forgejo.token

@@ -18,7 +18,7 @@ spec:
    spec:
      containers:
        - name: thanos-querier
          image: quay.io/thanos/thanos:latest
          image: quay.io/thanos/thanos:v0.36.1
          args:
            - query
            - --http-address
@@ -120,7 +120,7 @@ spec:
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        image: quay.io/thanos/thanos:latest
        image: quay.io/thanos/thanos:v0.36.1
        livenessProbe:
          failureThreshold: 8
          httpGet:

@@ -17,7 +17,7 @@ spec:
    spec:
      containers:
        - name: bitwarden-cli
          image: ghcr.io/charlesthomas/bitwarden-cli:2024.9.0
          image: ghcr.io/charlesthomas/bitwarden-cli:2024.11.0
          imagePullPolicy: IfNotPresent
          envFrom:
            - secretRef:

@@ -4147,6 +4147,9 @@ spec:
                type: string
              projectSlug:
                type: string
              recursive:
                default: false
                type: boolean
              secretsPath:
                default: /
                type: string
@@ -7505,7 +7508,27 @@ spec:
                type: array
              selector:
                description: The Secret Selector (k8s source) for the Push Secret
                maxProperties: 1
                minProperties: 1
                properties:
                  generatorRef:
                    description: Point to a generator to create a Secret.
                    properties:
                      apiVersion:
                        default: generators.external-secrets.io/v1alpha1
                        description: Specify the apiVersion of the generator resource
                        type: string
                      kind:
                        description: Specify the Kind of the resource, e.g. Password,
                          ACRAccessToken etc.
                        type: string
                      name:
                        description: Specify the name of the generator resource
                        type: string
                    required:
                      - kind
                      - name
                    type: object
                  secret:
                    description: Select a Secret to Push.
                    properties:
@@ -7516,8 +7539,6 @@ spec:
                    required:
                      - name
                    type: object
                required:
                  - secret
                type: object
              template:
                description: Template defines a blueprint for the created Secret resource.
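The new selector.generatorRef field above lets a PushSecret push the output of a generator instead of an existing Secret. A minimal sketch of how it could be used; the store name, generator name, and remote key are illustrative and not taken from this repo:

apiVersion: external-secrets.io/v1alpha1
kind: PushSecret
metadata:
  name: push-generated-password
spec:
  refreshInterval: 1h
  secretStoreRefs:
    - name: openbao            # assumed SecretStore name
      kind: SecretStore
  selector:
    generatorRef:
      apiVersion: generators.external-secrets.io/v1alpha1
      kind: Password
      name: example-password
  data:
    - match:
        secretKey: password    # the Password generator stores its value under this key
        remoteRef:
          remoteKey: example/generated-password
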
@@ -10968,6 +10989,9 @@ spec:
                type: string
              projectSlug:
                type: string
              recursive:
                default: false
                type: boolean
              secretsPath:
                default: /
                type: string
@@ -13671,8 +13695,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets
  namespace: external-secrets
---
@@ -13683,8 +13707,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-cert-controller
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-cert-controller
  namespace: external-secrets
---
@@ -13695,8 +13719,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-webhook
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-webhook
  namespace: external-secrets
---
@@ -13707,8 +13731,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-leaderelection
  namespace: external-secrets
rules:
@@ -13745,8 +13769,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-cert-controller
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-cert-controller
rules:
- apiGroups:
@@ -13819,8 +13843,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-controller
rules:
- apiGroups:
@@ -13930,8 +13954,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
  name: external-secrets-edit
@@ -13974,8 +13998,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
    servicebinding.io/controller: "true"
  name: external-secrets-servicebindings
rules:
@@ -13995,8 +14019,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
@@ -14036,8 +14060,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-leaderelection
  namespace: external-secrets
roleRef:
@@ -14056,8 +14080,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-cert-controller
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-cert-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
@@ -14075,8 +14099,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
@@ -14094,9 +14118,9 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-webhook
    app.kubernetes.io/version: v0.10.4
    app.kubernetes.io/version: v0.10.5
    external-secrets.io/component: webhook
    helm.sh/chart: external-secrets-0.10.4
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-webhook
  namespace: external-secrets
---
@@ -14107,9 +14131,9 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-webhook
    app.kubernetes.io/version: v0.10.4
    app.kubernetes.io/version: v0.10.5
    external-secrets.io/component: webhook
    helm.sh/chart: external-secrets-0.10.4
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-webhook
  namespace: external-secrets
spec:
@@ -14130,8 +14154,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets
  namespace: external-secrets
spec:
@@ -14147,8 +14171,8 @@ spec:
        app.kubernetes.io/instance: external-secrets
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: external-secrets
        app.kubernetes.io/version: v0.10.4
        helm.sh/chart: external-secrets-0.10.4
        app.kubernetes.io/version: v0.10.5
        helm.sh/chart: external-secrets-0.10.5
    spec:
      automountServiceAccountToken: true
      containers:
@@ -14157,7 +14181,7 @@ spec:
        - --metrics-addr=:8080
        - --loglevel=info
        - --zap-time-encoding=epoch
        image: oci.external-secrets.io/external-secrets/external-secrets:v0.10.4
        image: oci.external-secrets.io/external-secrets/external-secrets:v0.10.5
        imagePullPolicy: IfNotPresent
        name: external-secrets
        ports:
@@ -14185,8 +14209,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-cert-controller
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-cert-controller
  namespace: external-secrets
spec:
@@ -14202,8 +14226,8 @@ spec:
        app.kubernetes.io/instance: external-secrets
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: external-secrets-cert-controller
        app.kubernetes.io/version: v0.10.4
        helm.sh/chart: external-secrets-0.10.4
        app.kubernetes.io/version: v0.10.5
        helm.sh/chart: external-secrets-0.10.5
    spec:
      automountServiceAccountToken: true
      containers:
@@ -14219,7 +14243,7 @@ spec:
        - --loglevel=info
        - --zap-time-encoding=epoch
        - --enable-partial-cache=true
        image: oci.external-secrets.io/external-secrets/external-secrets:v0.10.4
        image: oci.external-secrets.io/external-secrets/external-secrets:v0.10.5
        imagePullPolicy: IfNotPresent
        name: cert-controller
        ports:
@@ -14252,8 +14276,8 @@ metadata:
    app.kubernetes.io/instance: external-secrets
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: external-secrets-webhook
    app.kubernetes.io/version: v0.10.4
    helm.sh/chart: external-secrets-0.10.4
    app.kubernetes.io/version: v0.10.5
    helm.sh/chart: external-secrets-0.10.5
  name: external-secrets-webhook
  namespace: external-secrets
spec:
@@ -14269,8 +14293,8 @@ spec:
        app.kubernetes.io/instance: external-secrets
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: external-secrets-webhook
        app.kubernetes.io/version: v0.10.4
        helm.sh/chart: external-secrets-0.10.4
        app.kubernetes.io/version: v0.10.5
        helm.sh/chart: external-secrets-0.10.5
    spec:
      automountServiceAccountToken: true
      containers:
@@ -14284,7 +14308,7 @@ spec:
        - --healthz-addr=:8081
        - --loglevel=info
        - --zap-time-encoding=epoch
        image: oci.external-secrets.io/external-secrets/external-secrets:v0.10.4
        image: oci.external-secrets.io/external-secrets/external-secrets:v0.10.5
        imagePullPolicy: IfNotPresent
        name: webhook
        ports:

@@ -1,22 +1,3 @@
- op: add
  path: /spec/additionalScrapeConfigs
  value:
    key: scrape-configs.yaml
    name: additional-scrape-configs
    optional: true
- op: add
  path: /spec/secrets
  value: [scrape-secrets]
- op: add
  path: /spec/externalUrl
  value: https://prometheus.k8s.home.finn.io
- op: add
  path: /spec/thanos
  value:
    image: quay.io/thanos/thanos:v0.36.0
    objectStorageConfig:
      key: thanos.yaml
      name: thanos-objstore
- op: add
  path: /spec/storage
  value:
@@ -24,4 +5,26 @@
      spec:
        resources:
          requests:
            storage: 20Gi
            storage: 50Gi
- op: add
  path: /spec/additionalScrapeConfigs
  value:
    key: scrape-configs.yaml
    name: additional-scrape-configs
    optional: true
- op: add
  path: /spec/externalUrl
  value: https://prometheus.k8s.home.finn.io
- op: add
  path: /spec/retention
  value: 72h
- op: add
  path: /spec/secrets
  value: [scrape-secrets]
- op: add
  path: /spec/thanos
  value:
    image: quay.io/thanos/thanos:v0.36.1
    objectStorageConfig:
      key: thanos.yaml
      name: thanos-objstore

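The ops above are a JSON 6902 patch (written as YAML) against the kube-prometheus Prometheus object. The kustomization entry that applies it is not shown in this diff; it would look roughly like the following, where the file name is an assumption:

patches:
  - path: prometheus-patch.yaml
    target:
      group: monitoring.coreos.com
      version: v1
      kind: Prometheus
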
@@ -2,8 +2,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - longhorn
  - postgres-operator
  - cert-manager
  - openbao
  - external-secrets
  - kube-prometheus
  - openbao
  - postgres-operator
  - secrets-store-csi-driver

@@ -3,7 +3,7 @@ kind: Kustomization
namespace: longhorn-system
resources:
  - https://github.com/longhorn/longhorn/releases/download/v1.6.2/longhorn.yaml
  - secrets.yaml
  # - secrets.yaml
  - backup.yaml
  - ingress.yaml
  - servicemonitor.yaml

@@ -4,7 +4,7 @@ namespace: postgres-operator
resources:
  - namespace.yaml
  - github.com/zalando/postgres-operator/manifests?ref=v1.13.0
  - secrets.yaml
  # - secrets.yaml
configMapGenerator:
  - name: postgres-operator
    behavior: merge

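For context, behavior: merge makes the generator overlay its entries onto the ConfigMap of the same name that already comes from the base (here the upstream postgres-operator manifests) instead of creating a new one. A minimal sketch of the pattern with an illustrative key, not the actual values kept in this repo:

configMapGenerator:
  - name: postgres-operator    # must match the ConfigMap name in the base
    behavior: merge
    literals:
      - watched_namespace=*    # example override only
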
@@ -18,7 +18,7 @@ spec:
        fsGroup: 1001
        fsGroupChangePolicy: "OnRootMismatch"
      containers:
        - image: ghcr.io/shlinkio/shlink:4.2.2
        - image: ghcr.io/shlinkio/shlink:4.2.5
          name: shlink
          resources: {}
          ports:

@@ -18,7 +18,7 @@ spec:
        fsGroupChangePolicy: "OnRootMismatch"
      containers:
        - name: snipe
          image: snipe/snipe-it:v7.0.13
          image: snipe/snipe-it:v7.1.14
          ports:
            - containerPort: 80
              name: web

@@ -2,12 +2,13 @@ SECRET_KEY={{ lookup('ansible.builtin.ini', 'pdns_admin_secret section=pdns file
OIDC_OAUTH_ENABLED=true
OIDC_OAUTH_KEY=powerdnsadmin
OIDC_OAUTH_SECRET={{ lookup('ansible.builtin.ini', 'oidc_secret section=pdns file=secrets/' + inventory_hostname + '.ini') }}
OIDC_OAUTH_API_URL=https://auth.janky.solutions/auth/realms/janky.solutions/protocol/openid-connect/
OIDC_OAUTH_METADATA_URL=https://auth.janky.solutions/auth/realms/janky.solutions/.well-known/openid-configuration
OIDC_OAUTH_LOGOUT_URL=https://auth.janky.solutions/auth/realms/janky.solutions/protocol/openid-connect/logout
OIDC_OAUTH_API_URL=https://auth.janky.solutions/realms/janky.solutions/protocol/openid-connect/
OIDC_OAUTH_METADATA_URL=https://auth.janky.solutions/realms/janky.solutions/.well-known/openid-configuration
OIDC_OAUTH_LOGOUT_URL=https://auth.janky.solutions/realms/janky.solutions/protocol/openid-connect/logout
OIDC_OAUTH_USERNAME=preferred_username
OIDC_OAUTH_FIRSTNAME=given_name
OIDC_OAUTH_LAST_NAME=family_name
OIDC_OAUTH_EMAIL=email
OIDC_OAUTH_SCOPE=openid email
SIGNUP_ENABLED=false
LOCAL_DB_ENABLED=false

@@ -5,6 +5,8 @@ Wants=network.target
[Service]
Type=simple
ExecStartPre=/usr/bin/podman pull docker.io/powerdnsadmin/pda-legacy:latest
ExecStartPre=-/usr/bin/podman stop powerdns-admin
ExecStartPre=-/usr/bin/podman rm powerdns-admin
ExecStart=/usr/bin/podman run --rm -v pda-data:/data -p 9191:80 --env-file /etc/powerdns-admin.env --name powerdns-admin docker.io/powerdnsadmin/pda-legacy:latest
Restart=always