Compare commits

..

81 commits

Author SHA1 Message Date
f8abb4747c handle effects of surprise traefik upgrade
All checks were successful
/ diff-and-deploy (push) Successful in 2m15s
2025-03-29 02:03:01 -07:00
e5f3be3eb7 chore(deps): update matrixdotorg/synapse docker tag to v1.127.1
All checks were successful
/ diff-and-deploy (push) Successful in 2m14s
2025-03-26 23:04:26 +00:00
7c91d07746 chore(deps): update ghcr.io/element-hq/synapse docker tag to v1.127.1
All checks were successful
/ build-synapse (push) Successful in 46s
/ roll out update (push) Successful in 4s
2025-03-26 22:04:22 +00:00
f01a17c160 provisionally allow facebookexternalhit
All checks were successful
/ diff-and-deploy (push) Successful in 2m12s
2025-03-26 11:09:55 -07:00
88791a76bb chore(deps): update ghcr.io/netbox-community/netbox docker tag to v4.2.6
All checks were successful
/ diff-and-deploy (push) Successful in 2m11s
2025-03-26 18:03:55 +00:00
19f6fbde2e netbox: use a version tag that renovate might be more familiar with
All checks were successful
/ diff-and-deploy (push) Successful in 2m16s
2025-03-26 10:18:41 -07:00
54a29bc482 chore(deps): update docker.io/metio/matrix-alertmanager-receiver docker tag to v2025.3.26
All checks were successful
/ diff-and-deploy (push) Successful in 2m11s
2025-03-26 06:03:59 +00:00
adb5af6867 chore(deps): update matrixdotorg/synapse docker tag to v1.127.0
All checks were successful
/ diff-and-deploy (push) Successful in 2m17s
2025-03-25 18:03:54 +00:00
e23eb72c06 chore(deps): update ghcr.io/element-hq/synapse docker tag to v1.127.0
All checks were successful
/ render-helm (push) Successful in 27s
/ diff-and-deploy (push) Successful in 1m13s
/ build-synapse (push) Successful in 27s
/ roll out update (push) Successful in 3s
2025-03-25 17:04:03 +00:00
1d5fca48c0 helm: don't break on missing components
All checks were successful
/ render-helm (push) Successful in 28s
I have WIP stuff that goes uncommitted frequently enough that I think this is better than keeping the list accurate. Probably wouldn't feel that way in a multi-user environment.
2025-03-25 09:23:45 -07:00
fba90c0315 disable mobilizon since it's broken and unused
Some checks failed
/ render-helm (push) Failing after 25s
/ diff-and-deploy (push) Successful in 2m16s
2025-03-24 13:27:04 -07:00
7e72d7be05 disable broken services
All checks were successful
/ diff-and-deploy (push) Successful in 2m12s
2025-03-24 13:21:23 -07:00
ea20de8902 tf: fmt 2025-03-24 10:41:20 -07:00
1c11e70926 snipe: update timezone format
All checks were successful
/ diff-and-deploy (push) Successful in 2m16s
2025-03-24 10:39:26 -07:00
dc060b3806 chore(deps): update snipe/snipe-it docker tag to v8
All checks were successful
/ diff-and-deploy (push) Successful in 2m15s
2025-03-24 09:04:32 +00:00
abff93d121 chore(deps): update codeberg.org/forgejo/forgejo docker tag to v10.0.3
All checks were successful
/ diff-and-deploy (push) Successful in 2m18s
2025-03-24 08:03:56 +00:00
cde0ff8f10 chore(deps): update matrixdotorg/synapse docker tag to v1.126.0
All checks were successful
/ diff-and-deploy (push) Successful in 2m14s
2025-03-22 01:03:55 +00:00
73f9ff63d0 chore(deps): update helm release external-secrets to v0.15.0
All checks were successful
/ render-helm (push) Successful in 28s
/ diff-and-deploy (push) Successful in 2m20s
2025-03-22 00:04:02 +00:00
ce82a652aa chore(deps): update ghcr.io/element-hq/synapse docker tag to v1.126.0
All checks were successful
/ build-synapse (push) Successful in 48s
/ roll out update (push) Successful in 4s
2025-03-21 23:03:55 +00:00
0aff6e56ed chore(deps): update ghcr.io/charlesthomas/bitwarden-cli docker tag to v2025.2.0
All checks were successful
/ diff-and-deploy (push) Successful in 2m15s
2025-03-21 22:03:54 +00:00
78554afe63 chore(deps): update docker.io/metio/matrix-alertmanager-receiver docker tag to v2025.3.19
All checks were successful
/ diff-and-deploy (push) Successful in 2m17s
2025-03-21 21:04:15 +00:00
5ea6a2ae6b chore(deps): update quay.io/keycloak/keycloak docker tag to v26.1.4
All checks were successful
/ build-keycloak (push) Successful in 57s
/ roll out update (push) Successful in 4s
2025-03-21 20:04:20 +00:00
803556dd7d chore(deps): update ghcr.io/shlinkio/shlink docker tag to v4.4.6
All checks were successful
/ diff-and-deploy (push) Successful in 2m11s
2025-03-21 18:19:37 +00:00
9eaf580ba9 chore(deps): update framasoft/mobilizon docker tag to v5.1.2
Some checks failed
/ diff-and-deploy (push) Has been cancelled
2025-03-21 18:19:21 +00:00
31fabb54ac chore(deps): update dock.mau.dev/mautrix/meta docker tag to v0.4.5
Some checks failed
/ diff-and-deploy (push) Has been cancelled
2025-03-21 18:03:48 +00:00
5428af3c31 chore(deps): update docker.io/miniflux/miniflux docker tag to v2.2.6
All checks were successful
/ diff-and-deploy (push) Successful in 2m13s
2025-03-21 17:05:56 +00:00
7cb10d5d04 chore(deps): update dock.mau.dev/mautrix/signal docker tag to v0.8.1
Some checks failed
/ diff-and-deploy (push) Has been cancelled
2025-03-21 17:05:51 +00:00
78d0e98735 chore(deps): update codeberg.org/forgejo/forgejo docker tag to v10.0.2
All checks were successful
/ diff-and-deploy (push) Successful in 2m20s
2025-03-21 16:03:47 +00:00
2ff34765d0 chore(deps): update helm release traefik to v34.4.1
All checks were successful
/ build-synapse (push) Successful in 44s
/ build-keycloak (push) Successful in 53s
/ roll out update (push) Has been skipped
/ diff-and-deploy (push) Successful in 1m17s
/ render-helm (push) Successful in 27s
2025-03-21 08:51:56 +00:00
4c0876135c chore(deps): update quay.io/brancz/kube-rbac-proxy docker tag to v0.19.0 2025-03-21 08:51:30 +00:00
cd445fd231 Add web port to talos cluster traefik
All checks were successful
/ render-helm (push) Successful in 26s
2025-03-21 01:50:42 -07:00
44988d4cd2 chore(deps): update quay.io/ceph/ceph docker tag to v19.2.1 2025-03-21 08:28:49 +00:00
8b52908215 ceph: expose dashboard
All checks were successful
/ render-helm (push) Successful in 27s
/ diff-and-deploy (push) Successful in 2m10s
2025-03-21 00:28:39 -07:00
6c1a4ea726 Disable access logging 2025-03-19 20:03:22 -07:00
4b4a2f8454 Even more blocked user agents
All checks were successful
/ diff-and-deploy (push) Successful in 2m9s
Ancient versions of Android scraping the everloving shit out of my forgejo? fuck off
2025-03-07 21:25:25 -08:00
e5df849a58 switch synapse base image to ghcr
All checks were successful
/ build-synapse (push) Successful in 48s
/ roll out update (push) Successful in 3s
2025-03-06 11:51:09 -08:00
b6a32d7bb4 keycloak: pull latest built image
All checks were successful
/ diff-and-deploy (push) Successful in 2m19s
2025-03-06 11:41:18 -08:00
4540f485d8 keycloak: pin patch version
All checks were successful
/ build-keycloak (push) Successful in 1m0s
/ roll out update (push) Successful in 4s
2025-03-06 11:38:50 -08:00
e15a0a7b82 Update golang on traefik-forward-auth container
All checks were successful
/ build-traefik-forward-auth (push) Successful in 47s
/ roll out update (push) Successful in 3s
2025-02-27 10:46:29 -08:00
3f74c57827 s3staticsites: fix cert on janky.solutions
All checks were successful
/ diff-and-deploy (push) Successful in 2m18s
2025-02-27 08:39:28 -08:00
62a9409ca7 s3staticsites: Add janky.solutions
All checks were successful
/ diff-and-deploy (push) Successful in 2m16s
2025-02-26 23:39:35 -08:00
f5f9867a5f fuck AI scrapers: added GPTBot to blocked user agents
All checks were successful
/ diff-and-deploy (push) Successful in 2m16s
2025-02-26 23:05:01 -08:00
ff7b54a8a2 chore(deps): update docker.io/metio/matrix-alertmanager-receiver docker tag to v2025.2.19
All checks were successful
/ diff-and-deploy (push) Successful in 2m17s
2025-02-19 06:03:06 +00:00
ae5a141ca2 forgejo: explicitly disable SERVE_DIRECT
All checks were successful
/ diff-and-deploy (push) Successful in 2m12s
2025-02-18 21:57:38 -08:00
1f717237a0 forgejo: disable minio serve_direct
All checks were successful
/ diff-and-deploy (push) Successful in 2m19s
2025-02-18 11:22:42 -08:00
df5949425f chore(deps): update ghcr.io/element-hq/matrix-authentication-service docker tag to v0.14
All checks were successful
/ diff-and-deploy (push) Successful in 2m20s
2025-02-18 17:03:04 +00:00
7e872b6925 chore(deps): update quay.io/prometheus/node-exporter docker tag to v1.9.0 2025-02-17 08:03:11 +00:00
ba508dea57 Update seattledoulaservices.com ingress
All checks were successful
/ diff-and-deploy (push) Successful in 2m18s
2025-02-16 14:31:13 -08:00
09a0e717d9 Use s3 storage for forgejo
All checks were successful
/ diff-and-deploy (push) Successful in 2m12s
2025-02-16 11:59:50 -08:00
1b12147169 chore(deps): update dock.mau.dev/mautrix/meta docker tag to v0.4.4
All checks were successful
/ diff-and-deploy (push) Successful in 2m19s
2025-02-16 19:36:40 +00:00
787cfbb3a6 chore(deps): update dock.mau.dev/mautrix/signal docker tag to v0.8.0
Some checks failed
/ diff-and-deploy (push) Has been cancelled
2025-02-16 17:03:05 +00:00
91c7d7b60a wordpress: set externalsecret refresh interval to 0
All checks were successful
/ diff-and-deploy (push) Successful in 2m22s
2025-02-15 18:41:08 -08:00
658c6b983e chore(deps): update ghcr.io/shlinkio/shlink docker tag to v4.4.3
All checks were successful
/ diff-and-deploy (push) Successful in 2m15s
2025-02-15 11:02:56 +00:00
18e8a8a6d7 chore(deps): update wordpress docker tag to v6.7.2
Some checks failed
/ diff-and-deploy (push) Failing after 1m53s
2025-02-14 00:55:37 +00:00
92359e8f44 chore(deps): update helm release external-secrets to v0.14.2
All checks were successful
/ render-helm (push) Successful in 27s
/ diff-and-deploy (push) Successful in 2m16s
2025-02-13 13:04:05 +00:00
86daa12892 chore(deps): update matrixdotorg/synapse docker tag to v1.124.0
Some checks failed
/ build-synapse (push) Successful in 51s
/ roll out update (push) Successful in 4s
/ diff-and-deploy (push) Failing after 1m50s
2025-02-11 13:03:06 +00:00
098e511ff1 chore(deps): update helm release secrets-store-csi-driver to v1.4.8
Some checks failed
/ render-helm (push) Successful in 27s
/ diff-and-deploy (push) Failing after 1m48s
2025-02-09 18:33:14 +00:00
5f7c4f96c0 chore(deps): update docker.io/metio/matrix-alertmanager-receiver docker tag to v2025
All checks were successful
/ diff-and-deploy (push) Successful in 2m20s
2025-02-09 09:03:31 +00:00
f8dd413875 Test multi-instance postgres for matrix
All checks were successful
/ diff-and-deploy (push) Successful in 2m21s
2025-02-08 19:22:55 -08:00
d380eba9b7 chore(deps): update helm release external-secrets to v0.14.1
All checks were successful
/ render-helm (push) Successful in 27s
/ diff-and-deploy (push) Successful in 2m18s
2025-02-08 19:25:52 +00:00
be1b5f995c chore(deps): update codeberg.org/forgejo/forgejo docker tag to v10.0.1
All checks were successful
/ diff-and-deploy (push) Successful in 2m13s
2025-02-08 19:02:52 +00:00
2405086a40 chore(deps): update ghcr.io/charlesthomas/bitwarden-cli docker tag to v2025.1.3
All checks were successful
/ diff-and-deploy (push) Successful in 2m29s
2025-02-06 17:02:58 +00:00
86aa092634 chore(deps): update helm release rook-ceph to v1.16.3
All checks were successful
/ render-helm (push) Successful in 25s
2025-02-04 21:58:20 +00:00
b40f30c2e8 chore(deps): update helm release external-secrets to v0.14.0
Some checks failed
/ render-helm (push) Has been cancelled
/ diff-and-deploy (push) Successful in 2m20s
2025-02-04 21:58:00 +00:00
57b9179932 add postgres alerts
All checks were successful
/ diff-and-deploy (push) Successful in 2m25s
2025-02-04 10:29:19 -08:00
e9196a8772 chore(deps): update dependency prometheus-operator/prometheus-operator to v0.80.0 2025-02-04 17:11:34 +00:00
994032d831 chore(deps): update ghcr.io/element-hq/matrix-authentication-service docker tag to v0.13
All checks were successful
/ diff-and-deploy (push) Successful in 2m14s
2025-02-04 16:02:29 +00:00
736afc8d0d post-rollout: fix needs
All checks were successful
/ build-keycloak (push) Successful in 17s
/ build-traefik-forward-auth (push) Successful in 15s
/ roll out update (push) Successful in 3s
2025-02-03 22:57:36 -08:00
456ded4b58 Update rollouts after container builds
All checks were successful
/ build-keycloak (push) Successful in 45s
/ build-traefik-forward-auth (push) Successful in 46s
/ roll out update (push) Successful in 4s
/ diff-and-deploy (push) Successful in 2m17s
2025-02-03 22:55:28 -08:00
f38380898f chore(deps): update quay.io/keycloak/keycloak docker tag to v26.1
All checks were successful
/ build-keycloak (push) Successful in 50s
2025-02-04 06:45:45 +00:00
ac25224092 update kube-prometheus to a tagged version
All checks were successful
/ diff-and-deploy (push) Successful in 2m25s
2025-02-03 18:45:07 -08:00
dbd7d9aa2e talos metrics improvements
All checks were successful
/ render-helm (push) Successful in 29s
/ diff-and-deploy (push) Successful in 2m14s
2025-02-03 18:43:43 -08:00
be8364ec4a prometheus: drop external target ubnt:9001
All checks were successful
/ diff-and-deploy (push) Successful in 2m18s
2025-02-03 16:09:18 -08:00
8501a01bd9 chore(deps): update registry.k8s.io/kube-state-metrics/kube-state-metrics docker tag to v2.15.0 2025-02-03 23:02:26 +00:00
595e5caac9 helm validation: fix permissions on kube config
All checks were successful
/ render-helm (push) Successful in 36s
2025-02-03 10:26:00 -08:00
ba96b53d46 fix making ~/.kube
All checks were successful
/ render-helm (push) Successful in 28s
2025-02-03 09:11:03 -08:00
5badd0112d create ~/.kube before attempting to write to ~/.kube/config
Some checks failed
/ render-helm (push) Failing after 19s
2025-02-03 09:09:47 -08:00
195bd7b1fb deploy kubeconfig before running helm/render-all.sh
Some checks failed
/ render-helm (push) Failing after 18s
2025-02-03 09:08:21 -08:00
e9dcee8626 matrix: remove sliding sync ingress rules
All checks were successful
/ diff-and-deploy (push) Successful in 2m21s
2025-02-03 08:54:36 -08:00
7ede4fd40f external services: add jellyseerr
All checks were successful
/ diff-and-deploy (push) Successful in 2m21s
2025-01-29 16:31:07 -08:00
b95d4fc3d7 chore(deps): update ghcr.io/shlinkio/shlink docker tag to v4.4.2
All checks were successful
/ diff-and-deploy (push) Successful in 2m18s
2025-01-29 12:02:28 +00:00
77 changed files with 2473 additions and 319 deletions

View file

@@ -20,3 +20,16 @@ jobs:
 tags: git.janky.solutions/jankysolutions/infra/keycloak:latest
 platforms: linux/amd64
 push: ${{ github.ref == 'refs/heads/main' }}
+rollout:
+name: roll out update
+runs-on: ubuntu-latest
+container:
+image: git.janky.solutions/jankysolutions/infra/deployer:latest
+needs: [build-keycloak]
+if: github.ref == 'refs/heads/main'
+steps:
+- name: roll out update
+run: |
+set -euo pipefail
+echo "${{ secrets.KUBERNETES_CLIENT_CONFIG }}" > ~/.kube/config
+kubectl -n keycloak rollout restart deployment keycloak

View file

@@ -20,3 +20,16 @@ jobs:
 tags: git.janky.solutions/jankysolutions/infra/traefik-forward-auth:latest
 platforms: linux/amd64
 push: ${{ github.ref == 'refs/heads/main' }}
+rollout:
+name: roll out update
+runs-on: ubuntu-latest
+container:
+image: git.janky.solutions/jankysolutions/infra/deployer:latest
+needs: [build-traefik-forward-auth]
+if: github.ref == 'refs/heads/main'
+steps:
+- name: roll out update
+run: |
+set -euo pipefail
+echo "${{ secrets.KUBERNETES_CLIENT_CONFIG }}" > ~/.kube/config
+kubectl -n kube-system rollout restart deployment -l app=traefik-forward-auth

View file

@@ -11,5 +11,11 @@ jobs:
 steps:
 - run: apk add --no-cache nodejs git helm kubectl bash
 - uses: actions/checkout@v4
+- name: Deploy ~/.kube/config
+run: |
+set -euo pipefail
+mkdir -p ~/.kube
+echo "${{ secrets.KUBERNETES_CLIENT_CONFIG }}" > ~/.kube/config
+chmod 600 ~/.kube/config
 - run: ./helm/render-all.sh
 - run: git diff --exit-code

View file

@@ -1,4 +1,4 @@
-FROM quay.io/keycloak/keycloak:26.0 as builder
+FROM quay.io/keycloak/keycloak:26.1.4 as builder
 # Enable health and metrics support
 ENV KC_HEALTH_ENABLED=true
@@ -10,7 +10,7 @@ ENV KC_DB=postgres
 WORKDIR /opt/keycloak
 RUN /opt/keycloak/bin/kc.sh build
-FROM quay.io/keycloak/keycloak:26.0
+FROM quay.io/keycloak/keycloak:26.1
 COPY --from=builder /opt/keycloak/ /opt/keycloak/
 ADD themes/jankysolutions /opt/keycloak/themes/jankysolutions

View file

@@ -1,4 +1,4 @@
-FROM node:23 AS web-build
+FROM node:22 AS web-build
 RUN git clone -b v2.1.0 https://github.com/openbao/openbao /usr/src/openbao
 WORKDIR /usr/src/openbao
 RUN make ember-dist

View file

@@ -1,4 +1,4 @@
-FROM matrixdotorg/synapse:v1.123.0
+FROM ghcr.io/element-hq/synapse:v1.127.1
 RUN pip install boto3 humanize tqdm
 # there is probably a better way to figure out where the site packages are
 # this used to be hard coded to /usr/local/lib/python3.11/site-packages but then synapse updated it's minor python version and it broke

View file

@@ -1,4 +1,4 @@
-FROM library/golang:1.23 as builder
+FROM library/golang:1.24 as builder
 RUN git clone -b v2.3.0 https://github.com/thomseddon/traefik-forward-auth /go/traefik-forward-auth
 WORKDIR /go/traefik-forward-auth
 RUN CGO_ENABLED=0 go build -a -installsuffix nocgo -o /traefik-forward-auth ./cmd

View file

@@ -7,5 +7,5 @@ helmCharts:
 enabled: false # default, bitwarden-sdk-server doesn't work with vaultwarden (https://github.com/external-secrets/bitwarden-sdk-server/issues/18)
 namespace: external-secrets
 releaseName: external-secrets
-version: 0.13.0
+version: 0.15.0
 repo: https://charts.external-secrets.io

View file

@@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
helmCharts:
- name: metrics-server
releaseName: metrics-server
valuesInline:
metrics:
enabled: true
serviceMonitor:
enabled: true
version: 3.12.2
repo: https://kubernetes-sigs.github.io/metrics-server/

View file

@@ -7,6 +7,12 @@ header="# DO NOT EDIT: This file has been automatically generated by the script
 render_helm() {
 target="${1}"
 component="${2}"
+if [ ! -d "${component}" ]; then
+echo "skipping non-existant component ${component}"
+return
+fi
 mkdir -p "${target}/${component}"
 echo "${header}" > "${target}/${component}/bundle.yaml"
 rm -rf "${component}/charts" # it doesn't seem to update them otherwise
@@ -14,11 +20,11 @@ render_helm() {
 }
 # main k8s cluster operators
-for component in openbao external-secrets secrets-store-csi-driver cert-manager-webhook-pdns; do
+for component in openbao external-secrets secrets-store-csi-driver ceph-csi-cephfs cert-manager-webhook-pdns; do
 render_helm ../k8s/operators "${component}"
 done
 # cisco k8s cluster operators
-for component in rook cert-manager-webhook-pdns traefik; do
+for component in rook cert-manager-webhook-pdns traefik metrics-server; do
 render_helm ../talos/k8s/operators "${component}"
 done

View file

@@ -4,5 +4,5 @@ helmCharts:
 - name: rook-ceph
 namespace: rook-ceph
 releaseName: rook-ceph
-version: v1.16.2
+version: v1.16.3
 repo: https://charts.rook.io/release

View file

@@ -10,5 +10,5 @@ helmCharts:
 registrar:
 logVerbosity: 1
 releaseName: secrets-store-csi-driver
-version: v1.4.5
+version: 1.4.8
 repo: https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts

View file

@@ -4,11 +4,13 @@ helmCharts:
 - name: traefik
 namespace: traefik
 releaseName: traefik
-version: 34.2.0
+version: 34.4.1
 valuesInline:
 deployment:
-replicas: 2
+replicas: 1
 ports:
+web:
+hostPort: 80
 websecure:
 hostPort: 443
 proxyProtocol:
@@ -17,4 +19,12 @@ helmCharts:
 providers:
 kubernetesCRD:
 allowCrossNamespace: true
+logs:
+access:
+enabled: false
+format: json
+fields:
+headers:
+names:
+User-Agent: keep
 repo: https://traefik.github.io/charts

View file

@@ -6,5 +6,5 @@ resources:
 - config.yaml
 - ingress.yaml
 - services.yaml
-- statefulset.yaml
+# - statefulset.yaml
 - secrets.yaml

View file

@@ -1,44 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: forgejo
data:
FORGEJO__repository__DEFAULT_REPO_UNITS: repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages,repo.actions # this is the default for 1.22, should be safe to remove when we get there
FORGEJO__repository__PREFERRED_LICENSES: AGPL-3.0-or-later,LGPL-3.0-or-later,GPL-3.0-or-later,Apache-2.0,MIT
FORGEJO__repository__ENABLE_PUSH_CREATE_USER: "true"
FORGEJO__metrics__ENABLED: "true"
FORGEJO__email.incoming__USERNAME: git@janky.solutions
FORGEJO__email.incoming__USE_TLS: "true"
FORGEJO__email.incoming__PORT: "993"
FORGEJO__email.incoming__HOST: mx1.janky.email
FORGEJO__email.incoming__REPLY_TO_ADDRESS: git+%{token}@janky.solutions
FORGEJO__email.incoming__ENABLED: "true"
FORGEJO__mailer__FROM: git@janky.solutions
FORGEJO__mailer__USER: git@janky.solutions
FORGEJO__mailer__SMTP_ADDR: mx1.janky.email
FORGEJO__mailer__PROTOCOL: smtps
FORGEJO__mailer__ENABLED: "true"
FORGEJO__service__NO_REPLY_ADDRESS: noreply.git.janky.solutions
FORGEJO__service__ALLOW_ONLY_EXTERNAL_REGISTRATION: "true"
FORGEJO__service__SHOW_REGISTRATION_BUTTON: "false"
FORGEJO__service__DEFAULT_KEEP_EMAIL_PRIVATE: "true"
FORGEJO__service__ENABLE_NOTIFY_MAIL: "true"
FORGEJO__oauth2_client__ENABLE_AUTO_REGISTRATION: "true"
FORGEJO__oauth2_client__REGISTER_EMAIL_CONFIRM: "false"
FORGEJO__openid__ENABLE_OPENID_SIGNUP: "false"
FORGEJO__server__ROOT_URL: https://git.janky.solutions/
FORGEJO__server__DOMAIN: git.janky.solutions
FORGEJO__DEFAULT__APP_NAME: Janky Solutions
DEFAULT_MERGE_STYLE: rebase
# FORGEJO__storage__STORAGE_TYPE: minio
# FORGEJO__storage__MINIO_ENDPOINT: storage.home.finn.io
# FORGEJO__storage__MINIO_USE_SSL: "true"
# FORGEJO__storage__MINIO_LOCATION: us-sea-1
# FORGEJO__storage__MINIO_ACCESS_KEY_ID: aQ0zCsTpCSJ8eKLtGZ3C
# FORGEJO__storage__MINIO_BUCKET: forgejo
# FORGEJO__attachment__STORAGE_TYPE: minio
# FORGEJO__attachment__MINIO_ENDPOINT: storage.home.finn.io
# FORGEJO__attachment__MINIO_USE_SSL: "true"
# FORGEJO__attachment__MINIO_LOCATION: us-sea-1
# FORGEJO__attachment__MINIO_ACCESS_KEY_ID: aQ0zCsTpCSJ8eKLtGZ3C
# FORGEJO__attachment__MINIO_BUCKET: forgejo

View file

@@ -3,7 +3,7 @@ kind: Kustomization
 namespace: forgejo
 resources:
 - namespace.yaml
-- config.yaml
+# - config.yaml
 - ingress.yaml
 - forgejo-secret-sync.yaml
 - services.yaml
@@ -21,3 +21,37 @@ configMapGenerator:
 - name: forgejo-secret-sync
 files:
 - forgejo-secret-sync/forgejo-secret-sync.py
+- name: forgejo
+literals:
+- FORGEJO__repository__DEFAULT_REPO_UNITS=repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages,repo.actions # this is the default for 1.22, should be safe to remove when we get there
+- FORGEJO__repository__PREFERRED_LICENSES=AGPL-3.0-or-later,LGPL-3.0-or-later,GPL-3.0-or-later,Apache-2.0,MIT
+- FORGEJO__repository__ENABLE_PUSH_CREATE_USER="true"
+- FORGEJO__metrics__ENABLED=true
+- FORGEJO__email.incoming__USERNAME=git@janky.solutions
+- FORGEJO__email.incoming__USE_TLS=true
+- FORGEJO__email.incoming__PORT=993
+- FORGEJO__email.incoming__HOST=mx1.janky.email
+- FORGEJO__email.incoming__REPLY_TO_ADDRESS=git+%{token}@janky.solutions
+- FORGEJO__email.incoming__ENABLED=true
+- FORGEJO__mailer__FROM=git@janky.solutions
+- FORGEJO__mailer__USER=git@janky.solutions
+- FORGEJO__mailer__SMTP_ADDR=mx1.janky.email
+- FORGEJO__mailer__PROTOCOL=smtps
+- FORGEJO__mailer__ENABLED=true
+- FORGEJO__service__NO_REPLY_ADDRESS=noreply.git.janky.solutions
+- FORGEJO__service__ALLOW_ONLY_EXTERNAL_REGISTRATION=true
+- FORGEJO__service__SHOW_REGISTRATION_BUTTON=false
+- FORGEJO__service__DEFAULT_KEEP_EMAIL_PRIVATE=true
+- FORGEJO__service__ENABLE_NOTIFY_MAIL=true
+- FORGEJO__oauth2_client__ENABLE_AUTO_REGISTRATION=true
+- FORGEJO__oauth2_client__REGISTER_EMAIL_CONFIRM=false
+- FORGEJO__openid__ENABLE_OPENID_SIGNUP=false
+- FORGEJO__server__ROOT_URL=https://git.janky.solutions/
+- FORGEJO__server__DOMAIN=git.janky.solutions
+- FORGEJO__DEFAULT__APP_NAME=Janky Solutions
+- DEFAULT_MERGE_STYLE=rebase
+- FORGEJO__storage__STORAGE_TYPE=minio
+- FORGEJO__storage__MINIO_ENDPOINT=s3.janky.solutions
+- FORGEJO__storage__MINIO_USE_SSL=true
+- FORGEJO__storage__MINIO_BUCKET=forgejo
+- FORGEJO__storage__SERVE_DIRECT=false

View file

@@ -14,7 +14,7 @@ spec:
 app: forgejo
 spec:
 containers:
-- image: codeberg.org/forgejo/forgejo:10.0.0
+- image: codeberg.org/forgejo/forgejo:10.0.3
 imagePullPolicy: Always
 name: forgejo
 resources: {}

View file

@@ -0,0 +1,29 @@
# ceph-dashboard.home.finn.io
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ceph-dashboard.home.finn.io
spec:
entryPoints:
- websecure
routes:
- match: Host(`ceph-dashboard.k8s.home.finn.io`) && PathPrefix(`/`)
kind: Rule
services:
- name: ceph-dashboard-home-finn-io
kind: Service
port: 80
middlewares:
- name: kube-system-traefik-forward-auth@kubernetescrd
---
apiVersion: v1
kind: Service
metadata:
name: ceph-dashboard-home-finn-io
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
type: ExternalName
externalName: 10.5.1.219

View file

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: http-ingress
resources:
- namespace.yaml
- external-services.yaml

View file

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: http-ingress

View file

@@ -16,7 +16,7 @@ spec:
 spec:
 containers:
 - name: keycloak
-image: git.janky.solutions/jankysolutions/infra/keycloak:25.0
+image: git.janky.solutions/jankysolutions/infra/keycloak:latest
 imagePullPolicy: Always
 resources: {}
 volumeMounts:

View file

@@ -1,4 +1,4 @@
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
 kind: ServersTransport
 metadata:
 name: keycloak-frontend

View file

@@ -6,6 +6,7 @@ resources:
 - external-account-rbac
 - forgejo
 - generic-device-plugin
+- http-ingress
 - invoiceninja
 - keycloak
 - matrix

View file

@@ -60,7 +60,7 @@ spec:
 - secretRef:
 name: bridge-facebook
 containers:
-- image: dock.mau.dev/mautrix/meta:v0.4.3
+- image: dock.mau.dev/mautrix/meta:v0.4.5
 name: bridge-facebook
 resources: {}
 command: ["/usr/bin/mautrix-meta", "-c", "/data/config.yaml", "--no-update"]

View file

@@ -57,7 +57,7 @@ spec:
 - secretRef:
 name: bridge-signal
 containers:
-- image: dock.mau.dev/mautrix/signal:v0.7.5
+- image: dock.mau.dev/mautrix/signal:v0.8.1
 name: bridge-signal
 resources: {}
 ports:

View file

@@ -37,7 +37,7 @@ spec:
 - secretRef:
 name: synapse-janky-bot
 containers:
-- image: matrixdotorg/synapse:v1.123.0
+- image: matrixdotorg/synapse:v1.127.1
 name: synapse
 resources: {}
 volumeMounts:

View file

@@ -195,19 +195,7 @@ spec:
 name: homeserver-janky-solutions
 port: 8008
 - kind: Rule
-match: Host(`matrix.janky.solutions`) && PathPrefix(`/_matrix/client/unstable/org.matrix.msc3575/sync`)
-services:
-- kind: Service
-name: sliding-sync-janky-solutions
-port: 8008
-- kind: Rule
-match: Host(`matrix.janky.solutions`) && PathPrefix(`/client`)
-services:
-- kind: Service
-name: sliding-sync-janky-solutions
-port: 8008
-- kind: Rule
-match: Host(`matrix.janky.solutions`) && PathPrefix(`/_matrix/client/{version:.*}/{endpoint:(login|logout|refresh)}`)
+match: Host(`matrix.janky.solutions`) && PathRegexp(`^/_matrix/client/.*/(login|logout|refresh)$`)
 services:
 - kind: Service
 name: mas-janky-solutions
@@ -242,7 +230,7 @@ spec:
 name: mas-janky-solutions
 containers:
 - name: mas-janky-solutions
-image: ghcr.io/element-hq/matrix-authentication-service:0.12
+image: ghcr.io/element-hq/matrix-authentication-service:0.14
 args: ["server", "-c", "/data/config.yaml"]
 env:
 - name: PGPASSWORD
@@ -315,7 +303,7 @@ spec:
 teamId: matrix
 volume:
 size: 50Gi
-numberOfInstances: 1
+numberOfInstances: 2
 users:
 superuser:
 - superuser

View file

@@ -16,7 +16,7 @@ spec:
 spec:
 containers:
 - name: miniflux
-image: docker.io/miniflux/miniflux:2.2.5
+image: docker.io/miniflux/miniflux:2.2.6
 imagePullPolicy: Always
 resources: {}
 envFrom:

View file

@@ -4,7 +4,7 @@ namespace: mobilizon
 resources:
 - namespace.yaml
 - database.yaml
-- mobilizon.yaml
+# - mobilizon.yaml
 - ingress.yaml
 configMapGenerator:
 - name: mobilizon

View file

@@ -18,7 +18,7 @@ spec:
 fsGroupChangePolicy: "OnRootMismatch"
 containers:
 - name: mobilizon
-image: framasoft/mobilizon:5.1.1
+image: framasoft/mobilizon:5.1.2
 ports:
 - containerPort: 4000
 name: web

View file

@@ -0,0 +1,20 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
prometheus: k8s
role: alert-rules
name: postgres-operator-alerts
namespace: monitoring
spec:
groups:
- name: postgres-operator
rules:
- alert: PatroniHasNoLeader
expr: (max by (scope) (patroni_master) < 1) and (max by (scope) (patroni_standby_leader) < 1)
for: 0m
labels:
severity: critical
annotations:
summary: Patroni has no Leader (instance {{ $labels.instance }})
description: "A leader node (neither primary nor standby) cannot be found inside the cluster {{ $labels.scope }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View file

@@ -2,6 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 namespace: monitoring
 resources:
+- alerts-postgres.yaml
 - promtail.yaml
 - ingresses.yaml
 - secrets.yaml

View file

@@ -31,7 +31,7 @@ spec:
 name: matrix-alertmanager-receiver
 containers:
 - name: matrix-alertmanager-receiver
-image: docker.io/metio/matrix-alertmanager-receiver:2024.12.18
+image: docker.io/metio/matrix-alertmanager-receiver:2025.3.26
 args: ["--config-path", "/config/config.yaml"]
 resources:
 limits:

View file

@@ -29,7 +29,6 @@
 - job_name: static_http_targets
 static_configs:
 - targets:
-- ubnt:9001 # mongod-exporter
 - rpi4-build:8080
 - docker:9170 # docker hub prometheus exporter
 - jellyfin:8096 # jellyfin

View file

@@ -48,6 +48,7 @@ spec:
 ports:
 - port: 9090
 targetPort: 9090
+name: http
 ---
 apiVersion: v1
 kind: Service

View file

@@ -13,7 +13,7 @@ spec:
 spec:
 containers:
 - name: netbox
-image: ghcr.io/netbox-community/netbox:v4.2.0-3.0.2
+image: ghcr.io/netbox-community/netbox:v4.2.6
 envFrom:
 - secretRef:
 name: netbox

View file

@@ -17,7 +17,7 @@ spec:
 spec:
 containers:
 - name: bitwarden-cli
-image: ghcr.io/charlesthomas/bitwarden-cli:2025.1.2
+image: ghcr.io/charlesthomas/bitwarden-cli:2025.2.0
 imagePullPolicy: IfNotPresent
 envFrom:
 - secretRef:

File diff suppressed because it is too large

View file

@@ -1,7 +1,7 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-- https://github.com/prometheus-operator/kube-prometheus?ref=74f4e0cda3f3c2a4e8a1ab7d9bdbee019a47c851
+- https://github.com/prometheus-operator/kube-prometheus?ref=v0.14.0
 patches:
 - path: node-exporter-patch.yaml
 target:

View file

@@ -7,8 +7,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver
 namespace: secrets-store-csi-driver
 ---
@@ -24,8 +24,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-keep-crds
 namespace: secrets-store-csi-driver
 ---
@@ -41,8 +41,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-upgrade-crds
 namespace: secrets-store-csi-driver
 ---
@@ -55,8 +55,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 rbac.authorization.k8s.io/aggregate-to-admin: "true"
 rbac.authorization.k8s.io/aggregate-to-edit: "true"
 name: secretproviderclasses-admin-role
@@ -82,8 +82,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secretproviderclasses-role
 rules:
 - apiGroups:
@@ -149,8 +149,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 rbac.authorization.k8s.io/aggregate-to-view: "true"
 name: secretproviderclasses-viewer-role
 rules:
@@ -172,8 +172,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 rbac.authorization.k8s.io/aggregate-to-view: "true"
 name: secretproviderclasspodstatuses-viewer-role
 rules:
@@ -194,8 +194,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secretprovidersyncing-role
 rules:
 - apiGroups:
@@ -223,8 +223,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-keep-crds
 rules:
 - apiGroups:
@@ -247,8 +247,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-upgrade-crds
 rules:
 - apiGroups:
@@ -269,8 +269,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secretproviderclasses-rolebinding
 roleRef:
 apiGroup: rbac.authorization.k8s.io
@@ -289,8 +289,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secretprovidersyncing-rolebinding
 roleRef:
 apiGroup: rbac.authorization.k8s.io
@@ -313,8 +313,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-keep-crds
 roleRef:
 apiGroup: rbac.authorization.k8s.io
@@ -337,8 +337,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-upgrade-crds
 roleRef:
 apiGroup: rbac.authorization.k8s.io
@@ -357,8 +357,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver
 namespace: secrets-store-csi-driver
 spec:
@@ -374,8 +374,8 @@ spec:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 spec:
 affinity:
 nodeAffinity:
@@ -422,7 +422,7 @@ spec:
 fieldRef:
 apiVersion: v1
 fieldPath: spec.nodeName
-image: registry.k8s.io/csi-secrets-store/driver:v1.4.5
+image: registry.k8s.io/csi-secrets-store/driver:v1.4.8
 imagePullPolicy: IfNotPresent
 livenessProbe:
 failureThreshold: 5
@@ -520,8 +520,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-keep-crds
 namespace: secrets-store-csi-driver
 spec:
@@ -538,7 +538,7 @@ spec:
 - secretproviderclasspodstatuses.secrets-store.csi.x-k8s.io
 - -p
 - '{"metadata":{"annotations": {"helm.sh/resource-policy": "keep"}}}'
-image: registry.k8s.io/csi-secrets-store/driver-crds:v1.4.5
+image: registry.k8s.io/csi-secrets-store/driver-crds:v1.4.8
 imagePullPolicy: IfNotPresent
 name: crds-keep
 nodeSelector:
@@ -560,8 +560,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store-csi-driver-upgrade-crds
 namespace: secrets-store-csi-driver
 spec:
@@ -575,7 +575,7 @@ spec:
 - apply
 - -f
 - crds/
-image: registry.k8s.io/csi-secrets-store/driver-crds:v1.4.5
+image: registry.k8s.io/csi-secrets-store/driver-crds:v1.4.8
 imagePullPolicy: IfNotPresent
 name: crds-upgrade
 nodeSelector:
@@ -593,8 +593,8 @@ metadata:
 app.kubernetes.io/instance: secrets-store-csi-driver
 app.kubernetes.io/managed-by: Helm
 app.kubernetes.io/name: secrets-store-csi-driver
-app.kubernetes.io/version: 1.4.5
-helm.sh/chart: secrets-store-csi-driver-1.4.5
+app.kubernetes.io/version: 1.4.8
+helm.sh/chart: secrets-store-csi-driver-1.4.8
 name: secrets-store.csi.k8s.io
 spec:
 attachRequired: false

View file

@@ -79,3 +79,27 @@ spec:
 name: s3staticsites
 port:
 name: http
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+name: janky-solutions
+labels:
+name: janky-solutions
+annotations:
+cert-manager.io/cluster-issuer: letsencrypt
+spec:
+tls:
+- hosts: [janky.solutions]
+secretName: janky.solutions
+rules:
+- host: janky.solutions
+http:
+paths:
+- path: /
+pathType: Prefix
+backend:
+service:
+name: s3staticsites
+port:
+name: http

View file

@@ -18,7 +18,7 @@ spec:
 fsGroup: 1001
 fsGroupChangePolicy: "OnRootMismatch"
 containers:
-- image: ghcr.io/shlinkio/shlink:4.4.1
+- image: ghcr.io/shlinkio/shlink:4.4.6
 name: shlink
 resources: {}
 ports:

View file

@@ -16,7 +16,7 @@ configMapGenerator:
 - DB_HOST=mysql
 - APP_URL=https://snipe.herzfeld.casa
 - APP_FORCE_TLS=true
-- APP_TIMEZONE=US/Pacific
+- APP_TIMEZONE=America/Los_Angeles
 - APP_LOCALE=en-US
 - MAIL_MAILER=smtp
 - MAIL_HOST=mx1.janky.email

View file

@@ -18,7 +18,7 @@ spec:
 fsGroupChangePolicy: "OnRootMismatch"
 containers:
 - name: snipe
-image: snipe/snipe-it:v7.1.16
+image: snipe/snipe-it:v8.0.4
 ports:
 - containerPort: 80
 name: web

View file

@@ -3,6 +3,6 @@ kind: Kustomization
 namespace: spoolman
 resources:
 - namespace.yaml
-- database.yaml
-- deployment.yaml
+# - database.yaml
+# - deployment.yaml
 - ingress.yaml

View file

@@ -43,7 +43,7 @@ spec:
 entryPoints:
 - websecure
 routes:
-- match: HeadersRegexp(`User-Agent`, `.*(meta-externalagent|ClaudeBot|Amazonbot|SemrushBot|Googlebot|YandexBot|Bytespider).*`)
+- match: HeadersRegexp(`User-Agent`, `.*(meta-externalagent|ClaudeBot|Amazonbot|SemrushBot|Googlebot|YandexBot|Bytespider|GPTBot|Chrome/114.0.0.0|Chrome/56.0.8415.1887|Android 5.0|Android 6.0|Android 7.0|Android 8.0).*`)
 kind: Rule
 priority: 1000
 services:

View file

@@ -1,4 +1,4 @@
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
 kind: IngressRoute
 metadata:
 name: traefik-dashboard

View file

@@ -4,18 +4,21 @@ metadata:
 name: traefik-forward-auth-herzfeld-casa
 namespace: kube-system
 labels:
-app: traefik-forward-auth-herzfeld-casa
+app: traefik-forward-auth
+instance: herzfeld-casa
 spec:
 replicas: 1
 selector:
 matchLabels:
-app: traefik-forward-auth-herzfeld-casa
+app: traefik-forward-auth
+instance: herzfeld-casa
 strategy:
 type: Recreate
 template:
 metadata:
 labels:
-app: traefik-forward-auth-herzfeld-casa
+app: traefik-forward-auth
+instance: herzfeld-casa
 spec:
 terminationGracePeriodSeconds: 60
 containers:
@@ -41,12 +44,13 @@ metadata:
 namespace: kube-system
 spec:
 selector:
-app: traefik-forward-auth-herzfeld-casa
+app: traefik-forward-auth
+instance: herzfeld-casa
 ports:
 - name: auth-http
 port: 4181
 ---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
 kind: Middleware
 metadata:
 name: traefik-forward-auth-herzfeld-casa

View file

@@ -5,17 +5,20 @@ metadata:
 namespace: kube-system
 labels:
 app: traefik-forward-auth
+instance: default
 spec:
 replicas: 1
 selector:
 matchLabels:
 app: traefik-forward-auth
+instance: default
 strategy:
 type: Recreate
 template:
 metadata:
 labels:
 app: traefik-forward-auth
+instance: default
 spec:
 terminationGracePeriodSeconds: 60
 containers:
@@ -42,11 +45,12 @@ metadata:
 spec:
 selector:
 app: traefik-forward-auth
+instance: default
 ports:
 - name: auth-http
 port: 4181
 ---
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
 kind: Middleware
 metadata:
 name: traefik-forward-auth

View file

@@ -33,9 +33,12 @@ spec:
 providers:
 kubernetesCRD:
 allowCrossNamespace: true
+allowExternalNameServices: true
+kubernetesIngress:
+allowExternalNameServices: true
 logs:
 access:
-enabled: false
+enabled: true
 format: json
 fields:
 headers:

View file

@@ -3,6 +3,7 @@
 (list "minio-console" "minio-console.home.finn.io" "http://minio:9001")
 (list "minio" "storage.home.finn.io" "http://minio:9000")
 (list "jellyfin" "jellyfin.janky.solutions" "http://jellyfin:8096")
+(list "jellyseerr" "jellyfin-requests.janky.solutions" "http://media-ingest:5055")
 (list "dns" "dns.janky.solutions" "http://dns:9191")
 (list "dns443" "dns.janky.solutions:443" "http://dns:9191")
 (list "legacy-monitoring" "monitoring.home.finn.io" "http://monitoring-0:3000")

View file

@@ -15,7 +15,7 @@ spec:
 spec:
 containers:
 - name: wordpress
-image: library/wordpress:6.7.1
+image: library/wordpress:6.7.2
 env:
 - name: WORDPRESS_DB_HOST
 value: hannah-db
@@ -128,6 +128,7 @@ kind: ExternalSecret
 metadata:
 name: hannah-db
 spec:
+refreshInterval: "0"
 target:
 name: hannah-db
 dataFrom:
@@ -153,3 +154,35 @@ spec:
 name: hannah
 port:
 name: web
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+name: seattledoulaservices.com
+annotations:
+cert-manager.io/cluster-issuer: letsencrypt
+spec:
+tls:
+- hosts: [seattledoulaservices.com, www.seattledoulaservices.com]
+secretName: seattledoulaservices.com
+rules:
+- host: seattledoulaservices.com
+http:
+paths:
+- path: /
+pathType: Prefix
+backend:
+service:
+name: hannah
+port:
+name: web
+- host: www.seattledoulaservices.com
+http:
+paths:
+- path: /
+pathType: Prefix
+backend:
+service:
+name: hannah
+port:
+name: web

View file

@@ -12,3 +12,5 @@ machine:
 - bind
 - rshared
 - rw
+extraArgs:
+rotate-server-certificates: true

View file

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# This will raise some warnings, issue filed here: https://github.com/alex1989hu/kubelet-serving-cert-approver/issues/255
- https://github.com/alex1989hu/kubelet-serving-cert-approver//deploy/standalone?ref=v0.9.0

View file

@@ -2,5 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
 - operators
+- kubelet-serving-cert-approver
 - monitoring
 - rook

View file

@@ -178,7 +178,7 @@ spec:
 - --port=8081
 - --telemetry-host=127.0.0.1
 - --telemetry-port=8082
-image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.14.0
+image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0
 name: kube-state-metrics
 resources:
 limits:
@@ -202,7 +202,7 @@ spec:
 - --secure-listen-address=:8443
 - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
 - --upstream=http://127.0.0.1:8081/
-image: quay.io/brancz/kube-rbac-proxy:v0.18.2
+image: quay.io/brancz/kube-rbac-proxy:v0.19.0
 name: kube-rbac-proxy-main
 ports:
 - containerPort: 8443
@@ -229,7 +229,7 @@ spec:
 - --secure-listen-address=:9443
 - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
 - --upstream=http://127.0.0.1:8082/
-image: quay.io/brancz/kube-rbac-proxy:v0.18.2
+image: quay.io/brancz/kube-rbac-proxy:v0.19.0
 name: kube-rbac-proxy-self
 ports:
 - containerPort: 9443

@@ -0,0 +1,301 @@
# based on https://github.com/prometheus-operator/kube-prometheus/blob/v0.14.0/manifests/kubernetesControlPlane-prometheusRule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
app.kubernetes.io/component: prometheus
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
prometheus: k8s
role: alert-rules
name: prometheus-k8s-prometheus-rules
namespace: monitoring
spec:
groups:
- name: prometheus
rules:
- alert: PrometheusBadConfig
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload its configuration.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusbadconfig
summary: Failed Prometheus configuration reload.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(prometheus_config_last_reload_successful{job="prometheus-k8s",namespace="monitoring"}[5m]) == 0
for: 10m
labels:
severity: critical
- alert: PrometheusSDRefreshFailure
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to refresh SD with mechanism {{$labels.mechanism}}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheussdrefreshfailure
summary: Failed Prometheus SD refresh.
expr: |
increase(prometheus_sd_refresh_failures_total{job="prometheus-k8s",namespace="monitoring"}[10m]) > 0
for: 20m
labels:
severity: warning
- alert: PrometheusKubernetesListWatchFailures
annotations:
description: Kubernetes service discovery of Prometheus {{$labels.namespace}}/{{$labels.pod}} is experiencing {{ printf "%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuskuberneteslistwatchfailures
summary: Requests in Kubernetes SD are failing.
expr: |
increase(prometheus_sd_kubernetes_failures_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
- alert: PrometheusNotificationQueueRunningFull
annotations:
description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotificationqueuerunningfull
summary: Prometheus alert notification queue predicted to run full in less than 30m.
expr: |
# Without min_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
(
predict_linear(prometheus_notifications_queue_length{job="prometheus-k8s",namespace="monitoring"}[5m], 60 * 30)
>
min_over_time(prometheus_notifications_queue_capacity{job="prometheus-k8s",namespace="monitoring"}[5m])
)
for: 15m
labels:
severity: warning
- alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
annotations:
description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers
summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.
expr: |
(
rate(prometheus_notifications_errors_total{job="prometheus-k8s",namespace="monitoring"}[5m])
/
rate(prometheus_notifications_sent_total{job="prometheus-k8s",namespace="monitoring"}[5m])
)
* 100
> 1
for: 15m
labels:
severity: warning
- alert: PrometheusNotConnectedToAlertmanagers
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to any Alertmanagers.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotconnectedtoalertmanagers
summary: Prometheus is not connected to any Alertmanagers.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus-k8s",namespace="monitoring"}[5m]) < 1
for: 10m
labels:
severity: warning
- alert: PrometheusTSDBReloadsFailing
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} reload failures over the last 3h.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbreloadsfailing
summary: Prometheus has issues reloading blocks from disk.
expr: |
increase(prometheus_tsdb_reloads_failures_total{job="prometheus-k8s",namespace="monitoring"}[3h]) > 0
for: 4h
labels:
severity: warning
- alert: PrometheusTSDBCompactionsFailing
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} compaction failures over the last 3h.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbcompactionsfailing
summary: Prometheus has issues compacting blocks.
expr: |
increase(prometheus_tsdb_compactions_failed_total{job="prometheus-k8s",namespace="monitoring"}[3h]) > 0
for: 4h
labels:
severity: warning
- alert: PrometheusNotIngestingSamples
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotingestingsamples
summary: Prometheus is not ingesting samples.
expr: |
(
sum without(type) (rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-k8s",namespace="monitoring"}[5m])) <= 0
and
(
sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus-k8s",namespace="monitoring"}) > 0
or
sum without(rule_group) (prometheus_rule_group_rules{job="prometheus-k8s",namespace="monitoring"}) > 0
)
)
for: 10m
labels:
severity: warning
- alert: PrometheusDuplicateTimestamps
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with different values but duplicated timestamp.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusduplicatetimestamps
summary: Prometheus is dropping samples with duplicate timestamps.
expr: |
rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 10m
labels:
severity: warning
- alert: PrometheusOutOfOrderTimestamps
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with timestamps arriving out of order.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusoutofordertimestamps
summary: Prometheus drops samples with out-of-order timestamps.
expr: |
rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 10m
labels:
severity: warning
- alert: PrometheusRemoteStorageFailures
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{ printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{ $labels.url }}
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotestoragefailures
summary: Prometheus fails to send samples to remote storage.
expr: |
(
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-k8s",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-k8s",namespace="monitoring"}[5m]))
/
(
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-k8s",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-k8s",namespace="monitoring"}[5m]))
+
(rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus-k8s",namespace="monitoring"}[5m]) or rate(prometheus_remote_storage_samples_total{job="prometheus-k8s",namespace="monitoring"}[5m]))
)
)
* 100
> 1
for: 15m
labels:
severity: critical
- alert: PrometheusRemoteWriteBehind
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{ printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritebehind
summary: Prometheus remote write is behind.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
(
max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus-k8s",namespace="monitoring"}[5m])
- ignoring(remote_name, url) group_right
max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus-k8s",namespace="monitoring"}[5m])
)
> 120
for: 15m
labels:
severity: critical
- alert: PrometheusRemoteWriteDesiredShards
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job="prometheus-k8s",namespace="monitoring"}` $labels.instance | query | first | value }}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritedesiredshards
summary: Prometheus remote write desired shards calculation wants to run more than configured max shards.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
(
max_over_time(prometheus_remote_storage_shards_desired{job="prometheus-k8s",namespace="monitoring"}[5m])
>
max_over_time(prometheus_remote_storage_shards_max{job="prometheus-k8s",namespace="monitoring"}[5m])
)
for: 15m
labels:
severity: warning
- alert: PrometheusRuleFailures
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate {{ printf "%.0f" $value }} rules in the last 5m.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusrulefailures
summary: Prometheus is failing rule evaluations.
expr: |
increase(prometheus_rule_evaluation_failures_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: critical
- alert: PrometheusMissingRuleEvaluations
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf "%.0f" $value }} rule group evaluations in the last 5m.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusmissingruleevaluations
summary: Prometheus is missing rule evaluations due to slow rule group evaluation.
expr: |
increase(prometheus_rule_group_iterations_missed_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
- alert: PrometheusTargetLimitHit
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because the number of targets exceeded the configured target_limit.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetlimithit
summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit.
expr: |
increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
- alert: PrometheusLabelLimitHit
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because some samples exceeded the configured label_limit, label_name_length_limit or label_value_length_limit.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuslabellimithit
summary: Prometheus has dropped targets because some scrape configs have exceeded the labels limit.
expr: |
increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
- alert: PrometheusScrapeBodySizeLimitHit
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed {{ printf "%.0f" $value }} scrapes in the last 5m because some targets exceeded the configured body_size_limit.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapebodysizelimithit
summary: Prometheus has dropped some targets that exceeded body size limit.
expr: |
increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
- alert: PrometheusScrapeSampleLimitHit
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed {{ printf "%.0f" $value }} scrapes in the last 5m because some targets exceeded the configured sample_limit.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapesamplelimithit
summary: Prometheus has failed scrapes that have exceeded the configured sample limit.
expr: |
increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
- alert: PrometheusTargetSyncFailure
annotations:
description: '{{ printf "%.0f" $value }} targets in Prometheus {{$labels.namespace}}/{{$labels.pod}} have failed to sync because invalid configuration was supplied.'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetsyncfailure
summary: Prometheus has failed to sync targets.
expr: |
increase(prometheus_target_sync_failed_total{job="prometheus-k8s",namespace="monitoring"}[30m]) > 0
for: 5m
labels:
severity: critical
- alert: PrometheusHighQueryLoad
annotations:
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} query API has less than 20% available capacity in its query engine for the last 15 minutes.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheushighqueryload
summary: Prometheus is reaching its maximum capacity serving concurrent requests.
expr: |
avg_over_time(prometheus_engine_queries{job="prometheus-k8s",namespace="monitoring"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0.8
for: 15m
labels:
severity: warning
- alert: PrometheusErrorSendingAlertsToAnyAlertmanager
annotations:
description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstoanyalertmanager
summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager.
expr: |
min without (alertmanager) (
rate(prometheus_notifications_errors_total{job="prometheus-k8s",namespace="monitoring",alertmanager!~``}[5m])
/
rate(prometheus_notifications_sent_total{job="prometheus-k8s",namespace="monitoring",alertmanager!~``}[5m])
)
* 100
> 3
for: 15m
labels:
severity: critical
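
For the operator to actually load this file, the Prometheus object has to select it. A minimal sketch of the selector side, assuming prom.yaml uses the usual kube-prometheus labels (an empty ruleSelector {} would select every PrometheusRule instead):

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s
  namespace: monitoring
spec:
  ruleSelector:
    matchLabels:
      prometheus: k8s
      role: alert-rules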

@@ -3,6 +3,7 @@ kind: Kustomization
namespace: monitoring
resources:
- kube-state-metrics.yaml
+- kubernetesControlPlane-prometheusRule.yaml
- node-exporter.yaml
- prom.yaml
- servicemonitors.yaml

@@ -32,7 +32,7 @@ spec:
        - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
        - --collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15})$
        - --collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15})$
-       image: quay.io/prometheus/node-exporter:v1.8.2
+       image: quay.io/prometheus/node-exporter:v1.9.0
        imagePullPolicy: IfNotPresent
        name: node-exporter
        ports:
@@ -76,7 +76,7 @@ spec:
            fieldRef:
              apiVersion: v1
              fieldPath: status.podIP
-       image: quay.io/brancz/kube-rbac-proxy:v0.18.2
+       image: quay.io/brancz/kube-rbac-proxy:v0.19.0
        imagePullPolicy: IfNotPresent
        name: kube-rbac-proxy
        ports:

@@ -1,3 +1,4 @@
+# this file is heavily based on https://github.com/prometheus-operator/kube-prometheus/blob/v0.14.0/manifests/kubernetesControlPlane-serviceMonitorKubelet.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -264,6 +265,19 @@ spec:
      scheme: https
      tlsConfig:
        insecureSkipVerify: true
+   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+     honorLabels: true
+     interval: 30s
+     path: /metrics/resource
+     port: https-metrics
+     relabelings:
+       - action: replace
+         sourceLabels:
+           - __metrics_path__
+         targetLabel: metrics_path
+     scheme: https
+     tlsConfig:
+       insecureSkipVerify: true
  jobLabel: app.kubernetes.io/name
  namespaceSelector:
    matchNames:
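
The added endpoint scrapes the kubelet's /metrics/resource path, which serves the resource metrics (container_cpu_usage_seconds_total, container_memory_working_set_bytes, and friends) that metrics-server also consumes; the relabeling copies __metrics_path__ into a metrics_path label so series from the kubelet's different endpoints stay distinguishable. A hypothetical recording rule over those series, not part of this change:

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: kubelet-resource-usage   # hypothetical
  namespace: monitoring
  labels:
    prometheus: k8s
    role: alert-rules
spec:
  groups:
    - name: kubelet-resource
      rules:
        - record: namespace:container_cpu_usage_seconds:rate5m
          expr: sum by (namespace) (rate(container_cpu_usage_seconds_total{metrics_path="/metrics/resource"}[5m]))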

@@ -4,6 +4,7 @@ resources:
- cert-manager
- cert-manager-webhook-pdns
- local-path-provisioner
+- metrics-server
- prometheus-operator
- rook
- traefik

@@ -0,0 +1,273 @@
# DO NOT EDIT: This file has been automatically generated by the script in helm/render-all.sh, edits may get overwritten
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- nodes/metrics
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- nodes
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:metrics-server-aggregated-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server-auth-reader
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: monitoring
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server
namespace: monitoring
spec:
ports:
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
template:
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
spec:
containers:
- args:
- --secure-port=10250
- --cert-dir=/tmp
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --authorization-always-allow-paths=/metrics
image: registry.k8s.io/metrics-server/metrics-server:v0.7.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
initialDelaySeconds: 0
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /tmp
name: tmp
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: monitoring
port: 443
version: v1beta1
versionPriority: 100
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server
namespace: monitoring
spec:
endpoints:
- interval: 1m
path: /metrics
port: https
scheme: https
scrapeTimeout: 10s
tlsConfig:
insecureSkipVerify: true
jobLabel: app.kubernetes.io/instance
namespaceSelector:
matchNames:
- default
selector:
matchLabels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
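
metrics-server backs the metrics.k8s.io aggregated API, which is what kubectl top and the HorizontalPodAutoscaler consume. A minimal sketch of an HPA that would start working once this is deployed (workload name is hypothetical):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: example-app   # hypothetical workload
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-app
  minReplicas: 1
  maxReplicas: 3
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80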

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- bundle.yaml

@@ -1,6 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
-  name: rook-ceph
+  name: monitoring
  labels:
    pod-security.kubernetes.io/enforce: privileged

@@ -2,5 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
-- namespace.yaml
-- https://github.com/prometheus-operator/prometheus-operator?ref=v0.79.2
+- https://github.com/prometheus-operator/prometheus-operator?ref=v0.80.0

@@ -15095,7 +15095,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-cmd-reporter
@@ -15117,7 +15117,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-mgr
@@ -15130,7 +15130,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-osd
@@ -15149,7 +15149,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-rgw
@@ -15162,7 +15162,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-system
@@ -15400,7 +15400,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-system
@@ -15716,7 +15716,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rbd-csi-nodeplugin
@@ -15948,7 +15948,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-cluster-mgmt
@@ -15981,7 +15981,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-global
@@ -16169,7 +16169,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-mgr-cluster
@@ -16225,7 +16225,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-object-bucket
@@ -16301,7 +16301,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-system
@@ -16502,7 +16502,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-system
@@ -16592,7 +16592,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-global
@@ -16651,7 +16651,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-system
@@ -16890,7 +16890,7 @@ metadata:
    app.kubernetes.io/created-by: helm
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: rook-ceph-operator
-   helm.sh/chart: rook-ceph-v1.16.2
+   helm.sh/chart: rook-ceph-v1.16.3
    operator: rook
    storage-backend: ceph
  name: rook-ceph-operator
@@ -16906,7 +16906,7 @@ spec:
    metadata:
      labels:
        app: rook-ceph-operator
-       helm.sh/chart: rook-ceph-v1.16.2
+       helm.sh/chart: rook-ceph-v1.16.3
    spec:
      containers:
        - args:
@@ -16933,7 +16933,7 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
-         image: docker.io/rook/ceph:v1.16.2
+         image: docker.io/rook/ceph:v1.16.3
          imagePullPolicy: IfNotPresent
          name: rook-ceph-operator
          resources:

@@ -11,7 +11,7 @@ spec:
  # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
  # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.0-20240927
  # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
- image: quay.io/ceph/ceph:v19.2.0
+ image: quay.io/ceph/ceph:v19.2.1
  # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
  # Future versions such as Tentacle (v20) would require this to be set to `true`.
  # Do not set to true in production.
@@ -66,6 +66,7 @@ spec:
    ssl: false
    # The url of the Prometheus instance
    # prometheusEndpoint: <protocol>://<prometheus-host>:<port>
+   prometheusEndpoint: http://prometheus-operated.monitoring.svc.cluster.local:9090
    # Whether SSL should be verified if the Prometheus server is using https
    # prometheusEndpointSSLVerify: false
    # enable prometheus alerting for cluster
@@ -221,6 +222,11 @@ spec:
  # monitoring:
  # crashcollector:
  resources:
+   osd-hdd:
+     limits:
+       memory: 10Gi
+     requests:
+       memory: 4Gi
    #The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
    # mgr:
    # limits:

@@ -7,7 +7,7 @@ metadata:
    app.kubernetes.io/instance: traefik-traefik
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: traefik
-   helm.sh/chart: traefik-34.2.0
+   helm.sh/chart: traefik-34.4.1
  name: traefik
  namespace: traefik
---
@@ -18,7 +18,7 @@ metadata:
    app.kubernetes.io/instance: traefik-traefik
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: traefik
-   helm.sh/chart: traefik-34.2.0
+   helm.sh/chart: traefik-34.4.1
  name: traefik-traefik
rules:
  - apiGroups:
@@ -94,7 +94,7 @@ metadata:
    app.kubernetes.io/instance: traefik-traefik
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: traefik
-   helm.sh/chart: traefik-34.2.0
+   helm.sh/chart: traefik-34.4.1
  name: traefik-traefik
roleRef:
  apiGroup: rbac.authorization.k8s.io
@@ -112,7 +112,7 @@ metadata:
    app.kubernetes.io/instance: traefik-traefik
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: traefik
-   helm.sh/chart: traefik-34.2.0
+   helm.sh/chart: traefik-34.4.1
  name: traefik
  namespace: traefik
spec:
@@ -137,12 +137,12 @@ metadata:
    app.kubernetes.io/instance: traefik-traefik
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: traefik
-   helm.sh/chart: traefik-34.2.0
+   helm.sh/chart: traefik-34.4.1
  name: traefik
  namespace: traefik
spec:
  minReadySeconds: 0
- replicas: 2
+ replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: traefik-traefik
@@ -162,7 +162,7 @@ spec:
      app.kubernetes.io/instance: traefik-traefik
      app.kubernetes.io/managed-by: Helm
      app.kubernetes.io/name: traefik
-     helm.sh/chart: traefik-34.2.0
+     helm.sh/chart: traefik-34.4.1
    spec:
      automountServiceAccountToken: true
      containers:
@@ -195,7 +195,7 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
-         image: docker.io/traefik:v3.3.2
+         image: docker.io/traefik:v3.3.4
          imagePullPolicy: IfNotPresent
          lifecycle: null
          livenessProbe:
@@ -217,6 +217,7 @@ spec:
            name: traefik
            protocol: TCP
          - containerPort: 8000
+           hostPort: 80
            name: web
            protocol: TCP
          - containerPort: 8443
@@ -267,7 +268,7 @@ metadata:
    app.kubernetes.io/instance: traefik-traefik
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: traefik
-   helm.sh/chart: traefik-34.2.0
+   helm.sh/chart: traefik-34.4.1
  name: traefik
spec:
  controller: traefik.io/ingress-controller

@@ -37,3 +37,11 @@ metadata:
spec:
  bucketName: loki
  storageClassName: rook-ceph-bucket
+---
+apiVersion: objectbucket.io/v1alpha1
+kind: ObjectBucketClaim
+metadata:
+  name: forgejo
+spec:
+  bucketName: forgejo
+  storageClassName: rook-ceph-bucket
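
Rook's bucket provisioner answers an ObjectBucketClaim with a ConfigMap (BUCKET_HOST, BUCKET_NAME, BUCKET_PORT) and a Secret (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY), both named after the claim. A sketch of wiring them into the consuming pod:

# sketch: container env wiring for the generated credentials (pod spec fragment)
envFrom:
  - configMapRef:
      name: forgejo
  - secretRef:
      name: forgejo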

@@ -0,0 +1,20 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ceph-dashboard.k8s.home.finn.io
labels:
name: ceph-dashboard.k8s.home.finn.io
annotations:
cert-manager.io/cluster-issuer: letsencrypt
spec:
rules:
- host: ceph-dashboard.k8s.home.finn.io
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: rook-ceph-mgr-dashboard
port:
number: 7000

@@ -3,5 +3,7 @@ kind: Kustomization
namespace: rook-ceph
resources:
- buckets.yaml
+- dashboard.yaml
+- filesystems.yaml
- s3-pool.yaml
- servicemonitor.yaml

@@ -3,8 +3,8 @@ resource "vault_auth_backend" "kubernetes" {
}

resource "vault_kubernetes_auth_backend_config" "example" {
  backend         = vault_auth_backend.kubernetes.path
  kubernetes_host = "https://kubernetes.default.svc.cluster.local:443"
}

resource "vault_kubernetes_auth_backend_role" "k8s-default" {
@@ -13,7 +13,7 @@ resource "vault_kubernetes_auth_backend_role" "k8s-default" {
  bound_service_account_names      = ["default"]
  bound_service_account_namespaces = ["*"]
  token_ttl                        = 3600
  token_policies = [
    vault_policy.k8s_default.name
  ]
}
@@ -30,6 +30,6 @@ resource "vault_policy" "k8s_default" {
  policy = templatefile("bao-policies/k8s-default.hcl", {
    k8s_auth_backend_accessor = vault_auth_backend.kubernetes.accessor,
    k8s_secrets_path          = vault_mount.static_secrets.path,
  })
}

@@ -2,16 +2,16 @@
module "keycloak_client_tofu" {
  source                   = "./keycloak-client"
  realm                    = keycloak_realm.dev.id
  vault_mount              = vault_mount.static_secrets.path
  client_id                = "tofu"
  service_accounts_enabled = true
}

data "keycloak_openid_client" "realm_management" {
  realm_id  = keycloak_realm.dev.id
  client_id = "realm-management"
}

resource "keycloak_openid_client_service_account_role" "client_service_account_role" {

@@ -1,6 +1,6 @@
resource "keycloak_authentication_flow" "webauthn_browser" {
  realm_id    = keycloak_realm.dev.id
  alias       = "webauthn_browser"
  description = "browser based authentication"
}
@@ -18,20 +18,20 @@ resource "keycloak_authentication_subflow" "webauthn_flow" {
  parent_flow_alias = keycloak_authentication_flow.webauthn_browser.alias
  provider_id       = "basic-flow"
  requirement       = "ALTERNATIVE"
-  depends_on        = [ keycloak_authentication_execution.auth_cookie ]
+  depends_on        = [keycloak_authentication_execution.auth_cookie]
}

resource "keycloak_authentication_execution" "user_pass" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.webauthn_flow.alias
  authenticator     = "auth-username-password-form"
  requirement       = "REQUIRED"
}

resource "keycloak_authentication_execution" "webauthn" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.webauthn_flow.alias
  authenticator     = "webauthn-authenticator"
  requirement       = "REQUIRED"
}

@@ -1,6 +1,6 @@
resource "keycloak_authentication_flow" "passkey" {
  realm_id    = keycloak_realm.dev.id
  alias       = "passkey"
  description = "browser based authentication"
}
@@ -17,14 +17,14 @@ resource "keycloak_authentication_subflow" "passkey_forms" {
  parent_flow_alias = keycloak_authentication_flow.passkey.alias
  provider_id       = "basic-flow"
  requirement       = "ALTERNATIVE"
-  depends_on        = [ keycloak_authentication_execution.auth_cookie ]
+  depends_on        = [keycloak_authentication_execution.auth_cookie]
}

resource "keycloak_authentication_execution" "passkey_username" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.passkey_forms.alias
  authenticator     = "auth-username-form"
  requirement       = "REQUIRED"
}

resource "keycloak_authentication_subflow" "passkey_passwordless_or_2fa" {
@@ -33,15 +33,15 @@ resource "keycloak_authentication_subflow" "passkey_passwordless_or_2fa" {
  parent_flow_alias = keycloak_authentication_subflow.passkey_forms.alias
  provider_id       = "basic-flow"
  requirement       = "REQUIRED"
-  depends_on        = [ keycloak_authentication_execution.passkey_username ]
+  depends_on        = [keycloak_authentication_execution.passkey_username]
}

resource "keycloak_authentication_execution" "passkey_webauthn_passwordless" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.passkey_passwordless_or_2fa.alias
  authenticator     = "webauthn-authenticator-passwordless"
  requirement       = "ALTERNATIVE"
-  depends_on        = [ keycloak_authentication_execution.passkey_username ]
+  depends_on        = [keycloak_authentication_execution.passkey_username]
}

resource "keycloak_authentication_subflow" "passkey_password_and_second_factor" {
@@ -53,10 +53,10 @@ resource "keycloak_authentication_subflow" "passkey_password_and_second_factor"
}

resource "keycloak_authentication_execution" "passkey_password" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.passkey_password_and_second_factor.alias
  authenticator     = "auth-password-form"
  requirement       = "REQUIRED"
}

resource "keycloak_authentication_subflow" "passkey_second_factor" {
@@ -68,22 +68,22 @@ resource "keycloak_authentication_subflow" "passkey_second_factor" {
}

resource "keycloak_authentication_execution" "passkey_user_configured_condition" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.passkey_second_factor.alias
  authenticator     = "conditional-user-configured"
  requirement       = "REQUIRED"
}

resource "keycloak_authentication_execution" "passkey_webauthn" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.passkey_second_factor.alias
  authenticator     = "webauthn-authenticator"
  requirement       = "ALTERNATIVE"
}

resource "keycloak_authentication_execution" "passkey_otp" {
  realm_id          = keycloak_realm.dev.id
  parent_flow_alias = keycloak_authentication_subflow.passkey_second_factor.alias
  authenticator     = "auth-otp-form"
  requirement       = "ALTERNATIVE"
}

@@ -1,11 +1,11 @@
resource "keycloak_realm" "dev" {
  realm                       = "dev.janky.solutions"
  enabled                     = true
  display_name                = "Janky Solutions (dev)"
  default_signature_algorithm = "RS256"
}

resource "keycloak_authentication_bindings" "browser_authentication_binding" {
  realm_id     = keycloak_realm.dev.id
  browser_flow = keycloak_authentication_flow.passkey.alias
}

@@ -1,9 +1,9 @@
data "terraform_remote_state" "kube" {
  backend = "kubernetes"
  config = {
    secret_suffix = "state"
    namespace     = "tofu"
    config_path   = "../.kubeconfig"
  }
}
@@ -12,13 +12,13 @@ provider "vault" {}
terraform {
  required_providers {
    keycloak = {
      source  = "mrparkers/keycloak"
      version = ">= 4.0.0"
    }
  }
}

provider "keycloak" {
  realm = "dev.janky.solutions"
  url   = "https://auth.janky.solutions"
}