Compare commits


1 Commit

SHA1 d143a90228 · testing a few sample (joint) configurations · 2024-03-03 20:35:37 +01:00
279 changed files with 4580 additions and 3108 deletions
.gitignore · .gitmodules · README.md
apps/ (adguard, audiobookshelf, code-server, files, files1, finance, grafana, homeassistant, immich, kitchenowl, linkding, matrix, media, minecraft, monitoring)

.gitignore vendored (6 lines changed)

@ -1,6 +1,2 @@
# Kubernetes secrets
*.secret.yaml
main.key
# Helm Chart files
charts/
charts/

.gitmodules vendored, new file (3 lines added)

@ -0,0 +1,3 @@
[submodule "infrastructure/external-dns/octodns"]
path = infrastructure/external-dns/octodns
url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git

@ -1,6 +1,7 @@
# Kluster setup and IaC using ArgoCD
### Initial setup
#### Requirements:
- A running k3s instance
@ -27,21 +28,5 @@ The app-of-apps will bootstrap a fully featured cluster with the following compo
- immich
- ...
#### Recap
- install sealedsecrets (see the [README](./infrastructure/sealedsecrets/README.md))
```bash
kubectl apply -k infrastructure/sealedsecrets
kubectl apply -f infrastructure/sealedsecrets/main.key
kubectl delete pod -n kube-system -l name=sealed-secrets-controller
```
- install argocd
```bash
kubectl apply -k infrastructure/argocd
```
- wait...
### Adding an application
todo
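As a hedged sketch only (the repoURL, project and sync options below are placeholders, not values taken from this repository), registering a new application with ArgoCD could look roughly like this:
```yaml
# Illustrative only: repoURL, project and path are placeholders.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: audiobookshelf
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://git.kluster.moll.re/remoll/kluster.git  # placeholder URL
    targetRevision: main
    path: apps/audiobookshelf   # a kustomize directory like the ones in this diff
  destination:
    server: https://kubernetes.default.svc
    namespace: audiobookshelf
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
```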

@ -27,10 +27,7 @@ data:
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- tls://1.1.1.1
- tls://dns.google
- tls://p0.freedns.controld.com
- tls://dns.quad9.net
- https://dns10.quad9.net/dns-query
upstream_dns_file: ""
bootstrap_dns:
- 9.9.9.10
@ -38,7 +35,8 @@ data:
- 2620:fe::10
- 2620:fe::fe:10
fallback_dns: []
upstream_mode: load_balance
all_servers: false
fastest_addr: false
fastest_timeout: 1s
allowed_clients: []
disallowed_clients: []
@ -74,8 +72,6 @@ data:
dns64_prefixes: []
serve_http3: false
use_http3_upstreams: false
serve_plain_dns: true
hostsfile_enabled: true
tls:
enabled: false
server_name: ""
@ -92,14 +88,12 @@ data:
private_key_path: ""
strict_sni_check: false
querylog:
dir_path: ""
ignored: []
interval: 2160h
size_memory: 1000
enabled: true
file_enabled: true
statistics:
dir_path: ""
ignored: []
interval: 24h
enabled: true
@ -116,10 +110,6 @@ data:
url: https://someonewhocares.org/hosts/zero/hosts
name: Dan Pollock's List
id: 1684963532
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_3.txt
name: Peter Lowe's Blocklist
id: 1735824753
whitelist_filters: []
user_rules: []
dhcp:
@ -144,36 +134,13 @@ data:
blocking_ipv6: ""
blocked_services:
schedule:
time_zone: Europe/Berlin
sun:
start: 18h
end: 23h59m
mon:
start: 18h
end: 23h59m
tue:
start: 18h
end: 23h59m
wed:
start: 18h
end: 23h59m
thu:
start: 18h
end: 23h59m
fri:
start: 18h
end: 23h59m
sat:
start: 18h
end: 23h59m
ids:
- reddit
time_zone: UTC
ids: []
protection_disabled_until: null
safe_search:
enabled: false
bing: true
duckduckgo: true
ecosia: true
google: true
pixabay: true
yandex: true
@ -182,13 +149,11 @@ data:
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
rewrites: []
safe_fs_patterns:
- /opt/adguardhome/data/userfilters/*
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
filters_update_interval: 168
filters_update_interval: 24
blocked_response_ttl: 10
filtering_enabled: true
parental_enabled: true
@ -203,7 +168,6 @@ data:
hosts: true
persistent: []
log:
enabled: true
file: ""
max_backups: 0
max_size: 100
@ -215,4 +179,4 @@ data:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 29
schema_version: 27

@ -1,4 +1,4 @@
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: adguard-tls-ingress

@ -10,7 +10,7 @@ resources:
images:
- name: adguard/adguardhome
newName: adguard/adguardhome
newTag: v0.107.61
newTag: v0.107.44
namespace: adguard

@ -1,42 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
spec:
replicas: 1
selector:
matchLabels:
app: audiobookshelf
template:
metadata:
labels:
app: audiobookshelf
spec:
containers:
- name: audiobookshelf
image: audiobookshelf
ports:
- containerPort: 80
env:
- name: TZ
value: Europe/Berlin
- name: CONFIG_PATH
value: /data/config
- name: METADATA_PATH
value: /data/metadata
volumeMounts:
- name: data
mountPath: /data
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
cpu: "2"
memory: "1Gi"
volumes:
- name: data
persistentVolumeClaim:
claimName: audiobookshelf-data

@ -1,17 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: audiobookshelf-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`audiobookshelf.kluster.moll.re`)
kind: Rule
services:
- name: audiobookshelf-web
port: 80
tls:
certResolver: default-tls

@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
namespace: audiobookshelf
images:
- name: audiobookshelf
newName: ghcr.io/advplyr/audiobookshelf
newTag: "2.21.0"

@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf-web
spec:
selector:
app: audiobookshelf
ports:
- port: 80
targetPort: 80

@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: code-server
spec:
replicas: 1
selector:
matchLabels:
app: code-server
template:
metadata:
labels:
app: code-server
spec:
containers:
- name: code-server
image: code-server
ports:
- containerPort: 8080
env:
- name: TZ
value: Europe/Berlin
- name: CONFIG_PATH
value: /data/config
- name: METADATA_PATH
value: /data/metadata
volumeMounts:
- name: data
mountPath: /home/coder
resources:
requests:
cpu: "50m"
memory: "100Mi"
limits:
cpu: "6"
memory: "16Gi"
volumes:
- name: data
persistentVolumeClaim:
claimName: code-server-data

@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
namespace: code-server
images:
- name: code-server
newName: ghcr.io/coder/code-server
newTag: 4.99.3-fedora

@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: code-server-web
spec:
selector:
app: code-server
ports:
- port: 8080
targetPort: 8080
type: LoadBalancer

apps/files/README.md, new file (8 lines added)

@ -0,0 +1,8 @@
# File sync
My personal cross-platform file sync: Syncthing for my Android and Linux clients, and Nextcloud for my iOS clients.
## Overview
Both services share a common persistent volume, which lets each apply its own logic for syncing to other devices. The server acts as a relay.
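A condensed view of that wiring, assembled from the manifests elsewhere in this diff (excerpts, not complete manifests): both workloads mount the same ReadWriteMany claim at `/files`.
```yaml
# Shared claim (pvc.yaml under the files app): RWX so both pods can mount it.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: files-nfs
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
---
# Syncthing deployment (excerpt): mounts the claim at /files.
volumes:
  - name: persistence
    persistentVolumeClaim:
      claimName: files-nfs
---
# Nextcloud Helm values (excerpt): adds the same claim as an extra volume at /files.
extraVolumes:
  - name: files-nfs
    persistentVolumeClaim:
      claimName: files-nfs
extraVolumeMounts:
  - name: files-nfs
    mountPath: /files
```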

@ -1,48 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ocis-statefulset
spec:
selector:
matchLabels:
app: ocis
serviceName: ocis-web
replicas: 1
template:
metadata:
labels:
app: ocis
spec:
containers:
- name: ocis
image: ocis
resources:
limits:
memory: "1Gi"
cpu: "1000m"
env:
- name: OCIS_INSECURE
value: "true"
- name: OCIS_URL
value: "https://ocis.kluster.moll.re"
- name: OCIS_LOG_LEVEL
value: "debug"
ports:
- containerPort: 9200
volumeMounts:
- name: config
mountPath: /etc/ocis
# - name: ocis-config-file
# mountPath: /etc/ocis/config.yaml
- name: data
mountPath: /var/lib/ocis
volumes:
# - name: ocis-config
# persistentVolumeClaim:
# claimName: ocis-config
- name: config
secret:
secretName: ocis-config
- name: data
persistentVolumeClaim:
claimName: ocis

@ -1,18 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ocis-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`ocis.kluster.moll.re`)
kind: Rule
services:
- name: ocis-web
port: 9200
scheme: https
tls:
certResolver: default-tls

@ -1,16 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- service.yaml
- pvc.yaml
- deployment.yaml
- ocis-config.sealedsecret.yaml
namespace: files
images:
- name: ocis
newName: owncloud/ocis
newTag: "7.1.2"
resources:
- namespace.yaml
- pvc.yaml
- syncthing/
- nextcloud/

@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
name: placeholder

@ -1,17 +1,16 @@
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: kitchenowl-ingressroute
name: nextcloud-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`kitchen.kluster.moll.re`)
- match: Host(`nextcloud2.kluster.moll.re`)
kind: Rule
services:
- name: kitchenowl-web
- name: nextcloud
port: 8080
tls:
certResolver: default-tls

@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- pvc.yaml
- ingress.yaml
- postgres.yaml
- postgres-credentials.sealedsecret.yaml
helmCharts:
- name: nextcloud
releaseName: nextcloud
version: 4.5.5
valuesFile: values.yaml
repo: https://nextcloud.github.io/helm/

@ -0,0 +1,17 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: postgres-credentials
namespace: files
spec:
encryptedData:
database: AgBOgmqlfgiN2VqxNyYL6O+/jdzPmGg97zOXxZ7KiD07b4/2FdmlWgOZZp7oUpQ9RMV0WybC0jau2YVlgXB32afgJ3uinaAAhzZwvzy8dgapNpe8ClxnFINRhKKC9kxK7YeDwtptbDQn7YtEmVGHI66/71VyGy7NME4Pk0Y4FxxpF6KAZMAHNyez4JMa9V+XFtYV5G5bOkPY/ku4LcYntiMAlEaArF+re1m5nLQmZ4SVkWlOc41N4Hv1HrCv8qq2kj7zVR5/J2qW8NlzmdJJqv1AP1foELuITZZKxwNspxynNxhjXTX0fP6vzfJpxtzb2s/4Yh2uT/UPb2rOdcGaXjjHKxjSX23tG5ZT+z5lt0y9UEmUYytlcsYv9vsRqCmeFsB63S7aABeCRSOJyGLsuUc7xqSZ2ijDG38qLij+JPgoEIbSLfRYVGE5GMo9EbHt4N+ZIMpJYQXq0VhDip/r11SENfUa3XoautQ5uVR1D50FuSrN16t24bQXai9uifkBpDyvqbiqgv7s3qOjF9u8I0eyeJA0ZO1JO174B9SO3IcZYys8c87fSuWvFbGepLNqfneSIx93klDUdx3YEjqcrqib49+3/dn3RO9/puyhJ6O0TEZneToyauV3lxpR+XG/PDx7EQ88lELgD/AmtulsLHkYNgpoblFPbgDUeHhOgoBRAe22Hiy0Co4eh0SPVPyKhj8MyYhPtLEV+UY=
password: AgB2eY5aKJhEcJIgArGRrsqYf5pJJoXHRkplFpaqCCQW7X7WLREb+35HDijhnJSWRI2/LXDVy/8ArJe1LiiW+05aRY/9nvmjdpUmvsdQ6DK1mvirl8Py4JYueNrk2iUmI1h+ROyubBCvRBKxueQNkuwipKvk7nIlON6cwFnqp6GPcuWihSG/GZ2nSZmxmu+thdsM/S8DPaTW/N+Sut8DyarlCN94ZRiFVZIJialibfsJGQtL/uPX0W61GTkEU4m34IN9e+POdEdg3HuFMd3RvNQpndgPjaTv4A22TJRFs+rcHlcHr+5r8acVy1V+sZy97126Z7moeKDp1rFbG2/yMT1iS2oxQN4GJceTgMzSagqdn+KgD0N38OYvp+mRUQsl7+Fpglcq03vqbvxsc1fC78XpAAPMNA/pQDvtlS1qjuB7WCa5b3mkJxjc8efIuna9GAnDGh+djhlGHLEERnEfjlnpeDb/afRejUX+i6r00GnBxuRJfV+lKh4BJsnJm29nC4t10F6ff91Ngcjf+wCm5yWSFETZ9oFrPn10igGvoZwROJYABdtfMNjidGLkdKQnG1dj3EFu5XDn9vRqt8Iu/dEyoh7a2iGYDQ1lGpz+zxA/OZ9l/SuL6JUUwXq5W1/fSbtaBPdit4cwUTopq7AcpZkMuAQyVy6N9D0Hvjx432rCxmqyGU8PyjKHoAN+nuvTi79HtHR2wo4hJeIDoktdpxswSCe9VJEvqTFGQyCZtX3uEg==
username: AgATMaQ/BRCO9vx329YxGGUGl3E68Tm3coU6IO6pYm8f+Uf7ImH4l/P84mjGDLho1zBUfILPAvM4G5xG2qkkyW4mEuB8A7NNWAhXMOS5i1msNaV2oqLYNWCOG2lFO7unkYwPSyu9EyGn/Hq/kbGPAKfUf6dtDLEc+Y0S5Ue9YA2gYK4VYUec491+02EOoprGcfM1QdGPLBrunXn4krxtGm+eTsK8nd/lnm3DK+f5uGupO844i8T0mXE1xcliysBTZzxEVpmzPN8q4TMay6qcB2wOvEyngnGCfxJGTSjTrkydPFLcI4p6IONW5QAX9eQwo6ZDo56WVNgvyNW+ZJ6hmPP9nLeHnKb3rM91CIMM0GDRYc3VFsVXwBY/sj12hiompXEVQEp+EJUbgnDLK2lW+J602ZnzyHFgwGKnfdI8PHfKoxRVf06TXPdROu1mfXr5jOXc+++LoRotkVOuf2KXMip/7HlTkRlZXKkenhIqrTtQkENJ+aaxCKdQwgE8iDtmB6ZEBiMJq/dZgvn7qbcMc/SYF3l6YZKSU2L1359CRTeuQ6J6aDml+WHvgtwLH6sIgR9Sjgxid9XlhQ3/8f9UQdR6OpblsBZYn8gYEQ1WRr7H1R3IjENpA7LtburPYyogSk4eSFWR1hkwfiiTJrfwJCPEka28a7MqX0nCKZqzzUOQqXNGPX8W9rU8aA2HcnSPrzLoOV2av9h4icw=
template:
metadata:
creationTimestamp: null
name: postgres-credentials
namespace: files

@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: nextcloud-postgres
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:16
bootstrap:
initdb:
owner: nextcloud
database: nextcloud
secret:
name: postgres-credentials
storage:
size: 1Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true

@ -1,11 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: data
name: nextcloud-config
spec:
storageClassName: "nfs-client"
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
- ReadWriteMany
resources:
requests:
storage: 1Gi

@ -0,0 +1,155 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
ingress:
enabled: false
nextcloud:
host: nextcloud2.kluster.moll.re
username: admin
password: changeme
## Use an existing secret
existingSecret:
enabled: false
update: 0
# If web server is not binding default port, you can define it
# containerPort: 8080
datadir: /var/www/html/data
persistence:
subPath:
mail:
enabled: false
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
# Default config files
# IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
# Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
extraVolumes:
- name: files-nfs
persistentVolumeClaim:
claimName: files-nfs
extraVolumeMounts:
- name: files-nfs
mountPath: /files
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
# configs:
# config.php: |-
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
# configs:
# s3.config.php: |-
# <?php
# $CONFIG = array (
# 'objectstore' => array(
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
# 'arguments' => array(
# 'bucket' => 'my-bucket',
# 'autocreate' => true,
# 'key' => 'xxx',
# 'secret' => 'xxx',
# 'region' => 'us-east-1',
# 'use_ssl' => true
# )
# )
# );
nginx:
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
enabled: false
internalDatabase:
enabled: false
##
## External database configuration
##
externalDatabase:
enabled: true
type: postgresql
host: nextcloud-postgres-rw
database: nextcloud
existingSecret:
enabled: true
secretName: postgres-credentials
usernameKey: username
passwordKey: password
mariadb:
enabled: false
postgresql:
enabled: false
redis:
enabled: false
cronjob:
enabled: false
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
## If defined, PVC must be created manually before volume will be bound
existingClaim: nextcloud-config
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
livenessProbe:
enabled: false
# disable when upgrading from a previous chart version
hpa:
enabled: false
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
rbac:
enabled: false

File diff suppressed because one or more lines are too long

@ -1,11 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ocis
name: files-nfs
spec:
storageClassName: "nfs-client"
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
- ReadWriteMany
resources:
requests:
storage: 150Gi
storage: 100Gi

@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ocis-web
spec:
selector:
app: ocis
ports:
- port: 9200
targetPort: 9200

@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: syncthing
spec:
selector:
matchLabels:
app: syncthing
template:
metadata:
labels:
app: syncthing
spec:
containers:
- name: syncthing
image: syncthing
resources:
limits:
memory: "256Mi"
cpu: "500m"
ports:
- containerPort: 8384
protocol: TCP
name: syncthing-web
- containerPort: 22000
protocol: TCP
- containerPort: 22000
protocol: UDP
volumeMounts:
- name: persistence
mountPath: /files
- name: config
mountPath: /var/syncthing/config
volumes:
- name: persistence
persistentVolumeClaim:
claimName: files-nfs
- name: config
persistentVolumeClaim:
claimName: syncthing-config

@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: rss-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`syncthing2.kluster.moll.re`)
kind: Rule
services:
- name: syncthing-web
port: 8384
tls:
certResolver: default-tls

@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- servicemonitor.yaml
# - syncthing-api.sealedsecret.yaml
images:
- name: syncthing
newName: syncthing/syncthing
newTag: "1.27"

@ -1,11 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: minecraft-data
name: syncthing-config
spec:
storageClassName: "nfs-client"
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
- ReadWriteMany
resources:
requests:
storage: 1Gi

@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
name: syncthing-web
labels:
app: syncthing
spec:
selector:
app: syncthing
type: ClusterIP
ports:
- port: 8384
targetPort: 8384
name: syncthing-web
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-listen
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.5
ports:
- port: 22000
targetPort: 22000
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-discover
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
loadBalancerIP: 192.168.3.5
ports:
- port: 22000
targetPort: 22000
protocol: UDP

@ -0,0 +1,16 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: syncthing-servicemonitor
labels:
app: syncthing
spec:
selector:
matchLabels:
app: syncthing
endpoints:
- port: syncthing-web
path: /metrics
bearerTokenSecret:
name: syncthing-api
key: token

@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: spacedrive
spec:
selector:
matchLabels:
app: spacedrive
template:
metadata:
labels:
app: spacedrive
spec:
containers:
- name: spacedrive
image: spacedrive
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 80
volumeMounts:
- name: storage
mountPath: /data
volumes:
- name: storage
persistentVolumeClaim:
claimName: spacedrive-nfs

@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: files1
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
images:
- name: spacedrive
newName: ghcr.io/spacedriveapp/spacedrive/server
newTag: 0.2.4

@ -1,11 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: lldap-data
name: spacedrive-nfs
spec:
storageClassName: "nfs-client"
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
- ReadWriteMany
resources:
requests:
storage: 1Gi
storage: 100Gi

@ -22,13 +22,13 @@ spec:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: data
- name: actualbudget-data-nfs
mountPath: /data
ports:
- containerPort: 5006
name: http
protocol: TCP
volumes:
- name: data
- name: actualbudget-data-nfs
persistentVolumeClaim:
claimName: data
claimName: actualbudget-data-nfs

@ -1,4 +1,4 @@
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: actualbudget

@ -1,11 +1,27 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: "actualbudget-data-nfs"
spec:
capacity:
storage: "5Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/actualbudget
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "data"
name: "actualbudget-data-nfs"
spec:
storageClassName: "nfs-client"
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
volumeName: actualbudget-data-nfs

@ -13,4 +13,4 @@ resources:
images:
- name: actualbudget
newName: actualbudget/actual-server
newTag: 25.5.0
newTag: 24.2.0

@ -1,17 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: grafana-admin-secret
namespace: grafana
spec:
encryptedData:
password: AgAU6g/CwKj+1gPpt4DLvLsS0YCvJdVHWw4W4bRhibE9brVvcJtGB3D9MTJrSLVVwusaE6OR59og7oW5ge3yTd/9bbclXYLrxEi7OwvkQjCvo8MfD8yhJO9nV4Xs9Mjk2Z4SHGYuq6wvcssuJrpz5f0XEC7ocTRA+u0UaE+/b4FrYF71uyKGvj8GSXgLZUjGPFsGfPzwJn7cLBmlclVHx1xGbFpUc042m5Mulpn0QolFQnOwZiW4PL8pQyz1MXVRwCsz0RJd5apZL3XJ4X7BLMoAp+diHQ2xi3zoU9VScp+J2QgvFdRKgDa6v7Jz1f+HCwq5W/DoegwFXBrcMIfF2YrnvTnc1PCVwD9IHOeylO7J2hfi8teQiqTvvRlVgdBTLqoqlVovemf5k6ke6JfjTwnsJjTNnL7MKN5Qt0o7N2XRZ3ba9jp8cKbI7fyFQKaU2QEf2PIkp82kEnixmpA1aATgeA3W4E5Km7sKHUEB81+pwnOe54tzD2ShgQX/+UiswhWYTT+gdZKL1udBBemUDC0z9PSJNTPTy+hq+G4CIzVQUYxlioM3c+3geF7YLU8yXisj84pk44GN9KX3z5x+M2+LZL7agAWPUjxtrP2V+id7dNJQfCm0aSMeo57dVfb4zlBUAAgKIKjX+j1KqCVqE9zEO2F/QX7mY6MJTP2me3wmY7JAVRJ7d6bbkyyoDhs8JErLYLp0A+Eh+qx8nWgM9ErPVSA0
user: AgB8ZLG2EuERjg1nKdH/xadbUuIR2c8a9gF5fE8ctrp4DNDLLuuqmjyoHRiWpkrtfnE1yKg1rPP+asV9Lj5iVmE9J+OB3QUOeFS4MHciBNj7pa68zfFgnHP4kxMX6aXyKRQrYruYjHwfzCpOM1zyTEphuGlnokjQXxjF/mZsoM2NWn7WGReqfxqH95tJXfs9AUC5vVv/PHqd+KKRZH7+G1AnWVJ7RFQHedR7wyftO4/rkm8deMuZWtOLl25fAOyOr7+hSqT69s9/uTKSLJXjobSqtulqsR+v5lkwx2ThNKzmcEcuoenKG6lk8XLRSIscccZH3JTPh6IknQWUOC4nmYj+XUxE8Go0RX/4eL+D/6FrYrtp0gr3HOCLAGU4vAHMeKfJoyqykJVnvY6QY6bFgaziyOlWaoEHpg6g0vHHDwyX7HIDcQfJZGOLH9dhrWJ2sOkzyuuxfqWEgz/M2eBW4EUAudHwfTLPocSMUI+D6fjeciMojet5uxWMP7ZHh/E061f5+Vfk6CKYd9Kpi69Xah8KEyyHYP5NImkdIwjgllaEAd/FBE2+QJyTVZlUQC7y9ObagDMCUFaFbTS5QOLh5BOJDL5buEYFWG0IhoH47SC/pKeEOQH//uvoo27K9zvxTOQN1YOTrxCozmexMOsTIdhvU0dOnJDBrThSHKYLCeIokDOgUUT52FqDH51RoLoK3UkyGbMoq+M=
template:
metadata:
creationTimestamp: null
name: grafana-admin-secret
namespace: grafana
type: Opaque

@ -1,16 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: grafana-auth
namespace: grafana
spec:
encryptedData:
client_secret: AgCEdC1/ERlPQyQP+bd9gcW33Yrvl4uRbx+RF5AY4vYAquOzxmLTygMl/WZlB5wlCE5idIHgto6/fUWVZrQbmfClRqsW2pFoddKQAtS9cQNXwMjLCm7e0lXk9GM9O3ZwktmklFbCu8XewHmefGHhoJ28vPxPMaINv1fM4zYKvNz5RHf0dJfTHgxb68wRYjAbE/eJpRcVE3a29Yw6Gfa8Mb+cFI7RTHvjuv9LBgWqM6b3qvvJ4wYR2WKuiQrnJ5xAtHpMAI/2R80qq151wlaZueDZ1PwjRBHURkmPTmwZnrMrmIugNge7Tpww+ArZlG9kDfSu1aTJidbXbcpN6fyt1qARTCYrBlbn60PTYLnPL/NObvMCpjS6DsYsYz7MJ7WoOupu46Ib5paZHmak+CilC6lb9LjJj4EKfRsagZmWT07JavhHBW/tqjB3GToccIz4fOAOdA9aU51J4wCL2ctp2SgzCEKe2EaBK/f9nDd9ASmmon9PDwRDVtG8yTukrNcZHNzodi09Af81DB0RNa36Z3Sjt5xu94paN+mjiOWGf2JduVEq+60NbPvDbPE9e1aVH3DdQcij2WGZaTE8dAGLSsLoOkIq3m2E+Mbk1Re1gI9H18xJM72ivb5uDe7pzReyvO5DY4Pfq8JgQhPxWcDq9ScmWS6Bb+jdCKytFq5NafSAl+akPbbwN+1GFu33if/P5D9I2TwOA8V1wyVU
template:
metadata:
creationTimestamp: null
name: grafana-auth
namespace: grafana
type: Opaque

@ -1,98 +0,0 @@
replicas: 1
## Create a headless service for the deployment
headlessService: false
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
enabled: true
serviceMonitor:
## If true, a ServiceMonitor CRD is created for a prometheus operator
## https://github.com/coreos/prometheus-operator
##
enabled: false
envValueFrom:
AUTH_GRAFANA_CLIENT_SECRET:
secretKeyRef:
name: grafana-auth
key: client_secret
ingress:
enabled: false
# credentials
admin:
existingSecret: grafana-admin-secret
userKey: user
passwordKey: password
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus.monitoring.svc:9090
isDefault: true
- name: Thanos
type: prometheus
url: http://thanos-querier.monitoring.svc:10902
isDefault: false
- name: Loki
type: loki
url: http://loki.monitoring.svc:3100
isDefault: false
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
## ConfigMap data example:
##
## data:
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps:
default: grafana-dashboards
grafana.ini:
wal: true
default_theme: dark
unified_alerting:
enabled: false
analytics:
check_for_updates: false
server:
domain: grafana.kluster.moll.re
root_url: https://grafana.kluster.moll.re
auth.generic_oauth:
name: Authelia
enabled: true
allow_sign_up: true
client_id: grafana
client_secret: ${AUTH_GRAFANA_CLIENT_SECRET}
scopes: openid profile email groups
auth_url: https://auth.kluster.moll.re/api/oidc/authorization
token_url: https://auth.kluster.moll.re/api/oidc/token
api_url: https://auth.kluster.moll.re/api/oidc/authorization/userinfo
tls_skip_verify_insecure: true
auto_login: true
use_pkce: true
role_attribute_path: contains(groups[*], 'apps_admin') && 'Admin' || 'Editor'

@ -1,21 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: grafana
resources:
- namespace.yaml
- grafana.ingress.yaml
- grafana-admin.sealedsecret.yaml
- grafana-auth.sealedsecret.yaml
# grafana dashboards are provisioned from a git repository
# in the initial bootstrap of the app of apps, the git repo won't be available, so this sync will initially fail
- https://git.kluster.moll.re/remoll/grafana-dashboards//?timeout=10&ref=main
helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 9.0.0
valuesFile: grafana.values.yaml

@ -1,3 +1,4 @@
apiVersion: apps/v1
kind: Deployment
metadata:
@ -14,14 +15,14 @@ spec:
spec:
containers:
- name: homeassistant
image: homeassistant
image: homeassistant/home-assistant
ports:
- containerPort: 8123
env:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: config-dir
- name: config
mountPath: /config
resources:
requests:
@ -31,7 +32,6 @@ spec:
cpu: "2"
memory: "1Gi"
volumes:
- name: config-dir
- name: config
persistentVolumeClaim:
claimName: config
claimName: homeassistant-nfs

@ -1,4 +1,4 @@
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: homeassistant-ingress
@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`home.kluster.moll.re`) && !Path(`/api/prometheus`)
- match: Host(`home.kluster.moll.re`)
middlewares:
- name: homeassistant-websocket
kind: Rule
@ -15,8 +15,9 @@ spec:
port: 8123
tls:
certResolver: default-tls
---
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: homeassistant-websocket
@ -26,3 +27,6 @@ spec:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"

@ -9,10 +9,8 @@ resources:
- pvc.yaml
- service.yaml
- deployment.yaml
- servicemonitor.yaml
images:
- name: homeassistant
- name: homeassistant/home-assistant
newName: homeassistant/home-assistant
newTag: "2025.5"
newTag: "2024.2"

@ -1,11 +1,28 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: homeassistant-nfs
spec:
# storageClassName: slow
capacity:
storage: "1Gi"
# volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/homeassistant
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: config
name: homeassistant-nfs
spec:
storageClassName: "nfs-client"
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: homeassistant-nfs

@ -2,12 +2,9 @@ apiVersion: v1
kind: Service
metadata:
name: homeassistant-web
labels:
app: homeassistant
spec:
selector:
app: homeassistant
ports:
- port: 8123
targetPort: 8123
name: http
targetPort: 8123

@ -1,13 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: homeassistant-servicemonitor
labels:
app: homeassistant
spec:
selector:
matchLabels:
app: homeassistant
endpoints:
- port: http
path: /api/prometheus

@ -1,4 +1,13 @@
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: stripprefix
spec:
stripPrefix:
prefixes:
- /api
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: websocket
@ -9,21 +18,22 @@ spec:
# enable websockets
Upgrade: "websocket"
---
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: immich-ingressroute
name: immich-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`immich.kluster.moll.re`)
kind: Rule
services:
- name: immich-server
port: 2283
middlewares:
- name: websocket
tls:
certResolver: default-tls
entryPoints:
- websecure
routes:
- match: Host(`immich.kluster.moll.re`)
kind: Rule
services:
- name: immich-server
port: 3001
passHostHeader: true
middlewares:
- name: websocket
tls:
certResolver: default-tls

@ -6,29 +6,19 @@ resources:
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
- servicemonitor.yaml
namespace: immich
helmCharts:
- name: immich
releaseName: immich
version: 0.9.2
version: 0.3.1
valuesFile: values.yaml
repo: https://immich-app.github.io/immich-charts
images:
- name: ghcr.io/immich-app/immich-machine-learning
newTag: v1.132.3
newTag: v1.95.1
- name: ghcr.io/immich-app/immich-server
newTag: v1.132.3
patches:
- path: patch-redis-pvc.yaml
target:
kind: StatefulSet
name: immich-redis-master
newTag: v1.95.1

@ -1,17 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: immich-redis-master
spec:
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-data
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi

@ -1,3 +1,4 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
@ -9,27 +10,16 @@ spec:
initdb:
owner: immich
database: immich
secret:
secret:
name: postgres-password
# Enable the VECTORS extension
postInitSQL:
- CREATE EXTENSION IF NOT EXISTS "vectors";
postgresql:
shared_preload_libraries:
- "vectors.so"
# Persistent storage configuration
storage:
size: 2Gi
pvcTemplate:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storageClassName: nfs-client
volumeMode: Filesystem
size: 1Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true

@ -1,11 +1,26 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-nfs
spec:
capacity:
storage: "50Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
name: immich-nfs
spec:
storageClassName: "nfs-client"
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Gi"
storage: "50Gi"
volumeName: immich-nfs

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: immich-service-monitor
spec:
endpoints:
- port: metrics-api
scheme: http
- port: metrics-ms
scheme: http
selector:
matchLabels:
app.kubernetes.io/name: server
app.kubernetes.io/service: immich-server

@ -22,21 +22,22 @@ env:
secretKeyRef:
name: postgres-password
key: password
IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}'
IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
IMMICH_METRICS: true
immich:
metrics:
# Enabling this will create the service monitors needed to monitor immich with the prometheus operator
enabled: true
persistence:
# Main data store for all photos shared between different components.
library:
# Automatically creating the library volume is not supported by this chart
# You have to specify an existing PVC to use
existingClaim: data
existingClaim: immich-nfs
# Dependencies
postgresql:
enabled: false
redis:
enabled: true
architecture: standalone
@ -51,6 +52,16 @@ server:
main:
enabled: false
microservices:
enabled: true
persistence:
geodata-cache:
enabled: true
size: 1Gi
# Optional: Set this to pvc to avoid downloading the geodata every start.
type: emptyDir
accessMode: ReadWriteMany
machine-learning:
enabled: true
persistence:

@ -1,42 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: kitchenowl
spec:
replicas: 1
selector:
matchLabels:
app: kitchenowl
template:
metadata:
labels:
app: kitchenowl
spec:
containers:
- name: kitchenowl
image: kitchenowl
ports:
- containerPort: 8080
env:
- name: TZ
value: Europe/Berlin
envFrom:
- configMapRef:
name: kitchenowl-config
- secretRef:
name: kitchenowl-oauth
volumeMounts:
- name: data
mountPath: /data
resources:
requests:
cpu: "50m"
memory: "100Mi"
limits:
cpu: "100m"
memory: "1Gi"
volumes:
- name: data
persistentVolumeClaim:
claimName: kitchenowl-data

@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kitchenowl-config
data:
FRONT_URL: https://kitchen.kluster.moll.re
DISABLE_USERNAME_PASSWORD_LOGIN: "true"

@ -1,19 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: kitchenowl-oauth
namespace: kitchenowl
spec:
encryptedData:
JWT_SECRET_KEY: AgAclRIJS25ACVe4NqLQbAree6c6WpTBHnLpe3ZQJ0ScHG/EbW/ooABZj7y1ABAn/mCc+hBYXYHm81FNUfUtSuLKi2UlORbTCsfmisYH49WX0Lpku9LTM/8az9tjE0tjUUrJZcRUuJfdNJMDPQx7IPjUQ7sKk/exFnkPEbK98+AElXyHpPKXd9dxiCgll0n+ksbF9BDUR8KY8IB2Zvh4cXPww578qe/9XYnxLV8uY9K8KPvhl7NI40SIaL4PX8KmsDlBh1bpOR/OxhIwAGEZDQp/KROy6msrIOYW4SHM9nlSUSD4WvV8UjcbV1oNnYpE1usFOuxSfQlJ1zlFepKUv40JykyunvQv9nqVbEogsrS4o5N3gNEaB9yyFSHIlevp32LVpAuZu3cNplmT+Zg7+ODpCWIcVgmOAeapvB+X7H4ScbKVcYLAzrRFDtnS4Vo1M+RERhr0AuMU/tz0lGs99oRkCw2ZIg015R125u0VcRNqzgCtbBM5BFiKiP2kYrHn02Q6o5tRWxDQfrfb0mnfD5c/gM4+btlfM6DZMpr/l1kLlm8PDEpPGbkhK1XiAyJ4erHPDMLcmZXrSyxX9R1g8n7vnLnkqx5LkGmnltQI2FM7StxC6IrMlxY0nPnkq1lHhTz7yCpQJNXgfXZLVvov+f6jlD6WJhYHZCL/hIFfx3ybjGYZwJ0m84lH0OQJQw5dtsbPVqqoYZIPieqdRmHw7M7TTmFuQJXD94lZj5gsln1sMqs=
OIDC_CLIENT_ID: AgDOxWtGCiFrIP5aWHimrR6bu0uMS1/i1v1Kzo1lIR3j3zlw/Da2oCPNx1ZuhNAp9UMIs2euc8mtrWkyv4R6pcci+IxXiGzlQNBSkMfWu4DwhwlEdnVyCVehfE00t0ytBxX6NeLfS1b8JOtH9yo3e22fdaeTwn/iwGLNwi1BJxMmzk9pVp7jzXH09Zia8UvUmXw5GyGpIOIiGcIfaXkr1ZnY2l30jTw8eS7HaouzfWHWTpNXkMLN3qim4vs/B1Sgz/y9tUyVo/qMhWLdEcVklYHT7xHx03SPD7RtK+zdYZCqvDtj+tsdpYHt05zeGV+wflNQuiocjwP8TW7vhtbjKrf9lQIxB5CErju178ELOVrpsPBAYMgdEl7qZeKdSpydIwe3VbOg3XJ7O/Ps1KSnRYwrCvgE4ZCMm3geHyJxSiKRhBcuVYz5JkNd5ylD0Eq9NL5RCqJ4szL9NaGNPbzkvcdZzAnbTYFTzyqZ+XHX/BUFisXl5bKNHtaqAsOK8woGYnxJiawKRrwDUmHXU4RB3QiPCPhHSLU7OkvU+XDhyQqLa8bKEQzj9dUf4bX3Savk4noiRsXMYJznAlgMPo0Q4taxVIoyQHELYwIIdP5YRuw27B9wKUR7e2hhu4FTOEcyhwuFql3OuAjq8HJCDX6+COkL2WYFFxWBnbSCYEdzfbMBCHjrUUonijcQluU2VbaHODNMAvB16MRKapAW
OIDC_CLIENT_SECRET: AgAylnSUXwInlh/WvyCiFz+8asbCSZA6kk84Rt6l7bHVYw34c58lJHsZK2OvOIlHuaMe/ewnTqxVd0hI1Azl+wd/5NygMYlntKquq0vuzlhLrGc3u+0SOn9N2P6quA3slF9KR94CYsDx9ogy+EsEoA1yrsydB8S0g9W8syraR1MtpM0ZkcJ/D78OZ6qzyXUuBNAZc+iX/r96NvoMiGNYavgG7npOJh/pkKNYPuNkt4zpbAFjVyoCfgZd4V2nmZ6dhEVy8odW+jcsMn6OJ1OZVlPb1beq49lBEcaJqk83ZtKbq2evtBYHw9YAnENVq92ecenw/YL5LXUhOxeN0M9Amo99/O6pQwwrT1mtZqhTTeTIZTAxqmJKgyxGhE4DJUR/s71bc7K9hd2WvdAYnCyVC2uGa0MwXp4V7UuaN9GerldT8lcFxOpRnD7yroqVTqebjAJIkIinp5NNZ2ZP/LCiCwKKHHT19Pchn615WOPTofC6es/spIdQ8a1Nf2J5YzvRjsduFS55U6tMaC7cuV8kqKH9xTTf/sDHt+68wVEAO9koAe1zpO+zR2Pq3VuCnvcDGIwXopXjvyjfujEEhEWZl51PVJLZqtkP5Wg2wHvlgjJBbbIGTrqh4xa9pK7wLDM2hUFx1q/YKqwfP0EGVTc96G8Wermj0DtIqclqFLr54DtxVe+Rr8J4edG6YQ26/seYsrZ1Oq2PejHQt8u9EzQYAtYYlBsw2ujCWys6KrbhaVr3
OIDC_ISSUER: AgA2JPd5axkL5YIRA95qm/iH8wgM2J0AjKjgGClWabYJ3UKIk0hi/L/zR+1Pw9Z+6amYXj7Q0FxLqCcNYG+H5ABxnqUi7Gl8gvfVbegaO3q5QiO27g18RMDssNHDSun8PPaxHBvBD68hxgsaXntu8sZavCGdwEK0TzLJi7eH/4jtHlofzfYgaCsGqeOBgvs/q87PVJ/qxazXlY9e7abbRAKl9ZMY7Wga58/IU2HhWwYMvI53yQyGMcKf3XiI9iNHgIcj1+TmlgQo+PRKyopNfzgFbey7on8woQXphY+ioqQ0hyworpxAVoWlvzKKopt1xBDr4zxzkzbWyxtjwPVOOH3iyenZz8tZa/JkNYWxkWHbh0KCs9yUIji3D3shQOFM/NtE17THsQm3NgpZ2lg9ET1v6uXqwfOLiQ+J1JQLwNFnYeruH2lK4EGt2nDCq2VycOIjW4kMpiJ4LiT8gap9HwYjTpAn+opicYn5e9fmpgiHdMPvrsG1m9edg0cbwdSpEelliHnAUfKsxMV2e1fLsga6yrhBLSXIQs8rbURRa6wqVvGoLB86a9Q5Rm94Jfm0Sa9v5LMGRYvqO5LbLrjrR8e/2r17pHQE8ynMQCAW1yVTe09FcgRwYhDUohfThtjIh16sdoC97eUel7fo/POt3atP69JsCIBZstprhVtBIBssmavpIotVqi8F2/yUkhrZR26mH3gsOxkNTEk6XzHHtJRu0cU+BmObTvYgMi3DHg==
template:
metadata:
creationTimestamp: null
name: kitchenowl-oauth
namespace: kitchenowl
type: Opaque

@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- kitchenowl-oauth.sealedsecret.yaml
- kitchenowl-config.configmap.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
namespace: kitchenowl
images:
- name: kitchenowl
newName: tombursch/kitchenowl
newTag: v0.6.15

@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kitchenowl-web
spec:
selector:
app: kitchenowl
ports:
- port: 8080
targetPort: 8080

@ -1,40 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: linkding
spec:
replicas: 1
selector:
matchLabels:
app: linkding
template:
metadata:
labels:
app: linkding
spec:
containers:
- name: linkding
image: linkding
ports:
- containerPort: 9090
env:
- name: TZ
value: Europe/Berlin
envFrom:
- secretRef:
name: oauth-config
volumeMounts:
- name: linkding-data
mountPath: /etc/linkding/data
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
cpu: "1"
memory: "1Gi"
volumes:
- name: linkding-data
persistentVolumeClaim:
claimName: data

@ -1,17 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: linkding-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`linkding.kluster.moll.re`)
kind: Rule
services:
- name: linkding-web
port: 9090
tls:
certResolver: default-tls

@ -1,16 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- service.yaml
- pvc.yaml
- deployment.yaml
- oauth.sealedsecret.yaml
namespace: linkding
images:
- name: linkding
newName: sissbruecker/linkding
newTag: "1.39.1"

@ -1,22 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: oauth-config
namespace: linkding
spec:
encryptedData:
LD_ENABLE_OIDC: AgBfiwAdh1RD8gpzzVnF6y3c8kn5NFbms0T74pmj1aaFD0uQd/dqIxrvZxiBMzgqdrrVUCxRlpG3tiJYMpDEVXQPwRj8uuAizYFEP4uuGFpCtErjb+Y+aOdXSPRCVPbIqROiE+qswNT5KkJm6rR3FPckWipiQdzfpoq+G00MARKqzo4sNrx8+JzgoBFbS/tVPej3VdmDy8iceeVCQ2dJs+roRw+jOfCxqiUjHH5SyoZPnCTlmTZcwr3F9gBGEWGyEl0FNVgL/ku7WEL2COeIIE/2r4RWPqZU4DkPtIv/J4Dosxuw7EQnC5kzTgJGB/TF9Mn/FcS7iYQZKATvWzjC/cTdwZ+aG1qZGz87wM4fxQ05GhgEUkVOlGgxlzxk9BUL3l9Xsaa1KTivaQ4dvB/jm3j12b9rPfO2CXB4jPK7Pk1vKbr5icPXa8dyFY4hiJ1PYh/295a7ZM02n3ao4uHw8KSaxK9GxsCFBxr59q+6ZpjOGnTHZvKGfJHn3jEcIT8k89J5gqrlWjLqR50TEJj3jPZ3QDPN/u1Bf0wAoIOGK7jpnwIXQpacudU2pxZNWC2KtdZbcF9QzuOqKVFABmabdCkBtLa34cyZdweBaCfLRg1Ic0yxrWGzctnwM0Rfd+OzsEpLB7GHKllV597znF5QiK3rQrl5TmgHf0jC1OmMsBkNYCRo2AhYsBN5nli0dgD1y028rsW6
OIDC_OP_AUTHORIZATION_ENDPOINT: AgBYHGF/D1tzWRy2issyi3oZjYK/m5cuwdlHuqboPBBHybfTwCkvSfWAL8FSGAgoLYWZEwwoGr5GDSCnAWfhoYwVgCxWw7y4IohCkUSrwbzTVwJxoRTeWIilzwAdH2zX3mZJVHTYCQiLTCjv+MPRsF5FZP4+STtgfecK5Fjgb3sIfZa/yazbN6jf7bdaR91yX2iddeZn7B80aQvu3jOkujPX9yCJz/H+1BN35CtC7KLhFrKgw6uvfQi3g90On5uBHwrC53ve4rEvcOgi575WBA3fDYW4RxNb8yxxocQOuqNRa3jUa/RIjOnX9tFoG/QIXY89DUSsgpAZFWIlV4np4KOe4V+xWT1khz2TPRDaakDumkNmE/sAjpYfGX13+88A0N/hO9o2aXJhEXOFZ5eulyAwDdIodMg6lQTG43OhdSZLFfR5TrnzkVR1pIzwPMutgf8jai4dyYK+SV+oyGeZosNpD484gzhjMGkz8Djs7/cADcvJVmGfWWYvjenlfvFC5lK7F+355o9L38fUeG2lhRw8NsPWuzsiH7m6GaSEfEPcdqj/AohArtUnf9cfF04tocPwCuNLeIgR42W+cfapD9g14sN9v4Z7g/5IQSqL2EhNR9xQMoIT9BkBEhI9JR518Yds/Z89Lg0HsMw6UCrkg6um1If80gjHyRa7JrfxzepKifWNn9/suHlhecaeXcSOKMCCFYDtHyvO4/gCe8/PXjyVc/SUZNBDJsDOikVvVJfKqDlEkimfl+BISzkMv5ALxGDHMRc=
OIDC_OP_JWKS_ENDPOINT: AgAg+6Ty8o++uc+UfaVNtJviu1A771rtazn9pj7KafqgIx1xNuPtUBwGEScfku8glUy0bS8r7MyMNlUe3sIYfnDKQmmHBVHFoiJ0IjLZP/pV51A4fT2DUrFv9pnIemqjFD2jew5ToXhuHwUc+Y3LvX8M8aPpB/J+DjIIvgKQe2faHyWt4c24jOZaH56xJhI114LIXD0A7Qvq4O7UfpIUNfYSMojTH7VURptL18Mh1YRKJmil1PmRIstX9Smr3ltAG9Rw032v9ISdDmV+OyuhPo1Wk3AU85RdOQ9hZGMSFXQXFEqQUp/N76n875KDUMT4W57//YGFRUrm8w4oB+PlkjGV2pG7DNYVxUZEmi5UXwY8fTI+KljAZHSk/YOSku+gc75hWYXX6s3g/R6/IWmr9sV5O5N0bc3guQ96nnRmjuzb3HebM0hPfPS+6/xn29erTDETs1bvfCQ9oWNMomDsH4FVz5gC+zwrUvUD3Af3TVsU5g+lfOE83+pmMMWcJFn8Z0uldud0jR27o/ftKgBDmUaGi3zCQWJrYxtXehBy9fo0K7QpbYnLHvNnXVX9fGQ0PZNMc8N0wYZUDuhOv114lqfbVR5dHYoger4iT0xC+SHcWGgvyjqb4YI7bfnY+bnh8TLfuI/ttw1l7/ev79/yvjrtgPuBwN9ygUxENLR2Ur1Cc/u72d+ST4NIg5esth+y9Z2JdP/3+nlYctnyakWhEkUyBPK+5Iyacv29t1bMXoesB6Ub5WsXaw==
OIDC_OP_TOKEN_ENDPOINT: AgBRpyDYbQlq7dcqJ2Gd+CfSRZRgvpuUsIngAXX85dt0dChYhQ/YvnFl9r3GqsXNBrWQBa0uE7t+uXxo+oobjgfSibq28kQBL92PM/s7OctINTJBN3q0Gdv43vnliS69/WR21kZkLuAmPne1nL+FZJXavIUF8N6CX3gKb4WMdv+Rl4AAmUo9vsB1C7mxDcS1CppUeJ8KdF5qkb8Xag28Lv2rDA7W9Ne+tNGFi4q/UWqdU76iUxrHu/Kfg6RD0rYlOaW+0b3A5Rvj5oU8ho1Z/eIsA9NaZNYBQjtGAk9fiD2EB9IcFi6kYv5zGZsRcPTzMv/35Wh+lV8I3mDRGcfkmzQsZ8Hcfx7c3zpemZqvY7LMgrvO5AatWKYZUFPsTcaT/mVFmAaVuq5PqeuCQhqekug3rdQxxf2n1cWMMnbptf4g19oTFKx3FtXImpPk97Iv9RbMATKHE/nnfin5/7PtQNn9VBBW785hzzB7cs+IiEzdjGu7MnFlKaGEoS94eZtgLSEmpIMeXFW6V0rXHQ6J+CUjBjiEpAh6LKsh4De+IrWFuzAYH0jwowuY2r4VX3jx+Yv8SFEJ5AfDYbvx8qX1zy1dGfsQvrAai298QCOTizLmeuJLMIC0qlNLZWrYhf8XzF2/N8/bC0R0Pyr+6Jxo8HrtHyFcnl8ckHycWosCOkQmQIbX+vOffOpQ6vYUkHM4MqIAiTl6G+bxjtxBZUTXvqX1sKCEO7pccL8gJZQ+ICN9nP785JAd4eW2JeGW
OIDC_OP_USER_ENDPOINT: AgBD4amxFPHYQR6JWjNTsPM63NNI+f9DgZ9+whv5f3bUo7KNtB05vAYYQtAdMYJR+5499S6t0BYwOi4cNxQVJyga1bKogqih3EI/QlJeLvoFvDFj2wEMmCkT5MS+qemtPJwXK7JTHzUFX74b+S/a30/mvWKkMRHzc+G+E678/RDIScFJFssfJVMJB4Kp9T4y+v9jJAKZRixkx2lOR48JR5umXwxm+xQGaexeJOv0MZY4Am4P/nJcWM+Gk1Ka/ih+VAJRLsu9dgvShytQ1OHaAPMRu6H7Wb2bLrzpz4rTcGbZA3aTgHfItjWQpBV3fhNvyNW8QRbRFSyxHBC4snqW5Bl77u4KdMicwL/0x1JhwP6cx+/TVEyZ2n+5Y5PyOW+E7LXtiZGZ/eRw3xVLxvrFDwY0EBMqAUWHyQjwlZLctjxgBud5XDtT4athaNq5hnCELMepRI05kF+o5ECxKWoFXN5rXrwrlcgftCV9PTZbP8pkxyau8O28C4YX07J65G9ntV3VIg9tAXww6X68YHIQEdAgjNOI1soWt15q582OlhXBTTKAf/QU7mrSCn5FH0Q4Z7VLIsJwMz0orGB69Qxo/lLF7z+pQEE4PArceoLi7cffQV/+VzUBOACDdfKFiN3LTDuoMm6lvkAOdmzXccmXlEGvNEwLR+8Ac26Ld4fAnEFSLIJpwQHheIQRY19qbN5+cOa+RmuOroxWWA2P/8uSlp4kXVvAmPJOt8PprI3h8iRG5zBv+J8kE8wpI1scJdDr
OIDC_RP_CLIENT_ID: AgCOGuVP8BVfAT5FRmmJLptdv+8vtppOgpzJ2LXU3vR3sjQE4MLKgWwoAyrnkAa2IMrsmkg5+pyHBDlp1AMba3OTVZmhyEVLrFe/vCiL45hEaW2l6kiwlIW3nZnoJlGG2Ugj4SX8YCQGlyr19vEFcPdieWlKpHfda/EP4xYhXMSzXCxFCtT7uGjgnBlrV0uXeFCezYGzvmA4SbDli7fvGv5H85cwgUlMdSn2ZIP3DAxQ8gP0ETF6KaOK5QVP0e/7kw9SK3oo4XHE2c8AjHLFFnmz/uf07+7LuOunSqunolbVy+Lm2mHHnzx+0PBmMYvl7FHY/TkBZaVjVaZtrELbFYaraop8iE6hFMvOYNa/1BFY1x0aeRfPb0jt5IPnuebllnEh4P7JUQxef4Bqbjp8u7P+uOBWnQbeMEp5F3rWE8qy09NnjsKPz87Jw9pb0aPgXWLKVHjJpArhcb6gTJLESCw9kgT+c0pYI0s3BYmwNkJ+6wxflvTLb5z3YyY5/+8/s3PgDz6Hj4tyA8tBru/KQwnBVMw0GhF5YwlZ4SYHPwVX+ZMj9UQc6swNsrxKLqs5Ci7KjvzEDUJ4/aW+rv8naoCiebIJrbmLB8iSqNGh90s1S9BJsQaWXbKYday3spt0eg+tH/iQgAnUAjd9RK3TxkkmWVjmeUc/rOltsbaIvy6/WdyKnF8/f9B03Pm5eal73yC7reFyGYiXvA==
OIDC_RP_CLIENT_SECRET: AgA+Q9osGcUgiGsyPfHph3vGiNBjmL7pK3JlaE4PoI+eGsvb+3Ozf9KnfHSMm2R0fq/eukFn6i25MZ/mKYliVSIcjWbnGDFSysiCAwirKTUXoFUo87zmguNUPr8Rr45m1AIaJb31T4MKeFQRHSg715rs/6fKlbejUWUBZuMTN1DXkWr+00atj0JmmZPScSfRmwKNsHnoZCUWFE/DaFChpoCU4fCp5vL9P2LcdzsY84vue8y7Trg0e/LpEi6+DzSoxurE9jwjoUauXmZnOSW3jFgy+u5c9Oa3RC+IB/UUsmHI8eUVOXGdQsSFufrAMd1uyPRa2g+aCX0zX5boZC9dTGqaT+D/6xXnMFsvw5K+K4Y/QZ+j9ZHx0232sPCFVi2HaYHV51c2Xi6tizy+/0J27/4gvaVREXw94pmsaI5rt9sNDHoKw6LwkPO8heqkYzfIWhAg5vKswDn/MWAVTIzIubdTvrDjVWxoJ2FM9sCUsai/X7rj6QUiVTgbWuYYO0hMrT2Q9y05n68hWOmpqmna4/JGIE+N48h0/wAHLsLeV4ZNLJdJhQovOSkYsB5FIYPTuihFASLhE+uf8VBwSfYlwcWORz7dssAYvCJAx3huYZCSHrT4WPtLt4Ok/IuplXvVbZ/d6NISqE/g+BiNmN7r4DZQ/QbN4TD9t6BQESKkTqPHYIiVtZHdalgPFFSS8JP2wv50mSh/imjlX51ruHGQbVbIfZnfJGwLEL0KN/Zn3BMrNtMgCqEs3itnGQQnBchQ
template:
metadata:
creationTimestamp: null
name: oauth-config
namespace: linkding
type: Opaque

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: linkding-web
labels:
app: linkding
spec:
selector:
app: linkding
ports:
- port: 9090
targetPort: 9090
name: http

@ -0,0 +1,30 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- postgres.yaml
- synapse.deployment.yaml
- synapse.service.yaml
- synapse.configmap.yaml
- synapse.ingress.yaml
- postgres-credentials.secret.yaml
- mautrix.pvc.yaml
- mautrix-telegram.statefulset.yaml
- mautrix-telegram.configmap.yaml
- mautrix-whatsapp.statefulset.yaml
namespace: matrix
images:
- name: mautrix-telegram
newName: dock.mau.dev/mautrix/telegram
newTag: "v0.15.1"
- name: mautrix-whatsapp
newName: dock.mau.dev/mautrix/whatsapp
newTag: "v0.10.5"
- name: synapse
newName: ghcr.io/element-hq/synapse
newTag: "v1.100.0"

@ -0,0 +1,511 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mautrix-telegram
data:
config.yaml: |
# Homeserver details
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: http://synapse:8448
# The domain of the homeserver (for MXIDs, etc).
domain: matrix.kluster.moll.re
# Whether or not to verify the SSL certificate of the homeserver.
# Only applies if address starts with https://
verify_ssl: false
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# Number of retries for all HTTP requests if the homeserver isn't reachable.
http_retry_count: 4
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's Telegram connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Whether asynchronous uploads via MSC2246 should be enabled for media.
# Requires a media repo that supports MSC2246.
async_media: false
# Application service host/registration related details
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://mautrix-telegram:29318
# When using https:// the TLS certificate and key files for the address.
tls_cert: false
tls_key: false
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29317
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
max_body_size: 1
# The full URI to the database. SQLite and Postgres are supported.
# Format examples:
# SQLite: sqlite:filename.db
# Postgres: postgres://username:password@hostname/dbname
database: sqlite:mautrix-telegram.db
# The unique ID of this appservice.
id: telegram
# Username of the appservice bot.
bot_username: telegrambot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
bot_displayname: Telegram bridge bot
bot_avatar: mxc://maunium.net/tJCRmUyJDsgRNgqhOgoiHWbX
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
ephemeral_events: true
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Bridge config
bridge:
# Localpart template of MXIDs for Telegram users.
# {userid} is replaced with the user ID of the Telegram user.
username_template: "telegram_{userid}"
# Localpart template of room aliases for Telegram portal rooms.
# {groupname} is replaced with the name part of the public channel/group invite link ( https://t.me/{} )
alias_template: "telegram_{groupname}"
# Displayname template for Telegram users.
# {displayname} is replaced with the display name of the Telegram user.
displayname_template: "{displayname} (Telegram)"
# Set the preferred order of user identifiers which to use in the Matrix puppet display name.
# In the (hopefully unlikely) scenario that none of the given keys are found, the numeric user
# ID is used.
#
# If the bridge is working properly, a phone number or an username should always be known, but
# the other one can very well be empty.
#
# Valid keys:
# "full name" (First and/or last name)
# "full name reversed" (Last and/or first name)
# "first name"
# "last name"
# "username"
# "phone number"
displayname_preference:
- full name
- username
- phone number
# Maximum length of displayname
displayname_max_length: 100
# Remove avatars from Telegram ghost users when removed on Telegram. This is disabled by default
# as there's no way to determine whether an avatar is removed or just hidden from some users. If
# you're on a single-user instance, this should be safe to enable.
allow_avatar_remove: false
# Should contact names and profile pictures be allowed?
# This is only safe to enable on single-user instances.
allow_contact_info: false
# Maximum number of members to sync per portal when starting up. Other members will be
# synced when they send messages. The maximum is 10000, after which the Telegram server
# will not send any more members.
# -1 means no limit (which means it's limited to 10000 by the server)
max_initial_member_sync: 100
# Maximum number of participants in chats to bridge. Only applies when the portal is being created.
# If there are more members when trying to create a room, the room creation will be cancelled.
# -1 means no limit (which means all chats can be bridged)
max_member_count: -1
# Whether or not to sync the member list in channels.
# If no channel admins have logged into the bridge, the bridge won't be able to sync the member
# list regardless of this setting.
sync_channel_members: false
# Whether or not to skip deleted members when syncing members.
skip_deleted_members: true
# Whether or not to automatically synchronize contacts and chats of Matrix users logged into
# their Telegram account at startup.
startup_sync: false
# Number of most recently active dialogs to check when syncing chats.
# Set to 0 to remove limit.
sync_update_limit: 0
# Number of most recently active dialogs to create portals for when syncing chats.
# Set to 0 to remove limit.
sync_create_limit: 15
# Should all chats be scheduled to be created later?
# This is best used in combination with MSC2716 infinite backfill.
sync_deferred_create_all: false
# Whether or not to sync and create portals for direct chats at startup.
sync_direct_chats: false
# The maximum number of simultaneous Telegram deletions to handle.
# A large number of simultaneous redactions could put strain on your homeserver.
max_telegram_delete: 10
# Whether or not to automatically sync the Matrix room state (mostly unpuppeted displaynames)
# at startup and when creating a bridge.
sync_matrix_state: true
# Allow logging in within Matrix. If false, users can only log in using login-qr or the
# out-of-Matrix login website (see appservice.public config section)
allow_matrix_login: true
# Whether or not to make portals of publicly joinable channels/supergroups publicly joinable on Matrix.
public_portals: false
# Whether or not to use /sync to get presence, read receipts and typing notifications
# when double puppeting is enabled
sync_with_custom_puppets: false
# Whether or not to update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, custom puppets will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
# If using this for other servers than the bridge's server,
# you must also set the URL in the double_puppet_server_map.
login_shared_secret_map:
example.com: foobar
# Set to false to disable link previews in messages sent to Telegram.
telegram_link_preview: true
# Whether or not the !tg join command should do a HTTP request
# to resolve redirects in invite links.
invite_link_resolve: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Maximum size of image in megabytes before sending to Telegram as a document.
image_as_file_size: 10
# Maximum number of pixels in an image before sending to Telegram as a document. Defaults to 4096x4096 = 16777216.
image_as_file_pixels: 16777216
# Enable experimental parallel file transfer, which makes uploads/downloads much faster by
# streaming from/to Matrix and using many connections for Telegram.
# Note that generating HQ thumbnails for videos is not possible with streamed transfers.
# This option uses internal Telethon implementation details and may break with minor updates.
parallel_file_transfer: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge send all unicode reactions as custom emoji reactions to Telegram?
# By default, the bridge only uses custom emojis for unicode emojis that aren't allowed in reactions.
always_custom_emoji_reaction: false
# Settings for converting animated stickers.
animated_sticker:
# Format to which animated stickers should be converted.
# disable - No conversion, send as-is (gzipped lottie)
# png - converts to non-animated png (fastest),
# gif - converts to animated gif
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
target: gif
# Should video stickers be converted to the specified format as well?
convert_from_webm: false
# Arguments for converter. All converters take width and height.
args:
width: 256
height: 256
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
# Settings for converting animated emoji.
# Same as animated_sticker, but webm is not supported as the target
# (because inline images can only contain images, not videos).
animated_emoji:
target: webp
args:
width: 64
height: 64
fps: 25
# # End-to-bridge encryption support options.
# #
# # See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
# encryption:
# # Allow encryption, work in group chat rooms with e2ee enabled
# allow: false
# # Default to encryption, force-enable encryption in all portals the bridge creates
# # This will cause the bridge bot to be in private chats for the encryption to work properly.
# default: false
# # Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
# appservice: false
# # Require encryption, drop any unencrypted messages.
# require: false
# # Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# # You must use a client that supports requesting keys from other users to use this feature.
# allow_key_sharing: false
# # Options for deleting megolm sessions from the bridge.
# delete_keys:
# # Beeper-specific: delete outbound sessions when hungryserv confirms
# # that the user has uploaded the key to key backup.
# delete_outbound_on_ack: false
# # Don't store outbound sessions in the inbound table.
# dont_store_outbound: false
# # Ratchet megolm sessions forward after decrypting messages.
# ratchet_on_decrypt: false
# # Delete fully used keys (index >= max_messages) after decrypting messages.
# delete_fully_used_on_decrypt: false
# # Delete previous megolm sessions from same device when receiving a new one.
# delete_prev_on_new_session: false
# # Delete megolm sessions received from a device when the device is deleted.
# delete_on_device_delete: false
# # Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
# periodically_delete_expired: false
# # Delete inbound megolm sessions that don't have the received_at field used for
# # automatic ratcheting and expired session deletion. This is meant as a migration
# # to delete old keys prior to the bridge update.
# delete_outdated_inbound: false
# # What level of device verification should be required from users?
# #
# # Valid levels:
# # unverified - Send keys to all device in the room.
# # cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# # cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# # cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# # Note that creating user signatures from the bridge bot is not currently possible.
# # verified - Require manual per-device verification
# # (currently only possible by modifying the `trust` column in the `crypto_device` database table).
# verification_levels:
# # Minimum level for which the bridge should send keys to when bridging messages from Telegram to Matrix.
# receive: unverified
# # Minimum level that the bridge should accept for incoming Matrix messages.
# send: unverified
# # Minimum level that the bridge should require for accepting key requests.
# share: cross-signed-tofu
# # Options for Megolm room key rotation. These options allow you to
# # configure the m.room.encryption event content. See:
# # https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# # more information about that event.
# rotation:
# # Enable custom Megolm room key rotation settings. Note that these
# # settings will only apply to rooms created after this option is
# # set.
# enable_custom: false
# # The maximum number of milliseconds a session should be used
# # before changing it. The Matrix spec recommends 604800000 (a week)
# # as the default.
# milliseconds: 604800000
# # The maximum number of messages that should be sent with a given
# # session before changing it. The Matrix spec recommends 100 as the
# # default.
# messages: 100
# # Disable rotating keys when a user's devices change?
# # You should not enable this option unless you understand all the implications.
# disable_device_change_key_rotation: false
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Should cross-chat replies from Telegram be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Whether or not the bridge should send a read receipt from the bridge bot when a message has
# been sent to Telegram.
delivery_receipts: false
# Whether or not delivery errors should be reported as messages in the Matrix room.
delivery_error_reports: false
# Should errors in incoming message handling send a message to the Matrix room?
incoming_bridge_error_reports: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after the bridge info is re-sent,
# unless the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should pinned chats be moved to a specific tag in Matrix?
# The favorites tag is `m.favourite`.
pinned_tag: null
# Same as above for archived chats, the low priority tag is `m.lowpriority`.
archive_tag: null
# Whether or not mute status and tags should only be bridged when the portal room is created.
tag_only_on_create: true
# Should leaving the room on Matrix make the user leave on Telegram?
bridge_matrix_leave: true
# Should the user be kicked out of all portals when logging out of the bridge?
kick_on_logout: true
# Should the "* user joined Telegram" notice always be marked as read automatically?
always_read_joined_telegram_notice: true
# Should the bridge auto-create a group chat on Telegram when a ghost is invited to a room?
# Requires the user to have sufficient power level and double puppeting enabled.
create_group_on_invite: true
# Settings for backfilling messages from Telegram.
backfill:
# Allow backfilling at all?
enable: true
# Whether or not to enable backfilling in normal groups.
# Normal groups have numerous technical problems in Telegram, and backfilling normal groups
# will likely cause problems if there are multiple Matrix users in the group.
normal_groups: false
# If a backfilled chat is older than this number of hours, mark it as read even if it's unread on Telegram.
# Set to -1 to let any chat be unread.
unread_hours_threshold: 720
# Forward backfilling limits.
#
# Using a negative initial limit is not recommended, as it would try to backfill everything in a single batch.
forward_limits:
# Number of messages to backfill immediately after creating a portal.
initial:
user: 50
normal_group: 100
supergroup: 10
channel: 10
# Number of messages to backfill when syncing chats.
sync:
user: 100
normal_group: 100
supergroup: 100
channel: 100
# Timeout for forward backfills in seconds. If you have a high limit, you'll have to increase this too.
forward_timeout: 900
# Settings for incremental backfill of history. These only apply to Beeper, as upstream abandoned MSC2716.
incremental:
# Maximum number of messages to backfill per batch.
messages_per_batch: 100
# The number of seconds to wait after backfilling the batch of messages.
post_batch_delay: 20
# The maximum number of batches to backfill per portal, split by the chat type.
# If set to -1, all messages in the chat will eventually be backfilled.
max_batches:
# Direct chats
user: -1
# Normal groups. Note that the normal_groups option above must be enabled
# for these to be backfilled.
normal_group: -1
# Supergroups
supergroup: 10
# Broadcast channels
channel: -1
# Overrides for base power levels.
initial_power_level_overrides:
user: {}
group: {}
# Whether to bridge Telegram bot messages as m.notices or m.texts.
bot_messages_as_notices: true
bridge_notices:
# Whether or not Matrix bot messages (type m.notice) should be bridged.
default: false
# List of user IDs for whom the previous flag is flipped.
# e.g. if bridge_notices.default is false, notices from other users will not be bridged, but
# notices from users listed here will be bridged.
exceptions: []
# An array of possible values for the $distinguisher variable in message formats.
# Each user gets one of the values here, based on a hash of their user ID.
# If the array is empty, the $distinguisher variable will also be empty.
relay_user_distinguishers: ["\U0001F7E6", "\U0001F7E3", "\U0001F7E9", "⭕️", "\U0001F536", "⬛️", "\U0001F535", "\U0001F7E2"]
# The formats to use when sending messages to Telegram via the relay bot.
# Text msgtypes (m.text, m.notice and m.emote) support HTML, media msgtypes don't.
#
# Available variables:
# $sender_displayname - The display name of the sender (e.g. Example User)
# $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser)
# $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com)
# $distinguisher - A random string from the options in the relay_user_distinguishers array.
# $message - The message content
message_formats:
m.text: "$distinguisher <b>$sender_displayname</b>: $message"
m.notice: "$distinguisher <b>$sender_displayname</b>: $message"
m.emote: "* $distinguisher <b>$sender_displayname</b> $message"
m.file: "$distinguisher <b>$sender_displayname</b> sent a file: $message"
m.image: "$distinguisher <b>$sender_displayname</b> sent an image: $message"
m.audio: "$distinguisher <b>$sender_displayname</b> sent an audio file: $message"
m.video: "$distinguisher <b>$sender_displayname</b> sent a video: $message"
m.location: "$distinguisher <b>$sender_displayname</b> sent a location: $message"
# Telegram doesn't have built-in emotes; this field specifies how m.emote messages from authenticated
# users are sent to Telegram. All fields in message_formats are supported. Additionally, the
# Telegram user info is available in the following variables:
# $displayname - Telegram displayname
# $username - Telegram username (may not exist)
# $mention - Telegram @username or displayname mention (depending on which exists)
emote_format: "* $mention $formatted_body"
# The formats to use when sending state events to Telegram via the relay bot.
#
# Variables from `message_formats` that have the `sender_` prefix are available without the prefix.
# In name_change events, `$prev_displayname` is the previous displayname.
#
# Set format to an empty string to disable the messages for that event.
state_event_formats:
join: "$distinguisher <b>$displayname</b> joined the room."
leave: "$distinguisher <b>$displayname</b> left the room."
name_change: "$distinguisher <b>$prev_displayname</b> changed their name to $distinguisher <b>$displayname</b>"
# Filter rooms that can/can't be bridged. Can also be managed using the `filter` and
# `filter-mode` management commands.
#
# An empty blacklist will essentially disable the filter.
filter:
# Filter mode to use. Either "blacklist" or "whitelist".
# If the mode is "blacklist", the listed chats will never be bridged.
# If the mode is "whitelist", only the listed chats can be bridged.
mode: blacklist
# The list of group/channel IDs to filter.
list: []
# How to handle direct chats:
# If users is "null", direct chats will follow the previous settings.
# If users is "true", direct chats will always be bridged.
# If users is "false", direct chats will never be bridged.
users: true
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!tg"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a Telegram bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# Send each message separately (for readability in some clients)
management_room_multiple_messages: false
# Permissions for using the bridge.
# Permitted values:
# relaybot - Only use the bridge via the relaybot, no access to commands.
# user - Relaybot level + access to commands to create bridges.
# puppeting - User level + logging in with a Telegram account.
# full - Full access to use the bridge, i.e. previous levels + Matrix login.
# admin - Full access to use the bridge and some extra administration commands.
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"matrix.kluster.moll.re": "full"
"@remy:matrix.kluster.moll.re": "admin"
# Options related to the message relay Telegram bot.
relaybot:
private_chat:
# List of users to invite to the portal when someone starts a private chat with the bot.
# If empty, private chats with the bot won't create a portal.
invite: []
# Whether or not to bridge state change messages in relaybot private chats.
state_changes: true
# When private_chat.invite is empty, this message is sent to users who /start the
# relaybot. Telegram's "markdown" is supported.
message: This is a Matrix bridge relaybot and does not support direct chats
# List of users to invite to all group chat portals created by the bridge.
group_chat_invite: []
# Whether or not the relaybot should not bridge events in unbridged group chats.
# If false, portals will be created when the relaybot receives messages, just like normal
# users. This behavior is usually not desirable, as it interferes with manually bridging
# the chat to another room.
ignore_unbridged_group_chat: true
# Whether or not to allow creating portals from Telegram.
authless_portals: true
# Whether or not to allow Telegram group admins to use the bot commands.
whitelist_group_admins: true
# Whether or not to ignore incoming events sent by the relay bot.
ignore_own_incoming_events: true
# List of usernames/user IDs who are also allowed to use the bot commands.
whitelist:
- myusername
- 12345678
# Telegram config
telegram:
# Get your own API keys at https://my.telegram.org/apps
api_id: 862555
api_hash: 7387a7b6ba71793d6f3fa98261117e4e
# (Optional) Create your own bot at https://t.me/BotFather
bot_token: disabled
# Should the bridge request missed updates from Telegram when restarting?
catch_up: true
# Should incoming updates be handled sequentially to make sure order is preserved on Matrix?
sequential_updates: true
exit_on_update_error: false

@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mautrix-telegram
spec:
selector:
matchLabels:
app: mautrix-telegram
serviceName: mautrix-telegram
replicas: 1
template:
metadata:
labels:
app: mautrix-telegram
spec:
containers:
- name: mautrix-telegram
image: mautrix-telegram
volumeMounts:
- name: config
mountPath: /data/config.yaml
subPath: config.yaml
- name: persistence
mountPath: /data
args:
- --no-update # disable overwriting config.yaml
volumes:
- name: config
configMap:
name: mautrix-telegram
- name: persistence
emptyDir: {}

@ -0,0 +1,428 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mautrix-whatsapp
data:
config.yaml: |
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: http://synapse:8448
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: matrix.kluster.moll.re
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://mautrix-whatsapp:29318
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29318
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: sqlite3-fk-wal
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: file:/data/mautrix-whatsapp.db?_txlock=immediate
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: whatsapp
# Appservice bot details.
bot:
# Username of the appservice bot.
username: whatsappbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: WhatsApp bridge bot
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Segment-compatible analytics endpoint for tracking some events, like provisioning API login and encryption errors.
analytics:
# Hostname of the tracking server. The path is hardcoded to /v1/track
host: api.segment.io
# API key to send with tracking requests. Tracking is disabled if this is null.
token: null
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
user_id: null
# Prometheus config.
metrics:
# Enable prometheus metrics?
enabled: false
# IP and port where the metrics listener should be. The path is always /metrics
listen: 127.0.0.1:8001
# Config for things that are directly sent to WhatsApp.
whatsapp:
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
os_name: Mautrix-WhatsApp bridge
# Browser name that determines the logo shown in the mobile app.
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
browser_name: unknown
# Bridge config
bridge:
# Localpart template of MXIDs for WhatsApp users.
# {{.}} is replaced with the phone number of the WhatsApp user.
username_template: whatsapp_{{.}}
# Displayname template for WhatsApp users.
# {{.PushName}} - nickname set by the WhatsApp user
# {{.BusinessName}} - validated WhatsApp business name
# {{.Phone}} - phone number (international format)
# The following variables are also available, but will cause problems on multi-user instances:
# {{.FullName}} - full name from contact list
# {{.FirstName}} - first name from contact list
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
personal_filtering_spaces: false
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should incoming calls send a message to the Matrix room?
call_start_notices: true
# Should another user's cryptographic identity changing send a message to Matrix?
identity_change_notices: false
portal_message_buffer: 128
# Settings for handling history sync payloads.
history_sync:
# Enable backfilling history sync payloads from WhatsApp?
backfill: true
# The maximum number of initial conversations that should be synced.
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
max_initial_conversations: -1
# Maximum number of messages to backfill in each conversation.
# Set to -1 to disable limit.
message_count: 50
# Should the bridge request a full sync from the phone when logging in?
# This bumps the size of history syncs from 3 months to 1 year.
request_full_sync: false
# Configuration parameters that are sent to the phone along with the request full sync flag.
# By default (when the values are null or 0), the config isn't sent at all.
full_sync_config:
# Number of days of history to request.
# The limit seems to be around 3 years, but using higher values doesn't break.
days_limit: null
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
size_mb_limit: null
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
storage_quota_mb: null
# If this value is greater than 0, then if the conversation's last message was more than
# this number of hours ago, the conversation will automatically be marked as read.
# Conversations that have a last message that is less than this number of hours ago will
# have their unread status synced from WhatsApp.
unread_hours_threshold: 0
# Should puppet avatars be fetched from the server even if an avatar is already set?
user_avatar_sync: true
# Should Matrix users leaving groups be bridged to WhatsApp?
bridge_matrix_leave: true
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
# WhatsApp and set the unread status on initial backfill?
# This will only work on clients that support the m.marked_unread or
# com.famedly.marked_unread room account data.
sync_manual_marked_unread: true
# When double puppeting is enabled, users can use `!wa toggle` to change whether
# presence is bridged. This setting sets the default value.
# Existing users won't be affected when these are changed.
default_bridge_presence: true
# Send the presence as "available" to whatsapp when users start typing on a portal.
# This works as a workaround for homeservers that do not support presence, and allows
# users to see when the whatsapp user on the other side is typing during a conversation.
send_presence_on_typing: false
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
#
# By default, the bridge acts like WhatsApp web, which only sends active delivery
# receipts when it's in the foreground.
force_active_delivery_receipts: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Should group members be synced in parallel? This makes member sync faster
parallel_member_sync: false
# Should Matrix m.notice-type messages be bridged?
bridge_notices: true
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after the bridge info is re-sent, unless the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
archive_tag: null
# Same as above, but for pinned chats. The favorite tag is called m.favourite
pinned_tag: null
# Should mute status and tags only be bridged when the portal room is created?
tag_only_on_create: true
# Should WhatsApp status messages be bridged into a Matrix room?
# Disabling this won't affect already created status broadcast rooms.
enable_status_broadcast: true
# Should sending WhatsApp status messages be allowed?
# This can cause issues if the user has lots of contacts, so it's disabled by default.
disable_status_broadcast_send: true
# Should the status broadcast room be muted and moved into low priority by default?
# This is only applied when creating the room, the user can unmute it later.
mute_status_broadcast: true
# Tag to apply to the status broadcast room.
status_broadcast_tag: m.lowpriority
# Should the bridge use thumbnails from WhatsApp?
# They're disabled by default due to very low resolution.
whatsapp_thumbnail: false
# Allow invite permission for users: a user can invite bots into rooms with WhatsApp
# users (private chats and groups).
allow_user_invite: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge never send alerts to the bridge management room?
# These are mostly things like the user being logged out.
disable_bridge_alerts: false
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
# This is only safe on single-user bridges.
crash_on_stream_replaced: false
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
# key in the event content even if this is disabled.
url_previews: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Send galleries as a single event? This is not an MSC (yet).
beeper_galleries: false
# Should polls be sent using MSC3381 event types?
extev_polls: false
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
# Null means there's no enforced timeout.
message_handling_timeout:
# Send an error message after this timeout, but keep waiting for the response until the deadline.
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
error_after: null
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
# This is counted from the time the bridge starts handling the message.
deadline: 120s
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!wa"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a WhatsApp bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should user mentions be included in the event wire content so the server can send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a WhatsApp account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Settings for relay mode
relay:
# Whether relay mode should be allowed. If allowed, `!wa set-relay` can be used to turn any
# authenticated user into a relaybot for that chat.
enabled: false
# Should only admins be allowed to set themselves as relay users?
admin_only: true
# The formats to use when sending messages to WhatsApp via the relaybot.
message_formats:
m.text: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.notice: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.emote: "* <b>{{ .Sender.Displayname }}</b> {{ .Message }}"
m.file: "<b>{{ .Sender.Displayname }}</b> sent a file"
m.image: "<b>{{ .Sender.Displayname }}</b> sent an image"
m.audio: "<b>{{ .Sender.Displayname }}</b> sent an audio file"
m.video: "<b>{{ .Sender.Displayname }}</b> sent a video"
m.location: "<b>{{ .Sender.Displayname }}</b> sent a location"
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-whatsapp.log
max_size: 100
max_backups: 10
compress: true

@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mautrix-whatsapp
spec:
selector:
matchLabels:
app: mautrix-whatsapp
serviceName: mautrix-whatsapp
replicas: 1
template:
metadata:
labels:
app: mautrix-whatsapp
spec:
containers:
- name: mautrix-whatsapp
image: mautrix-whatsapp
volumeMounts:
- name: persistence
mountPath: /data
# contains config.yaml
securityContext:
fsGroup: 1337
volumes:
- name: persistence
persistentVolumeClaim:
claimName: mautrix-whatsapp

@ -1,23 +1,23 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: stump-data
name: mautrix-telegram
spec:
storageClassName: "nfs-client"
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
- ReadWriteMany
resources:
requests:
storage: 10Gi
storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: stump-config
name: mautrix-whatsapp
spec:
storageClassName: "nfs-client"
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
- ReadWriteMany
resources:
requests:
storage: 10Gi
storage: 1Gi

20
apps/matrix/postgres.yaml Normal file

@ -0,0 +1,20 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: matrix-postgres
spec:
instances: 1
imageName: ghcr.io/cloudnative-pg/postgresql:16
bootstrap:
initdb:
owner: matrix
database: matrix
secret:
name: postgres-credentials
storage:
size: 1Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true
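
The `postgres-credentials` secret referenced in `bootstrap.initdb` is not part of this diff. A minimal sketch of how it could be created (assumptions: CloudNativePG expects a basic-auth secret with `username` and `password` keys, and the namespace and password here are placeholders; in this repo it would more likely be committed as a SealedSecret):

```bash
kubectl create secret generic postgres-credentials \
  --namespace matrix \
  --type=kubernetes.io/basic-auth \
  --from-literal=username=matrix \
  --from-literal=password='<strong-password>'
```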

@ -0,0 +1,62 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: synapse
data:
# matrix.kluster.moll.re.log.config: |
# version: 1
# formatters:
# precise:
# format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
# handlers:
# console:
# class: logging.StreamHandler
# formatter: precise
# loggers:
# # This is just here so we can leave `loggers` in the config regardless of whether
# # we configure other loggers below (avoid empty yaml dict error).
# _placeholder:
# level: "INFO"
# synapse.storage.SQL:
# # beware: increasing this to DEBUG will make synapse log sensitive
# # information such as access tokens.
# level: INFO
# root:
# level: INFO
# handlers: [console]
homeserver.yaml: |
server_name: "matrix.kluster.moll.re"
report_stats: false
# enable_registration: true
# enable_registration_without_verification: true
listeners:
- port: 8448
tls: false
type: http
x_forwarded: true
bind_addresses: ['::1', '127.0.0.1']
resources:
- names: [client, federation]
compress: false
# log_config: "./matrix.kluster.moll.re.log.config"
media_store_path: /media_store
trusted_key_servers:
- server_name: "matrix.org"
database:
name: psycopg2
args:
user: matrix
password: "0ssdsdsdM6vbxhs.kdjsdasd9Z0qK5bdTwM6vbxh9Z"
dbname: matrix
host: matrix-postgres-rw
cp_min: 5
cp_max: 10

@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: synapse
spec:
selector:
matchLabels:
app: synapse
template:
metadata:
labels:
app: synapse
spec:
containers:
- name: synapse
image: synapse
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 8448
env:
- name: SYNAPSE_CONFIG_PATH
value: /config/homeserver.yaml
volumeMounts:
- name: config
mountPath: /config/homeserver.yaml
subPath: homeserver.yaml
- name: config-persistence
mountPath: /config
- name: media
mountPath: /media_store
securityContext:
fsGroup: 1001
volumes:
- name: config
configMap:
name: synapse
- name: config-persistence
emptyDir: {}
- name: media
emptyDir: {}

@ -0,0 +1,29 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: synapse-federation
spec:
entryPoints:
- websecure
routes:
- match: Host(`matrix.kluster.moll.re`)
kind: Rule
services:
- name: synapse
port: 8448
# auto route to the _matrix path
middlewares:
- name: matrix-redirect
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: matrix-redirect
spec:
redirectRegex:
regex: "^https://matrix.kluster.moll.re/(.*)"
replacement: "https://matrix.kluster.moll.re/_matrix/$${1}"
permanent: true
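
A quick way to sanity-check the redirect once the IngressRoute and middleware are applied (hypothetical request path; assumes DNS and TLS for matrix.kluster.moll.re already point at the cluster):

```bash
# The response should be a permanent redirect (301) whose Location header
# points at the /_matrix/ prefix, e.g. https://matrix.kluster.moll.re/_matrix/versions
curl -sI https://matrix.kluster.moll.re/versions | grep -iE '^(HTTP|location)'
```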

@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: synapse
spec:
selector:
app: synapse
ports:
- protocol: TCP
port: 8448
targetPort: 8448

@ -1,4 +1,23 @@
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-vue-ingress
namespace: media
spec:
entryPoints:
- websecure
routes:
- match: Host(`media.kluster.moll.re`)
middlewares:
- name: jellyfin-websocket
kind: Rule
services:
- name: jellyfin-web
port: 80
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-backend-ingress
@ -7,7 +26,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`media.kluster.moll.re`) && !Path(`/metrics`)
- match: Host(`media-backend.kluster.moll.re`) && !Path(`/metrics`)
middlewares:
- name: jellyfin-websocket
- name: jellyfin-server-headers
@ -18,7 +37,7 @@ spec:
tls:
certResolver: default-tls
---
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: jellyfin-websocket
@ -29,7 +48,7 @@ spec:
Connection: keep-alive, Upgrade
Upgrade: WebSocket
---
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: jellyfin-server-headers
@ -41,4 +60,4 @@ spec:
accessControlAllowMethods: [ "GET","HEAD","OPTIONS" ] # "POST","PUT"
accessControlAllowOriginList:
- "*"
accessControlMaxAge: 100
accessControlMaxAge: 100

@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: jellyfin
labels:
metrics: prometheus
spec:
selector:
matchLabels:
app: jellyfin-server-service
endpoints:
- path: /metrics
targetPort: jellyfin
# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/
# the metrics are available at /metrics but blocked by the ingress

@ -5,11 +5,17 @@ namespace: media
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- server.deployment.yaml
- server.service.yaml
- web.deployment.yaml
- web.service.yaml
- ingress.yaml
- jellyfin.servicemonitor.yaml
images:
- name: jellyfin/jellyfin
newName: jellyfin/jellyfin
newTag: 10.10.7
newTag: 10.8.13
- name: ghcr.io/jellyfin/jellyfin-vue
newName: ghcr.io/jellyfin/jellyfin-vue
newTag: stable-rc.0.3.1

@ -1,21 +1,39 @@
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: media
name: jellyfin-config-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/jellyfin-config
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: config
namespace: media
name: jellyfin-config-nfs
spec:
storageClassName: "nfs-client"
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: jellyfin-config-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: media
namespace: media
name: jellyfin-data-nfs
spec:
capacity:
storage: "1Ti"
@ -28,7 +46,8 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: media
namespace: media
name: jellyfin-data-nfs
spec:
storageClassName: ""
accessModes:
@ -36,4 +55,4 @@ spec:
resources:
requests:
storage: "1Ti"
volumeName: media
volumeName: jellyfin-data-nfs

@ -18,9 +18,6 @@ spec:
limits:
memory: "2Gi"
cpu: "2"
requests:
memory: "128Mi"
cpu: "250m"
ports:
- containerPort: 8096
name: jellyfin
@ -28,9 +25,9 @@ spec:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: config
- name: jellyfin-config
mountPath: /config
- name: media
- name: jellyfin-data
mountPath: /media
livenessProbe:
httpGet:
@ -39,10 +36,10 @@ spec:
initialDelaySeconds: 100
periodSeconds: 15
volumes:
- name: config
- name: jellyfin-config
persistentVolumeClaim:
claimName: config
- name: media
claimName: jellyfin-config-nfs
- name: jellyfin-data
persistentVolumeClaim:
claimName: media
claimName: jellyfin-data-nfs

@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin-web
spec:
selector:
matchLabels:
app: jellyfin-web
template:
metadata:
labels:
app: jellyfin-web
spec:
containers:
- name: jellyfin-web
image: ghcr.io/jellyfin/jellyfin-vue
resources:
limits:
memory: "128Mi"
cpu: "30m"
ports:
- containerPort: 80
env:
- name: TZ
value: Europe/Berlin
- name: DEFAULT_SERVERS
value: "https://media-backend.kluster.moll.re"

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: jellyfin-web
spec:
selector:
app: jellyfin-web
ports:
- protocol: TCP
port: 80
targetPort: 80

@ -1,15 +0,0 @@
## Setup
Because Minecraft is quite sensitive to I/O performance, we want the data to be stored on a local disk. But hostPath is not well supported in Talos (and is not persistent), so we use an ephemeral volume instead. In order to do this, we create an emptyDir volume and mount it into the pod.
We use an initContainer that copies the data to the local storage. Afterwards, copying from the local storage back to the persistent storage is handled by a preStop lifecycle event (see the sketch below).
This way, we can have the best of both worlds: fast local storage and persistent storage.
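A minimal sketch of what such a preStop copy-back could look like on the server container (illustrative only: the volume names, mount paths and plain `cp` are assumptions, not the exact manifest used in this repo):
```
containers:
  - name: minecraft-server
    volumeMounts:
      - name: local-data        # emptyDir holding the live world data
        mountPath: /data
      - name: minecraft-data    # persistent (NFS-backed) copy
        mountPath: /persistent-data
    lifecycle:
      preStop:
        exec:
          command: ["/bin/sh", "-c", "cp -a /data/. /persistent-data/"]
```
In addition, the Job further down in this diff runs an rsync sidecar that periodically syncs the local emptyDir back to the persistent volume while the server is running.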
## Sending a command
```
kubectl exec -it -n minecraft deploy/minecraft-server -- /bin/bash
mc-send-to-console /help
# or directly
kubectl exec -it -n minecraft deploy/minecraft-server -- mc-send-to-console /help
```

@ -1,16 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: curseforge-api
namespace: minecraft
spec:
encryptedData:
key: AgDG6apUvB38rB9tH+/ya5Af/32IUJjHiEGZFdYYqesuqyPB/qf99EtC/7CwqD6bDQQPVycJVcxwZuF8QtYfPXzv//yMkqEUJ2G1/Q5J8I6bjNGLR636UhliUpCkH1QDOspWJUjwKDVxlFN9l0g9UajvxnqLyGzbWPeay0sJEBvAY8ltEZpLP21V+GD+HgPk3HIfSFFBMsULS6GPCjMaFxkxQb6cG3K4Ej4NHCHRGOmax+4Rk7lwMyAHlXLlrwj/ytxrnHDWrugLIJE9KKmJn6UVNTuk6olgkhleg2PixV7oOiDVyu9ZQP8wbdppzRix6dnIcFEYJ1ZDK1rNF5QErYO0gBytiJnSsdFO0jUMsdBrho2FgUc5GgIdmgXWJJz3lrGFqXaRVvbPsBZTUAsQRh2+4IfqfWmAkEjBcjs1K8WWJfS+rO9e02KoHBT4decdsd8Qfr5EFdPIzMrkUoRMI9CJnIa5u2nR08Hhd9iojbL64FZ26kXMODtEdKmlo+HwjufLX5rYJVSfOyZYzivd/kgKA87YTFaMLKej07w3ofGrPYSoCnmLfJyoQdNyJhdonBDsgM1GgRWQZDpgJ1df0SB02A5lZ4V7lHWr8KlANv9YLuMoZnVehsH1NZjNQHDInIRiTLahEBbjcJzQz4vU1UWG100ATszEYKOUVkzPnTgkqKYU99ZQ23bHP8z7iAWQeumb6V84NTi6jNITBvU4yTFLuAiI3nW34Vb1mFVLwfWqMjEYX8gBB4yMSaVshB/japfkyXU0pYg4mK9gsB4=
template:
metadata:
creationTimestamp: null
name: curseforge-api
namespace: minecraft
type: Opaque

@ -1,92 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: start-server
spec:
template:
metadata:
labels:
app: minecraft-server
spec:
restartPolicy: OnFailure
initContainers:
- name: copy-data-to-local
image: alpine
command: ["/bin/sh"]
args: ["-c", "cp -r /data/* /local-data/"]
volumeMounts:
- name: local-data
mountPath: /local-data
- name: minecraft-data
mountPath: /data
containers:
- name: minecraft-server
image: minecraft
resources:
limits:
memory: "11000Mi"
cpu: "5"
requests:
memory: "1500Mi"
cpu: "500m"
ports:
- containerPort: 25565
env:
- name: EULA
value: "TRUE"
- name: TYPE
value: "AUTO_CURSEFORGE"
- name: CF_API_KEY
valueFrom:
secretKeyRef:
name: curseforge-api
key: key
- name: CF_PAGE_URL
value: "https://www.curseforge.com/minecraft/modpacks/vault-hunters-1-18-2/files/5925838"
- name: VERSION
value: "1.18.2"
- name: INIT_MEMORY
value: "1G"
- name: MAX_MEMORY
value: "10G"
- name: MOTD
value: "VaultHunters baby!"
- name: ENABLE_RCON
value: "false"
- name: CREATE_CONSOLE_IN_PIPE
value: "true"
- name: ONLINE_MODE
value: "false"
- name: ENABLE_AUTOSTOP
value: "true"
- name: AUTOSTOP_TIMEOUT_EST
value: "1800" # stop 30 min after last disconnect
volumeMounts:
- name: local-data
mountPath: /data
- name: copy-data-to-persistent
image: rsync
command: ["/bin/sh"]
# args: ["-c", "sleep infinity"]
args: ["/run-rsync.sh"]
volumeMounts:
- name: local-data
mountPath: /local-data
- name: minecraft-data
mountPath: /persistent-data
- name: rsync-config
mountPath: /run-rsync.sh
subPath: run-rsync.sh
volumes:
- name: minecraft-data
persistentVolumeClaim:
claimName: minecraft-data
- name: local-data
emptyDir: {}
- name: rsync-config
configMap:
name: rsync-config
defaultMode: 0777

@ -1,24 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minecraft
resources:
- namespace.yaml
- pvc.yaml
- job.yaml
- service.yaml
- rsync.configmap.yaml
- curseforge.sealedsecret.yaml
images:
- name: minecraft
newName: itzg/minecraft-server
newTag: java21
- name: alpine
newName: alpine
newTag: "3.21"
- name: rsync
newName: eeacms/rsync
newTag: "2.6"

@ -1,42 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: rsync-config
data:
run-rsync.sh: |-
#!/bin/sh
set -eu
echo "Starting rsync..."
no_change_count=0
while [ "$no_change_count" -lt 3 ]; do
# use the i flag to get per line output of each change
rsync_output=$(rsync -avzi --delete /local-data/ /persistent-data/)
# echo "$rsync_output"
# in this format rsync outputs at least 4 lines:
# ---
# sending incremental file list
#
# sent 145,483 bytes received 717 bytes 26,581.82 bytes/sec
# total size is 708,682,765 speedup is 4,847.35
# ---
# even though a non-zero number of bytes is sent, no changes were made
line_count=$(echo "$rsync_output" | wc -l)
if [ "$line_count" -eq 4 ]; then
echo "Rsync output was: $rsync_output"
no_change_count=$((no_change_count + 1))
echo "No changes detected. Incrementing no_change_count to $no_change_count."
else
no_change_count=0
echo "Changes detected. Resetting no_change_count to 0."
fi
echo "Rsync completed. Sleeping for 10 minutes..."
sleep 600
done
echo "No changes detected for 3 consecutive runs. Exiting."

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: minecraft-server
spec:
selector:
app: minecraft-server
ports:
- port: 25565
targetPort: 25565
type: LoadBalancer
loadBalancerIP: 192.168.3.4

@ -1,5 +1,5 @@
kind: IngressRoute
apiVersion: traefik.io/v1alpha1
apiVersion: traefik.containo.us/v1alpha1
metadata:
name: grafana-ingress
spec:

@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/grafana
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: grafana-nfs

@ -0,0 +1,65 @@
replicas: 1
## Create a headless service for the deployment
headlessService: false
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
enabled: true
type: ClusterIP
port: 80
targetPort: 3000
# targetPort: 4181 To be used with a proxy extraContainer
annotations: {}
labels: {}
portName: service
serviceMonitor:
## If true, a ServiceMonitor CRD is created for a prometheus operator
## https://github.com/coreos/prometheus-operator
##
enabled: false
ingress:
enabled: false
persistence:
type: pvc
enabled: true
# storageClassName: default
accessModes:
- ReadWriteOnce
size: 10Gi
# annotations: {}
finalizers:
- kubernetes.io/pvc-protection
# selectorLabels: {}
## Sub-directory of the PV to mount. Can be templated.
# subPath: ""
## Name of an existing PVC. Can be templated.
existingClaim: grafana-nfs
## If persistence is not enabled, this allows to mount the
## local storage in-memory to improve performance
##
inMemory:
enabled: false
## The maximum usage on memory medium EmptyDir would be
## the minimum value between the SizeLimit specified
## here and the sum of memory limits of all containers in a pod
##
# sizeLimit: 300Mi
initChownData:
## If false, data ownership will not be reset at startup
## This allows the Grafana server to be run with an arbitrary user
##
enabled: true
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
# adminPassword: strongpassword

@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: influxdb-nfs
spec:
capacity:
storage: "10Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/influxdb
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: influxdb-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
volumeName: influxdb-nfs

@ -0,0 +1,26 @@
## Create default user through docker entrypoint
## Defaults indicated below
##
adminUser:
organization: "influxdata"
bucket: "default"
user: "admin"
retention_policy: "0s"
## Leave empty to generate a random password and token.
## Or fill any of these values to use fixed values.
password: ""
token: ""
## Persist data to a persistent volume
##
persistence:
enabled: true
## If true will use an existing PVC instead of creating one
useExisting: true
## Name of existing PVC to be used in the influx deployment
name: influxdb-nfs
ingress:
enabled: false

Some files were not shown because too many files have changed in this diff.