2 Commits

Author SHA1 Message Date
ac4d2c3fa3 using an alternative stack 2024-12-02 13:46:04 +01:00
c237e060fd add incomplete deployment 2024-05-13 14:28:37 +02:00
77 changed files with 1491 additions and 233 deletions

.gitmodules (vendored, new file)
View File

@@ -0,0 +1,6 @@
[submodule "infrastructure/external-dns/octodns"]
path = infrastructure/external-dns/octodns
url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git
[submodule "apps/monitoring/dashboards"]
path = apps/monitoring/dashboards
url = ssh://git@git.kluster.moll.re:2222/remoll/grafana-dashboards.git

View File

@@ -10,7 +10,7 @@ resources:
images:
- name: adguard/adguardhome
newName: adguard/adguardhome
newTag: v0.107.52
newTag: v0.107.48
namespace: adguard

View File

@@ -13,4 +13,4 @@ namespace: files
images:
- name: ocis
newName: owncloud/ocis
newTag: "5.0.6"
newTag: "5.0.3"

View File

@@ -13,4 +13,4 @@ resources:
images:
- name: actualbudget
newName: actualbudget/actual-server
newTag: 24.7.0
newTag: 24.5.0

View File

@@ -15,4 +15,4 @@ resources:
images:
- name: homeassistant/home-assistant
newName: homeassistant/home-assistant
newTag: "2024.7"
newTag: "2024.5"

View File

@@ -2,8 +2,6 @@ apiVersion: v1
kind: Service
metadata:
name: homeassistant-web
labels:
app: homeassistant
spec:
selector:
app: homeassistant

View File

@@ -0,0 +1,98 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config
labels:
app.kubernetes.io/name: homepage
data:
kubernetes.yaml: "" #|
# mode: cluster
settings.yaml: |
title: "Homepage"
background: https://images.unsplash.com/photo-1547327132-5d20850c62b5?q=80&w=3870&auto=format&fit=crop
cardBlur: sm
#settings.yaml: |
# providers:
# longhorn:
# url: https://longhorn.my.network
custom.css: ""
custom.js: ""
bookmarks.yaml: |
- Developer:
- Github:
- abbr: GH
href: https://github.com/moll-re
services.yaml: |
- Media:
- Jellyfin backend:
href: https://media-backend.kluster.moll.re
ping: media-backend.kluster.moll.re
- Jellyfin vue:
href: https://media.kluster.moll.re
ping: media.kluster.moll.re
- Immich:
href: https://immich.kluster.moll.re
ping: immich.kluster.moll.re
- Productivity:
- OwnCloud:
href: https://ocis.kluster.moll.re
ping: ocis.kluster.moll.re
- ToDo:
href: https://todos.kluster.moll.re
ping: todos.kluster.moll.re
- Finance:
href: https://finance.kluster.moll.re
ping: finance.kluster.moll.re
- Home:
- Home Assistant:
href: https://home.kluster.moll.re
ping: home.kluster.moll.re
- Grafana:
href: https://grafana.kluster.moll.re
ping: grafana.kluster.moll.re
- Recipes:
href: https://recipes.kluster.moll.re
ping: recipes.kluster.moll.re
- Infra:
- Gitea:
href: https://git.kluster.moll.re
ping: git.kluster.moll.re
- ArgoCD:
href: https://argocd.kluster.moll.re
ping: argocd.kluster.moll.re
widgets.yaml: |
# - kubernetes:
# cluster:
# show: true
# cpu: true
# memory: true
# showLabel: true
# label: "cluster"
# nodes:
# show: true
# cpu: true
# memory: true
# showLabel: true
- search:
provider: duckduckgo
- openmeteo:
label: Zürich # optional
latitude: 47.24236
longitude: 8.30439
units: metric # or imperial
cache: 30 # Time in minutes to cache API responses, to stay within limits
format: # optional, Intl.NumberFormat options
maximumFractionDigits: 1
- datetime:
locale: de
format:
dateStyle: long
timeStyle: short
- adguard:
url: http://adguard-home-web.adguard-home:3000
docker.yaml: ""

View File

@@ -0,0 +1,64 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
labels:
app.kubernetes.io/name: homepage
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: homepage
template:
metadata:
labels:
app.kubernetes.io/name: homepage
spec:
# serviceAccountName: homepage
# automountServiceAccountToken: true
dnsPolicy: ClusterFirst
# enableServiceLinks: true
containers:
- name: homepage
image: homepage
imagePullPolicy: Always
ports:
- name: http
containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /app/config/custom.js
name: config
subPath: custom.js
- mountPath: /app/config/custom.css
name: config
subPath: custom.css
- mountPath: /app/config/bookmarks.yaml
name: config
subPath: bookmarks.yaml
- mountPath: /app/config/docker.yaml
name: config
subPath: docker.yaml
- mountPath: /app/config/kubernetes.yaml
name: config
subPath: kubernetes.yaml
- mountPath: /app/config/services.yaml
name: config
subPath: services.yaml
- mountPath: /app/config/settings.yaml
name: config
subPath: settings.yaml
- mountPath: /app/config/widgets.yaml
name: config
subPath: widgets.yaml
- mountPath: /app/config/logs
name: logs
volumes:
- name: config
configMap:
name: config
- name: logs
emptyDir: {}

View File

@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: homepage-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`start.kluster.moll.re`)
kind: Rule
services:
- name: homepage-web
port: 3000
tls:
certResolver: default-tls

View File

@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homepage
resources:
- namespace.yaml
- deployment.yaml
- service.yaml
- configmap.yaml
- ingress.yaml
images:
- name: homepage
newName: ghcr.io/gethomepage/homepage
newTag: v0.8.13

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: homepage-web
labels:
app.kubernetes.io/name: homepage
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: homepage

View File

@@ -1,33 +1,24 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
namespace: immich
helmCharts:
- name: immich
releaseName: immich
version: 0.7.1
version: 0.6.0
valuesFile: values.yaml
repo: https://immich-app.github.io/immich-charts
images:
- name: ghcr.io/immich-app/immich-machine-learning
newTag: v1.110.0
newTag: v1.103.1
- name: ghcr.io/immich-app/immich-server
newTag: v1.110.0
patches:
- path: patch-redis-pvc.yaml
target:
kind: StatefulSet
name: immich-redis-master
newTag: v1.103.1

View File

@@ -1,17 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: immich-redis-master
spec:
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-data
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi

View File

@@ -16,16 +16,14 @@ spec:
shared_preload_libraries:
- "vectors.so"
# Persistent storage configuration
storage:
size: 1Gi
pvcTemplate:
accessModes:
- ReadWriteOnce
storageClassName: ""
resources:
requests:
storage: 1Gi
storageClassName: nfs-client
volumeMode: Filesystem
storage: "1Gi"
volumeName: immich-postgres
monitoring:
enablePodMonitor: true

View File

@@ -1,11 +1,40 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-nfs
spec:
capacity:
storage: "50Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
name: immich-nfs
spec:
storageClassName: "nfs-client"
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Gi"
storage: "50Gi"
volumeName: immich-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-postgres
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich-postgres
server: 192.168.1.157
# later used by cnpg

View File

@@ -22,19 +22,16 @@ env:
secretKeyRef:
name: postgres-password
key: password
IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}'
IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
IMMICH_METRICS: true
immich:
metrics:
# Enabling this will create the service monitors needed to monitor immich with the prometheus operator
enabled: true
persistence:
# Main data store for all photos shared between different components.
library:
# Automatically creating the library volume is not supported by this chart
# You have to specify an existing PVC to use
existingClaim: data
existingClaim: immich-nfs
# Dependencies
@@ -55,6 +52,16 @@ server:
main:
enabled: false
microservices:
enabled: true
persistence:
geodata-cache:
enabled: true
size: 1Gi
# Optional: Set this to pvc to avoid downloading the geodata every start.
type: emptyDir
accessMode: ReadWriteMany
machine-learning:
enabled: true
persistence:

View File

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jackett
spec:
selector:
matchLabels:
app: jackett
template:
metadata:
labels:
app: jackett
spec:
containers:
- name: jackett
image: jackett
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 9117
volumeMounts:
- name: media
mountPath: /media
- name: config
mountPath: /config
volumes:
- name: media
persistentVolumeClaim:
claimName: media-downloads
- name: config
persistentVolumeClaim:
claimName: transmission-config
---
apiVersion: v1
kind: Service
metadata:
name: jackett
spec:
selector:
app: jackett
ports:
- protocol: TCP
port: 9117
targetPort: 9117
type: ClusterIP

View File

@@ -0,0 +1,50 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: media-downloads
resources:
- namespace.yaml
- pvc.yaml
- transmission.deployment.yaml
- radarr.deployment.yaml
- jackett.deployment.yaml
images:
- name: transmission
newName: haugene/transmission-openvpn
newTag: 5.3.1
- name: jackett
newName: lscr.io/linuxserver/jackett
newTag: latest
- name: radarr
newName: lscr.io/linuxserver/radarr
newTag: 5.4.6
---
# 2nd version
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: media-downloads
resources:
- namespace.yaml
- pvc.yaml
- qbittorrent.deployment.yaml
- qbittorrent.service.yaml
- qbittorrent.configmap.yaml
- radarr.deployment.yaml
- radarr.service.yaml
- radarr.configmap.yaml
- openvpn.secret.yaml
images:
- name: qbittorrent
newName: binhex/arch-qbittorrentvpn
newTag: 5.0.1-1-02
- name: radarr
newName: hotio/radarr
newTag: release-5.14.0.9383

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged

View File

@@ -0,0 +1,35 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-config
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: qbittorrent-config
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: qbittorrent
labels:
app: qbittorrent
data:
VPN_ENABLED: "yes"
VPN_USER: vpnbook
VPN_PASS: e83zu76
VPN_PROV: custom
VPN_CLIENT: openvpn
LAN_NETWORK: 10.244.0.0/24,10.9.0.0/24
WEBUI_PORT: "8080"
ENABLE_STARTUP_SCRIPTS: "no"

View File

@@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: qbittorrent
spec:
selector:
matchLabels:
app: qbittorrent
replicas: 1
template:
metadata:
labels:
app: qbittorrent
spec:
containers:
- name: qbittorrent
image: qbittorrent
ports:
- containerPort: 8080
envFrom:
- configMapRef:
name: qbittorrent
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /config
securityContext:
capabilities:
add:
- NET_ADMIN
volumes:
- name: data
persistentVolumeClaim:
claimName: data
- name: config
persistentVolumeClaim:
claimName: qbittorrent-config

View File

@@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: qbittorrent
spec:
selector:
app: qbittorrent
type: ClusterIP
ports:
- name: qbittorrent
port: 8080
targetPort: 8080

View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: radarr
labels:
app: radarr
data:
# VPN_ENABLED: "true"
# VPN_CONF: "wg0"
# VPN_PROVIDER: "generic"
# VPN_LAN_NETWORK: "192.168.1.0/24"
# VPN_LAN_LEAK_ENABLED: "false"
# VPN_EXPOSE_PORTS_ON_LAN: ""
# VPN_AUTO_PORT_FORWARD: "false"
# VPN_AUTO_PORT_FORWARD_TO_PORTS: ""
# VPN_KEEP_LOCAL_DNS: "false"
# VPN_FIREWALL_TYPE: "auto"
# VPN_HEALTHCHECK_ENABLED: "false"
# PRIVOXY_ENABLED: "false"
# UNBOUND_ENABLED: "false"

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
spec:
selector:
matchLabels:
app: radarr
replicas: 1
template:
metadata:
labels:
app: radarr
spec:
containers:
- name: radarr
image: radarr
ports:
- containerPort: 7878
envFrom:
- configMapRef:
name: radarr
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /config
volumes:
- name: data
persistentVolumeClaim:
claimName: data
- name: config
persistentVolumeClaim:
claimName: radarr-config

View File

@@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: radarr
spec:
selector:
app: radarr
type: ClusterIP
ports:
- name: radarr
port: 7878
targetPort: 7878

View File

@@ -0,0 +1,81 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: transmission
spec:
selector:
matchLabels:
app: transmission
template:
metadata:
labels:
app: transmission
spec:
containers:
- name: transmission
image: transmission
resources:
limits:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 9091
env:
- name: OPENVPN_PROVIDER
value: PROTONVPN
- name: LOCAL_NETWORK
value: 10.42.0.0/16
- name: OPENVPN_CONFIG
valueFrom:
secretKeyRef:
name: protonvpn
key: country
- name: OPENVPN_USERNAME
valueFrom:
secretKeyRef:
name: protonvpn
key: username
- name: OPENVPN_PASSWORD
valueFrom:
secretKeyRef:
name: protonvpn
key: password
volumeMounts:
- name: media
mountPath: /data
- name: config
mountPath: /config
securityContext:
capabilities:
add: ["NET_ADMIN"]
volumes:
- name: media
persistentVolumeClaim:
claimName: media-downloads
- name: config
persistentVolumeClaim:
claimName: transmission-config
---
apiVersion: v1
kind: Service
metadata:
name: transmission
spec:
selector:
app: transmission
ports:
- protocol: TCP
port: 9091
targetPort: 9091
type: ClusterIP
---
apiVersion: v1
kind: Secret
metadata:
name: protonvpn
type: Opaque
stringData:
country: at.protonvpn.udp,fr.protonvpn.udp,pl.protonvpn.udp,ch.protonvpn.udp
username: VOYkNuZs5PHjeB8w
password: WvKCOPijcXKOqcL5d7zjXzOPToS4zPid

View File

@@ -1,5 +1,24 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-vue-ingress
namespace: media
spec:
entryPoints:
- websecure
routes:
- match: Host(`media.kluster.moll.re`)
middlewares:
- name: jellyfin-websocket
kind: Rule
services:
- name: jellyfin-web
port: 80
tls:
certResolver: default-tls
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-backend-ingress
namespace: media
@@ -7,7 +26,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`media.kluster.moll.re`) && !Path(`/metrics`)
- match: Host(`media-backend.kluster.moll.re`) && !Path(`/metrics`)
middlewares:
- name: jellyfin-websocket
- name: jellyfin-server-headers

View File

@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: jellyfin
labels:
metrics: prometheus
spec:
selector:
matchLabels:
app: jellyfin-server-service
endpoints:
- path: /metrics
targetPort: jellyfin
# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/
# the metrics are available at /metrics but blocked by the ingress

View File

@@ -5,11 +5,16 @@ namespace: media
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- server.deployment.yaml
- server.service.yaml
- web.deployment.yaml
- web.service.yaml
- ingress.yaml
images:
- name: jellyfin/jellyfin
newName: jellyfin/jellyfin
newTag: 10.9.9
newTag: 10.9.0
- name: ghcr.io/jellyfin/jellyfin-vue
newName: ghcr.io/jellyfin/jellyfin-vue
newTag: stable-rc.0.3.1

View File

@@ -18,9 +18,6 @@ spec:
limits:
memory: "2Gi"
cpu: "2"
requests:
memory: "128Mi"
cpu: "250m"
ports:
- containerPort: 8096
name: jellyfin

View File

@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin-web
spec:
selector:
matchLabels:
app: jellyfin-web
template:
metadata:
labels:
app: jellyfin-web
spec:
containers:
- name: jellyfin-web
image: ghcr.io/jellyfin/jellyfin-vue
resources:
limits:
memory: "128Mi"
cpu: "30m"
ports:
- containerPort: 80
env:
- name: TZ
value: Europe/Berlin
- name: DEFAULT_SERVERS
value: "https://media-backend.kluster.moll.re"

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: jellyfin-web
spec:
selector:
app: jellyfin-web
ports:
- protocol: TCP
port: 80
targetPort: 80

View File

@@ -1,16 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: curseforge-api
namespace: minecraft
spec:
encryptedData:
key: AgBYeAiejdmxDBorvgnxQX5YvUhR3NId2vfWybMKlc27e6D/bKglLNyZMk70xSnFAPjcDmZ20mYjFPYvDOr9T6IU/REJ8QlzoKAn0xW779R4SkIxRToT+dJv+OM2avgQ9uqp7vja29xeXMjYAnQML+QGZKcrT8mE04G/Ty8rdUiv3yUXK5HFAR3SUF35aVLdlthLjpRkv1s0R7GAP4L2pNzBJNV3i37viceUSSjU0zpOa23fsQOkPAs67AIukAJBqh/hyF/hR9H1GeYZNTI3OcHcvC2iNk/XGstvv0Zy6ApzoebsfWGdsbVn+QUI0EBw+mSTPqpl71cbkz0v4S4XAVndosxWpe6AIgm5MBTU0FXIyGyoFDe1aMPq8BXiQikYVwB48oVNh9KF0xXX5AOG0whB/FEsL3OJsiNQvQ3R/Hru43JBn64oxjVtLfM3E7u8v/xr1VQahX8dylDmb4s5EV01U6O4y19Ou4td1eEMlhpJb0fBPDRUYuWxZAEDGmp+U4tAakyPed11VkcZPPn9fKAAcv8sGs3TYAbbF18hqsBnv2Wd+i7ZEvKwmdmfR/T0r1TJGsvKI7jaW0QtH256XrSxQp7a52qMKMVQWOSKw2k27t/IkRhxT2Prw4GfJvaVr4RozUaBf3LV/hfDWlDfmM2zg3X9W8HkzjotGg021OLxsa0Wzmhffvb8h4bvZwxeq3U1xaJocqXui7z0rT2pF4z3wYHR/lPtexHcOA2M8gfBGKb1rBKh+kW+N+/ZfVLNI0mokg5vrTO2nR2rb4c=
template:
metadata:
creationTimestamp: null
name: curseforge-api
namespace: minecraft
type: Opaque

View File

@@ -1,41 +1,43 @@
apiVersion: batch/v1
kind: Job
apiVersion: apps/v1
kind: Deployment
metadata:
name: start-server
name: minecraft-server
spec:
selector:
matchLabels:
app: minecraft-server
template:
metadata:
labels:
app: minecraft-server
spec:
restartPolicy: OnFailure
containers:
- name: minecraft-server
image: minecraft
resources:
limits:
memory: "10000Mi"
cpu: "5"
memory: "4000Mi"
cpu: "2500m"
requests:
memory: "1500Mi"
memory: "1000Mi"
cpu: "500m"
ports:
- containerPort: 25565
env:
- name: EULA
value: "TRUE"
- name: TYPE
value: "AUTO_CURSEFORGE"
- name: CF_API_KEY
valueFrom:
secretKeyRef:
name: curseforge-api
key: key
- name: CF_PAGE_URL
value: "https://www.curseforge.com/minecraft/modpacks/vault-hunters-1-18-2/files/5413446"
- name: MODPACK
value: "https://www.curseforge.com/api/v1/mods/711537/files/5076228/download"
- name: VERSION
value: "1.18.2"
# - name: VERSION
# value: "1.16.5"
# - name: MODPACK
# value: "https://mediafilez.forgecdn.net/files/3602/5/VaultHunters-OfficialModpack-1.12.1-Server.zip"
- name: INIT_MEMORY
value: "1G"
- name: MAX_MEMORY
value: "8G"
value: "3G"
- name: MOTD
value: "VaultHunters baby!"
- name: ENABLE_RCON
@@ -44,9 +46,6 @@ spec:
value: "true"
- name: ONLINE_MODE
value: "true"
- name: ENABLE_AUTOSTOP
value: "true"
volumeMounts:
- name: minecraft-data
mountPath: /data

View File

@@ -6,10 +6,8 @@ namespace: minecraft
resources:
- namespace.yaml
- pvc.yaml
- job.yaml
- deployment.yaml
- service.yaml
- curseforge.sealedsecret.yaml
images:
- name: minecraft

View File

@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/grafana
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: grafana-nfs

View File

@@ -31,7 +31,7 @@ datasources:
datasources:
- name: Thanos
type: prometheus
url: http://thanos-querier.prometheus.svc:10902
url: http://thanos-querier.prometheus.svc:9090
isDefault: true
- name: Prometheus
type: prometheus

View File

@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: influxdb-nfs
spec:
capacity:
storage: "10Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/influxdb
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: influxdb-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
volumeName: influxdb-nfs

View File

@@ -0,0 +1,26 @@
## Create default user through docker entrypoint
## Defaults indicated below
##
adminUser:
organization: "influxdata"
bucket: "default"
user: "admin"
retention_policy: "0s"
## Leave empty to generate a random password and token.
## Or fill any of these values to use fixed values.
password: ""
token: ""
## Persist data to a persistent volume
##
persistence:
enabled: true
## If true will use an existing PVC instead of creating one
useExisting: true
## Name of existing PVC to be used in the influx deployment
name: influxdb-nfs
ingress:
enabled: false

View File

@@ -5,16 +5,16 @@ namespace: monitoring
resources:
- namespace.yaml
- grafana.pvc.yaml
# - influxdb.pvc.yaml
- grafana.ingress.yaml
- grafana-admin.sealedsecret.yaml
# grafana dashboards are provisioned from a git repository
# in the initial bootstrap of the app of apps, the git repo won't be available, so this sync will initially fail
- https://git.kluster.moll.re/remoll/grafana-dashboards//?timeout=10&ref=main
- dashboards/
helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 8.4.0
version: 7.3.9
valuesFile: grafana.values.yaml

View File

@@ -0,0 +1,52 @@
env:
- name: HOSTNAME
value: "telegraf-speedtest"
service:
enabled: false
rbac:
# Specifies whether RBAC resources should be created
create: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: false
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
agent:
interval: "2h"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: false
processors:
- enum:
mapping:
field: "status"
dest: "status_code"
value_mappings:
healthy: 1
problem: 2
critical: 3
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA==
organization: "influxdata"
bucket: "homeassistant"
inputs:
- internet_speed:
enable_file_download: false

View File

@@ -13,4 +13,4 @@ resources:
images:
- name: binwiederhier/ntfy
newName: binwiederhier/ntfy
newTag: v2.11.0
newTag: v2.10.0

View File

@@ -12,5 +12,5 @@ resources:
images:
- name: mealie
newTag: v1.11.0
newTag: v1.6.0
newName: ghcr.io/mealie-recipes/mealie

View File

@@ -11,8 +11,8 @@ resources:
images:
- name: octodns
newName: octodns/octodns # has all plugins
newTag: "2024.06"
newTag: "2024.05"
- name: git
newName: alpine/git
newTag: "v2.45.2"
newTag: "2.43.0"

View File

@@ -8,4 +8,4 @@ resources:
- namespace.yaml
- omv-s3.ingress.yaml
- openmediavault.ingress.yaml
- proxmox.ingress.yaml
- proxmox.ingress.yaml

View File

@@ -1,31 +0,0 @@
# Using Gitea Actions
The actions deployment allows repositories within this instance to use Gitea Actions.
### Building Docker images
Docker builds use the Kubernetes runner to build the images. For this to work, the pipeline needs access to the Kubernetes API; a service account is created for this purpose.
To use the correct Docker builder, use the following action:
```yaml
...
- name: Create Kubeconfig
run: |
mkdir $HOME/.kube
echo "${{ secrets.BUILDX_KUBECONFIG }}" > $HOME/.kube/config
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: kubernetes
driver-opts: |
namespace=act-runner
qemu.install=true
...
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
<other config>
```
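The `BUILDX_KUBECONFIG` secret referenced above is expected to hold a kubeconfig for that service account. A minimal sketch of what such a kubeconfig could contain is shown below; the cluster address, CA data, token and names are placeholders (only the `act-runner` namespace is taken from the `driver-opts` above):
```yaml
# Hypothetical kubeconfig stored as the BUILDX_KUBECONFIG repository secret.
# All values are placeholders; only the structure is meaningful.
apiVersion: v1
kind: Config
clusters:
  - name: kluster
    cluster:
      server: https://kubernetes.default.svc   # or the external API endpoint
      certificate-authority-data: <base64-encoded cluster CA>
users:
  - name: buildx-runner
    user:
      token: <service-account token>
contexts:
  - name: buildx
    context:
      cluster: kluster
      user: buildx-runner
      namespace: act-runner   # matches driver-opts in the action above
current-context: buildx
```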

View File

@@ -1,23 +1,25 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: act-runner
name: act-runner
name: actions-runner
spec:
replicas: 1
selector:
matchLabels:
app: act-runner
app: actions-runner
template:
metadata:
labels:
app: act-runner
app: actions-runner
spec:
restartPolicy: Always
hostname: kube-runner
serviceAccountName: actions-runner
containers:
- name: runner
image: vegardit/gitea-act-runner:dind-latest
- name: actions-runner
image: actions-runner
resources:
requests:
memory: "128Mi"
cpu: "500m"
env:
- name: GITEA_INSTANCE_URL
value: "https://git.kluster.moll.re"
@@ -26,35 +28,12 @@ spec:
secretKeyRef:
name: actions-runner-secret
key: runner-token
- name: ACTIONS_RUNNER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: GITEA_RUNNER_UID
value: '1000'
- name: GITEA_RUNNER_GID
value: '1000'
- name: GITEA_RUNNER_JOB_CONTAINER_PRIVILEGED
value: 'true'
securityContext:
privileged: true
- name: GITEA_RUNNER_LABELS
value: k8s
volumeMounts:
- name: runner-data
mountPath: /data
volumes:
- name: runner-data
persistentVolumeClaim:
claimName: runner-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: runner-data
spec:
resources:
requests:
storage: 5Gi
storageClassName: "nfs-client"
volumeMode: Filesystem
accessModes:
- ReadWriteMany
emptyDir: {}

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: actions-runner
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: actions-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch",]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "delete"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: actions-role-binding
subjects:
- kind: ServiceAccount
name: actions-runner
apiGroup: ""
roleRef:
kind: Role
name: actions-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -7,7 +7,7 @@ metadata:
namespace: gitea
spec:
encryptedData:
runner-token: AgCexZDTtbYOdG3XgvmOh9CwxzTT+dhPVCPYv/arp8cM9R45PFIfwDvFCThnTHQYZJIYpsaUvoxdXSYYdhLyBVhmuAdD0NeH47q7qRh4U6WYUF6RMqUV3Dpri00nrROx2MN8Q+uGN+wq2xloSbkDzLiS+0sD8m+ItIKhEjIMcn9PyA2OheUmbCVfyOVzCEEaOt98nweTlOXgQLJBLPhxUJFCMePfGHudAlu2lZO3dH+T8G9cC86akZnAODuI69iScuIVOtGJvj1EhPNg9d7QjmjWZOKlk0ryPdwEoR/+kD069Jp4STX7IsVqKxZcvnY8yUICbmvj2TzJWzUDMit3leBopO+8+ECSng4uANGwp3QyankmyWD8SXN3fTuASoeoWa5mYN/qCih9m5ih7FxsDNqAFumUzX1QtAN3LDmzgcomdC7D6FHc1PIaOjoEF96TaDgBc8ODMRBWDtjCzD5sTLQ3zGLRieFsOxkdb0d9E+E4pestmnbdtwNTpfgtyT0pr2847FyAmS0DhA6bb4i+JohNswo+83koJQWf1UNyKhQ14kPhPiBRp1cnYieswFjSuvBbWAjB+SYrFIns97qJon2UjwcnlJ6/KewKj5KTOWnTel1Sgxgn4y7qXFnLvReIEKc6SCz+aFFEa4qAqJVmS1STMEwcqkiM7gA+he/8mdEB0BmfwdnGK0pA1I+RD4hOhDqIQlGyTtWGQINqFulbNUS2PcLmf5nX/ERRRPLlwmZNwR27pP3iQg29
runner-token: AgBHwek/Aj/0oOnI/bnZ4FgtRoeJw4tIKvcDzBhaPdQ7bMVHyHUKYUNP7lkPgZrIN+7rhMY7C/j13iGWx4iTdhTgipLiJvyZ70pXKLSix4IpcypJTElggWkW0JW79x1HyJfBtn9iJiHnEZXPi7sEnyKhA0asAOR0ae8NS6mxxei0TIImaPaC2RHL6MOi40xsXpHz2ZaVhDQaTSRWjv0U6+WkCGcueqM2HLYfF1gqqkzGCjjhdOTK1CKvIvApZ5n8x6x94IiywCXJraDCwLz+acF2c2vA/Jb/3p7TwyyRZ5uIF5LZufhTJ6+5sFJSReHYxO4CpPA8KvM880vtiEjN7LxVo/Jruj2459OvjviKZS03ZwLHHrjanom1+HA9Sx2ffRLiR5ayGkfj/6kvpIRt5x1F7BbPp+a0LXuxJX+1nGDyEa1D1WzVKvZASav6/v7cXcom/nKGO91Zb8qHlOv7ZTs5guGQ9G9VCOHOG8szwpW3ZmQwWfFoWsShzqbDqszBYOGeIjIiDllLzTZ8A9dv9J2ELngZ1IPGIkfpQNEW8hsbNXTYhdVIrkh7BIFkRWfYDNWxqZd4iE6XllQcT1rqndusgiNEJX2r+P4nT8dPewATXQ79wzvZU3kB+VHzM8cLymlVGADi7v/qTY9RcrhuE0oMLzHRShr6JU05VfLGbMsttrYKmW7smvBp3lRJitO5A8+r8cRniS1+Xr8mIx87vCvnoWSH6BKkl9pCdDeCGylAWfkJN9UpkaKg
template:
metadata:
creationTimestamp: null

View File

@@ -0,0 +1,84 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: drone-runner
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: drone-runner
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
- create
- delete
- list
- watch
- update
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: drone-runner
subjects:
- kind: ServiceAccount
name: drone-runner
roleRef:
kind: Role
name: drone-runner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-runner
labels:
app.kubernetes.io/name: drone-runner
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: drone-runner
template:
metadata:
labels:
app.kubernetes.io/name: drone-runner
spec:
serviceAccountName: drone-runner
containers:
- name: runner
image: drone/drone-runner-kube:latest
ports:
- containerPort: 3000
env:
- name: DRONE_RPC_HOST
value: drone-server:80
- name: DRONE_RPC_PROTO
value: http
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: rpc_secret
- name: DRONE_NAMESPACE_DEFAULT
value: gitea
# - name: DRONE_NAMESPACE_RULES
# value: "drone-runner:*"
- name: DRONE_SERVICE_ACCOUNT_DEFAULT
value: drone-runner

View File

@@ -0,0 +1,117 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-server
labels:
app: drone-server
spec:
replicas: 1
selector:
matchLabels:
app: drone-server
template:
metadata:
labels:
app: drone-server
spec:
containers:
- name: drone
image: drone/drone:latest
env:
- name: DRONE_SERVER_PORT # because the deployment is called drone-server, override this var again!
value: ":80"
- name: DRONE_GITEA_SERVER
value: https://git.kluster.moll.re
- name: DRONE_USER_CREATE
value: username:remoll,admin:true
- name: DRONE_GITEA_CLIENT_ID
valueFrom:
secretKeyRef:
name: drone-server-secret
key: client_id
- name: DRONE_GITEA_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: client_secret
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: rpc_secret
- name: DRONE_SERVER_HOST
value: drone.kluster.moll.re
- name: DRONE_SERVER_PROTO
value: https
resources:
requests:
memory: "1Gi"
cpu: 1.5
volumeMounts:
- mountPath: /data
name: drone-data-nfs
volumes:
- name: drone-data-nfs
persistentVolumeClaim:
claimName: drone-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: drone-server
labels:
app: drone-server
spec:
type: ClusterIP
ports:
- port: 80
name: http
selector:
app: drone-server
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: drone-server-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`drone.kluster.moll.re`)
kind: Rule
services:
- name: drone-server
port: 80
tls:
certResolver: default-tls
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: drone-data-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/drone
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: drone-data-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: drone-data-nfs

View File

@@ -0,0 +1,23 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "drone-server-secret",
"namespace": "gitea",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "drone-server-secret",
"namespace": "gitea",
"creationTimestamp": null
}
},
"encryptedData": {
"client_id": "AgA53a7kGJ6zZcx2ooTvTNwxaW2FvfzHJnxg6co54+HXinTJKsc4+GJ1PtdIbsZ7Dgu/sLi/4X90fT+PT2sgEx9jIilmHPdJeRtwV1UID3Y46A7cJlfcAKwNOFzp2PWvBvizbNp7tbJwxeAYnVX8GfN6fi700QxBGqAI3u8qQvLpU6UGW2RM96gCXI7s1QhE1Le6TgoESy5HX95pB7csDRNSwVE02OWfDHKEjH8QD8UvBB9xct6uwDfu7KrsJiNJvWMP6arvpfhy/X+UtCTFmj5wmFYL7oc6vSiCkq+QyHgQTEHTmGpEjEGKcQxPQaus3KhbhcxQBYLMEMYRlLPH0AEAA4dzbSpoVXM3LuIe9FppgrTCknK1uRB8wyrHUeInWO8mG7UraV6m5PUS+UYODMvfjwY3PyiGhTSf6LgMlhMl8e+2rb+OsWphT8Pbeom33PucrYaRFr9RpQkJSwE6HU3JEh25YLfIJ7caqRND8C/p8kD679C8UMcNpBN8WS4Cswn5jzmwbeJNM5DGp9yQVZNx7Bv3dHzx9i3ShjJ6QQnR/zWJZ/dWLy6weGYmdZMMXRAO8CCdruvcX5YyeieXZfchSIlZ/GqqBHptdcLpwLiZsfmyTWeBvk5pMAsZaKJ1tfWpQ84s4epzMoieTfhTueGXmeRKX+DJBBcriU+5YoqNxpU1lPL+LoInorJSKN7c3ouFx78N3GDOCq7mlWI94lY0bIs5zhrfUN137ITCcED62AJ7vks=",
"client_secret": "AgDQXU7x6RLhE9Hc+goeR2+3rW316SLLLA8tfqx3tsykL+vxhRkY5UCEaak3Rgei0k14jB/Rmme+/O/D1/5tc/i885+sGn0yjU7Jo4L5nkIssUOHlmRSGkRJDb9ABPauFXAjap9KLix9bd8ewI7R0lS3tOK9ZhThYhcfDUqV9qkkbSHzwNptkH7gYWt9qzG/rqqqpFP+PCtjzKVve4LCBgaxetcnh1t+d5oh7VAFnSI9Bt1G/DRzi+K3YZ+YG5+XKevBp06GMiLUMiv/eUvmOfAB/KO79LnNVbOcRsAHfnqLbXgNjFzspr5xDiGMC/ma1245LavywqXDp0S9jjNEe48i51PPQMwHWV8XEovsM6LHcteluNogt+VkL4mOnmP+sba/V3NO51rt1WXl+ca+U4kBq4dLMsdpWUKemz9BlIRC4etEXjwKJ5DznT7u6GUTrXx2RCm1j0OYWM++P10SdyD6tGjKnZf88a33Wrwm8Y7c47JrPTlP4PqLq9gzvD310uVfs1vGYGULaToGy+D/th8qiWWlu7BIfwqlIj8lruVnOhQ4GeEZmUAsqYf8JfsBwuDc0Y+8qbwjFrr2z+5x+2XBL8KGZVopyme45SHijlBZs7YsJqTBsg5oW09grM8/oO731GtzSYmpat2VZlaILuTjALqo/cu//kxwmqh7UX+jnTJ/2N3bKKSAfHWbHDeHeS2XJ+eKaI4onNYW9J70EfAP3vOpU+zmQ8rOzJuJjRt0HarLwzc5CXb1Xhlgsaoj7zKXPQMnqIDngg==",
"rpc_secret": "AgAcJNCFtOhK28vnLredkTgsVpnMPwaXss5NT5ysc0IbVid2vWRk2CTjBZc5DzjxxLwI1Ok88MFXHP08ZGCYy4rIbwoi7Ei1OEevGWfaI4n5CvAxr4ZamQHSfIX9dVAm9BSSx2M/mDtCKqVEGJEzyHCedrxf6LXM/YTNgjD43BuCZZMu35mRsHItpYFZQSttlHiUvR8y2YKrhV2P7fiWRD3cCVao8ldzKfGuvRfal8ByGoxpsYLj2D9CdtPvRF/TQsWUJJWwzbI9DmbW1MMI4/b26Jfa5TBvHxS1MQxFJpSXuMIengO+b0bi7WaR36y/FrKSNxIrQDHI7XCb00yYaSfj3RkSBVoAD0a2p8vNupHCqsKBoaWd8tMv/wGP8wbBk4DgGeQiTIvfhbQZU/Q2/LVDDficjXVn3IuKP/cqgGVf6lUh5YsUSs8qwpMil7XySiHvaZn+iFAnsXoejd4S2e/pbRvyaxP1aa7TCxnINjpU7IrnUEUiI4glQmAte3MqZWLXcc0Uk3Qz9PP0cD+V8qCOryrPMP2kTAI8LT/K4DgcEMAEGes4Vx1l0oBMF0xJvhM2kZXcEcf0NzuQJvYTgZpQF5xp0TchezLshmEUSIkII9NvAvn+iEYJeHsJUDijjmBloSYe4+QTgdYh6FakVUwYI5U4ztDNrvgqhWjExfbn8HxaFzsNTsuzGoYs+jwXH8Wk2z1Q1oQjDdO5YTjmdqvkSTdin/5CiuCDHaQX6a4gNQ=="
}
}
}

View File

@@ -119,7 +119,7 @@ gitea:
TYPE: level
indexer:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: false
REPO_INDEXER_ENABLED: true

View File

@@ -5,18 +5,26 @@ resources:
- gitea.pvc.yaml
- gitea.ingress.yaml
- gitea.servicemonitor.yaml
- drone-kube-runner.deployment.yaml
- drone-server.deployment.yaml
- drone-server.sealedsecret.yaml
- actions.deployment.yaml
- actions.sealedsecret.yaml
# - actions.rbac.yaml
- actions.rbac.yaml
namespace: gitea
images:
- name: actions-runner
newName: ghcr.io/christopherhx/gitea-actions-runner
newTag: v0.0.11
helmCharts:
- name: gitea
namespace: gitea # needs to be set explicitly for svc to be referenced correctly
releaseName: gitea
version: 10.4.0
version: 10.1.4
valuesFile: gitea.values.yaml
repo: https://dl.gitea.io/charts/

View File

@@ -1,6 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged
name: placeholder

View File

@@ -10,6 +10,6 @@ namespace: metallb-system
helmCharts:
- name: metallb
repo: https://metallb.github.io/metallb
version: 0.14.8
version: 0.14.5
releaseName: metallb
valuesFile: values.yaml

View File

@@ -1,6 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged
name: placeholder

View File

@@ -9,6 +9,6 @@ namespace: pg-ha
helmCharts:
- name: cloudnative-pg
releaseName: pg-controller
version: 0.21.5
version: 0.21.0
valuesFile: values.yaml
repo: https://cloudnative-pg.io/charts/

View File

@@ -17,4 +17,4 @@ resources:
images:
- name: thanos
newName: quay.io/thanos/thanos
newTag: v0.35.1
newTag: v0.34.1

View File

@@ -4,7 +4,7 @@ metadata:
name: prometheus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
knd: ClusterRole
metadata:
name: prometheus
rules:
@@ -52,17 +52,26 @@ spec:
requests:
memory: 400Mi
retention: 730d
retentionSize: 3GiB
retentionSize: 50Gi
serviceAccountName: prometheus
enableAdminAPI: false
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector: {}
thanos:
version: v0.34.1
version: v0.33.0
objectStorageConfig:
# loads the config from a secret named thanos-objstore-config in the same namespace
key: thanos.yaml
name: thanos-objstore-config
volumeClaimTemplate:
metadata:
name: prometheus-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
---
apiVersion: v1
kind: Service

View File

@@ -53,3 +53,15 @@ spec:
protocol: TCP
port: 10901
targetPort: grpc
metadata:
labels:
app: thanos-querier
name: thanos-querier
spec:
ports:
- port: 9090
protocol: TCP
targetPort: http
name: http
selector:
app: thanos-querier

View File

@@ -1,32 +1,33 @@
apiVersion: apps/v1
kind: Deployment
kind: StatefulSet
metadata:
name: thanos-store
name: thanos-store-gateway
labels:
app: thanos-store
app: thanos-store-gateway
spec:
replicas: 1
selector:
matchLabels:
app: thanos-store
app: thanos-store-gateway
serviceName: thanos-store-gateway
template:
metadata:
labels:
app: thanos-store
app: thanos-store-gateway
thanos-store-api: "true"
spec:
containers:
- name: thanos
image: thanos
args:
- store
- --log.level=debug
- --data-dir=/data
- --grpc-address=0.0.0.0:10901
- --http-address=0.0.0.0:10902
- --objstore.config-file=/etc/secret/thanos.yaml
- --index-cache-size=500MB
- --chunk-pool-size=500MB
- "store"
- "--log.level=debug"
- "--data-dir=/data"
- "--grpc-address=0.0.0.0:10901"
- "--http-address=0.0.0.0:10902"
- "--objstore.config-file=/etc/secret/thanos.yaml"
- "--index-cache-size=500MB"
- "--chunk-pool-size=500MB"
ports:
- name: http
containerPort: 10902
@@ -60,6 +61,7 @@ metadata:
app.kubernetes.io/name: thanos-store
name: thanos-store
spec:
clusterIP: None
ports:
- name: grpc
port: 10901
@@ -68,4 +70,4 @@ spec:
port: 10902
targetPort: 10902
selector:
app: thanos-store
app: thanos-store-gateway

View File

@@ -11,4 +11,4 @@ resources:
images:
- name: renovate/renovate
newName: renovate/renovate
newTag: "38"
newTag: "37"

View File

@@ -9,4 +9,4 @@ resources:
images:
- name: controller
newName: docker.io/bitnami/sealed-secrets-controller
newTag: 0.27.0
newTag: 0.26.2

View File

@@ -74,13 +74,11 @@ data:
address = ":9000"
[entryPoints.dnsovertls]
address = ":8853"
address = ":853"
# route dns over https to other pods but provide own certificate
[metrics]
[metrics.prometheus]
# metrics are enabled and scraping is ensured through a servicemonitor
entryPoint = "metrics"
addEntryPointsLabels = true
addServicesLabels = true

View File

@@ -13,6 +13,6 @@ namespace: traefik-system
helmCharts:
- name: traefik
releaseName: traefik
version: 30.0.2
version: 27.0.2
valuesFile: values.yaml
repo: https://traefik.github.io/charts

View File

@@ -2,5 +2,3 @@ apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged

View File

@@ -1,11 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: traefik-certificate
spec:
capacity:
storage: "10Mi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/traefik/certs
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: certs
name: traefik-certificate
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "50Mi"
storage: "10Mi"
volumeName: traefik-certificate
storageClassName: ""

View File

@@ -0,0 +1,151 @@
## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/
replicaCount: 1
image:
repo: "telegraf"
tag: "1.24"
pullPolicy: IfNotPresent
podAnnotations: {}
podLabels: {}
imagePullSecrets: []
## Configure args passed to Telegraf containers
args: []
# The name of a secret in the same kubernetes namespace which contains values to
# be added to the environment (must be manually created)
# This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
env:
- name: HOSTNAME
value: "telegraf-polling-service"
# An older "volumeMounts" key was previously added which will likely
# NOT WORK as you expect. Please use this newer configuration.
volumes:
- name: traefik-logs
persistentVolumeClaim:
claimName: traefik-logs
mountPoints:
- name: traefik-logs
mountPath: /traefik_logs
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: # to read the traefik logs the pod must be on the same node as traefik
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions: # matches labels: app.kubernetes.io/name=traefik
- key: app.kubernetes.io/name
operator: In
values:
- traefik
topologyKey: "kubernetes.io/hostname"
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
service:
enabled: false
type: ClusterIP
annotations: {}
rbac:
# Specifies whether RBAC resources should be created
create: true
# Create only for the release namespace or cluster wide (Role vs ClusterRole)
clusterWide: false
# Rules for the created rule
rules: []
# When using the prometheus input to scrape all pods you need extra rules set to the ClusterRole to be
# able to scan the pods for scraping labels. The following rules have been taken from:
# https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46
# - apiGroups:
# - ""
# resources:
# - nodes
# - nodes/proxy
# - nodes/metrics
# - services
# - endpoints
# - pods
# - ingresses
# - configmaps
# verbs:
# - get
# - list
# - watch
# - apiGroups:
# - "extensions"
# resources:
# - ingresses/status
# - ingresses
# verbs:
# - get
# - list
# - watch
# - nonResourceURLs:
# - "/metrics"
# verbs:
# - get
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Annotations for the ServiceAccount
annotations: {}
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
agent:
interval: "10s"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: true
# processors:
# - enum:
# mapping:
# field: "status"
# dest: "status_code"
# value_mappings:
# healthy: 1
# problem: 2
# critical: 3
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
organization: "influxdata"
bucket: "kluster"
# retention_policy: "2w"
inputs:
- docker_log:
endpoint: "unix:///var/run/docker.sock"
from_beginning: false
container_name_include: ["traefik"]

View File

@@ -7,15 +7,60 @@ deployment:
kind: Deployment
# Number of pods of the deployment (only applies when kind == Deployment)
replicas: 1
# Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)
# revisionHistoryLimit: 1
# Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
terminationGracePeriodSeconds: 60
# The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
minReadySeconds: 0
# Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
annotations: {}
# Additional deployment labels (e.g. for filtering deployment by custom labels)
labels: {}
# Additional pod annotations (e.g. for mesh injection or prometheus scraping)
podAnnotations: {}
# Additional Pod labels (e.g. for filtering Pod by custom labels)
podLabels: {}
# Additional containers (e.g. for metric offloading sidecars)
additionalContainers: []
# https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host
# - name: socat-proxy
# image: alpine/socat:1.0.5
# args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
# volumeMounts:
# - name: dsdsocket
# mountPath: /socket
# Additional volumes available for use with initContainers and additionalContainers
additionalVolumes:
- name: certs
# - name: traefik-logs
# persistentVolumeClaim:
# claimName: traefik-logs
- name: traefik-certificate
persistentVolumeClaim:
claimName: certs
claimName: traefik-certificate
- name: traefik-config
configMap:
name: traefik-config
# - name: dsdsocket
# hostPath:
# path: /var/run/statsd-exporter
# Additional initContainers (e.g. for setting file permission as shown below)
initContainers: []
# The "volume-permissions" init container is required if you run into permission issues.
# Related issue: https://github.com/traefik/traefik/issues/6972
# - name: volume-permissions
# image: busybox:1.31.1
# command: ["sh", "-c", "chmod -Rv 600 /data/*"]
# volumeMounts:
# - name: data
# mountPath: /data
# Use process namespace sharing
shareProcessNamespace: false
# Custom pod DNS policy. Apply if `hostNetwork: true`
# dnsPolicy: ClusterFirstWithHostNet
# Additional imagePullSecrets
imagePullSecrets: []
# - name: myRegistryKeySecretName
# Use ingressClass. Ignored if Traefik version < 2.3 / kubernetes < 1.18.x
@@ -33,7 +78,7 @@ pilot:
# Toggle Pilot Dashboard
# dashboard: false
# Enable experimental featureskdes+
# Enable experimental features
experimental:
http3:
enabled: false
@@ -54,6 +99,11 @@ experimental:
ingressRoute:
dashboard:
enabled: false
# Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
annotations: {}
# Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
labels: {}
#
@@ -64,26 +114,65 @@ providers:
enabled: true
allowCrossNamespace: false
allowExternalNameServices: true
allowEmptyServices: false
# ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik
namespaces: []
# - "default"
kubernetesIngress:
enabled: true
allowExternalNameServices: true
allowEmptyServices: false
ingressClass: traefik
# labelSelector: environment=production,method=traefik
namespaces: []
# - "default"
# IP used for Kubernetes Ingress endpoints
publishedService:
enabled: false
# Published Kubernetes Service to copy status from. Format: namespace/servicename
# By default this Traefik service
# pathOverride: ""
# Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments:
# - "--providers.file.filename=/config/dynamic.toml"
# - "--ping"
# - "--ping.entrypoint=web"
volumes: []
# - name: traefik-config
# mountPath: /config
# configMap:
# name: traefik-config
# Additional volumeMounts to add to the Traefik container
additionalVolumeMounts:
- name: certs
# - name: traefik-logs
# mountPath: /var/log/traefik
# nfs:
# server: 192.168.1.157
# path: /kluster/traefik
# # For instance when using a logshipper for access logs
# - name: traefik-logs
# # claimName: traefik-logs
# mountPath: /var/log/traefik
- name: traefik-certificate
# claimName: traefik-certificate
mountPath: /certs
- name: traefik-config
mountPath: /config
additionalArguments:
globalArguments:
- "--configfile=/config/traefik.toml"
additionalArguments: []
# Environment variables to be passed to Traefik's binary
env:
@@ -96,13 +185,18 @@ env:
ports:
# add a new one, the other ones are kept the same.
dnsovertls:
port: 8853
expose:
default: true
port: 853
expose: true
exposedPort: 853
protocol: TCP
envFrom: []
# - configMapRef:
# name: config-map-name
# - secretRef:
# name: secret-name
tlsOptions: {}
@@ -124,4 +218,3 @@ service:
spec:
# externalTrafficPolicy: Local
loadBalancerIP: 192.168.3.1

View File

@@ -1,18 +1,19 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: minecraft-application
name: homepage-application
namespace: argocd
spec:
project: apps
source:
repoURL: ssh://git@git.kluster.moll.re:2222/remoll/k3s-infra.git
targetRevision: main
path: apps/minecraft
path: apps/homepage
destination:
server: https://kubernetes.default.svc
namespace: minecraft
namespace: homepage
syncPolicy:
automated:
prune: true
selfHeal: false
selfHeal: true

View File

@@ -28,10 +28,10 @@ resources:
- files/
- finance/
- homeassistant/
- homepage/application.yaml
- immich/
- journal/
- media/
- minecraft/application.yaml
- monitoring/
- ntfy/
- recipes/