2 Commits

133 changed files with 2477 additions and 922 deletions

.gitmodules vendored Normal file

@@ -0,0 +1,6 @@
[submodule "infrastructure/external-dns/octodns"]
path = infrastructure/external-dns/octodns
url = ssh://git@git.kluster.moll.re:2222/remoll/dns.git
[submodule "apps/monitoring/dashboards"]
path = apps/monitoring/dashboards
url = ssh://git@git.kluster.moll.re:2222/remoll/grafana-dashboards.git


@@ -10,7 +10,7 @@ resources:
images:
- name: adguard/adguardhome
newName: adguard/adguardhome
newTag: v0.107.52
newTag: v0.107.45
namespace: adguard


@@ -1,42 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
spec:
replicas: 1
selector:
matchLabels:
app: audiobookshelf
template:
metadata:
labels:
app: audiobookshelf
spec:
containers:
- name: audiobookshelf
image: audiobookshelf
ports:
- containerPort: 80
env:
- name: TZ
value: Europe/Berlin
- name: CONFIG_PATH
value: /data/config
- name: METADATA_PATH
value: /data/metadata
volumeMounts:
- name: data
mountPath: /data
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
cpu: "2"
memory: "1Gi"
volumes:
- name: data
persistentVolumeClaim:
claimName: audiobookshelf-data


@@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
namespace: audiobookshelf
images:
- name: audiobookshelf
newName: ghcr.io/advplyr/audiobookshelf
newTag: "2.13.4"


@@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: audiobookshelf-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf-web
spec:
selector:
app: audiobookshelf
ports:
- port: 80
targetPort: 80


@@ -13,4 +13,4 @@ namespace: files
images:
- name: ocis
newName: owncloud/ocis
newTag: "5.0.7"
newTag: "5.0"

File diff suppressed because one or more lines are too long


@@ -13,4 +13,4 @@ resources:
images:
- name: actualbudget
newName: actualbudget/actual-server
newTag: 24.9.0
newTag: 24.3.0


@@ -21,7 +21,7 @@ spec:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: config-dir
- name: config
mountPath: /config
resources:
requests:
@@ -31,7 +31,6 @@ spec:
cpu: "2"
memory: "1Gi"
volumes:
- name: config-dir
- name: config
persistentVolumeClaim:
claimName: config


@@ -6,7 +6,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`home.kluster.moll.re`) && !Path(`/api/prometheus`)
- match: Host(`home.kluster.moll.re`)
middlewares:
- name: homeassistant-websocket
kind: Rule
@@ -15,6 +15,7 @@ spec:
port: 8123
tls:
certResolver: default-tls
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
@@ -26,3 +27,6 @@ spec:
X-Forwarded-Proto: "https"
# enable websockets
Upgrade: "websocket"


@@ -9,10 +9,8 @@ resources:
- pvc.yaml
- service.yaml
- deployment.yaml
- servicemonitor.yaml
images:
- name: homeassistant/home-assistant
newName: homeassistant/home-assistant
newTag: "2024.9"
newTag: "2024.3"


@@ -2,12 +2,9 @@ apiVersion: v1
kind: Service
metadata:
name: homeassistant-web
labels:
app: homeassistant
spec:
selector:
app: homeassistant
ports:
- port: 8123
targetPort: 8123
name: http
targetPort: 8123


@@ -1,13 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: homeassistant-servicemonitor
labels:
app: homeassistant
spec:
selector:
matchLabels:
app: homeassistant
endpoints:
- port: http
path: /api/prometheus


@@ -1,33 +1,25 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.yaml
- postgres.sealedsecret.yaml
namespace: immich
helmCharts:
- name: immich
releaseName: immich
version: 0.7.2
version: 0.4.0
valuesFile: values.yaml
repo: https://immich-app.github.io/immich-charts
images:
- name: ghcr.io/immich-app/immich-machine-learning
newTag: v1.116.2
newTag: v1.98.2
- name: ghcr.io/immich-app/immich-server
newTag: v1.116.2
newTag: v1.98.2
patches:
- path: patch-redis-pvc.yaml
target:
kind: StatefulSet
name: immich-redis-master


@@ -1,17 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: immich-redis-master
spec:
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-data
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi


@@ -1,3 +1,4 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
@@ -12,24 +13,18 @@ spec:
secret:
name: postgres-password
# Enable the VECTORS extension
postInitSQL:
- CREATE EXTENSION IF NOT EXISTS "vectors";
postgresql:
shared_preload_libraries:
- "vectors.so"
# Persistent storage configuration
storage:
size: 2Gi
size: 1Gi
pvcTemplate:
accessModes:
- ReadWriteOnce
storageClassName: ""
resources:
requests:
storage: 2Gi
storageClassName: nfs-client
volumeMode: Filesystem
storage: "1Gi"
volumeName: immich-postgres
monitoring:
enablePodMonitor: true


@@ -1,11 +1,40 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-nfs
spec:
capacity:
storage: "50Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
name: immich-nfs
spec:
storageClassName: "nfs-client"
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Gi"
storage: "50Gi"
volumeName: immich-nfs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: immich-postgres
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/immich-postgres
server: 192.168.1.157
# later used by cnpg


@@ -22,19 +22,16 @@ env:
secretKeyRef:
name: postgres-password
key: password
IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}'
IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
IMMICH_METRICS: true
immich:
metrics:
# Enabling this will create the service monitors needed to monitor immich with the prometheus operator
enabled: true
persistence:
# Main data store for all photos shared between different components.
library:
# Automatically creating the library volume is not supported by this chart
# You have to specify an existing PVC to use
existingClaim: data
existingClaim: immich-nfs
# Dependencies
@@ -55,6 +52,16 @@ server:
main:
enabled: false
microservices:
enabled: true
persistence:
geodata-cache:
enabled: true
size: 1Gi
# Optional: Set this to pvc to avoid downloading the geodata every start.
type: emptyDir
accessMode: ReadWriteMany
machine-learning:
enabled: true
persistence:


@@ -1,5 +1,24 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-vue-ingress
namespace: media
spec:
entryPoints:
- websecure
routes:
- match: Host(`media.kluster.moll.re`)
middlewares:
- name: jellyfin-websocket
kind: Rule
services:
- name: jellyfin-web
port: 80
tls:
certResolver: default-tls
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: jellyfin-backend-ingress
namespace: media
@@ -7,7 +26,7 @@ spec:
entryPoints:
- websecure
routes:
- match: Host(`media.kluster.moll.re`) && !Path(`/metrics`)
- match: Host(`media-backend.kluster.moll.re`) && !Path(`/metrics`)
middlewares:
- name: jellyfin-websocket
- name: jellyfin-server-headers
@@ -41,4 +60,4 @@ spec:
accessControlAllowMethods: [ "GET","HEAD","OPTIONS" ] # "POST","PUT"
accessControlAllowOriginList:
- "*"
accessControlMaxAge: 100
accessControlMaxAge: 100


@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: jellyfin
labels:
metrics: prometheus
spec:
selector:
matchLabels:
app: jellyfin-server-service
endpoints:
- path: /metrics
targetPort: jellyfin
# this exposes metrics on port 8096 as enabled in the jellyfin config
# https://jellyfin.org/docs/general/networking/monitoring/
# the metrics are available at /metrics but blocked by the ingress


@@ -5,11 +5,17 @@ namespace: media
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- server.deployment.yaml
- server.service.yaml
- web.deployment.yaml
- web.service.yaml
- ingress.yaml
- jellyfin.servicemonitor.yaml
images:
- name: jellyfin/jellyfin
newName: jellyfin/jellyfin
newTag: 10.9.11
newTag: 10.8.13
- name: ghcr.io/jellyfin/jellyfin-vue
newName: ghcr.io/jellyfin/jellyfin-vue
newTag: stable-rc.0.3.1


@@ -18,9 +18,6 @@ spec:
limits:
memory: "2Gi"
cpu: "2"
requests:
memory: "128Mi"
cpu: "250m"
ports:
- containerPort: 8096
name: jellyfin


@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin-web
spec:
selector:
matchLabels:
app: jellyfin-web
template:
metadata:
labels:
app: jellyfin-web
spec:
containers:
- name: jellyfin-web
image: ghcr.io/jellyfin/jellyfin-vue
resources:
limits:
memory: "128Mi"
cpu: "30m"
ports:
- containerPort: 80
env:
- name: TZ
value: Europe/Berlin
- name: DEFAULT_SERVERS
value: "https://media-backend.kluster.moll.re"


@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: jellyfin-web
spec:
selector:
app: jellyfin-web
ports:
- protocol: TCP
port: 80
targetPort: 80


@@ -1,7 +0,0 @@
## Sending a command
```
kubectl exec -it -n minecraft deploy/minecraft-server -- /bin/bash
mc-send-to-console /help
# or directly
kubectl exec -it -n minecraft deploy/minecraft-server -- mc-send-to-console /help
```
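For example, the vanilla `/list` command reports the currently connected players; any other console command works the same way:
```
kubectl exec -it -n minecraft deploy/minecraft-server -- mc-send-to-console /list
```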


@@ -1,16 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: curseforge-api
namespace: minecraft
spec:
encryptedData:
key: AgBYeAiejdmxDBorvgnxQX5YvUhR3NId2vfWybMKlc27e6D/bKglLNyZMk70xSnFAPjcDmZ20mYjFPYvDOr9T6IU/REJ8QlzoKAn0xW779R4SkIxRToT+dJv+OM2avgQ9uqp7vja29xeXMjYAnQML+QGZKcrT8mE04G/Ty8rdUiv3yUXK5HFAR3SUF35aVLdlthLjpRkv1s0R7GAP4L2pNzBJNV3i37viceUSSjU0zpOa23fsQOkPAs67AIukAJBqh/hyF/hR9H1GeYZNTI3OcHcvC2iNk/XGstvv0Zy6ApzoebsfWGdsbVn+QUI0EBw+mSTPqpl71cbkz0v4S4XAVndosxWpe6AIgm5MBTU0FXIyGyoFDe1aMPq8BXiQikYVwB48oVNh9KF0xXX5AOG0whB/FEsL3OJsiNQvQ3R/Hru43JBn64oxjVtLfM3E7u8v/xr1VQahX8dylDmb4s5EV01U6O4y19Ou4td1eEMlhpJb0fBPDRUYuWxZAEDGmp+U4tAakyPed11VkcZPPn9fKAAcv8sGs3TYAbbF18hqsBnv2Wd+i7ZEvKwmdmfR/T0r1TJGsvKI7jaW0QtH256XrSxQp7a52qMKMVQWOSKw2k27t/IkRhxT2Prw4GfJvaVr4RozUaBf3LV/hfDWlDfmM2zg3X9W8HkzjotGg021OLxsa0Wzmhffvb8h4bvZwxeq3U1xaJocqXui7z0rT2pF4z3wYHR/lPtexHcOA2M8gfBGKb1rBKh+kW+N+/ZfVLNI0mokg5vrTO2nR2rb4c=
template:
metadata:
creationTimestamp: null
name: curseforge-api
namespace: minecraft
type: Opaque


@@ -1,57 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: start-server
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: minecraft-server
image: minecraft
resources:
limits:
memory: "10000Mi"
cpu: "5"
requests:
memory: "1500Mi"
cpu: "500m"
ports:
- containerPort: 25565
env:
- name: EULA
value: "TRUE"
- name: TYPE
value: "AUTO_CURSEFORGE"
- name: CF_API_KEY
valueFrom:
secretKeyRef:
name: curseforge-api
key: key
- name: CF_PAGE_URL
value: "https://www.curseforge.com/minecraft/modpacks/vault-hunters-1-18-2/files/5413446"
- name: VERSION
value: "1.18.2"
- name: INIT_MEMORY
value: "1G"
- name: MAX_MEMORY
value: "8G"
- name: MOTD
value: "VaultHunters baby!"
- name: ENABLE_RCON
value: "false"
- name: CREATE_CONSOLE_IN_PIPE
value: "true"
- name: ONLINE_MODE
value: "true"
- name: ENABLE_AUTOSTOP
value: "true"
volumeMounts:
- name: minecraft-data
mountPath: /data
volumes:
- name: minecraft-data
persistentVolumeClaim:
claimName: minecraft-data


@@ -1,17 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minecraft
resources:
- namespace.yaml
- pvc.yaml
- job.yaml
- service.yaml
- curseforge.sealedsecret.yaml
images:
- name: minecraft
newName: itzg/minecraft-server
newTag: java21


@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: minecraft-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi


@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: minecraft-server
spec:
selector:
app: minecraft-server
ports:
- port: 25565
targetPort: 25565
type: LoadBalancer
loadBalancerIP: 192.168.3.4


@@ -7,8 +7,8 @@ metadata:
namespace: monitoring
spec:
encryptedData:
password: AgAwMLnsYN1y8JQSqgGQbNG/8jKensTDsEw6ogITdkhDRlJcg8HQ5t7a6xLzNCrLHLJiQW8YOoyLT4lvFkBRMOa2EYcrDvBiRD0PjygWLIscKa7dA+jpAUf/icD9zsiDnTym2yf+VUANcmEgE6DiNvlcsrcmYqiR4pKVUTDlKPNOjOpTJ3nXETb3/sbt69E0JSGwtkvusYQSXKLU9KLbciihv+ycdkdlC9xy9myd4+vYZYXSh/eAvyZeb/hsmdSX7yaASmupMvet6Qsdt99PNzFQxtbQH+LQvYalVZ8bjWZQvCN/p0bA4H15otKBfe8rtEwVthgvyEvo6TK0Mg0pFY/b3AOGFmImnT3rDmgG6S8KTZH0Jce17ksFqvELQmHjqHuYpQsPDl44glM8kWRJ9Mf/Z424LRwZlJNVcOkuVl4qFqPUjzd2rWIyF0RaD0BE012C0ThJxKn2l17lVJbNtdUiR3qNpW01ot2m0CgKd2kXbjDmgRgAll4WgrukfCIn9ZnE0gVCFLJuK3MOQAaipFYy/bDO0izwl9T8nldgcI8OfiC3NTk2O+Es5jJRXu0oJGaC3HrTB7wXiwOoELvAsxLTPxKBiN9mCHCMtZX0PEtrio0dFRQ6Pi5xPng0KVT0I9dvGNsPdhPETNOB913WEvbgP8Gt3cj016nCzk51eUsYbXPpNL2B4kmbIhecqW/8kwKQPwYjVlBSXj3NxjzwMY6PvOl1
user: AgBqmjCYGMqy5zBE+vhtsynOvhWdHWDJDyl1D+laBtLjXTJwzRbNTdunHYo1ekwyqQ6Cr5pi4YMiLxAl1LIHF+Lfsp2QlY+ResAGzp9WgSBtNQDX3EmLDQofeWxMUDdMtMsE9wiKLCfNGDkRDsGquXTz+YFq03m1vH9cB8Bp+1ClWOTui+/Ce0MZlWsJZX1W8WXH7XTirtwUo0s53pc4AplUUH97ZEK3KSIxWa3gLCn0sAPDDLPX+JVA2xtpMq1XuVFiFifjzEtG2h0dejiF35FtSAR+rR4YmEfimk3QpRDfOqV5QUxvjCG+dTV49upSevF2mvbHW+o+lB6vEc6l9cZXvlbnMdaep3NmOsJcJ8wQIdFpFK4iVzFOTKSEbzLPlZ/J+sjS5vDXsfthorIO2faMA1iIf+I663zNxQU5btaK4TNYOZQlrFVjAmioRLkDhGZ6tDUPX/zMv+Crt+0HCwyEyhmvFZckDvezTZrxARSXXMKBVcvjHCyUNkz7ubZRiMU0PGM7fYuHr659e+XMRvj+LFA68ZaEIzCQpCFJenWWYAXgUdRG4LQ1LP2MwvRHpkOYSoRkHIpX7jOfhX82A60h/ta/CdbWifqNyL9OecvE3FKsZu/Kr0taw9W6nm6FBhQLgFkOnFrqp9dWnxfHruXuDBgcn0iE8nR7Ht2zS7hfQPeR4a3Y0xK3Plqbzdrb9HKnWQQhf14=
password: AgBe8isrCWd5MuaQq5CpA+P3fDizCCDo23BVauaBJLuMRIYbVwpfahaJW7Ocj3LTXwdeVVPBrOk2D6vESUXu6I0EWc3y/NFN4ZezScxMcjmeaAb+z1zWwdH0FynTPJYOxv1fis1FDTkXDmGy3FXo5NDK9ET899TtulKFkh7UqSxdrRWbD3pegJgqKGPIqDCTAxZN/ssiccfWGS4lHqQBJkXn8DeampcKwjOCvgaBdilF03GoSfpgsqa2Iw2SfTDEobWBWVMMK/RB3/Oi/YJkGwMW3ECUxvTDam8gb0RFA1xjWXoYTLVVP5fK7q7x63ns51HebloxAP1GBrt138N/iDrfbGfjNP8Lx0NFl5y5bTgYN/z8DVTOFf90xxWe+YYERdwllg0Ci1JLNbA+NszXTD4L/HC7a8XuBfjRzxMTeymNjR76jzfPkH6v1EvesOduTfSrahPgS0qS+eGOier1rHxj3EBRhOScY1ut5Bq4oJMNId9nMVbVa6xyq2HyxuJHXV+j6h5FGHmEXn9gIR7wGp8RhtPhKgVGLrHcbHZ5Th2E7eomz1T2NK/ezNP8ZhcwOj/lyGywlW0vhU798zpWhMf57k2OPeuMlfs8Y8y74epBdyBjsrMR4EDctF8RZR3vraxENiMJ6kk1gqKj04ir6HwL7blqwiybIFFnJrp2j7MzgjS4SQ687qMX5Zf5XT03aEE+9W9Epy73tT7zVQKdENCQlcm5
user: AgAdiOivMn0d+nYjYycMZz9QSiS/9QqwHPJQMHkE7/IOou+CJtBknlETNtdv84KZgBQTucufYqu3LR3djOBpdnQsYbIXDxPFgRZQ11pwu/sO2EGifDk218yyzzfZMvx1FL7JL4LI1rKoiHycZowCwsAjEtlICVOOYv1/Plki+6MHXiAGG4r/yUhugGx3VLLX+Poq8oaTeHndgSsFXJege8SfgYR4TsC7pQgsM1UQEFncGIhJYTD2ashmUxFJ+7CJjHqPR0lFRrZXmFvPwTYTCMT+tnSHnCFWtTht8cEi1NxA4kD/eKEX0rOol15EUZnFUws2WqWI634TbyGwZ7km/Yw4XoDxiQR4ar6ulkqb/djcc3cWDYE7PF1m1c+r3iog85S5CSfZ5EvdCHHrbPN9uO2gmoRQWiR5qI70YMxBSnkeLZWN05O1vUuopdXFDTafY7YskxLEdIGHGqFUpUrJZOvBB0zNBdHGgYxFzb5pNmMCC5LPlOuoKjV4yskh9Tgovz06aAvsPxn2WWx6NOJambeziKB5OmSKvPsFofViyGBekVAWSWtt9yJe6lu5OKpBEiA6xhGhQ4ZryTXu9wvVALuPSIwBFITv85sIxjJb80qhJ51wb12QgzLLcPby0HSanyBI1M4jfsXWpK8gIAbDNO+eD7z3PhD9Y/5hPqYKXZ37Geyq23xiyxG8XDj6cL+Ie6k8XipayI4=
template:
metadata:
creationTimestamp: null


@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/grafana
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: grafana-nfs


@@ -31,25 +31,12 @@ datasources:
datasources:
- name: Thanos
type: prometheus
url: http://thanos-querier.prometheus.svc:10902
url: http://thanos-querier.prometheus.svc:9090
isDefault: true
- name: Prometheus
type: prometheus
url: http://prometheus.prometheus.svc:9090
isDefault: false
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
dashboardproviders.yaml:
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
## ConfigMap data example:
@@ -59,11 +46,9 @@ dashboardProviders:
## RAW_JSON
##
dashboardsConfigMaps:
default: grafana-dashboards
home-metrics: dashboard-home-metrics
proxmox: dashboard-proxmox
gitea: dashboard-gitea
grafana.ini:
wal: true
default_theme: dark
unified_alerting:
enabled: false
wal: true


@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: influxdb-nfs
spec:
capacity:
storage: "10Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/influxdb
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: influxdb-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
volumeName: influxdb-nfs


@@ -0,0 +1,26 @@
## Create default user through docker entrypoint
## Defaults indicated below
##
adminUser:
organization: "influxdata"
bucket: "default"
user: "admin"
retention_policy: "0s"
## Leave empty to generate a random password and token.
## Or fill any of these values to use fixed values.
password: ""
token: ""
## Persist data to a persistent volume
##
persistence:
enabled: true
## If true will use an existing PVC instead of creating one
useExisting: true
## Name of existing PVC to be used in the influx deployment
name: influxdb-nfs
ingress:
enabled: false


@@ -5,16 +5,28 @@ namespace: monitoring
resources:
- namespace.yaml
- grafana.pvc.yaml
# - influxdb.pvc.yaml
- grafana.ingress.yaml
- grafana-admin.sealedsecret.yaml
# grafana dashboards are provisioned from a git repository
# in the initial bootstrap of the app of apps, the git repo won't be available, so this sync will initially fail
- https://git.kluster.moll.re/remoll/grafana-dashboards//?timeout=10&ref=main
- dashboards/
helmCharts:
- releaseName: grafana
name: grafana
repo: https://grafana.github.io/helm-charts
version: 8.5.1
version: 7.3.7
valuesFile: grafana.values.yaml
# - releaseName: influxdb
# name: influxdb2
# repo: https://helm.influxdata.com/
# version: 2.1.2
# valuesFile: influxdb.values.yaml
# - releaseName: telegraf-speedtest
# name: telegraf
# repo: https://helm.influxdata.com/
# version: 1.8.39
# valuesFile: telegraf-speedtest.values.yaml


@@ -0,0 +1,52 @@
env:
- name: HOSTNAME
value: "telegraf-speedtest"
service:
enabled: false
rbac:
# Specifies whether RBAC resources should be created
create: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: false
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
agent:
interval: "2h"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: false
processors:
- enum:
mapping:
field: "status"
dest: "status_code"
value_mappings:
healthy: 1
problem: 2
critical: 3
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA==
organization: "influxdata"
bucket: "homeassistant"
inputs:
- internet_speed:
enable_file_download: false

apps/nextcloud/README.md Normal file

@@ -0,0 +1,5 @@
### Running `occ` commands:
```
su -s /bin/bash www-data -c "php occ user:list"
```
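The command has to run inside the Nextcloud container; a sketch of the same call wrapped in `kubectl exec` (`<nextcloud-pod>` is a placeholder for the actual pod name):
```
kubectl exec -it -n nextcloud <nextcloud-pod> -- su -s /bin/bash www-data -c "php occ user:list"
```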


@@ -1,17 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: audiobookshelf-ingressroute
name: nextcloud-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`audiobookshelf.kluster.moll.re`)
- match: Host(`nextcloud.kluster.moll.re`)
kind: Rule
services:
- name: audiobookshelf-web
port: 80
- name: nextcloud
port: 8080
tls:
certResolver: default-tls


@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- ingress.yaml
- pvc.yaml
- postgres.sealedsecret.yaml
namespace: nextcloud
helmCharts:
- name: nextcloud
releaseName: nextcloud
version: 4.5.5
valuesFile: values.yaml
repo: https://nextcloud.github.io/helm/


@@ -0,0 +1,22 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "postgres-password",
"namespace": "nextcloud",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "postgres-password",
"namespace": "nextcloud",
"creationTimestamp": null
}
},
"encryptedData": {
"password": "AgCTmvBe9YFnyWOdz02rxr0hTXnWuVLeUt5dpieWMzl4cVMBj7WcyyODWtNd+eQOLARRssGNZAP4C9gH90iVRFAW1aU+NeA76oceXE5Kiiqoc8T30wE5FC6/UbTjQYRH520NF4wcCQKm//iH8o5uI2+NxZW4goeuShibXK9sijFVNXxUuTeXTmaSJjEPyB+pnmPwjzw+qjhkJJADefh9oryy5+t9ecCwXDiI/2ce2n1Vawm/Nq6/0rZMUSsF8XSiTFczKMunuGMhxGEyyx/I8NZd4XMXGSnBo0YZF7jR9+eRHIjuenPHq1kfEid2Ps4fhFSE8mEecnK7w5xE3r0XeTNHQcTId1yYneK/LQfcRkzInuRddytTwTAmsoSjROcjKjAvtyZSM81pFWJsMQ7bSVXOC0K2wvEz9khDT0RIoR/8tMh2G737F15raTe9Ggbgy3DHst4mYIpoWV/slHrOF0vR9j7X+MRN9R1cVtI1coof/tVSWQsLvv0AJfB4/6dUl+i/yNO/j+4c3WolGwqyXd+oxsZK1VrSwSCBZwBO17BmePJL2QsPVRdutq06TrlvGqP4wXySH9LRuHr3sWgr2VuDV00w+UvuU7ExI+16dWh7jrn/rvIBQSJlHDhl5+VpyM0WTMy5kSfO6nits73ZzT7BAoSU7AeQOMj3t+cUiEq9f9dk7em7QxWMuWg6QIJ+ZZ2+CCBms4rSE4x2glOxanNX/HktQg==",
"username": "AgCxJKzhsF7yNJesK5oLJP62kjFnX4UUNQ2NrHl02Hv6MAzi/AUEV3uJSXXIi3H/uMJSMxRpJQjIDsrznYVI0YHOoz1M8/y1dx8xotFv/i0XByI9sMuGtesop7ncmQbEPMaJ3pqTJyaGkEwcsEMGmwwYiRfJHmEhhCYtzEc5IAnx+nmk//HYsrSWKpJGSWl0LvdMJsnsTxrWoJjaYTW3J0Of3VOOmgkuwIFKyXW9S2cUbAco8xVYchbyiHc8LXbS3izyAidRzg1OWyqvTGMIKJDQZ3ibIiXheon5ZeYjj0fkEkv3TrB7WoKdo0090OY1eHabqAPHT8aP+WG1g6TAzbJEtg+zFfYDKIw5Tp1WkRlsD2me4HycGuZbsaXgP5vWlxF5+rULUzUgxfmTRmYTl0H8kIlmUrusZwxR5ZXnSuBJ3n3AMEjmpmTTALakxEFEPDJJoVbgcViLtANwk72yu15FlOxczT22uyW8FMkj9kYzcq/+2a/EjaTo62SnUYJ3UTQXvgMKML1yJD+zym2+xscPNmwZFBPN5BQ/64ru/Z51nWB20fWFgW3Rw67jEQMajmVclmUcASWOjHzO87feEprHeilTH+224IHzpmC4aLz/JtIP9EEvqfDUr3fRrxcgtT1DgxV37vPj6Pqn47MHr39AA850CxjFmb1VcwfH6ygXABFlxnVByZDn7xCyBNswtKJqtw=="
}
}
}

apps/nextcloud/pvc.yaml Normal file

@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-nfs
spec:
capacity:
storage: "150Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/nextcloud
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nextcloud-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "150Gi"
volumeName: nextcloud-nfs

apps/nextcloud/readme.md Normal file

@@ -0,0 +1,17 @@
## Running occ commands
Sometimes you need to run a command on the Nextcloud container directly. You can do that by running commands as the user www-data via the kubectl exec command.
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ <occ-command>"
```
Here are some examples.
### Putting Nextcloud into maintenance mode
Some admin actions require you to put your Nextcloud instance into maintenance mode (e.g. when making backups):
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ maintenance:mode --on"
```
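Once the backup is done, maintenance mode is switched off again with the `--off` flag:
```
# $NEXTCLOUD_POD should be the name of *your* nextcloud pod :)
kubectl exec $NEXTCLOUD_POD -- su -s /bin/sh www-data -c "php occ maintenance:mode --off"
```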

apps/nextcloud/values.yaml Normal file

@@ -0,0 +1,171 @@
## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
image:
tag: "28"
ingress:
enabled: false
nextcloud:
host: nextcloud.kluster.moll.re
username: admin
password: changeme
## Use an existing secret
existingSecret:
enabled: false
update: 0
# If web server is not binding default port, you can define it
# containerPort: 8080
datadir: /var/www/html/data
persistence:
subPath:
mail:
enabled: false
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
# Default config files
# IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
# Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
# configs:
# s3.config.php: |-
# <?php
# $CONFIG = array (
# 'objectstore' => array(
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
# 'arguments' => array(
# 'bucket' => 'my-bucket',
# 'autocreate' => true,
# 'key' => 'xxx',
# 'secret' => 'xxx',
# 'region' => 'us-east-1',
# 'use_ssl' => true
# )
# )
# );
nginx:
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
enabled: false
internalDatabase:
enabled: true
name: nextcloud
##
## External database configuration
##
externalDatabase:
enabled: true
## Supported database engines: mysql or postgresql
type: postgresql
## Database host
host: postgres-postgresql.postgres
## Database user
# user: nextcloud
# ## Database password
# password: test
## Database name
database: nextcloud
## Use a existing secret
existingSecret:
enabled: true
secretName: postgres-password
usernameKey: username
passwordKey: password
##
## MariaDB chart configuration
##
mariadb:
enabled: false
postgresql:
enabled: false
redis:
enabled: false
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron
##
cronjob:
enabled: false
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
## If defined, PVC must be created manually before volume will be bound
existingClaim: nextcloud-nfs
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
livenessProbe:
enabled: true
# disable when upgrading from a previous chart version
## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
enabled: false
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
rbac:
enabled: false


@@ -13,4 +13,4 @@ resources:
images:
- name: binwiederhier/ntfy
newName: binwiederhier/ntfy
newTag: v2.11.0
newTag: v2.10.0


@@ -1,55 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama-rocm
spec:
replicas: 1
selector:
matchLabels:
app: ollama-rocm
template:
metadata:
labels:
app: ollama-rocm
spec:
nodeSelector:
gpu: full
containers:
- name: ollama
image: ollama
env:
- name: HSA_OVERRIDE_GFX_VERSION
# allows to run on IGPU as well
value: "11.0.0"
ports:
- containerPort: 11434
name: ollama
volumeMounts:
- name: ollama-data
mountPath: /root/.ollama
- name: dshm
mountPath: /dev/shm
- name: dri
mountPath: /dev/dri/
- name: kfd
mountPath: /dev/kfd
resources:
requests:
memory: "1Gi"
cpu: "1"
limits:
memory: "16Gi"
cpu: "8"
volumes:
- name: ollama-data
emptyDir: {}
- name: dri
hostPath:
path: /dev/dri/
- name: dshm
emptyDir:
medium: Memory
- name: kfd
hostPath: /dev/kfd


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ollama-service
spec:
selector:
app: ollama-rocm
ports:
- protocol: TCP
port: 11434
targetPort: 11434
name: ollama


@@ -1,30 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama-ui
labels:
app: ollama-ui
spec:
replicas: 1
selector:
matchLabels:
app: ollama-ui
template:
metadata:
labels:
app: ollama-ui
spec:
containers:
- name: ollama-ui
image: ollama-ui
ports:
- containerPort: 8080
env:
- name: OLLAMA_BASE_URL
value: http://ollama-service:11434
volumeMounts:
- name: ollama-ui-data
mountPath: /app/backend/data
volumes:
- name: ollama-ui-data
emptyDir: {}


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ollama-ui-service
spec:
selector:
app: ollama-ui
ports:
- protocol: TCP
port: 8080
targetPort: 8080
name: ollama-ui


@@ -1,21 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ollama-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`llm.kluster.moll.re`)
kind: Rule
services:
- name: ollama-ui-service
port: 8080
# - match: Host(`todos.kluster.moll.re`) && PathPrefix(`/`)
# kind: Rule
# services:
# - name: todos-frontend
# port: 80
tls:
certResolver: default-tls


@@ -1,23 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ollama
resources:
- namespace.yaml
- backend.deployment.yaml
- backend.service.yaml
- frontend.deployment.yaml
- frontend.service.yaml
- ingress.yaml
images:
- name: ollama
newName: ollama/ollama
newTag: 0.3.6-rocm
- name: ollama-ui
newName: ghcr.io/open-webui/open-webui
newTag: main


@@ -1,6 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged


@@ -1,52 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: paperless
spec:
replicas: 1
selector:
matchLabels:
app: paperless
template:
metadata:
labels:
app: paperless
spec:
containers:
- name: paperless
image: paperless
ports:
- containerPort: 8000
env:
- name: PAPERLESS_REDIS
value: redis://redis-master:6379
- name: PAPERLESS_TIME_ZONE
value: Europe/Berlin
- name: PAPERLESS_OCR_LANGUAGE
value: deu+eng+fra
- name: PAPERLESS_URL
value: https://paperless.kluster.moll.re
- name: PAPERLESS_SECRET_KEY
valueFrom:
secretKeyRef:
name: paperless-secret-key
key: key
- name: PAPERLESS_DATA_DIR
value: /data
- name: PAPERLESS_MEDIA_ROOT
value: /data
volumeMounts:
- name: data
mountPath: /data
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
cpu: "2"
memory: "1Gi"
volumes:
- name: data
persistentVolumeClaim:
claimName: paperless-data


@@ -1,31 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- paperless-secret-key.sealedsecret.yaml
namespace: paperless
images:
- name: paperless
newName: ghcr.io/paperless-ngx/paperless-ngx
newTag: "2.12.1"
helmCharts:
- name: redis
releaseName: redis
repo: https://charts.bitnami.com/bitnami
version: 20.1.5
valuesInline:
auth:
enabled: false
replica:
replicaCount: 0
master:
persistence:
storageClass: "nfs-client"


@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder


@@ -1,15 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: paperless-secret-key
namespace: paperless
spec:
encryptedData:
key: AgAjDxYW+bf+7a+65DR/u5Qrh37JSPQCstUrNWRdxq9We1eGf7qzTnmyke/O5TiE26rHVx3yzyK/Lcp/R+pJUnDauCvL6ja7k82DLzElkoRGhvRg4nr5Iehw488WIdJDXWqAbus4oLFCgnj5axs1B97hEiAN2onCPDsOuk7oSdJfG4mMI47Ass2qFPyQaff9TulLXQQEY5U7LrawCTudUPeiTCYGbOjBadPjEzn5pDwsyAd1G+NrqoPOwkrbNzrwMwbnwB4hLO0f+jrYOh2OMNcdZzMZgM671VH9cRYSVV6uz5iAN4A1NpZ1ZdenQN4pcWvaPmPOcvp14vjZtrYbGeyaNGnob5IycRrO4yaf+0V7DZ4Thwc/vqm6r5y/MR9U4Q9EFoNNHYmfo9VEw7LhivtaDOG8OaZXUnIFoXFLOZ59qfoZdIyK4eByTRQBZFZLK9rVgXOommbqlCgzNuDM7u11OGcYfROJFeiI9pH333x5u7GZsDz0hnAjWKphXzeTglXdaXMsQUeAHusdqKCn0X1cMatGUjkBAXwlOBrqmaDwSRdyc/+J2QIdkyQM9A+88+yoop7q8c5P8oizBikVaL7SojulUTJStH5cv7nRzhmpAY4j15+o3RQKrbEjGB4HVVx3VBFjjOiP9gfjhiYqxznYwkYTpXADPwjhLFf4opOPuhpoUD1M3OKXlQpPK/RvFTWWsh14jbJuL7WJpXbfyYs0+drbVdnYeUsn8OKlnFDoOaACdpNUCr6t9dSFMs7o7Mo8yN0E
template:
metadata:
creationTimestamp: null
name: paperless-secret-key
namespace: paperless


@@ -1,11 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: paperless-data
spec:
storageClassName: "nfs-client"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: paperless-web
spec:
selector:
app: paperless
ports:
- port: 8000
targetPort: 8000


@@ -12,5 +12,5 @@ resources:
images:
- name: mealie
newTag: v1.12.0
newTag: v1.3.2
newName: ghcr.io/mealie-recipes/mealie


@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
name: placeholder


@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: syncthing
spec:
selector:
matchLabels:
app: syncthing
template:
metadata:
labels:
app: syncthing
spec:
containers:
- name: syncthing
image: syncthing
resources:
limits:
memory: "256Mi"
cpu: "500m"
ports:
- containerPort: 8384
protocol: TCP
name: syncthing-web
- containerPort: 22000
protocol: TCP
- containerPort: 22000
protocol: UDP
volumeMounts:
- name: persistence
mountPath: /var/syncthing
volumes:
- name: persistence
persistentVolumeClaim:
claimName: syncthing-claim


@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: rss-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`syncthing.kluster.moll.re`)
kind: Rule
services:
- name: syncthing-web
port: 8384
tls:
certResolver: default-tls


@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: syncthing
resources:
- namespace.yaml
- pvc.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- servicemonitor.yaml
- syncthing-api.sealedsecret.yaml
images:
- name: syncthing
newName: syncthing/syncthing
newTag: "1.27"

apps/syncthing/pvc.yaml Normal file

@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: syncthing-data
spec:
capacity:
storage: "50Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /kluster/syncthing
server: 192.168.1.157
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: syncthing-claim
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
volumeName: syncthing


@@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
name: syncthing-web
labels:
app: syncthing
spec:
selector:
app: syncthing
type: ClusterIP
ports:
- port: 8384
targetPort: 8384
name: syncthing-web
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-listen
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
LoadBalancerIP: 192.168.3.4
ports:
- port: 22000
targetPort: 22000
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: syncthing-discover
annotations:
metallb.universe.tf/allow-shared-ip: syncthing-service
spec:
selector:
app: syncthing
type: LoadBalancer
LoadBalancerIP: 192.168.3.4
ports:
- port: 22000
targetPort: 22000
protocol: UDP


@@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: syncthing-servicemonitor
labels:
app: syncthing
spec:
selector:
matchLabels:
app: syncthing
endpoints:
- port: syncthing-web
path: /metrics
bearerTokenSecret:
name: syncthing-api
key: token
namespace: syncthing


@@ -0,0 +1,16 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: syncthing-api
namespace: syncthing
spec:
encryptedData:
token: AgC1hG1aguLIWBgA1R19MGrXDq7BONAldMEXtCeGXLO9Xar08f7qFqprtRJAMOID4trUEBMAkF96m7rH7QHTpO0WzRLrJctLi7U6NgESUJBDxusqjij3RAANS69Xt27mu2oa+rhm605CfFJT6Gpx/2CxrFtUD3yCijilDnEVvw4WvTLHvVQMCd8cM8ZDlpBsSYbxvtCUN1+B02DCucLpMphspxV2SGPAdc04xQD7d0vUhNLekFi0xSgu0jiRGVDHOG5Egd9d/BGeNOBgiUVxJxqqdXc6EmkslcSUtMQJ5luSxjogf+p3jdOqt4aPpUeR8sSPb6OSEIZD/Cfs9X4akHdpUAqkycu+V24lDxeHWAtIviCMBPttrwNAEytgwqaiT0U4UmL5GqR97jpmy3Tx+jYKuXkt4Igb6VByreuL9aZacRrqRhCCgbg95Y/UrYlLAbZYOI/+KsFzB5akGpZXUDcW9h2IkTUmcT+QxWXqEoNpoTI5qAnKiu/9T5elDKghjMHYX+CnPj+rXlQIJzX7NkZ0Q6HpKQ4B2Vd1Ewkvadf963jBodUe7WiMt8UeYgzCa33F4U23JjExIrL8t3r8MQ/IIdtfUvyz6Da1vp5hjpBUnUCk8rca/6VC3GO1GP3DLdIXiZQY1OOTHJlyLG7+bIL35zVfkmLMzmlIdaFsfeYiL4P+hYRbLABPAJk8lY7MEdiczpvI9HlmFVatJaPrFJwx9jyhzqIOq5eGt0OIkFt+fw==
template:
metadata:
creationTimestamp: null
name: syncthing-api
namespace: syncthing
type: Opaque


@@ -12,9 +12,7 @@ spec:
destination:
server: https://kubernetes.default.svc
namespace: argocd
# syncPolicy:
# automated:
# prune: true
# selfHeal: false
# DO NOT AUTO SYNC THE APP OF APPS.
# all other apps are auto-synced, but adding new apps should be done manually.
syncPolicy:
automated:
prune: true
# selfHeal: true


@@ -1,15 +0,0 @@
# How to restore
1. Port-forward the REST API for gcloud
```bash
kubectl port-forward -n backup service/rclone-gcloud 8000
```
2. Load the snapshots locally
```bash
restic -r rest:http://127.0.0.1:8000/kluster mount /mnt/restic
```
(The password is in a secret)
3. Copy relevant files to the correct location on the NAS
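To double-check what the repository contains before copying files back, the snapshots can also be listed (same repository string and password as in step 2):
```bash
restic -r rest:http://127.0.0.1:8000/kluster snapshots
```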


@@ -1,19 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: backblaze-credentials
namespace: backup
spec:
encryptedData:
bucket-id: AgBwwjlkGjskxMXXpXrnfcT9fJGgDXtbOO/6WcpqsX0exoADw31dADjLTHztiddsGYipiGFf2DBWge69UEnL04NXIzh/xTwWtWaqlz6yOJm/89FMQE1mfbrrLc7tk98TO3oS8i+IDAnkUiYyvDXJexgJg56QLY595PXkpplYit2bAk43mAB02yUZAK0gMs3KRDIvhHFsMq8Uiqx78En5KGGXwEg6KbVDyNvI2k8suEyy+C0yNO/M6dlczoUQiIJbllQzbqIzuxbOp609PfvGFAYHuPlz1kwsg+feZJ3kNsHYi4hWvpd64BWb30iO9J3dAYfW6d7C61t3S5uabmnd9E7bMYZA/OppD8SCknBFalXF91BUiJao9qBVd/BB7TCZOzhdzhxTW+FhgARcA+GIfg+nIzgBqHfAfQQmAOO2RZnsWrvMysyZaUpODvU8kSsLWZJ3CESVRVU3BHmJZpyxjX/s6QEXShQXZaLq54zoFJULZU/kbom5yFNNDWW5sKbURPvbKJcf/J9QabY5toO4yOwDk96Sr/FI+CHHvMh8/amigva0Upq6naiTHXMf4BR6+w3VKP5ALn5cbD5jG7EpUA/j1roMoLn68GMAtTJDLvSq2BGeJENWrUpOmjWZHDKZy8DEKorJk/Wbp46ksteSALE8eXpi4DRKXYDPvDb57EhzoJGQ9NMXgAvU9+1vw2nyTZE4gAWKpg0JkiHglu4Om79HfuGuewzJXlY=
key-id: AgBe4Iytjw9CkT7CqqNLHWyG4F8F+R6m8W1fnQZdGJvLy7D81+nB2ZDMCUZgUQV/mppGnXCkSxHyelcyTYCswQ9bD8hZsAIiQACq39v2UFxdORFEgNMj4bTWPo65fwhSK2giozPqN/4lzPopP91uyq1Z0gQaiqn/HDtbfNjq+Nu9kPmV06O1NUj1f7QqvfsMK+Xadv0G+DAA+ClGaq1q3fZPDkl2MSDdbEPGq+fLPK2DSdYu9fr1bc9TPyaU5JDFvGrCg8Nyigugi4wVGjQFtVwR1QsHAADmnoDAdXoj1Sz65QO5F+zNaDZWvnLnVFxAFrXJHihilc4ilAc8hSpE6YHQm7dGO5gGejLRNaaCspb/fPJD1g2XlnkckhFCSwd9sHNrXb07BZk8gFTHfndEC0t2UyUNloYpXsfap6fvehPK91ey35jbxDk9zYdgKZ7bz/tY0450CkofSbhisf2DwS3prt5YCEDaXasrpUqlk4dSbmLaGm6aee8lM5VpO8qYE56nvXY7qr0yB6z/NGWuLX3oH3fV+C8hH1P4mxDjvVEFdtewlF50Bf9WFoE7KpboSvmChZhBfjOkbtsGmQE41ZuNoYVupetXn6IfvYo6MzGoG6dTRVpJ5S2KLQDtsUljQfXJDFCSYwo87DD2dGBEn/z9GFCIPAYNO2ewzU4RUgcXPuDD4I2tdNIC+xdEBZq7BaWn/46Yfc1+FAWUA9VWEL/9kz5tK6hFD3Ww
key-secret: AgBcKSHdXHeNBzkZRtbaOEZra7AAWVlzmubaQoklECr14gKNL7rTReqX87qQObjQjmGKXtnJlKXIVHGDuiuHGkqfxQ9PCxccvpA3/7LdbFZnZtlFDWpAv+VB6Tp7H7Quho/GeAo8u6de0BXz85lz7+RyDCssBpuuzpchMgOlcEmhhfgQM5E6ye7bD6LpAZWcay3PV6FW2xTrJvLobpCcJordye6iTdSySPKdk6zflkon9h1KuQT+njmW4cfTQg/u7iS/NDQYcHdCpDHRLCor4GkVmi7NW8q+WuYhUSGWBy55SGvcUobhUL7GEHFJZpKmyrBOwSbwiWUDoN+NjI2TR5xvG0Ldjd/Hj32Vk29I+xSnj/O7pZj5ho35qExlZ/WCe42i0VHjzHFbOoU1MkqB+Skm24L1cLufhyNBtA8NNN3GWZhkcozpe164gpx4H/Vfe0UyzxUn4VJIws/IXYiLb4DgDkGrV+wzigN2QfSgTgs6syQkSs4UJ4gUZeN0jsyq0YHIhq1VZ8qPtLH310d8LZLxpTjZdO0obBwJfnHkg3blwSABEt5756C5DvjKmvO1pjG+JX/PJ0yAINL9Sc+FsY7TnGlItVzD830NcZ3Gg9C4Tg4xBEHybUWCSl1rJjwMvmUvVKNcIzLBHPAOyle1VLTZ37zb13MnhwNwdUtBu7+RZTy9wVO26iqemXTtFVj13kgZkJsyLjM6bo2y2wvFmjBCV9EKQtm87ROStM7iKB46
repository-string: AgAEYSqT6VR4OKW9/ZDgjYV+rm4tK3uucZsQG2u7W+8rRO0Rowkuba1AbybjgTE+G3q8si/GtlgsB1J8suvztHcVLNxg0y0Olb40pn2sZjKd85Q8AzsM0pFMkzme1lvnwu1Bcgd6Ck+FCr4CuUnh+UvJM5iWhAoSHJtdBb/EKw2F1BhewAqMXSX4oWM7V/T8RTtW2wBUk4wgX4ia+gCBPMpTo6i00ZtSpRB3Ub9VfGJfRmiXZA7oh5j3yN9nJlXonbJYBp4DNWod76CF7s35HBnSzS7YfIV80R4lH4xAPgRbZ0tvPWkTLMhSBX8rJCEnxT7DjL2lS7WfyboLdL4Uy8WmP7n2difZSr7p2iic6SCP44YgQ8JY5UgXh2QZENQ6oLvjK/PpFTjQ4bfw3E1/dakg1EdCYc/6DPyK3YekccUe/pXvVBrazpgObxXWeKKT6RMDeYWLUXTBHWhJ5OaJHHePD5t9IM65FlFTtkuUFbxExTi/u+RczBlWWcy10Yow3I3LGrDPJ/PfBy4RPfHCeQDQ+WoLJkNT20nu5mBXEYYKgNvgdn4yogifSiNG8vsNpo4dO89aHppbnHdmdp7F9asfbn27btEoFoHzIFku/mEnY5srs+Smrhhzy0+iPKge9LOuW33Gi4oJban1UYFvLAjq8YsZGkbIO82r2p+v15UjwU3girWPl1KBUc8WzLJtnqBvRPWS1ul6/X9JIdmmrHJjC0KlcGiVMU/Ls9ljnQj7dgLXuDW7JzuK3MhTWV3Y2kS39ze3TskCKcbL
template:
metadata:
creationTimestamp: null
name: backblaze-credentials
namespace: backup
type: Opaque


@@ -46,27 +46,14 @@ spec:
name: backup-nfs-access
env:
# secrets live in the same namespace as per kustomization.yaml
- name: RESTIC_REPOSITORY
value: rest:http://rclone-gcloud:8000/kluster
# lives in the same namespace
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: restic-gdrive-credentials
key: restic-password
- name: RESTIC_REPOSITORY
valueFrom:
secretKeyRef:
name: backblaze-credentials
key: repository-string
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: backblaze-credentials
key: key-id
- name: AWS_ACCESS_KEY
valueFrom:
secretKeyRef:
name: backblaze-credentials
key: key-secret
volumes:
- name: backup-nfs-access
persistentVolumeClaim:


@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- rclone-config.sealedsecret.yaml
- restic-password.sealedsecret.yaml
- pvc.yaml


@@ -1,13 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: backup
resources:
- namespace.yaml
- pvc.yaml
- restic-password.sealedsecret.yaml
- backblaze-credentials.sealedsecret.yaml
- cronjobs-overlays/prune/
- cronjobs-overlays/backup/


@@ -4,7 +4,7 @@ kind: Kustomization
namespace: backup
# nameSuffix: -backup
resources:
- ../../cronjobs-base
- ../../base
# - ./restic-commands.yaml


@@ -3,7 +3,7 @@ kind: Kustomization
namespace: backup
resources:
- ../../cronjobs-base
- ../../base
# patch the cronjob args field:


@@ -17,12 +17,10 @@ spec:
# RESTIC_ARGS Can be for instance: --verbose --dry-run
# RESTIC_REPOSITORY is set in the secret
- >-
restic unlock
&&
restic forget
-r $(RESTIC_REPOSITORY)
--verbose=2
--keep-daily 7 --keep-weekly 10
--keep-daily 7 --keep-weekly 5
--prune
containers:
- name: ntfy-command-send


@@ -11,8 +11,8 @@ resources:
images:
- name: octodns
newName: octodns/octodns # has all plugins
newTag: "2024.08"
newTag: "2024.03"
- name: git
newName: alpine/git
newTag: "v2.45.2"
newTag: "2.43.0"


@@ -8,4 +8,4 @@ resources:
- namespace.yaml
- omv-s3.ingress.yaml
- openmediavault.ingress.yaml
- proxmox.ingress.yaml
- proxmox.ingress.yaml


@@ -1,31 +0,0 @@
# Using Gitea Actions
The actions deployment allows repositories within this instance to use Gitea Actions.
### Building Docker images
Docker builds use the Kubernetes runner to build the images. For this to work, the pipeline needs to be able to access the kube-api; a service account is created for this purpose.
To use the correct Docker builder, use the following action:
```yaml
...
- name: Create Kubeconfig
run: |
mkdir $HOME/.kube
echo "${{ secrets.BUILDX_KUBECONFIG }}" > $HOME/.kube/config
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: kubernetes
driver-opts: |
namespace=act-runner
qemu.install=true
...
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
<other config>
```


@@ -1,60 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: act-runner
name: act-runner
spec:
replicas: 1
selector:
matchLabels:
app: act-runner
template:
metadata:
labels:
app: act-runner
spec:
restartPolicy: Always
containers:
- name: runner
image: vegardit/gitea-act-runner:dind-latest
env:
- name: GITEA_INSTANCE_URL
value: "https://git.kluster.moll.re"
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: actions-runner-secret
key: runner-token
- name: ACTIONS_RUNNER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: GITEA_RUNNER_UID
value: '1000'
- name: GITEA_RUNNER_GID
value: '1000'
- name: GITEA_RUNNER_JOB_CONTAINER_PRIVILEGED
value: 'true'
securityContext:
privileged: true
volumeMounts:
- name: runner-data
mountPath: /data
volumes:
- name: runner-data
persistentVolumeClaim:
claimName: runner-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: runner-data
spec:
resources:
requests:
storage: 5Gi
storageClassName: "nfs-client"
volumeMode: Filesystem
accessModes:
- ReadWriteMany


@@ -1,16 +0,0 @@
---
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: actions-runner-secret
namespace: gitea
spec:
encryptedData:
runner-token: AgAUU0jMe3bhoaOdqRZjRzvuQyRMagahDQtX2eqoJ78xihMPkL2yK5MZoCbcps2+xq2zSBgtdwA8xAMyVC4aKkeqYaPSlvBcvuGbcEsnGYJB1Fmjqn2CbvF4nbfaio+XMBmhZXW+GiPWmeiID6LhMwZghzVmcLEuqSmBJ0uB203j0wqsz/k9haL5zZ3vZRE0ofNFceDiVE55TrvTBiLQf1H6R9kFSaRRvcuCH8desX3OmkcSZ0PktULM7KElF9pX1gndrbwiEL5XK60KzE9URl2qpTK/mRrN88ZBa6IuX7u7M579yD3d7yS/JgYi2TL8s3Z69v8JF/nF1ha19xJFhEp1iiyS40xo8cuGHbfVzDSExbJ9fQMpG+1w8ZmyiARXT0EMjuz7tBSruKlr21R6lvwyri71Zg6cUKoVcmQlcmEW7Y6TkH4dsOGlpBX2KsLai7ObGgsQePZ7BHaMTEl54omtdsNsQaquElKhhhBVLEGGQgbP/YZ0wT244mgQkjuMLjVxAM1IWsu4THUY16F+bphzw4xYesZTYYCJUpNO3FDvcsyqlMgPlLMnO3CZyt+Y1avrfz/id5eJUxlVFx9y5htzXA1GaBgrnoRkrpv2OVRFIxatASGbbQgqcDIWx3VXfjVF32fnzVUNtiTZ+pvC/UcyAvFZmaZIrdbK42cA85O1FaOThHJg+8rpc4RXWOOiVg8+8BAQUd/c9bdPJeYLavDefaI5O9DZT4UqiQioBCET2yZPIhwm9JBT
template:
metadata:
creationTimestamp: null
name: actions-runner-secret
namespace: gitea
type: Opaque


@@ -0,0 +1,84 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: drone-runner
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: drone-runner
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- get
- create
- delete
- list
- watch
- update
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: drone-runner
subjects:
- kind: ServiceAccount
name: drone-runner
roleRef:
kind: Role
name: drone-runner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-runner
labels:
app.kubernetes.io/name: drone-runner
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: drone-runner
template:
metadata:
labels:
app.kubernetes.io/name: drone-runner
spec:
serviceAccountName: drone-runner
containers:
- name: runner
image: drone/drone-runner-kube:latest
ports:
- containerPort: 3000
env:
- name: DRONE_RPC_HOST
value: drone-server:80
- name: DRONE_RPC_PROTO
value: http
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: rpc_secret
- name: DRONE_NAMESPACE_DEFAULT
value: gitea
# - name: DRONE_NAMESPACE_RULES
# value: "drone-runner:*"
- name: DRONE_SERVICE_ACCOUNT_DEFAULT
value: drone-runner


@@ -0,0 +1,117 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-server
labels:
app: drone-server
spec:
replicas: 1
selector:
matchLabels:
app: drone-server
template:
metadata:
labels:
app: drone-server
spec:
containers:
- name: drone
image: drone/drone:latest
env:
- name: DRONE_SERVER_PORT # because the deployment is called drone-server, override this var again!
value: ":80"
- name: DRONE_GITEA_SERVER
value: https://git.kluster.moll.re
- name: DRONE_USER_CREATE
value: username:remoll,admin:true
- name: DRONE_GITEA_CLIENT_ID
valueFrom:
secretKeyRef:
name: drone-server-secret
key: client_id
- name: DRONE_GITEA_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: client_secret
- name: DRONE_RPC_SECRET
valueFrom:
secretKeyRef:
name: drone-server-secret
key: rpc_secret
- name: DRONE_SERVER_HOST
value: drone.kluster.moll.re
- name: DRONE_SERVER_PROTO
value: https
resources:
requests:
memory: "1Gi"
cpu: 1.5
volumeMounts:
- mountPath: /data
name: drone-data-nfs
volumes:
- name: drone-data-nfs
persistentVolumeClaim:
claimName: drone-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: drone-server
labels:
app: drone-server
spec:
type: ClusterIP
ports:
- port: 80
name: http
selector:
app: drone-server
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: drone-server-ingress
spec:
entryPoints:
- websecure
routes:
- match: Host(`drone.kluster.moll.re`)
kind: Rule
services:
- name: drone-server
port: 80
tls:
certResolver: default-tls
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: drone-data-nfs
spec:
capacity:
storage: "1Gi"
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/drone
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: drone-data-nfs
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
volumeName: drone-data-nfs


@@ -0,0 +1,23 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "drone-server-secret",
"namespace": "gitea",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "drone-server-secret",
"namespace": "gitea",
"creationTimestamp": null
}
},
"encryptedData": {
"client_id": "AgA53a7kGJ6zZcx2ooTvTNwxaW2FvfzHJnxg6co54+HXinTJKsc4+GJ1PtdIbsZ7Dgu/sLi/4X90fT+PT2sgEx9jIilmHPdJeRtwV1UID3Y46A7cJlfcAKwNOFzp2PWvBvizbNp7tbJwxeAYnVX8GfN6fi700QxBGqAI3u8qQvLpU6UGW2RM96gCXI7s1QhE1Le6TgoESy5HX95pB7csDRNSwVE02OWfDHKEjH8QD8UvBB9xct6uwDfu7KrsJiNJvWMP6arvpfhy/X+UtCTFmj5wmFYL7oc6vSiCkq+QyHgQTEHTmGpEjEGKcQxPQaus3KhbhcxQBYLMEMYRlLPH0AEAA4dzbSpoVXM3LuIe9FppgrTCknK1uRB8wyrHUeInWO8mG7UraV6m5PUS+UYODMvfjwY3PyiGhTSf6LgMlhMl8e+2rb+OsWphT8Pbeom33PucrYaRFr9RpQkJSwE6HU3JEh25YLfIJ7caqRND8C/p8kD679C8UMcNpBN8WS4Cswn5jzmwbeJNM5DGp9yQVZNx7Bv3dHzx9i3ShjJ6QQnR/zWJZ/dWLy6weGYmdZMMXRAO8CCdruvcX5YyeieXZfchSIlZ/GqqBHptdcLpwLiZsfmyTWeBvk5pMAsZaKJ1tfWpQ84s4epzMoieTfhTueGXmeRKX+DJBBcriU+5YoqNxpU1lPL+LoInorJSKN7c3ouFx78N3GDOCq7mlWI94lY0bIs5zhrfUN137ITCcED62AJ7vks=",
"client_secret": "AgDQXU7x6RLhE9Hc+goeR2+3rW316SLLLA8tfqx3tsykL+vxhRkY5UCEaak3Rgei0k14jB/Rmme+/O/D1/5tc/i885+sGn0yjU7Jo4L5nkIssUOHlmRSGkRJDb9ABPauFXAjap9KLix9bd8ewI7R0lS3tOK9ZhThYhcfDUqV9qkkbSHzwNptkH7gYWt9qzG/rqqqpFP+PCtjzKVve4LCBgaxetcnh1t+d5oh7VAFnSI9Bt1G/DRzi+K3YZ+YG5+XKevBp06GMiLUMiv/eUvmOfAB/KO79LnNVbOcRsAHfnqLbXgNjFzspr5xDiGMC/ma1245LavywqXDp0S9jjNEe48i51PPQMwHWV8XEovsM6LHcteluNogt+VkL4mOnmP+sba/V3NO51rt1WXl+ca+U4kBq4dLMsdpWUKemz9BlIRC4etEXjwKJ5DznT7u6GUTrXx2RCm1j0OYWM++P10SdyD6tGjKnZf88a33Wrwm8Y7c47JrPTlP4PqLq9gzvD310uVfs1vGYGULaToGy+D/th8qiWWlu7BIfwqlIj8lruVnOhQ4GeEZmUAsqYf8JfsBwuDc0Y+8qbwjFrr2z+5x+2XBL8KGZVopyme45SHijlBZs7YsJqTBsg5oW09grM8/oO731GtzSYmpat2VZlaILuTjALqo/cu//kxwmqh7UX+jnTJ/2N3bKKSAfHWbHDeHeS2XJ+eKaI4onNYW9J70EfAP3vOpU+zmQ8rOzJuJjRt0HarLwzc5CXb1Xhlgsaoj7zKXPQMnqIDngg==",
"rpc_secret": "AgAcJNCFtOhK28vnLredkTgsVpnMPwaXss5NT5ysc0IbVid2vWRk2CTjBZc5DzjxxLwI1Ok88MFXHP08ZGCYy4rIbwoi7Ei1OEevGWfaI4n5CvAxr4ZamQHSfIX9dVAm9BSSx2M/mDtCKqVEGJEzyHCedrxf6LXM/YTNgjD43BuCZZMu35mRsHItpYFZQSttlHiUvR8y2YKrhV2P7fiWRD3cCVao8ldzKfGuvRfal8ByGoxpsYLj2D9CdtPvRF/TQsWUJJWwzbI9DmbW1MMI4/b26Jfa5TBvHxS1MQxFJpSXuMIengO+b0bi7WaR36y/FrKSNxIrQDHI7XCb00yYaSfj3RkSBVoAD0a2p8vNupHCqsKBoaWd8tMv/wGP8wbBk4DgGeQiTIvfhbQZU/Q2/LVDDficjXVn3IuKP/cqgGVf6lUh5YsUSs8qwpMil7XySiHvaZn+iFAnsXoejd4S2e/pbRvyaxP1aa7TCxnINjpU7IrnUEUiI4glQmAte3MqZWLXcc0Uk3Qz9PP0cD+V8qCOryrPMP2kTAI8LT/K4DgcEMAEGes4Vx1l0oBMF0xJvhM2kZXcEcf0NzuQJvYTgZpQF5xp0TchezLshmEUSIkII9NvAvn+iEYJeHsJUDijjmBloSYe4+QTgdYh6FakVUwYI5U4ztDNrvgqhWjExfbn8HxaFzsNTsuzGoYs+jwXH8Wk2z1Q1oQjDdO5YTjmdqvkSTdin/5CiuCDHaQX6a4gNQ=="
}
}
}


@@ -111,15 +111,6 @@ gitea:
SSH_PORT: 2222
actions:
ENABLED: true
session:
PROVIDER: db
cache:
ADAPTER: memory
queue:
TYPE: level
indexer:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: false
@@ -163,7 +154,12 @@ postgresql:
redis-cluster:
enabled: false
enabled: true
usePassword: false
cluster:
nodes: 3 # default: 6
replicas: 0 # default: 1
postgresql-ha:
enabled: false


@@ -5,10 +5,9 @@ resources:
- gitea.pvc.yaml
- gitea.ingress.yaml
- gitea.servicemonitor.yaml
- actions.deployment.yaml
- actions.sealedsecret.yaml
# - actions.rbac.yaml
- drone-kube-runner.deployment.yaml
- drone-server.deployment.yaml
- drone-server.sealedsecret.yaml
namespace: gitea
@@ -17,6 +16,6 @@ helmCharts:
- name: gitea
namespace: gitea # needs to be set explicitly for svc to be referenced correctly
releaseName: gitea
version: 10.4.1
version: 10.1.3
valuesFile: gitea.values.yaml
repo: https://dl.gitea.io/charts/


@@ -1,6 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: placeholder
labels:
pod-security.kubernetes.io/enforce: privileged
name: placeholder

Some files were not shown because too many files have changed in this diff.