update backup password

Remy Moll 2023-11-27 21:12:02 +01:00
parent 58b7de330b
commit ebf31c52dd
71 changed files with 18 additions and 9439 deletions

View File

@@ -21,10 +21,14 @@ spec:
command: ["curl"]
args:
- "-H"
- "Title: $(cat /proc/sys/kernel/hostname)"
- "Title: $OPERATION"
- "-d"
- "Restic operation to gdrive finished"
- "Finished successfully"
- "https://ntfy.kluster.moll.re/backup"
env:
- name: OPERATION
value: "PLACEHOLDER"
initContainers:
- name: restic-base-container
image: restic/restic:latest
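
For reference, once an overlay replaces the PLACEHOLDER value, the notification container boils down to a plain ntfy call along these lines (a sketch; it assumes OPERATION expands to the backup overlay's value shown further down):

```
# Sketch of the resulting notification, assuming OPERATION="Restic backup to gdrive"
curl \
  -H "Title: Restic backup to gdrive" \
  -d "Finished successfully" \
  https://ntfy.kluster.moll.re/backup
```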

View File

@@ -22,4 +22,9 @@ spec:
--exclude=s3/
&&
restic
list snapshots
list snapshots
containers:
- name: ntfy-command-send
env:
- name: OPERATION
value: "Restic backup to gdrive"

View File

@@ -22,3 +22,8 @@ spec:
--verbose=2
--keep-daily 7 --keep-weekly 5
--prune
containers:
- name: ntfy-command-send
env:
- name: OPERATION
value: "Restic prune on gdrive"

View File

@@ -1,2 +0,0 @@
export RESTIC_REPOSITORY=rest:http://127.0.0.1:8000/kluster
export RESTIC_PASSWORD="2r,TE0.,U@gni3e%xr)_LC64"
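
With this plaintext env file removed, a hedged equivalent for ad-hoc local restic use would be to read the password from the cluster secret instead. The namespace, secret name and key below are taken from the backup manifests further down and are assumptions here (the kustomize overlays add a -backup/-prune suffix to the secret name), so adjust as needed:

```
# Sketch: source restic credentials from the (sealed) secret instead of a plaintext file
export RESTIC_REPOSITORY=rest:http://127.0.0.1:8000/kluster
export RESTIC_PASSWORD="$(kubectl -n backup get secret restic-credentials \
  -o jsonpath='{.data.restic-password}' | base64 -d)"
```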

View File

@@ -16,7 +16,7 @@
"type": "Opaque"
},
"encryptedData": {
"restic-password": "AgB5b+dgVUtVo5QPPFBYWuqNZd7vMSbTapVh7SIl1ogy+/WWpzDDSgsPvki2Qtxv11tljTQkhkhonil0aYcHgA/4LDEt9yuvB2SNpEkl+C2N5NO1Fn3sUtaDRVBT/eaUhEjXTlRN6XiYfDRgNwBVpH3AUwKHa3dqxCJ6fQazUEhn3Xymxpo/GZkScf5k+fhkBi2/YnzM9Kdl3C9r8Ekw1eg2Pan7KSkFRk1rkGuDJKdhsYBrmu632yU7x8no7rGAIzxYJpDYqJnXp6Y3nUTBMpwNibOszAwTdP+ShgnILSmi0izZrIzvvvwAjNu6hKrhPlcTx3ZA2NrClRYtXx1gqCgVmGRzonqLuzVnWzd2efWDHkb8S0QnYjN0aAeAvR6x77TdIm8b9WMhAqeheikXh9zrrB4GqLoSxpkgmRezJFBVQ01vZrjkpu+KHHfVZqKg+3ChfjdbS4CQoK7IgUSguAifOaHH6Kb81LsraKKCDEr5vynHKL6jsuvgOPSkgPtzxasQQGR7CDNHnvm1ekFTbDpQ2KyHl3Ep5LCqlB79RhuPytprePHxQJa3qxv/EEIL8zxV1qhAvKa55RQIV3pLqDbPrj9EbD7LAYMbmIsFg3nSbZZaZV55gATN4PX86EZCIaC5/WTXSNIEmyedpcyhSzCZvpQOVjWggtCzUDsgCBADmmFAtuqd/POXXNbgPprEOJXyCdLvbL9cp3lG01sw+gqq08RqPlhKn5Q="
"restic-password": "AgApoTzU3zoGZPcDxXwtF8/ZsPlzZH+WvL2E3yImCwfJQm/mMp4cLWqupn6mJLIJhewduM/9EWI/Yfe194Y6oY1wFlZChADa17nZbaWrbSlVAe5pNnkWUs/oB8D11Hnbw6fom5TnVjDxejG7GuitIVyVbpjETDrS7tCE2zhR1Xt3CjlpcM6BOcfCA/GOySixWGhN8CQlx9ujiQkofFZdB7GomcNbBlwo1SOc61un7casWPfGUEORObIQGqAc7APcUovpY/66iJ0IzZG9y+LNnnKgp2Vr91Oek2/S02S5HU+L5J6UO1KtAcCJGL4N0fhoHgstUqvycV4Aup24J3qnruzz1D83doghuHJEWx52wlMGiTNyK4ZCqaA19d6dFgpVNKiR/g0Qd/hx65/K4+U0nqm0GXCMmjUi3E0rx5u3KI2hUTwiffAPcfRZR7YBgFLAB1Gm+vanh23Qp2kdEojM63I/E35XD5ibr+HiEP6zFLgZw8KI4uJMxvCRiDFlYyQ3dXqTsi8sg7AzMwTtLsTAAmTCZdoD54SYYeLJ6nVW6FPznGF6CmMG/2ZpApZS8hsBsPclf3yipxON30dFmrDXDax9OSBPseucpZsJN2Vjikpuv5Lst2b+vL7/Bq/cfho58zFRHyc42G4xe5N+Evzv6jb/MaFWKmV5mBkMYdLZ0ANgFjrTXhI0T9zZTcgj4ohmHg7Z8v+1DgOgvAI6jo/kQSp4zJi69Q=="
}
}
}
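
The updated restic-password value above is a SealedSecrets ciphertext. A minimal sketch of how such a value is typically regenerated with kubeseal follows; the backup namespace and the restic-credentials secret name are assumptions taken from the CronJob manifests below, and kubeseal is assumed to reach the sealed-secrets controller (or be given its cert):

```
# Sketch: produce a new encryptedData value for the restic-password key
echo -n "$NEW_RESTIC_PASSWORD" | kubeseal --raw \
  --namespace backup \
  --name restic-credentials \
  --from-file=/dev/stdin
```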

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aio
namespace: aio
labels:
app: aio
spec:
replicas: 1
selector:
matchLabels:
app: aio
template:
metadata:
labels:
app: aio
spec:
containers:
- name: aio
image: mollre/aio:latest
tty: true
volumeMounts:
- mountPath: /keys/
name: aio-nfs
resources:
requests:
memory: "250Mi"
cpu: 0.5
volumes:
- name: aio-nfs
persistentVolumeClaim:
claimName: aio-nfs

View File

@@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: aio
name: "aio-nfs"
labels:
directory: "aio"
spec:
storageClassName: fast
capacity:
storage: "100Mi"
accessModes:
- ReadWriteOnce
nfs:
path: /aio
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: aio
name: "aio-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Mi"
selector:
matchLabels:
directory: "aio"

View File

@@ -1,114 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: anki
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: anki
namespace: anki
labels:
app: anki
spec:
replicas: 1
selector:
matchLabels:
app: anki
template:
metadata:
labels:
app: anki
spec:
containers:
- name: anki-server
image: ankicommunity/anki-sync-server:20220516
tty: true
volumeMounts:
- mountPath: /app/data
name: anki-data-nfs
resources:
requests:
memory: "250Mi"
cpu: 0.5
nodeSelector:
kubernetes.io/arch: amd64
volumes:
- name: anki-data-nfs
persistentVolumeClaim:
claimName: anki-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: anki-http
namespace: anki
spec:
selector:
app: anki
ports:
- protocol: TCP
port: 27701
targetPort: 27701
type: ClusterIP
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: anki
name: "anki-data-nfs"
labels:
directory: "anki"
spec:
storageClassName: fast
capacity:
storage: "100Mi"
accessModes:
- ReadWriteOnce
nfs:
path: /anki
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: anki
name: "anki-data-nfs"
spec:
storageClassName: "fast"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Mi"
selector:
matchLabels:
directory: "anki"
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: anki-ingress
namespace: anki
spec:
entryPoints:
- websecure
routes:
- match: Host(`anki.kluster.moll.re`)
kind: Rule
services:
- name: anki-http
port: 27701
tls:
certResolver: default-tls

View File

@@ -1,92 +0,0 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: anonaddy/anonaddy
# -- image tag
tag: 0.11.2
# -- image pull policy
pullPolicy: IfNotPresent
strategy:
type: Recreate
# -- environment variables. See more environment variables in the [anonaddy documentation](https://github.com/anonaddy/docker#environment-variables).
# @default -- See below
env:
TZ: "Europe/Berlin"
# -- Application key for encrypter service
# You can generate one through `anonaddy key:generate --show` or `echo "base64:$(openssl rand -base64 32)"`
APP_KEY:
# -- Root domain to receive email from
ANONADDY_DOMAIN: anonaddy.kluster.moll.re
# -- Long random string used when hashing data for the anonymous replies
ANONADDY_SECRET:
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 8000
smtp:
enabled: true
port: 25
type: LoadBalancer
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
hosts:
- host: anonaddy.kluster.moll.re
paths:
- path: /
pathType: Prefix
service:
port: 8000
tls:
- hosts:
- anonaddy.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
config:
enabled: false
emptydir:
enabled: false
# https://github.com/bitnami/charts/tree/master/bitnami/mariadb/#installing-the-chart
mariadb:
enabled: true
image:
name: arm64v8/mariadb:latest
pullSecrets: []
# primary:
# persistence:
# enabled: true
# auth:
# username: "username"
# password: "password"
# database: database
# -- Enable and configure redis subchart under this key.
# For more options see [redis chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/redis)
# @default -- See values.yaml
redis:
enabled: false
# auth:
# enabled: false

View File

@@ -1,119 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: archive
labels:
app: archive
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: archive
name: archive-data-nfs
labels:
directory: archive
spec:
storageClassName: fast
capacity:
storage: "100Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /helbing_archive
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: archive
name: archive-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Gi"
selector:
matchLabels:
directory: archive
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: archive
namespace: archive
labels:
app: archive
spec:
replicas: 1
selector:
matchLabels:
app: archive
template:
metadata:
labels:
app: archive
spec:
containers:
- name: archive
image: archivebox/archivebox
tty: true
ports:
- containerPort: 8000
volumeMounts:
- mountPath: /data
name: archive-data
volumes:
- name: archive-data
persistentVolumeClaim:
claimName: archive-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: archive
namespace: archive
spec:
type: ClusterIP
ports:
- name: http
port: 8000
selector:
app: archive
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: archive
name: archive-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- archive.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: archive.kluster.moll.re
http:
paths:
- backend:
service:
name: archive
port:
number: 8000
path: /
pathType: Prefix

View File

@@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: authelia
name: authelia-config-nfs
labels:
directory: authelia
spec:
storageClassName: fast
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /authelia
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: authelia
name: authelia-config-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: authelia

File diff suppressed because it is too large

View File

@@ -1,34 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: authentik-ingress
namespace: authentik
spec:
entryPoints:
- websecure
routes:
- match: Host(`authentik.kluster.moll.re`)
kind: Rule
middlewares:
- name: authentik-websocket
services:
- name: authentik
port: 80
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: authentik-websocket
namespace: authentik
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
Upgrade: "websocket"

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: authentik
name: authentik-postgres-nfs
labels:
directory: authentik
spec:
storageClassName: slow
capacity:
storage: "5Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /export/kluster/authentik
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: authentik
name: authentik-postgres-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
selector:
matchLabels:
directory: authentik

View File

@@ -1,172 +0,0 @@
# -- Server replicas
replicas: 1
# -- Custom priority class for different treatment by the scheduler
priorityClassName:
# -- server securityContext
securityContext: {}
worker:
# -- worker replicas
replicas: 1
# -- Custom priority class for different treatment by the scheduler
priorityClassName:
# -- worker securityContext
securityContext: {}
image:
repository: ghcr.io/goauthentik/server
tag: 2023.4.1
pullPolicy: IfNotPresent
pullSecrets: []
# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
initContainers: {}
# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
additionalContainers: {}
authentik:
# -- Log level for server and worker
log_level: info
# -- Secret key used for cookie signing and unique user IDs,
# don't change this after the first install
secret_key: "K9F5uNx1gzsk3q5tnjwFabBYgjBJcAv0qM135QRgzL81hRg4"
# -- Path for the geoip database. If the file doesn't exist, GeoIP features are disabled.
geoip: /geoip/GeoLite2-City.mmdb
# -- Mode for the avatars. Defaults to gravatar. Possible options 'gravatar' and 'none'
avatars: gravatar
outposts:
# -- Template used for managed outposts. The following placeholders can be used
# %(type)s - the type of the outpost
# %(version)s - version of your authentik install
# %(build_hash)s - only for beta versions, the build hash of the image
container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s
error_reporting:
# -- This sends anonymous usage-data, stack traces on errors and
# performance data to sentry.beryju.org, and is fully opt-in
enabled: false
# -- This is a string that is sent to sentry with your error reports
environment: "k8s"
# -- Send PII (Personally identifiable information) data to sentry
send_pii: false
postgresql:
# -- set the postgresql hostname to talk to
# if unset and .Values.postgresql.enabled == true, will generate the default
# @default -- `{{ .Release.Name }}-postgresql`
host: 'postgres-postgresql.postgres'
# -- postgresql Database name
# @default -- `authentik`
name: "authentik"
# -- postgresql Username
# @default -- `authentik`
user: "authentik"
password: "authentik"
port: 5432
redis:
# -- set the redis hostname to talk to
# @default -- `{{ .Release.Name }}-redis-master`
host: '{{ .Release.Name }}-redis-master'
password: ""
# -- see configuration options at https://goauthentik.io/docs/installation/configuration/
env: {}
# AUTHENTIK_VAR_NAME: VALUE
envFrom: []
# - configMapRef:
# name: special-config
envValueFrom: {}
# AUTHENTIK_VAR_NAME:
# secretKeyRef:
# key: password
# name: my-secret
service:
# -- Service that is created to access authentik
enabled: true
type: ClusterIP
port: 80
name: http
protocol: TCP
labels: {}
annotations: {}
volumes: []
volumeMounts: []
# -- affinity applied to the deployments
affinity: {}
# -- nodeSelector applied to the deployments
resources:
server: {}
worker: {}
# WARNING! When initially deploying, authentik has to do a few DB migrations. This may cause it to die from probe
# failure, but will continue on reboot. You can disable this during deployment if this is not desired
livenessProbe:
# -- enables or disables the livenessProbe
enabled: true
httpGet:
# -- liveness probe url path
path: /-/health/live/
port: http
initialDelaySeconds: 50
periodSeconds: 10
readinessProbe:
enabled: true
httpGet:
path: /-/health/ready/
port: http
initialDelaySeconds: 50
periodSeconds: 10
serviceAccount:
# -- Service account is needed for managed outposts
create: true
prometheus:
serviceMonitor:
create: false
interval: 30s
scrapeTimeout: 3s
rules:
create: false
geoip:
# -- optional GeoIP, deploys a cronjob to download the maxmind database
enabled: false
# -- sign up under https://www.maxmind.com/en/geolite2/signup
accountId: ""
# -- sign up under https://www.maxmind.com/en/geolite2/signup
licenseKey: ""
editionIds: "GeoLite2-City"
image: maxmindinc/geoipupdate:v4.8
# -- number of hours between update runs
updateInterval: 8
postgresql:
# -- enable the bundled bitnami postgresql chart
enabled: false
postgresqlUsername: "authentik"
postgresqlPassword: "authentik"
postgresqlDatabase: "authentik"
# persistence:
# enabled: true
# existingClaim: authentik-postgres-nfs
redis:
# -- enable the bundled bitnami redis chart
enabled: true
architecture: standalone
auth:
enabled: false

View File

@@ -1,34 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: backup
name: backup-nfs-access
labels:
directory: backup
spec:
storageClassName: fast
volumeMode: Filesystem
accessModes:
- ReadOnlyMany
capacity:
storage: "5M"
nfs:
path: /export/kluster
server: 192.168.1.157
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: backup
name: backup-nfs-access
spec:
resources:
requests:
storage: "5M"
storageClassName: fast
accessModes:
- ReadOnlyMany
selector:
matchLabels:
directory: backup

View File

@@ -1,64 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: restic-backblaze
spec:
schedule: "0 2 * * *"
# at 2:00 every day
successfulJobsHistoryLimit: 2
failedJobsHistoryLimit: 2
jobTemplate:
spec:
template:
spec:
# nodeSelector:
# kubernetes.io/arch: arm64
# TODO no arm64 nodes anymore
restartPolicy: Never
hostname: restic-k3s-pod
# used by restic to identify the host
containers:
- name: restic-base-container
image: restic/restic:latest
command:
- /bin/sh
- -c
# >- strips newlines
# RESTIC_ARGS Can be for instance: --verbose --dry-run
args: []
volumeMounts:
- mountPath: /data
name: backup-nfs-access
- mountPath: /credentials
name: restic-credentials
env:
- name: RESTIC_REPOSITORY
valueFrom:
secretKeyRef:
name: restic-credentials
key: RESTIC_REPOSITORY
- name: B2_ACCOUNT_ID
valueFrom:
secretKeyRef:
name: restic-credentials
key: B2_ACCOUNT_ID
- name: B2_ACCOUNT_KEY
valueFrom:
secretKeyRef:
name: restic-credentials
key: B2_ACCOUNT_KEY
- name: RESTIC_PASSWORD_FILE
value: /credentials/restic-password
volumes:
- name: backup-nfs-access
persistentVolumeClaim:
claimName: backup-nfs-access
- name: restic-credentials
secret:
secretName: restic-credentials
optional: false
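
The CronJob above expects a restic-credentials secret carrying the repository URL, the B2 credentials and the password file. A minimal sketch of the plain Secret before sealing follows; all values are placeholders, the backup namespace comes from the overlays, and the unsuffixed name omits the -backup/-prune nameSuffix that kustomize adds:

```
# Sketch: plain Secret matching the keys referenced above, rendered for sealing
kubectl -n backup create secret generic restic-credentials \
  --from-literal=RESTIC_REPOSITORY='<repository-url>' \
  --from-literal=B2_ACCOUNT_ID='<account-id>' \
  --from-literal=B2_ACCOUNT_KEY='<account-key>' \
  --from-file=restic-password=./restic-password.txt \
  --dry-run=client -o yaml
```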

View File

@@ -1,5 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./cronjob.yaml
- ./restic-credentials.secret.yaml

View File

@@ -1,8 +0,0 @@
```
k kustomize backup/overlays/backup | k apply -f -
> secret/restic-credentials-backup created
> cronjob.batch/restic-backblaze-backup created
k kustomize backup/overlays/prune | k apply -f -
> secret/restic-credentials-prune created
> cronjob.batch/restic-backblaze-prune created
```
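
As a hedged follow-up to the apply commands above, one way to verify the result is to trigger the backup CronJob once by hand (the job name is illustrative; the CronJob name is taken from the output above):

```
# Sketch: run the backup CronJob once on demand and watch the pod logs
kubectl -n backup create job manual-backup-test --from=cronjob/restic-backblaze-backup
kubectl -n backup logs -f -l job-name=manual-backup-test
```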

View File

@@ -1,16 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: backup
nameSuffix: -backup
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
target:
kind: CronJob

View File

@@ -1,26 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-patch
spec:
jobTemplate:
spec:
template:
spec:
containers:
- name: restic-base-container
args:
# >- strips newlines
# RESTIC_ARGS Can be for instance: --verbose --dry-run
# RESTIC_REPOSITORY is set in the secret
- >-
restic backup
-r $(RESTIC_REPOSITORY)
--verbose=2
/data
--exclude=s3/
# &&
# restic
# -r $(RESTIC_REPOSITORY)
# list snapshots
# Add command to copy existing backups to here!
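
Put differently, the patched backup job reduces to a single restic invocation along these lines (a sketch; the repository URL and password file come from the mounted restic-credentials secret):

```
# Sketch of the effective backup command
restic backup -r "$RESTIC_REPOSITORY" --verbose=2 /data --exclude=s3/
```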

View File

@@ -1,15 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: backup
nameSuffix: -prune
resources:
- ../../base
# - ./restic-commands.yaml
# patch the cronjob args field:
patches:
- path: ./restic-commands.yaml
target:
kind: CronJob

View File

@@ -1,23 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: prune-patch
spec:
schedule: "0 0 1/15 * *"
# at midnight, on day 1 and then every 15 days of the month
jobTemplate:
spec:
template:
spec:
containers:
- name: restic-base-container
args:
# >- strips newlines
# RESTIC_ARGS Can be for instance: --verbose --dry-run
# RESTIC_REPOSITORY is set in the secret
- >-
restic forget
-r $(RESTIC_REPOSITORY)
--verbose=2
--keep-daily 7 --keep-weekly 5
--prune
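
Likewise, the prune job reduces to the following restic call; listing the remaining snapshots afterwards (second line) is an optional check and not part of the patch:

```
# Sketch of the effective prune command, plus an optional snapshot listing
restic forget -r "$RESTIC_REPOSITORY" --verbose=2 --keep-daily 7 --keep-weekly 5 --prune
restic snapshots -r "$RESTIC_REPOSITORY"
```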

View File

@@ -1,54 +0,0 @@
# apiVersion: v1
# kind: Secret
# metadata:
# name: cloudflare-api-token-secret
# namespace: cert-manager
# type: Opaque
# stringData:
# api-token:
# ---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: cloudflare-letsencrypt-staging
spec:
acme:
email: me@moll.re
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: cloudflare-letsencrypt-issuer-account-key
solvers:
- dns01:
cloudflare:
email: mollator2@gmail.com
apiTokenSecretRef:
# Name of the secret created on the other resource
name: cloudflare-api-token-secret
key: api-token
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: cloudflare-letsencrypt-prod
spec:
acme:
email: me@moll.re
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: cloudflare-letsencrypt-issuer-account-key
solvers:
- dns01:
cloudflare:
email: mollator2@gmail.com
apiTokenSecretRef:
# Name of the secret created on the other resource
name: cloudflare-api-token-secret
key: api-token
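
Both ClusterIssuers reference the cloudflare-api-token-secret shown commented out at the top of this file. A hedged sketch of creating it imperatively (the token value is a placeholder):

```
# Sketch: create the Cloudflare API token secret referenced by the issuers
kubectl -n cert-manager create secret generic cloudflare-api-token-secret \
  --from-literal=api-token='<cloudflare-api-token>'
```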

View File

@@ -1,494 +0,0 @@
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
# Optional priority class to be used for the cert-manager pods
priorityClassName: ""
rbac:
create: true
podSecurityPolicy:
enabled: false
useAppArmor: true
# Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
logLevel: 2
leaderElection:
# Override the namespace used to store the ConfigMap for leader election
namespace: "kube-system"
# The duration that non-leader candidates will wait after observing a
# leadership renewal until attempting to acquire leadership of a led but
# unrenewed leader slot. This is effectively the maximum duration that a
# leader can be stopped before it is replaced by another candidate.
# leaseDuration: 60s
# The interval between attempts by the acting master to renew a leadership
# slot before it stops leading. This must be less than or equal to the
# lease duration.
# renewDeadline: 40s
# The duration the clients should wait between attempting acquisition and
# renewal of a leadership.
# retryPeriod: 15s
installCRDs: false
replicaCount: 1
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Comma separated list of feature gates that should be enabled on the
# controller pod.
featureGates: ""
image:
repository: quay.io/jetstack/cert-manager-controller
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-controller
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the controller's ServiceAccount
# annotations: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# Optional additional arguments
extraArgs: []
# Use this flag to set a namespace that cert-manager will use to store
# supporting resources required for each ClusterIssuer (default is kube-system)
# - --cluster-resource-namespace=kube-system
# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
# - --enable-certificate-owner-ref=true
# Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver
# - --controllers=*,-certificaterequests-approver
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# legacy securityContext parameter format: if enabled is set to true, only fsGroup and runAsUser are supported
# securityContext:
# enabled: false
# fsGroup: 1001
# runAsUser: 1001
# to support additional securityContext parameters, omit the `enabled` parameter and simply specify the parameters
# you want to set, e.g.
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
# runAsNonRoot: true
# Container Security Context to be set on the controller component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
volumes: []
volumeMounts: []
# Optional additional annotations to add to the controller Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the controller Pods
# podAnnotations: {}
podLabels: {}
# Optional additional labels to add to the controller Service
# serviceLabels: {}
# Optional additional annotations to add to the controller service
# serviceAnnotations: {}
# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
# nameservers:
# - "1.1.1.1"
# - "8.8.8.8"
nodeSelector: {}
ingressShim: {}
# defaultIssuerName: ""
# defaultIssuerKind: ""
# defaultIssuerGroup: ""
prometheus:
enabled: true
servicemonitor:
enabled: false
prometheusInstance: default
targetPort: 9402
path: /metrics
interval: 60s
scrapeTimeout: 30s
labels: {}
# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "https://proxy:8080"
# no_proxy: 127.0.0.1,localhost
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
webhook:
replicaCount: 1
timeoutSeconds: 10
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Pod Security Context to be set on the webhook component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# Container Security Context to be set on the webhook component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# Optional additional annotations to add to the webhook Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the webhook Pods
# podAnnotations: {}
# Optional additional annotations to add to the webhook MutatingWebhookConfiguration
# mutatingWebhookConfigurationAnnotations: {}
# Optional additional annotations to add to the webhook ValidatingWebhookConfiguration
# validatingWebhookConfigurationAnnotations: {}
# Optional additional annotations to add to the webhook service
# serviceAnnotations: {}
# Optional additional arguments for webhook
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
nodeSelector: {}
affinity: {}
tolerations: []
# Optional additional labels to add to the Webhook Pods
podLabels: {}
# Optional additional labels to add to the Webhook Service
serviceLabels: {}
image:
repository: quay.io/jetstack/cert-manager-webhook
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-webhook
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the controller's ServiceAccount
# annotations: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# The port that the webhook should listen on for requests.
# In GKE private clusters, by default kubernetes apiservers are allowed to
# talk to the cluster nodes only on 443 and 10250. so configuring
# securePort: 10250, will work out of the box without needing to add firewall
# rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000
securePort: 10250
# Specifies if the webhook should be started in hostNetwork mode.
#
# Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
# CNI (such as calico), because control-plane managed by AWS cannot communicate
# with pods' IP CIDR and admission webhooks are not working
#
# Since the default port for the webhook conflicts with kubelet on the host
# network, `webhook.securePort` should be changed to an available port if
# running in hostNetwork mode.
hostNetwork: false
# Specifies how the service should be handled. Useful if you want to expose the
# webhook to outside of the cluster. In some cases, the control plane cannot
# reach internal services.
serviceType: ClusterIP
# loadBalancerIP:
# Overrides the mutating webhook and validating webhook so they reach the webhook
# service using the `url` field instead of a service.
url: {}
# host:
cainjector:
enabled: true
replicaCount: 1
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Pod Security Context to be set on the cainjector component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# Container Security Context to be set on the cainjector component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# Optional additional annotations to add to the cainjector Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the cainjector Pods
# podAnnotations: {}
# Optional additional arguments for cainjector
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector: {}
affinity: {}
tolerations: []
# Optional additional labels to add to the CA Injector Pods
podLabels: {}
image:
repository: quay.io/jetstack/cert-manager-cainjector
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-cainjector
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the controller's ServiceAccount
# annotations: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# This startupapicheck is a Helm post-install hook that waits for the webhook
# endpoints to become available.
# The check is implemented using a Kubernetes Job- if you are injecting mesh
# sidecar proxies into cert-manager pods, you probably want to ensure that they
# are not injected into this Job's pod. Otherwise the installation may time out
# due to the Job never being completed because the sidecar proxy does not exit.
# See https://github.com/jetstack/cert-manager/pull/4414 for context.
startupapicheck:
enabled: true
# Pod Security Context to be set on the startupapicheck component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
# Timeout for 'kubectl check api' command
timeout: 1m
# Job backoffLimit
backoffLimit: 4
# Optional additional annotations to add to the startupapicheck Job
jobAnnotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "1"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
# Optional additional annotations to add to the startupapicheck Pods
# podAnnotations: {}
# Optional additional arguments for startupapicheck
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector: {}
affinity: {}
tolerations: []
# Optional additional labels to add to the startupapicheck Pods
podLabels: {}
image:
repository: quay.io/jetstack/cert-manager-ctl
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-ctl
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
rbac:
# annotations for the startup API Check job RBAC and PSP resources
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the Job's ServiceAccount
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
# Automount API credentials for a Service Account.
automountServiceAccountToken: true

View File

@@ -1,26 +0,0 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: crowdsec
name: crowdsec-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- crowdsec.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: crowdsec.kluster.moll.re
http:
paths:
- backend:
service:
name: crowdsec-service
port:
number: 3000
path: /
pathType: Prefix

View File

@@ -1,30 +0,0 @@
controller:
extraVolumes:
- name: crowdsec-bouncer-plugin
emptyDir: {}
extraInitContainers:
- name: init-clone-crowdsec-bouncer
image: crowdsecurity/lua-bouncer-plugin
imagePullPolicy: IfNotPresent
env:
- name: API_URL
value: "http://crowdsec-service.crowdsec.svc.cluster.local:8080" # crowdsec lapi service-name
- name: API_KEY
value: "6cc4c975f123f4f24174e2d544e81282" # generated with `cscli bouncers add -n <bouncer_name>
- name: BOUNCER_CONFIG
value: "/crowdsec/crowdsec-bouncer.conf"
- name: BAN_TEMPLATE_PATH
value: /etc/nginx/lua/plugins/crowdsec/templates/ban.html
- name: CAPTCHA_TEMPLATE_PATH
value: /etc/nginx/lua/plugins/crowdsec/templates/captcha.html
command: ['sh', '-c', "sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/"]
volumeMounts:
- name: crowdsec-bouncer-plugin
mountPath: /lua_plugins
extraVolumeMounts:
- name: crowdsec-bouncer-plugin
mountPath: /etc/nginx/lua/plugins/crowdsec
subPath: crowdsec
config:
plugins: "crowdsec"
lua-shared-dicts: "crowdsec_cache: 50m"

View File

@@ -1,178 +0,0 @@
# Default values for crowdsec-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- for raw logs format: json or cri (docker|containerd)
container_runtime: containerd
image:
# -- docker image repository name
repository: crowdsecurity/crowdsec
# -- pullPolicy
pullPolicy: IfNotPresent
# -- docker image tag
tag: ""
# If you want to specify secrets that will be used for all your crowdsec-agents
# secrets can be provided be env variables
secrets:
# -- agent username (default is generated randomly)
username: ""
# -- agent password (default is generated randomly)
password: ""
# lapi will deploy pod with crowdsec lapi and dashboard as deployment
lapi:
# -- environment variables from crowdsecurity/crowdsec docker image
env: []
# by default, disable the agent because this pod only runs the local API.
#- name: DISABLE_AGENT
# value: "true"
dashboard:
# -- Enable Metabase Dashboard (by default disabled)
enabled: true
image:
# -- docker image repository name
repository: loancrate/metabase
# -- pullPolicy
pullPolicy: IfNotPresent
# -- docker image tag
tag: "latest"
# -- Metabase SQLite static DB containing Dashboards
assetURL: https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase_sqlite.zip
# -- Enable ingress object
ingress:
enabled: false
annotations:
# metabase only supports http so we need this annotation
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
# labels: {}
ingressClassName: "nginx"
host: "" # metabase.example.com
# tls: {}
resources:
limits:
memory: 100Mi
requests:
cpu: 150m
memory: 100Mi
# -- Enable persistent volumes
persistentVolume:
# -- Persistent volume for data folder. Stores e.g. registered bouncer api keys
data:
enabled: true
accessModes:
- ReadWriteOnce
storageClassName: ""
size: 1Gi
# -- Persistent volume for config folder. Stores e.g. online api credentials
config:
enabled: true
accessModes:
- ReadWriteOnce
storageClassName: ""
size: 100Mi
# -- nodeSelector for lapi
nodeSelector: {}
# -- tolerations for lapi
tolerations: {}
# -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus)
metrics:
enabled: false
# -- Creates a ServiceMonitor so Prometheus will monitor this service
# -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors
# -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
# -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774
serviceMonitor:
enabled: false
strategy:
type: RollingUpdate
# agent will deploy pod on every node as daemonSet to read wanted pods logs
agent:
acquisition:
# -- Specify each pod you want to process it logs (namespace, podName and program)
- namespace: kube-system
# -- to select pod logs to process
podName: nginx-nginx-ingress-*
# -- program name related to specific parser you will use (see https://hub.crowdsec.net/author/crowdsecurity/configurations/docker-logs)
program: nginx
resources:
limits:
memory: 100Mi
requests:
cpu: 150m
memory: 100Mi
# -- Enable persistent volumes
persistentVolume:
# -- Persistent volume for config folder. Stores local config (parsers, scenarios etc.)
config:
enabled: true
accessModes:
- ReadWriteOnce
storageClassName: ""
size: 100Mi
# -- environment variables from crowdsecurity/crowdsec docker image
env: []
# by default we use the docker-logs parser to be able to parse docker logs in k8s
# by default we disable local API on the agent pod
# - name: SCENARIOS
# value: "scenario/name otherScenario/name"
# - name: PARSERS
# value: "parser/name otherParser/name"
# - name: POSTOVERFLOWS
# value: "postoverflow/name otherPostoverflow/name"
# - name: CONFIG_FILE
# value: "/etc/crowdsec/config.yaml"
# - name: DSN
# value: "file:///var/log/toto.log"
# - name: TYPE
# value: "Labels.type_for_time-machine_mode"
# - name: TEST_MODE
# value: "false"
# - name: TZ
# value: ""
# - name: DISABLE_AGENT
# value: "false"
# - name: DISABLE_ONLINE_API
# value: "false"
# - name: LEVEL_TRACE
# value: "false"
# - name: LEVEL_DEBUG
# value: "false"
# - name: LEVEL_INFO
# value: "false"
# -- nodeSelector for agent
nodeSelector: {}
# -- tolerations for agent
tolerations: {}
# -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus)
metrics:
enabled: false
# -- Creates a ServiceMonitor so Prometheus will monitor this service
# -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors
# -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
# -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774
serviceMonitor:
enabled: false
# -- wait-for-lapi init container
wait_for_lapi:
image:
# -- docker image repository name
repository: busybox
# -- pullPolicy
pullPolicy: IfNotPresent
# -- docker image tag
tag: "1.28"
#service: {}

View File

@@ -1,34 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: ddns
name: cloudflare-ddns
spec:
selector:
matchLabels:
app: cloudflare-ddns
template:
metadata:
labels:
app: cloudflare-ddns
spec:
containers:
- name: cloudflare-ddns
image: timothyjmiller/cloudflare-ddns:latest
resources:
limits:
memory: "32Mi"
cpu: "50m"
env:
- name: CONFIG_PATH
value: "/etc/cloudflare-ddns/"
volumeMounts:
- mountPath: "/etc/cloudflare-ddns"
name: config-cloudflare-ddns
readOnly: true
volumes:
- name: config-cloudflare-ddns
secret:
secretName: config-cloudflare-ddns

View File

@@ -1,32 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: focalboard-ingress
namespace: focalboard
spec:
entryPoints:
- websecure
routes:
- match: Host(`focalboard.kluster.moll.re`)
middlewares:
- name: focalboard-websocket
kind: Rule
services:
- name: focalboard
port: 8000
tls:
certResolver: default-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: focalboard-websocket
namespace: focalboard
spec:
headers:
customRequestHeaders:
X-Forwarded-Proto: "https"
Upgrade: "websocket"

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: focalboard
name: focalboard-nfs
labels:
directory: focalboard
spec:
storageClassName: fast
capacity:
storage: "5Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /focalboard
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: focalboard
name: focalboard-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
selector:
matchLabels:
directory: focalboard

View File

@@ -1,63 +0,0 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: flyskype2021/focalboard
# -- image pull policy
pullPolicy: IfNotPresent
# -- image tag
tag: latest
enableServiceLinks: false
# -- environment variables.
# @default -- See below
env: {}
# See the Administrator's Guide for config reference: https://www.focalboard.com/guide/admin/
config: |
{
"serverRoot": "https://focalboard.kluster.moll.re",
"port": 8000,
"dbtype": "sqlite3",
"dbconfig": "/data/focalboard.db",
"postgres_dbconfig": "dbname=focalboard sslmode=disable",
"useSSL": false,
"webpath": "./pack",
"filespath": "/data/files",
"telemetry": false,
"session_expire_time": 2592000,
"session_refresh_time": 18000,
"localOnly": false,
"enableLocalMode": true,
"localModeSocketLocation": "/var/tmp/focalboard_local.socket"
}
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 8000
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: false
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
data:
enabled: true
existingClaim: focalboard-nfs

View File

@@ -1,47 +0,0 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: linuxserver/freshrss
# -- image pull policy
pullPolicy: IfNotPresent
# -- image tag
tag: version-1.18.1
# -- environment variables. See more environment variables in the [freshrss documentation](https://github.com/linuxserver/docker-freshrss#parameters).
# @default -- See below
env:
# -- Set the container timezone
TZ: "Europe/Berlin"
# -- Set the container user id
PUID: "1001"
# -- Set the container group id
PGID: "1001"
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 80
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: false
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
config:
enabled: true
useExisting: true
name: freshrss-nfs

View File

@@ -1,24 +0,0 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: freshrss
name: freshrss-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- rss.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: rss.kluster.moll.re
http:
paths:
- backend:
service:
name: freshrss
port:
number: 80
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: freshrss
name: freshrss-nfs
labels:
directory: freshrss
spec:
storageClassName: slow
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /freshrss
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: freshrss
name: freshrss-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: freshrss

View File

@@ -1,54 +0,0 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: gcloud-backup
namespace: backup
spec:
schedule: "0 2 15 * *"
# at 2:00 on the 15th of every month
successfulJobsHistoryLimit: 2
failedJobsHistoryLimit: 2
jobTemplate:
spec:
template:
spec:
restartPolicy: Never
containers:
- name: gcloud-backup
image: shirakiya/gcloud-sdk:latest
command: ["/bin/bash", "-c", "--"]
args:
- |
ln -s /config/.boto /root/.boto &&
gsutil -m rsync -x "^(jellyfin|config|webtop|other root folder)/.*$" -U -r -e -d /data gs://kluster-backup
# command:
# -m multithreaded
# -U skip unsupported objects
# -e don't follow symlinks
# -r recursively follow folder structure
# -d deletes files from dst if they are not in src anymore
# -n dry runs
# This command assumes the GCS bucket has object versioning enabled; rsync therefore serves as an incremental backup whose individual stages can be recovered
volumeMounts:
- mountPath: /data
name: backup-nfs-access
- mountPath: /config
name: gcloud-credentials
# entry .boto in the secret is mounted as /root/.boto
volumes:
- name: backup-nfs-access
persistentVolumeClaim:
claimName: backup-nfs-access
- name: gcloud-credentials
secret:
secretName: gcloud-credentials
optional: false
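
Given the flag glossary in the comments above, a hedged dry run of the same sync (adding -n so nothing is copied or deleted) would be:

```
# Sketch: dry-run of the backup sync described above
gsutil -m rsync -n -x "^(jellyfin|config|webtop|other root folder)/.*$" -U -r -e -d /data gs://kluster-backup
```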

View File

@@ -1,749 +0,0 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md
##
## Overrides for generated resource names
# See templates/_helpers.tpl
# nameOverride:
# fullnameOverride:
## Labels to apply to all resources
##
commonLabels: {}
# scmhash: abc123
# myLabel: aakkmd
controller:
name: controller
image:
## Keep false as default for now!
chroot: false
registry: registry.k8s.io
image: ingress-nginx/controller
## for backwards compatibility consider setting the full image url via the repository value below
## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
## repository:
tag: "v1.3.0"
digest: sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5
digestChroot: sha256:0fcb91216a22aae43b374fc2e6a03b8afe9e8c78cbf07a09d75636dc4ea3c191
pullPolicy: IfNotPresent
# www-data -> uid 101
runAsUser: 101
allowPrivilegeEscalation: true
# -- Use an existing PSP instead of creating one
existingPsp: ""
# -- Configures the controller container name
containerName: controller
# -- Configures the ports that the nginx-controller listens on
containerPort:
http: 80
https: 443
# -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
config: {}
# -- Annotations to be added to the controller config configuration configmap.
configAnnotations: {}
# -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers
proxySetHeaders: {}
# -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
addHeaders: {}
# -- Optionally customize the pod dnsConfig.
dnsConfig: {}
# -- Optionally customize the pod hostname.
hostname: {}
# -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
# By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
# to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
dnsPolicy: ClusterFirst
# -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
# Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply
reportNodeInternalIp: false
# -- Process Ingress objects without ingressClass annotation/ingressClassName field
# Overrides value for --watch-ingress-without-class flag of the controller binary
# Defaults to false
watchIngressWithoutClass: false
# -- Process IngressClass per name (additionally as per spec.controller).
ingressClassByName: false
# -- This configuration defines if Ingress Controller should allow users to set
# their own *-snippet annotations, otherwise this is forbidden / dropped
# when users add those annotations.
# Global snippets in ConfigMap are still respected
allowSnippetAnnotations: true
# -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
# since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
# is merged
hostNetwork: false
## Use host ports 80 and 443
## Disabled by default
hostPort:
# -- Enable 'hostPort' or not
enabled: false
ports:
# -- 'hostPort' http port
http: 80
# -- 'hostPort' https port
https: 443
# -- Election ID to use for status update
electionID: ingress-controller-leader
## This section refers to the creation of the IngressClass resource
## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
ingressClassResource:
# -- Name of the ingressClass
name: nginx-new
# -- Is this ingressClass enabled or not
enabled: true
# -- Is this the default ingressClass for the cluster
default: false
# -- Controller-value of the controller that is processing this ingressClass
controllerValue: "k8s.io/ingress-nginx"
# -- Parameters is a link to a custom resource containing additional
# configuration for the controller. This is optional if the controller
# does not require extra parameters.
parameters: {}
# -- For backwards compatibility with ingress.class annotation, use ingressClass.
# Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation
ingressClass: nginx
# -- Labels to add to the pod container metadata
podLabels: {}
# key: value
# -- Security Context policies for controller pods
podSecurityContext: {}
# -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls
sysctls: {}
# sysctls:
# "net.core.somaxconn": "8192"
# -- Allows customization of the source of the IP address or FQDN to report
# in the ingress status field. By default, it reads the information provided
# by the service. If disabled, the status field reports the IP address of the
# node or nodes where an ingress controller pod is running.
publishService:
# -- Enable 'publishService' or not
enabled: true
# -- Allows overriding of the publish service to bind to
# Must be <namespace>/<service_name>
pathOverride: ""
# Limit the scope of the controller to a specific namespace
scope:
# -- Enable 'scope' or not
enabled: false
# -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE)
namespace: ""
# -- When scope.enabled == false, instead of watching all namespaces, we watch only namespaces whose labels
# match namespaceSelector. Format like foo=bar. Defaults to empty, meaning all namespaces are watched.
namespaceSelector: ""
# -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)
configMapNamespace: ""
tcp:
# -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)
configMapNamespace: ""
# -- Annotations to be added to the tcp config configmap
annotations: {}
udp:
# -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)
configMapNamespace: ""
# -- Annotations to be added to the udp config configmap
annotations: {}
# -- Maxmind license key to download GeoLite2 Databases.
## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
maxmindLicenseKey: ""
# -- Additional command line arguments to pass to nginx-ingress-controller
# E.g. to specify the default SSL certificate you can use
extraArgs: {}
## extraArgs:
## default-ssl-certificate: "<namespace>/<secret_name>"
# -- Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
# -- Use a `DaemonSet` or `Deployment`
kind: Deployment
# -- Annotations to be added to the controller Deployment or DaemonSet
##
annotations: {}
# keel.sh/pollSchedule: "@every 60m"
# -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels
##
labels: {}
# keel.sh/policy: patch
# keel.sh/trigger: poll
# -- The update strategy to apply to the Deployment or DaemonSet
##
updateStrategy: {}
# rollingUpdate:
# maxUnavailable: 1
# type: RollingUpdate
# -- `minReadySeconds` to avoid killing pods before we are ready
##
minReadySeconds: 0
# -- Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
# -- Affinity and anti-affinity rules for server scheduling to nodes
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# # An example of preferred pod anti-affinity, weight is in the range 1-100
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/component
# operator: In
# values:
# - controller
# topologyKey: kubernetes.io/hostname
# # An example of required pod anti-affinity
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/component
# operator: In
# values:
# - controller
# topologyKey: "kubernetes.io/hostname"
# -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
##
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: ingress-nginx-internal
# -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready
## wait up to five minutes for the drain of connections
##
terminationGracePeriodSeconds: 300
# -- Node labels for controller pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector:
kubernetes.io/os: linux
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
## startupProbe:
## httpGet:
## # should match container.healthCheckPath
## path: "/healthz"
## port: 10254
## scheme: HTTP
## initialDelaySeconds: 5
## periodSeconds: 5
## timeoutSeconds: 2
## successThreshold: 1
## failureThreshold: 5
livenessProbe:
httpGet:
# should match container.healthCheckPath
path: "/healthz"
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
# should match container.healthCheckPath
path: "/healthz"
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
# -- Path of the health check endpoint. All requests received on the port defined by
# the healthz-port parameter are forwarded internally to this path.
healthCheckPath: "/healthz"
# -- Address to bind the health check endpoint.
# It is better to set this option to the internal node address
# if the ingress nginx controller is running in the `hostNetwork: true` mode.
healthCheckHost: ""
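# A minimal sketch of the hostNetwork case described above (the node address is a hypothetical example, not part of this setup):
# healthCheckHost: "192.168.1.10"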
# -- Annotations to be added to controller pods
##
podAnnotations: {}
replicaCount: 1
minAvailable: 1
## Define requests resources to avoid probe issues due to CPU utilization in busy nodes
## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
## Ideally, there should be no limits.
## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
resources:
## limits:
## cpu: 100m
## memory: 90Mi
requests:
cpu: 100m
memory: 90Mi
# Mutually exclusive with keda autoscaling
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 11
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
behavior: {}
# scaleDown:
# stabilizationWindowSeconds: 300
# policies:
# - type: Pods
# value: 1
# periodSeconds: 180
# scaleUp:
# stabilizationWindowSeconds: 300
# policies:
# - type: Pods
# value: 2
# periodSeconds: 60
autoscalingTemplate: []
# Custom or additional autoscaling metrics
# ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
# - type: Pods
# pods:
# metric:
# name: nginx_ingress_controller_nginx_process_requests_total
# target:
# type: AverageValue
# averageValue: 10000m
# Mutually exclusive with hpa autoscaling
# -- Enable mimalloc as a drop-in replacement for malloc.
## ref: https://github.com/microsoft/mimalloc
##
enableMimalloc: true
## Override NGINX template
customTemplate:
configMapName: ""
configMapKey: ""
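# Illustrative only: pointing the controller at a custom NGINX template stored in a ConfigMap.
# The ConfigMap name "nginx-template" and key "nginx.tmpl" are assumptions, not resources defined here:
# customTemplate:
#   configMapName: nginx-template
#   configMapKey: nginx.tmpl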
service:
enabled: true
# -- If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces the annotations that were
# used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# It allows choosing the protocol for each backend specified in the Kubernetes service.
# See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244
# Will be ignored for Kubernetes versions older than 1.20
##
appProtocol: true
annotations: {}
labels: {}
# clusterIP: ""
# -- List of IP addresses at which the controller services are available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
loadBalancerSourceRanges: []
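# A hedged example restricting load-balancer access to a single private range (the CIDR below is illustrative, not part of this setup):
# loadBalancerSourceRanges:
#   - 192.168.1.0/24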
enableHttp: true
enableHttps: true
## Set external traffic policy to: "Local" to preserve source IP on providers supporting it.
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
# externalTrafficPolicy: ""
## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
# sessionAffinity: ""
## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn't specified,
## the service controller allocates a port from your cluster's NodePort range.
## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# healthCheckNodePort: 0
# -- Represents the dual-stack-ness requested or required by this Service. Possible values are
# SingleStack, PreferDualStack or RequireDualStack.
# The ipFamilies and clusterIPs fields depend on the value of this field.
## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
ipFamilyPolicy: "SingleStack"
# -- List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically
# based on cluster configuration and the ipFamilyPolicy field.
## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
ipFamilies:
- IPv4
ports:
http: 80
https: 443
targetPorts:
http: http
https: https
type: LoadBalancer
loadBalancerIP: "192.168.1.4"
## type: NodePort
## nodePorts:
## http: 32080
## https: 32443
## tcp:
## 8080: 32808
# shareProcessNamespace enables process namespace sharing within the pod.
# This can be used for example to signal log rotation using `kill -USR1` from a sidecar.
shareProcessNamespace: false
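# A minimal sketch of how this could be used (container name, image and command are assumptions, not part of this setup):
# with shareProcessNamespace set to true, a sidecar added under extraContainers below could signal the NGINX
# master to reopen its log files, e.g.:
# - name: log-signal
#   image: busybox:1.36
#   command: ["sh", "-c", "kill -USR1 $(pidof nginx)"]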
extraContainers: []
# - name: my-sidecar
# image: nginx:latest
# - name: lemonldap-ng-controller
# image: lemonldapng/lemonldap-ng-controller:0.2.0
# args:
# - /lemonldap-ng-controller
# - --alsologtostderr
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
# env:
# - name: POD_NAME
# valueFrom:
# fieldRef:
# fieldPath: metadata.name
# - name: POD_NAMESPACE
# valueFrom:
# fieldRef:
# fieldPath: metadata.namespace
# volumeMounts:
# - name: copy-portal-skins
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins
# -- Additional volumeMounts to the controller main container.
extraVolumeMounts: []
# - name: copy-portal-skins
# mountPath: /var/lib/lemonldap-ng/portal/skins
# -- Additional volumes to the controller pod.
extraVolumes: []
# - name: copy-portal-skins
# emptyDir: {}
# -- Containers, which are run before the app containers are started.
extraInitContainers: []
# - name: init-myservice
# image: busybox
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
extraModules: []
## Modules, which are mounted into the core nginx image
# - name: opentelemetry
# image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427
#
# The image must contain a `/usr/local/bin/init_module.sh` executable, which
# will be executed as an init container, to move its config files within the
# mounted volume.
admissionWebhooks:
annotations: {}
# ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem".
## Additional annotations to the admission webhooks.
## These annotations will be added to the ValidatingWebhookConfiguration and
## the Jobs Spec of the admission webhooks.
enabled: true
# -- Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
# -- Admission Webhook failure policy to use
failurePolicy: Fail
# timeoutSeconds: 10
port: 8443
certificate: "/usr/local/certificates/cert"
key: "/usr/local/certificates/key"
namespaceSelector: {}
objectSelector: {}
# -- Labels to be added to admission webhooks
labels: {}
# -- Use an existing PSP instead of creating one
existingPsp: ""
networkPolicyEnabled: false
service:
annotations: {}
# clusterIP: ""
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 443
type: ClusterIP
createSecretJob:
resources: {}
# limits:
# cpu: 10m
# memory: 20Mi
# requests:
# cpu: 10m
# memory: 20Mi
patchWebhookJob:
resources: {}
patch:
enabled: true
image:
registry: registry.k8s.io
image: ingress-nginx/kube-webhook-certgen
## for backwards compatibility consider setting the full image url via the repository value below
## use *either* the current default registry/image format *or* the repository format; mixing the two when installing the chart via values.yaml will fail
## repository:
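## e.g. (equivalent to the registry/image pair above, shown only as an illustration):
## repository: registry.k8s.io/ingress-nginx/kube-webhook-certgen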
tag: v1.3.0
digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47
pullPolicy: IfNotPresent
# -- Provide a priority class name to the webhook patching job
##
priorityClassName: ""
podAnnotations: {}
nodeSelector:
kubernetes.io/os: linux
tolerations: []
# -- Labels to be added to patch job resources
labels: {}
securityContext:
runAsNonRoot: true
runAsUser: 2000
fsGroup: 2000
metrics:
port: 10254
# if this port is changed, change healthz-port: in extraArgs: accordingly
enabled: false
service:
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "10254"
# clusterIP: ""
# -- List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 10254
type: ClusterIP
# externalTrafficPolicy: ""
# nodePort: ""
serviceMonitor:
enabled: false
additionalLabels: {}
## The label to use to retrieve the job name from.
## jobLabel: "app.kubernetes.io/name"
namespace: ""
namespaceSelector: {}
## Default: scrape .Release.Namespace only
## To scrape all, use the following:
## namespaceSelector:
## any: true
scrapeInterval: 30s
# honorLabels: true
targetLabels: []
relabelings: []
metricRelabelings: []
prometheusRule:
enabled: false
additionalLabels: {}
# namespace: ""
rules: []
# # These are just example rules, please adapt them to your needs
# - alert: NGINXConfigFailed
# expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
# for: 1s
# labels:
# severity: critical
# annotations:
# description: bad ingress config - nginx config test failed
# summary: uninstall the latest ingress changes to allow config reloads to resume
# - alert: NGINXCertificateExpiry
# expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
# for: 1s
# labels:
# severity: critical
# annotations:
# description: ssl certificate(s) will expire in less than a week
# summary: renew expiring certificates to avoid downtime
# - alert: NGINXTooMany500s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 5XXs
# summary: More than 5% of all requests returned 5XX, this requires your attention
# - alert: NGINXTooMany400s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 4XXs
# summary: More than 5% of all requests returned 4XX, this requires your attention
# -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
# With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
# to 300, allowing the draining of connections up to five minutes.
# If the active connections end before that, the pod will terminate gracefully at that time.
# To take full advantage of this feature, the ConfigMap option
# worker-shutdown-timeout now defaults to 240s instead of 10s.
##
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
priorityClassName: ""
# -- Rollback limit
##
revisionHistoryLimit: 10
## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
rbac:
create: true
scope: false
## If true, create & use Pod Security Policy resources
## https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
serviceAccount:
create: true
name: ""
automountServiceAccountToken: true
# -- Annotations for the controller service account
annotations: {}
# -- Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName
# -- TCP service key-value pairs
## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"
# -- UDP service key-value pairs
## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
##
udp: {}
# 53: "kube-system/kube-dns:53"
# -- Prefix for TCP and UDP ports names in ingress controller service
## Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration
portNamePrefix: ""
# -- (string) A base64-encoded Diffie-Hellman parameter.
# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64`
## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param
dhParam:
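# A sketch of supplying the parameter inline (the value below is a placeholder, not real output):
# dhParam: "<base64 output of `openssl dhparam 4096 2> /dev/null | base64`>"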

View File

@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: gitea
name: jenkins-data-nfs
labels:
directory: jenkins
spec:
storageClassName: fast
capacity:
storage: "10Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /jenkins
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: gitea
name: jenkins-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: jenkins

View File

@ -1,669 +0,0 @@
# Default values for jenkins.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
## Overrides for generated resource names
# See templates/_helpers.tpl
# nameOverride:
# fullnameOverride:
# namespaceOverride:
# For FQDN resolving of the controller service. Change this value to match your existing configuration.
# ref: https://github.com/kubernetes/dns/blob/master/docs/specification.md
clusterZone: "cluster.local"
renderHelmLabels: true
controller:
# Used for label app.kubernetes.io/component
componentName: "jenkins-controller"
image: "jenkins/jenkins"
# tag: "2.346.1-jdk11"
tagLabel: jdk11
imagePullPolicy: "Always"
imagePullSecretName:
# Optionally configure lifetime for controller-container
lifecycle:
# postStart:
# exec:
# command:
# - "uname"
# - "-a"
disableRememberMe: false
numExecutors: 0
# configures the executor mode of the Jenkins node. Possible values are: NORMAL or EXCLUSIVE
executorMode: "NORMAL"
# This is ignored if enableRawHtmlMarkupFormatter is true
markupFormatter: plainText
customJenkinsLabels: []
# The default configuration uses this secret to configure an admin user
# If you don't need that user or use a different security realm then you can disable it
adminSecret: true
hostNetworking: false
# When enabling LDAP or another non-Jenkins identity source, the built-in admin account will no longer exist.
# If you disable the non-Jenkins identity store and instead use the Jenkins internal one,
# you should revert controller.adminUser to your preferred admin user:
adminUser: "admin"
# adminPassword: <defaults to random>
admin:
existingSecret: ""
userKey: jenkins-admin-user
passwordKey: jenkins-admin-password
# This value should not be changed unless you use a custom Jenkins image or one derived from it. If you want to use
# Cloudbees Jenkins Distribution docker, you should set jenkinsHome: "/var/cloudbees-jenkins-distribution"
jenkinsHome: "/var/jenkins_home"
# This value should not be changed unless you use a custom Jenkins image or one derived from it. If you want to use
# Cloudbees Jenkins Distribution docker, you should set jenkinsRef: "/usr/share/cloudbees-jenkins-distribution/ref"
jenkinsRef: "/usr/share/jenkins/ref"
# Path to the jenkins war file which is used by jenkins-plugin-cli.
jenkinsWar: "/usr/share/jenkins/jenkins.war"
# Overrides the default arguments passed to the war
# overrideArgs:
# - --httpPort=8080
resources:
requests:
cpu: "50m"
memory: "256Mi"
limits:
cpu: "2000m"
memory: "4096Mi"
# Overrides the init container default values
# initContainerResources:
# requests:
# cpu: "50m"
# memory: "256Mi"
# limits:
# cpu: "2000m"
# memory: "4096Mi"
# Environment variables that get added to the init container (useful for e.g. http_proxy)
# initContainerEnv:
# - name: http_proxy
# value: "http://192.168.64.1:3128"
# containerEnv:
# - name: http_proxy
# value: "http://192.168.64.1:3128"
# Set min/max heap here if needed with:
# javaOpts: "-Xms512m -Xmx512m"
# jenkinsOpts: ""
# If you are using the ingress definitions provided by this chart via the `controller.ingress` block the configured hostname will be the ingress hostname starting with `https://` or `http://` depending on the `tls` configuration.
# The Protocol can be overwritten by specifying `controller.jenkinsUrlProtocol`.
# jenkinsUrlProtocol: "https"
# If you are not using the provided ingress you can specify `controller.jenkinsUrl` to change the url definition.
# jenkinsUrl: ""
# If you set this prefix and use ingress controller then you might want to set the ingress path below
# jenkinsUriPrefix: "/jenkins"
# Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set)
usePodSecurityContext: true
# Note that `runAsUser`, `fsGroup`, and `securityContextCapabilities` are
# being deprecated and replaced by `podSecurityContextOverride`.
# Set runAsUser to 1000 to let Jenkins run as non-root user 'jenkins' which exists in 'jenkins/jenkins' docker image.
# When setting runAsUser to a different value than 0 also set fsGroup to the same value:
runAsUser: 1000
fsGroup: 1000
# If you have PodSecurityPolicies that require dropping of capabilities as suggested by CIS K8s benchmark, put them here
securityContextCapabilities: {}
# drop:
# - NET_RAW
# Completely overwrites the contents of the `securityContext`, ignoring the
# values provided for the deprecated fields: `runAsUser`, `fsGroup`, and
# `securityContextCapabilities`. In the case of mounting an ext4 filesystem,
# it might be desirable to use `supplementalGroups` instead of `fsGroup` in
# the `securityContext` block: https://github.com/kubernetes/kubernetes/issues/67014#issuecomment-589915496
# podSecurityContextOverride:
# runAsUser: 1000
# runAsNonRoot: true
# supplementalGroups: [1000]
# # capabilities: {}
# Container securityContext
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
servicePort: 8080
targetPort: 8080
# For minikube, set this to NodePort, elsewhere use LoadBalancer
# Use ClusterIP if your setup includes ingress controller
serviceType: ClusterIP
# Use Local to preserve the client source IP and avoids a second hop for LoadBalancer and Nodeport type services,
# but risks potentially imbalanced traffic spreading.
serviceExternalTrafficPolicy:
# Jenkins controller service annotations
serviceAnnotations: {}
# Jenkins controller custom labels
statefulSetLabels: {}
# foo: bar
# bar: foo
# Jenkins controller service labels
serviceLabels: {}
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https
# Put labels on Jenkins controller pod
podLabels: {}
# Used to create Ingress record (should be used with ServiceType: ClusterIP)
# nodePort: <to set explicitly, choose port between 30000-32767>
# Enable Kubernetes Startup, Liveness and Readiness Probes
# if Startup Probe is supported, enable it too
# ~ 2 minutes to allow Jenkins to restart when upgrading plugins. Set ReadinessTimeout to be shorter than LivenessTimeout.
healthProbes: true
probes:
startupProbe:
httpGet:
path: '{{ default "" .Values.controller.jenkinsUriPrefix }}/login'
port: http
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 12
livenessProbe:
failureThreshold: 5
httpGet:
path: '{{ default "" .Values.controller.jenkinsUriPrefix }}/login'
port: http
periodSeconds: 10
timeoutSeconds: 5
# If Startup Probe is not supported on your Kubernetes cluster, you might want to use "initialDelaySeconds" instead.
# It delays the initial liveness probe while Jenkins is starting
# initialDelaySeconds: 60
readinessProbe:
failureThreshold: 3
httpGet:
path: '{{ default "" .Values.controller.jenkinsUriPrefix }}/login'
port: http
periodSeconds: 10
timeoutSeconds: 5
# If Startup Probe is not supported on your Kubernetes cluster, you might want to use "initialDelaySeconds" instead.
# It delays the initial readiness probe while Jenkins is starting
# initialDelaySeconds: 60
# PodDisruptionBudget config
podDisruptionBudget:
enabled: false
# For Kubernetes v1.5+, use 'policy/v1beta1'
# For Kubernetes v1.21+, use 'policy/v1'
apiVersion: "policy/v1beta1"
annotations: {}
labels: {}
# maxUnavailable: "0"
agentListenerEnabled: true
agentListenerPort: 50000
agentListenerHostPort:
agentListenerNodePort:
agentListenerExternalTrafficPolicy:
agentListenerLoadBalancerSourceRanges:
- 0.0.0.0/0
disabledAgentProtocols:
- JNLP-connect
- JNLP2-connect
csrf:
defaultCrumbIssuer:
enabled: true
proxyCompatability: true
# Kubernetes service type for the JNLP agent service
# agentListenerServiceType is the Kubernetes Service type for the JNLP agent service,
# either 'LoadBalancer', 'NodePort', or 'ClusterIP'
# Note if you set this to 'LoadBalancer', you *must* define annotations to secure it. By default
# this will be an external load balancer allowing inbound 0.0.0.0/0, a HUGE
# security risk: https://github.com/kubernetes/charts/issues/1341
agentListenerServiceType: "ClusterIP"
# Optionally assign an IP to the LoadBalancer agentListenerService LoadBalancer
# GKE users: only regional static IPs will work for Service Load balancer.
agentListenerLoadBalancerIP:
agentListenerServiceAnnotations: {}
# Example of 'LoadBalancer' type of agent listener with annotations securing it
# agentListenerServiceType: LoadBalancer
# agentListenerServiceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-internal: "True"
# service.beta.kubernetes.io/load-balancer-source-ranges: "172.0.0.0/8, 10.0.0.0/8"
# LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to
# set allowed inbound rules on the security group assigned to the controller load balancer
loadBalancerSourceRanges:
- 0.0.0.0/0
# Optionally assign a known public LB IP
# loadBalancerIP: 1.2.3.4
# Optionally configure a JMX port
# requires additional javaOpts, ie
# javaOpts: >
# -Dcom.sun.management.jmxremote.port=4000
# -Dcom.sun.management.jmxremote.authenticate=false
# -Dcom.sun.management.jmxremote.ssl=false
# jmxPort: 4000
# Optionally configure other ports to expose in the controller container
extraPorts: []
# - name: BuildInfoProxy
# port: 9000
# List of plugins to be installed during Jenkins controller start
installPlugins:
- kubernetes:3600.v144b_cd192ca_a_
- workflow-aggregator:581.v0c46fa_697ffd
- git:4.11.3
- gitea:1.4.3
- configuration-as-code:1429.v09b_044a_c93de
# Set to false to download the minimum required version of all dependencies.
installLatestPlugins: true
# Set to true to download latest dependencies of any plugin that is requested to have the latest version.
installLatestSpecifiedPlugins: false
# List of plugins to install in addition to those listed in controller.installPlugins
additionalPlugins: []
# Enable to initialize the Jenkins controller only once on initial installation.
# Without this, whenever the controller gets restarted (Evicted, etc.) it will fetch plugin updates, which have the potential to cause breakage.
# Note that for this to work, `persistence.enabled` needs to be set to `true`
initializeOnce: false
# Enable to always override the installed plugins with the values of 'controller.installPlugins' on upgrade or redeployment.
# overwritePlugins: true
# Configures if plugins bundled with `controller.image` should be overwritten with the values of 'controller.installPlugins' on upgrade or redeployment.
overwritePluginsFromImage: true
# Enable HTML parsing using OWASP Markup Formatter Plugin (antisamy-markup-formatter), useful with ghprb plugin.
# The plugin is not installed by default, please update controller.installPlugins.
enableRawHtmlMarkupFormatter: false
# Used to approve a list of groovy functions in pipelines that use the script-security plugin. Can be viewed under /scriptApproval
scriptApproval: []
# - "method groovy.json.JsonSlurperClassic parseText java.lang.String"
# - "new groovy.json.JsonSlurperClassic"
# List of groovy init scripts to be executed during Jenkins controller start
initScripts: []
# - |
# print 'adding global pipeline libraries, register properties, bootstrap jobs...'
# 'name' is the name of an existing secret in the same namespace as jenkins,
# 'keyName' is the name of one of the keys inside current secret.
# the 'name' and 'keyName' are concatenated with a '-' in between, so for example:
# an existing secret "secret-credentials" and a key inside it named "github-password" should be used in Jcasc as ${secret-credentials-github-password}
# 'name' and 'keyName' must be lowercase RFC 1123 labels: they may only contain lower case alphanumeric characters or '-',
# and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc')
additionalExistingSecrets: []
# - name: secret-name-1
# keyName: username
# - name: secret-name-1
# keyName: password
additionalSecrets: []
# - name: nameOfSecret
# value: secretText
# Generate SecretClaim resources in order to create Kubernetes secrets from HashiCorp Vault using kube-vault-controller.
# 'name' is the name of the secret that will be created in Kubernetes. The Jenkins fullname is prepended to this value.
# 'path' is the fully qualified path to the secret in Vault
# 'type' is an optional Kubernetes secret type. Defaults to 'Opaque'
# 'renew' is an optional secret renewal time in seconds
secretClaims: []
# - name: secretName # required
# path: testPath # required
# type: kubernetes.io/tls # optional
# renew: 60 # optional
# Name of default cloud configuration.
cloudName: "kubernetes"
# Below is the implementation of Jenkins Configuration as Code. Add a key under configScripts for each configuration area,
# where each corresponds to a plugin or section of the UI. Each key (prior to | character) is just a label, and can be any value.
# Keys are only used to give the section a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label
# characters: lowercase letters, numbers, and hyphens. The keys become the name of a configuration yaml file on the controller in
# /var/jenkins_home/casc_configs (by default) and will be processed by the Configuration as Code Plugin. The lines after each |
# become the content of the configuration yaml file. The first line after this is a JCasC root element, eg jenkins, credentials,
# etc. Best reference is https://<jenkins_url>/configuration-as-code/reference. The example below creates a welcome message:
JCasC:
defaultConfig: true
configScripts: {}
# welcome-message: |
# jenkins:
# systemMessage: Welcome to our CI/CD server. This Jenkins is configured and managed 'as code'.
# Ignored if securityRealm is defined in controller.JCasC.configScripts
securityRealm: |-
local:
allowsSignup: false
enableCaptcha: false
users:
- id: "${chart-admin-username}"
name: "Jenkins Admin"
password: "${chart-admin-password}"
# Ignored if authorizationStrategy is defined in controller.JCasC.configScripts
authorizationStrategy: |-
loggedInUsersCanDoAnything:
allowAnonymousRead: false
# Optionally specify additional init-containers
customInitContainers: []
# - name: custom-init
# image: "alpine:3.7"
# imagePullPolicy: Always
# command: [ "uname", "-a" ]
sidecars:
configAutoReload:
# If enabled: true, Jenkins Configuration as Code will be reloaded on-the-fly without a reboot. If false or not-specified,
# jcasc changes will cause a reboot and will only be applied at the subsequent start-up. Auto-reload uses the
# http://<jenkins_url>/reload-configuration-as-code endpoint to reapply config when changes to the configScripts are detected.
enabled: true
image: kiwigrid/k8s-sidecar:1.15.0
imagePullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
# How many connection-related errors to retry on
reqRetryConnect: 10
# env:
# - name: REQ_TIMEOUT
# value: "30"
# SSH port value can be set to any unused TCP port. The default, 1044, is a non-standard SSH port that has been chosen at random.
# Is only used to reload jcasc config from the sidecar container running in the Jenkins controller pod.
# This TCP port will not be open in the pod (unless you specifically configure this), so Jenkins will not be
# accessible via SSH from outside of the pod. Note if you use non-root pod privileges (runAsUser & fsGroup),
# this must be > 1024:
sshTcpPort: 1044
# folder in the pod that should hold the collected JCasC configs:
folder: "/var/jenkins_home/casc_configs"
# If specified, the sidecar will search for JCasC config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces:
# searchNamespace:
containerSecurityContext:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
# Allows you to inject additional/other sidecars
other: []
## The example below runs the client for https://smee.io as sidecar container next to Jenkins,
## which allows triggering builds behind a secure firewall.
## https://jenkins.io/blog/2019/01/07/webhook-firewalls/#triggering-builds-with-webhooks-behind-a-secure-firewall
##
## Note: To use it you should go to https://smee.io/new and update the url to the generated one.
# - name: smee
# image: docker.io/twalter/smee-client:1.0.2
# args: ["--port", "{{ .Values.controller.servicePort }}", "--path", "/github-webhook/", "--url", "https://smee.io/new"]
# resources:
# limits:
# cpu: 50m
# memory: 128Mi
# requests:
# cpu: 10m
# memory: 32Mi
# Name of the Kubernetes scheduler to use
schedulerName: ""
# Node labels and tolerations for pod assignment
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
nodeSelector: {}
terminationGracePeriodSeconds:
terminationMessagePath:
terminationMessagePolicy:
tolerations: []
affinity: {}
# Leverage a priorityClass to ensure your pods survive resource shortages
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName:
podAnnotations: {}
# Add StatefulSet annotations
statefulSetAnnotations: {}
# StatefulSet updateStrategy
# ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy: {}
ingress:
enabled: true
# Override for the default paths that map requests to the backend
paths: []
# - backend:
# serviceName: >-
# {{ template "jenkins.fullname" . }}
# # Don't use string here, use only integer value!
# servicePort: 8080
# For Kubernetes v1.19+, use 'networking.k8s.io/v1'
apiVersion: "networking.k8s.io/v1"
labels: {}
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
hostName: jenkins.kluster.moll.re
tls:
- secretName: cloudflare-letsencrypt-issuer-account-key
hosts:
- jenkins.kluster.moll.re
# often you want to have your controller all locked down and private
# but you still want to get webhooks from your SCM
# A secondary ingress will let you expose different urls
# with a different configuration
secondaryingress:
enabled: false
# paths you want forwarded to the backend
# ex /github-webhook
paths: []
# For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1'
# For Kubernetes v1.19+, use 'networking.k8s.io/v1'
apiVersion: "extensions/v1beta1"
labels: {}
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# configures the hostname e.g. jenkins-external.example.com
hostName:
tls:
# - secretName: jenkins-external.example.com
# hosts:
# - jenkins-external.example.com
agent:
enabled: true
defaultsProviderTemplate: ""
# URL for connecting to the Jenkins controller
jenkinsUrl:
# connect to the specified host and port, instead of connecting directly to the Jenkins controller
jenkinsTunnel:
kubernetesConnectTimeout: 5
kubernetesReadTimeout: 15
maxRequestsPerHostStr: "32"
namespace:
image: "jenkins/inbound-agent"
tag: "4.11.2-4"
workingDir: "/home/jenkins/agent"
nodeUsageMode: "NORMAL"
customJenkinsLabels: []
# name of the secret to be used for image pulling
imagePullSecretName:
componentName: "jenkins-agent"
websocket: false
privileged: false
runAsUser:
runAsGroup:
resources:
requests:
cpu: "512m"
memory: "512Mi"
limits:
cpu: "512m"
memory: "512Mi"
# You may want to change this to true while testing a new image
alwaysPullImage: false
# Controls how agent pods are retained after the Jenkins build completes
# Possible values: Always, Never, OnFailure
podRetention: "Never"
# Disable if you do not want the YAML of the agent pod template to show up
# in the job Console Output. This can be helpful for either security reasons
# or simply to clean up the output to make it easier to read.
showRawYaml: true
# You can define the volumes that you want to mount for this container
# Allowed types are: ConfigMap, EmptyDir, HostPath, Nfs, PVC, Secret
# Configure the attributes as they appear in the corresponding Java class for that type
# https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes
volumes: []
# - type: ConfigMap
# configMapName: myconfigmap
# mountPath: /var/myapp/myconfigmap
# - type: EmptyDir
# mountPath: /var/myapp/myemptydir
# memory: false
# - type: HostPath
# hostPath: /var/lib/containers
# mountPath: /var/myapp/myhostpath
# - type: Nfs
# mountPath: /var/myapp/mynfs
# readOnly: false
# serverAddress: "192.0.2.0"
# serverPath: /var/lib/containers
# - type: PVC
# claimName: mypvc
# mountPath: /var/myapp/mypvc
# readOnly: false
# - type: Secret
# defaultMode: "600"
# mountPath: /var/myapp/mysecret
# secretName: mysecret
# You can define the workspaceVolume that you want to mount for this container
# Allowed types are: DynamicPVC, EmptyDir, HostPath, Nfs, PVC
# Configure the attributes as they appear in the corresponding Java class for that type
# https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes/workspace
workspaceVolume: {}
## DynamicPVC example
# type: DynamicPVC
# configMapName: myconfigmap
## EmptyDir example
# type: EmptyDir
# memory: false
## HostPath example
# type: HostPath
# hostPath: /var/lib/containers
## NFS example
# type: Nfs
# readOnly: false
# serverAddress: "192.0.2.0"
# serverPath: /var/lib/containers
## PVC example
# type: PVC
# claimName: mypvc
# readOnly: false
#
# Pod-wide environment, these vars are visible to any container in the agent pod
envVars: []
# - name: PATH
# value: /usr/local/bin
nodeSelector: {}
# Key Value selectors. Ex:
# jenkins-agent: v1
# Executed command when side container gets started
command:
args: "${computer.jnlpmac} ${computer.name}"
# Side container name
sideContainerName: "jnlp"
# Doesn't allocate pseudo TTY by default
TTYEnabled: false
# Max number of spawned agent
containerCap: 10
# Pod name
podName: "default"
# Allows the Pod to remain active for reuse until the configured number of
# minutes has passed since the last step was executed on it.
idleMinutes: 0
# Raw yaml template for the Pod. For example this allows usage of toleration for agent pods.
# https://github.com/jenkinsci/kubernetes-plugin#using-yaml-to-define-pod-templates
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
yamlTemplate: ""
# yamlTemplate: |-
# apiVersion: v1
# kind: Pod
# spec:
# tolerations:
# - key: "key"
# operator: "Equal"
# value: "value"
# Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates: merge or override
yamlMergeStrategy: "override"
# Timeout in seconds for an agent to be online
connectTimeout: 100
# Annotations to apply to the pod.
annotations: {}
# Disable the default Jenkins Agent configuration.
# Useful when configuring agents only with the podTemplates value, since the default podTemplate populated by values mentioned above will be excluded in the rendered template.
disableDefaultAgent: false
# Below is the implementation of custom pod templates for the default configured kubernetes cloud.
# Add a key under podTemplates for each pod template. Each key (prior to | character) is just a label, and can be any value.
# Keys are only used to give the pod template a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label
# characters: lowercase letters, numbers, and hyphens. Each pod template can contain multiple containers.
# For this pod templates configuration to be loaded the following values must be set:
# controller.JCasC.defaultConfig: true
# Best reference is https://<jenkins_url>/configuration-as-code/reference#Cloud-kubernetes. The example below creates a python pod template.
podTemplates: {}
# python: |
# - name: python
# label: jenkins-python
# serviceAccount: jenkins
# containers:
# - name: python
# image: python:3
# command: "/bin/sh -c"
# args: "cat"
# ttyEnabled: true
# privileged: true
# resourceRequestCpu: "400m"
# resourceRequestMemory: "512Mi"
# resourceLimitCpu: "1"
# resourceLimitMemory: "1024Mi"
# Here you can add additional agents
# They inherit all values from `agent` so you only need to specify values which differ
additionalAgents: {}
# maven:
# podName: maven
# customJenkinsLabels: maven
# # An example of overriding the jnlp container
# # sideContainerName: jnlp
# image: jenkins/jnlp-agent-maven
# tag: latest
# python:
# podName: python
# customJenkinsLabels: python
# sideContainerName: python
# image: python
# tag: "3"
# command: "/bin/sh -c"
# args: "cat"
# TTYEnabled: true
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
existingClaim: jenkins-data-nfs
## Install Default RBAC roles and bindings
rbac:
create: true
readSecrets: false
serviceAccount:
create: true
# The name of the service account is autogenerated by default
name:
annotations: {}
imagePullSecretName:

View File

@ -1,34 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: mathieu
name: mathieu-nfs
labels:
directory: mathieu
spec:
storageClassName: fast
capacity:
storage: "10Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /mathieu
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: mathieu
name: mathieu-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: mathieu

View File

@ -1,72 +0,0 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: ghost
# -- image tag
# @default -- chart.appVersion
tag:
# -- image pull policy
pullPolicy: IfNotPresent
# See https://ghost.org/docs/config/#running-ghost-with-config-env-variables
env:
url: "https://cinema.kluster.moll.re"
database__client: sqlite3
database__connection__filename: "content/data/ghost-data.db"
database__useNullAsDefault: true
database__debug: false
NODE_ENV: production
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 2368
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
hosts:
- host: cinema.kluster.moll.re
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- cinema.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
content:
enabled: true
existingClaim: mathieu-nfs
mariadb:
enabled: false
architecture: standalone
auth:
database: ghost
username: ghost
password: ghost
rootPassword: ghost-rootpass
primary:
persistence:
enabled: false

View File

@ -1,52 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: minecraft
labels:
app: minecraft
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: forwarding
namespace: minecraft
labels:
app: forwarding
spec:
replicas: 1
selector:
matchLabels:
app: forwarding
template:
metadata:
labels:
app: forwarding
spec:
containers:
- name: forwarding
image: simonrupf/socat
tty: true
ports:
- containerPort: 25565
args: ["TCP4-LISTEN:25565,fork", "TCP6:mc.game.moll.re:25565"]
hostNetwork: true
nodeSelector:
hdd: enabled
# ensures we are running on 192.168.1.122, i.e. pi node 0
---
apiVersion: v1
kind: Service
metadata:
name: forwarding
namespace: minecraft
spec:
type: NodePort
ipFamilyPolicy: PreferDualStack
ports:
- name: mc
port: 25565
selector:
app: forwarding

View File

@ -1,351 +0,0 @@
controller:
## The name of the Ingress Controller daemonset or deployment.
## Autogenerated if not set or set to "".
# name: nginx-ingress
## The kind of the Ingress Controller installation - deployment or daemonset.
kind: deployment
## Deploys the Ingress Controller for NGINX Plus.
nginxplus: false
# Timeout in milliseconds that the Ingress Controller will wait for a successful NGINX reload after a change or at the initial start.
nginxReloadTimeout: 60000
## Support for App Protect
appprotect:
## Enable the App Protect module in the Ingress Controller.
enable: false
## Sets log level for App Protect. Allowed values: fatal, error, warn, info, debug, trace
# logLevel: fatal
## Support for App Protect Dos
appprotectdos:
## Enable the App Protect Dos module in the Ingress Controller.
enable: false
## Enable debugging for App Protect Dos.
debug: false
## Max number of nginx processes to support.
maxWorkers: 0
## Max number of ADMD instances.
maxDaemons: 0
## RAM memory size to consume in MB.
memory: 0
## Enables the Ingress Controller pods to use the host's network namespace.
hostNetwork: false
## Enables debugging for NGINX. Uses the nginx-debug binary. Requires error-log-level: debug in the ConfigMap via `controller.config.entries`.
nginxDebug: false
## The log level of the Ingress Controller.
logLevel: 1
## A list of custom ports to expose on the NGINX ingress controller pod. Follows the conventional Kubernetes yaml syntax for container ports.
customPorts: []
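## A hedged example following the conventional Kubernetes containerPort syntax (name, port and protocol are chosen for illustration only):
# customPorts:
# - name: dns-udp
#   containerPort: 5353
#   protocol: UDP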
image:
## The image repository of the Ingress Controller.
repository: nginx/nginx-ingress
## The tag of the Ingress Controller image.
tag: "2.2.0"
## The pull policy for the Ingress Controller image.
pullPolicy: IfNotPresent
config:
## The name of the ConfigMap used by the Ingress Controller.
## Autogenerated if not set or set to "".
# name: nginx-config
## The annotations of the Ingress Controller configmap.
annotations: {}
## The entries of the ConfigMap for customizing NGINX configuration.
entries: {}
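## A hedged example of ConfigMap entries (the keys shown are common NGINX Ingress Controller ConfigMap options; verify them against your controller version):
# entries:
#   proxy-connect-timeout: "10s"
#   proxy-read-timeout: "10s"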
## It is recommended to use your own TLS certificates and keys
defaultTLS:
## The base64-encoded TLS certificate for the default HTTPS server. If not specified, a pre-generated self-signed certificate is used.
## Note: It is recommended that you specify your own certificate.
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN2akNDQWFZQ0NRREFPRjl0THNhWFhEQU5CZ2txaGtpRzl3MEJBUXNGQURBaE1SOHdIUVlEVlFRRERCWk8KUjBsT1dFbHVaM0psYzNORGIyNTBjbTlzYkdWeU1CNFhEVEU0TURreE1qRTRNRE16TlZvWERUSXpNRGt4TVRFNApNRE16TlZvd0lURWZNQjBHQTFVRUF3d1dUa2RKVGxoSmJtZHlaWE56UTI5dWRISnZiR3hsY2pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUwvN2hIUEtFWGRMdjNyaUM3QlBrMTNpWkt5eTlyQ08KR2xZUXYyK2EzUDF0azIrS3YwVGF5aGRCbDRrcnNUcTZzZm8vWUk1Y2Vhbkw4WGM3U1pyQkVRYm9EN2REbWs1Qgo4eDZLS2xHWU5IWlg0Rm5UZ0VPaStlM2ptTFFxRlBSY1kzVnNPazFFeUZBL0JnWlJVbkNHZUtGeERSN0tQdGhyCmtqSXVuektURXUyaDU4Tlp0S21ScUJHdDEwcTNRYzhZT3ExM2FnbmovUWRjc0ZYYTJnMjB1K1lYZDdoZ3krZksKWk4vVUkxQUQ0YzZyM1lma1ZWUmVHd1lxQVp1WXN2V0RKbW1GNWRwdEMzN011cDBPRUxVTExSakZJOTZXNXIwSAo1TmdPc25NWFJNV1hYVlpiNWRxT3R0SmRtS3FhZ25TZ1JQQVpQN2MwQjFQU2FqYzZjNGZRVXpNQ0F3RUFBVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQWpLb2tRdGRPcEsrTzhibWVPc3lySmdJSXJycVFVY2ZOUitjb0hZVUoKdGhrYnhITFMzR3VBTWI5dm15VExPY2xxeC9aYzJPblEwMEJCLzlTb0swcitFZ1U2UlVrRWtWcitTTFA3NTdUWgozZWI4dmdPdEduMS9ienM3bzNBaS9kclkrcUI5Q2k1S3lPc3FHTG1US2xFaUtOYkcyR1ZyTWxjS0ZYQU80YTY3Cklnc1hzYktNbTQwV1U3cG9mcGltU1ZmaXFSdkV5YmN3N0NYODF6cFErUyt1eHRYK2VBZ3V0NHh3VlI5d2IyVXYKelhuZk9HbWhWNThDd1dIQnNKa0kxNXhaa2VUWXdSN0diaEFMSkZUUkk3dkhvQXprTWIzbjAxQjQyWjNrN3RXNQpJUDFmTlpIOFUvOWxiUHNoT21FRFZkdjF5ZytVRVJxbStGSis2R0oxeFJGcGZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
## The base64-encoded TLS key for the default HTTPS server. Note: If not specified, a pre-generated key is used.
## Note: It is recommended that you specify your own key.
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdi91RWM4b1JkMHUvZXVJTHNFK1RYZUprckxMMnNJNGFWaEMvYjVyYy9XMlRiNHEvClJOcktGMEdYaVN1eE9ycXgrajlnamx4NXFjdnhkenRKbXNFUkJ1Z1B0ME9hVGtIekhvb3FVWmcwZGxmZ1dkT0EKUTZMNTdlT1l0Q29VOUZ4amRXdzZUVVRJVUQ4R0JsRlNjSVo0b1hFTkhzbysyR3VTTWk2Zk1wTVM3YUhudzFtMApxWkdvRWEzWFNyZEJ6eGc2clhkcUNlUDlCMXl3VmRyYURiUzc1aGQzdUdETDU4cGszOVFqVUFQaHpxdmRoK1JWClZGNGJCaW9CbTVpeTlZTW1hWVhsMm0wTGZzeTZuUTRRdFFzdEdNVWozcGJtdlFmazJBNnljeGRFeFpkZFZsdmwKMm82MjBsMllxcHFDZEtCRThCay90elFIVTlKcU56cHpoOUJUTXdJREFRQUJBb0lCQVFDZklHbXowOHhRVmorNwpLZnZJUXQwQ0YzR2MxNld6eDhVNml4MHg4Mm15d1kxUUNlL3BzWE9LZlRxT1h1SENyUlp5TnUvZ2IvUUQ4bUFOCmxOMjRZTWl0TWRJODg5TEZoTkp3QU5OODJDeTczckM5bzVvUDlkazAvYzRIbjAzSkVYNzZ5QjgzQm9rR1FvYksKMjhMNk0rdHUzUmFqNjd6Vmc2d2szaEhrU0pXSzBwV1YrSjdrUkRWYmhDYUZhNk5nMUZNRWxhTlozVDhhUUtyQgpDUDNDeEFTdjYxWTk5TEI4KzNXWVFIK3NYaTVGM01pYVNBZ1BkQUk3WEh1dXFET1lvMU5PL0JoSGt1aVg2QnRtCnorNTZud2pZMy8yUytSRmNBc3JMTnIwMDJZZi9oY0IraVlDNzVWYmcydVd6WTY3TWdOTGQ5VW9RU3BDRkYrVm4KM0cyUnhybnhBb0dCQU40U3M0ZVlPU2huMVpQQjdhTUZsY0k2RHR2S2ErTGZTTXFyY2pOZjJlSEpZNnhubmxKdgpGenpGL2RiVWVTbWxSekR0WkdlcXZXaHFISy9iTjIyeWJhOU1WMDlRQ0JFTk5jNmtWajJTVHpUWkJVbEx4QzYrCk93Z0wyZHhKendWelU0VC84ajdHalRUN05BZVpFS2FvRHFyRG5BYWkyaW5oZU1JVWZHRXFGKzJyQW9HQkFOMVAKK0tZL0lsS3RWRzRKSklQNzBjUis3RmpyeXJpY05iWCtQVzUvOXFHaWxnY2grZ3l4b25BWlBpd2NpeDN3QVpGdwpaZC96ZFB2aTBkWEppc1BSZjRMazg5b2pCUmpiRmRmc2l5UmJYbyt3TFU4NUhRU2NGMnN5aUFPaTVBRHdVU0FkCm45YWFweUNweEFkREtERHdObit3ZFhtaTZ0OHRpSFRkK3RoVDhkaVpBb0dCQUt6Wis1bG9OOTBtYlF4VVh5YUwKMjFSUm9tMGJjcndsTmVCaWNFSmlzaEhYa2xpSVVxZ3hSZklNM2hhUVRUcklKZENFaHFsV01aV0xPb2I2NTNyZgo3aFlMSXM1ZUtka3o0aFRVdnpldm9TMHVXcm9CV2xOVHlGanIrSWhKZnZUc0hpOGdsU3FkbXgySkJhZUFVWUNXCndNdlQ4NmNLclNyNkQrZG8wS05FZzFsL0FvR0FlMkFVdHVFbFNqLzBmRzgrV3hHc1RFV1JqclRNUzRSUjhRWXQKeXdjdFA4aDZxTGxKUTRCWGxQU05rMXZLTmtOUkxIb2pZT2pCQTViYjhibXNVU1BlV09NNENoaFJ4QnlHbmR2eAphYkJDRkFwY0IvbEg4d1R0alVZYlN5T294ZGt5OEp0ek90ajJhS0FiZHd6NlArWDZDODhjZmxYVFo5MWpYL3RMCjF3TmRKS2tDZ1lCbyt0UzB5TzJ2SWFmK2UwSkN5TGhzVDQ5cTN3Zis2QWVqWGx2WDJ1VnRYejN5QTZnbXo5aCsKcDNlK2JMRUxwb3B0WFhNdUFRR0xhUkcrYlNNcjR5dERYbE5ZSndUeThXczNKY3dlSTdqZVp2b0ZpbmNvVlVIMwphdmxoTUVCRGYxSjltSDB5cDBwWUNaS2ROdHNvZEZtQktzVEtQMjJhTmtsVVhCS3gyZzR6cFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
## The secret with a TLS certificate and key for the default HTTPS server.
## The value must follow the following format: `<namespace>/<name>`.
## Used as an alternative to specifying a certificate and key using `controller.defaultTLS.cert` and `controller.defaultTLS.key` parameters.
## Format: <namespace>/<secret_name>
secret:
wildcardTLS:
## The base64-encoded TLS certificate for every Ingress/VirtualServer host that has TLS enabled but no secret specified.
## If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection.
cert: ""
## The base64-encoded TLS key for every Ingress/VirtualServer host that has TLS enabled but no secret specified.
## If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection.
key: ""
## The secret with a TLS certificate and key for every Ingress/VirtualServer host that has TLS enabled but no secret specified.
## The value must follow the following format: `<namespace>/<name>`.
## Used as an alternative to specifying a certificate and key using `controller.wildcardTLS.cert` and `controller.wildcardTLS.key` parameters.
## Format: <namespace>/<secret_name>
secret:
## The node selector for pod assignment for the Ingress Controller pods.
nodeSelector: {}
## The termination grace period of the Ingress Controller pod.
terminationGracePeriodSeconds: 30
## The resources of the Ingress Controller pods.
resources: {}
# limits:
# cpu: 100m
# memory: 64Mi
# requests:
# cpu: 100m
# memory: 64Mi
## The tolerations of the Ingress Controller pods.
tolerations: []
## The affinity of the Ingress Controller pods.
affinity: {}
## The volumes of the Ingress Controller pods.
volumes: []
# - name: extra-conf
# configMap:
# name: extra-conf
## The volumeMounts of the Ingress Controller pods.
volumeMounts: []
# - name: extra-conf
# mountPath: /etc/nginx/conf.d/extra.conf
# subPath: extra.conf
## InitContainers for the Ingress Controller pods.
initContainers: []
# - name: init-container
# image: busybox:1.34
# command: ['sh', '-c', 'echo this is initial setup!']
## Extra containers for the Ingress Controller pods.
extraContainers: []
# - name: container
# image: busybox:1.34
# command: ['sh', '-c', 'echo this is a sidecar!']
## The number of replicas of the Ingress Controller deployment.
replicaCount: 1
## A class of the Ingress Controller.
## IngressClass resource with the name equal to the class must be deployed. Otherwise,
## the Ingress Controller will fail to start.
## The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field of the resource equal to the class.
## The Ingress Controller processes all the resources that do not have the "ingressClassName" field for all versions of kubernetes.
ingressClass: nginx
## New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`.
setAsDefaultIngress: false
## Namespace to watch for Ingress resources. By default the Ingress Controller watches all namespaces.
watchNamespace: ""
## Enable the custom resources.
enableCustomResources: true
## Enable preview policies. This parameter is deprecated. To enable OIDC Policies please use controller.enableOIDC instead.
enablePreviewPolicies: false
## Enable OIDC policies.
enableOIDC: false
## Enable TLS Passthrough on port 443. Requires controller.enableCustomResources.
enableTLSPassthrough: false
## Enable cert manager for Virtual Server resources. Requires controller.enableCustomResources.
enableCertManager: false
globalConfiguration:
## Creates the GlobalConfiguration custom resource. Requires controller.enableCustomResources.
create: false
## The spec of the GlobalConfiguration for defining the global configuration parameters of the Ingress Controller.
spec: {}
# listeners:
# - name: dns-udp
# port: 5353
# protocol: UDP
# - name: dns-tcp
# port: 5353
# protocol: TCP
## Enable custom NGINX configuration snippets in Ingress, VirtualServer, VirtualServerRoute and TransportServer resources.
enableSnippets: false
## Add a location based on the value of health-status-uri to the default server. The location responds with the 200 status code for any request.
## Useful for external health-checking of the Ingress Controller.
healthStatus: false
## Sets the URI of health status location in the default server. Requires controller.healthStatus.
healthStatusURI: "/nginx-health"
nginxStatus:
## Enable the NGINX stub_status, or the NGINX Plus API.
enable: true
## Set the port where the NGINX stub_status or the NGINX Plus API is exposed.
port: 8080
## Add IPv4 IP/CIDR blocks to the allow list for NGINX stub_status or the NGINX Plus API. Separate multiple IP/CIDR by commas.
allowCidrs: "127.0.0.1"
service:
## Creates a service to expose the Ingress Controller pods.
create: true
## The type of service to create for the Ingress Controller.
type: LoadBalancer
## The externalTrafficPolicy of the service. The value Local preserves the client source IP.
externalTrafficPolicy: Local
## The annotations of the Ingress Controller service.
annotations: {}
## The extra labels of the service.
extraLabels: {}
## The static IP address for the load balancer. Requires controller.service.type set to LoadBalancer. The cloud provider must support this feature.
loadBalancerIP: ""
## The list of external IPs for the Ingress Controller service.
externalIPs: []
## The IP ranges (CIDR) that are allowed to access the load balancer. Requires controller.service.type set to LoadBalancer. The cloud provider must support this feature.
loadBalancerSourceRanges: []
## The name of the service
## Autogenerated if not set or set to "".
# name: nginx-ingress
httpPort:
## Enables the HTTP port for the Ingress Controller service.
enable: true
## The HTTP port of the Ingress Controller service.
port: 80
## The custom NodePort for the HTTP port. Requires controller.service.type set to NodePort.
nodePort: ""
## The HTTP port on the POD where the Ingress Controller service is running.
targetPort: 80
httpsPort:
## Enables the HTTPS port for the Ingress Controller service.
enable: true
## The HTTPS port of the Ingress Controller service.
port: 443
## The custom NodePort for the HTTPS port. Requires controller.service.type set to NodePort.
nodePort: ""
## The HTTPS port on the POD where the Ingress Controller service is running.
targetPort: 443
## A list of custom ports to expose through the Ingress Controller service. Follows the conventional Kubernetes yaml syntax for service ports.
customPorts: []
serviceAccount:
## The name of the service account of the Ingress Controller pods. Used for RBAC.
## Autogenerated if not set or set to "".
# name: nginx-ingress
## The name of the secret containing docker registry credentials.
## Secret must exist in the same namespace as the helm release.
imagePullSecretName: ""
reportIngressStatus:
## Updates the address field in the status of Ingress resources with an external address of the Ingress Controller.
## You must also specify the source of the external address either through an external service via controller.reportIngressStatus.externalService,
## controller.reportIngressStatus.ingressLink or the external-status-address entry in the ConfigMap via controller.config.entries.
## Note: controller.config.entries.external-status-address takes precedence over the others.
enable: true
## Specifies the name of the service with the type LoadBalancer through which the Ingress Controller is exposed externally.
## The external address of the service is used when reporting the status of Ingress, VirtualServer and VirtualServerRoute resources.
## controller.reportIngressStatus.enable must be set to true.
## The default is autogenerated and matches the created service (see controller.service.create).
# externalService: nginx-ingress
## Specifies the name of the IngressLink resource, which exposes the Ingress Controller pods via a BIG-IP system.
## The IP of the BIG-IP system is used when reporting the status of Ingress, VirtualServer and VirtualServerRoute resources.
## controller.reportIngressStatus.enable must be set to true.
ingressLink: ""
## Enable Leader election to avoid multiple replicas of the controller reporting the status of Ingress resources. controller.reportIngressStatus.enable must be set to true.
enableLeaderElection: true
## Specifies the name of the ConfigMap, within the same namespace as the controller, used as the lock for leader election. controller.reportIngressStatus.enableLeaderElection must be set to true.
## Autogenerated if not set or set to "".
# leaderElectionLockName: "nginx-ingress-leader-election"
## The annotations of the leader election configmap.
annotations: {}
pod:
## The annotations of the Ingress Controller pod.
annotations: {}
## The additional extra labels of the Ingress Controller pod.
extraLabels: {}
## The PriorityClass of the ingress controller pods.
priorityClassName:
readyStatus:
## Enables readiness endpoint "/nginx-ready". The endpoint returns a success code when NGINX has loaded all the config after startup.
enable: true
## Set the port where the readiness endpoint is exposed.
port: 8081
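## Illustrative only (not from the original file): with the defaults above, the readiness endpoint
## can be exercised manually, e.g. `curl http://<pod-ip>:8081/nginx-ready`.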
## Enable collection of latency metrics for upstreams. Requires prometheus.create.
enableLatencyMetrics: false
rbac:
## Configures RBAC.
create: true
prometheus:
## Expose NGINX or NGINX Plus metrics in the Prometheus format.
create: true
## Configures the port to scrape the metrics.
port: 9113
## Specifies the namespace/name of a Kubernetes TLS Secret which will be used to protect the Prometheus endpoint.
secret: ""
## Configures the HTTP scheme used.
scheme: http
nginxServiceMesh:
## Enables integration with NGINX Service Mesh.
## Requires controller.nginxplus
enable: false
## Enables NGINX Service Mesh workload to route egress traffic through the Ingress Controller.
## Requires nginxServiceMesh.enable
enableEgress: false

View File

@@ -1,75 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: nocodb
labels:
app: nocodb
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nocodb
namespace: nocodb
labels:
app: nocodb
spec:
replicas: 1
selector:
matchLabels:
app: nocodb
template:
metadata:
labels:
app: nocodb
spec:
containers:
- name: nocodb
image: nocodb/nocodb
tty: true
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
name: nocodb
namespace: nocodb
spec:
type: ClusterIP
ports:
- name: http
port: 8080
selector:
app: nocodb
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: nocodb
name: nocodb-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- nocodb.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: nocodb.kluster.moll.re
http:
paths:
- backend:
service:
name: nocodb
port:
number: 8080
path: /
pathType: Prefix

View File

@@ -1,26 +0,0 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: pihole
name: pihole-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- pihole.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: pihole.kluster.moll.re
http:
paths:
- backend:
service:
name: pihole-web
port:
number: 80
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: pihole
name: pihole-nfs
labels:
directory: pihole
spec:
storageClassName: slow
capacity:
storage: "500Mi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /pihole
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
## pihole.persistentvolumeclaim.yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: pihole
name: pihole-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "500Mi"
selector:
matchLabels:
directory: pihole
---

View File

@@ -1,397 +0,0 @@
# Default values for pihole.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- The number of replicas
replicaCount: 1
# -- The `spec.strategyType` for updates
strategyType: RollingUpdate
# -- The maximum number of Pods that can be created over the desired number of `ReplicaSet` during updating.
maxSurge: 1
# -- The maximum number of Pods that can be unavailable during updating
maxUnavailable: 1
image:
# -- the repository to pull the image from
repository: "pihole/pihole"
# -- the docker tag, if left empty it will get it from the chart's appVersion
tag: ""
# -- the pull policy
pullPolicy: IfNotPresent
dualStack:
# -- set this to true to enable creation of DualStack services or creation of separate IPv6 services if `serviceDns.type` is set to `"LoadBalancer"`
enabled: false
dnsHostPort:
# -- set this to true to enable dnsHostPort
enabled: false
# -- default port for this pod
port: 53
# -- Configuration for the DNS service on port 53
serviceDns:
# -- deploys a mixed (TCP + UDP) Service instead of separate ones
mixedService: false
# -- `spec.type` for the DNS Service
type: LoadBalancer
# -- The port of the DNS service
port: 53
# -- Optional node port for the DNS service
nodePort: ""
# -- `spec.externalTrafficPolicy` for the DNS Service
externalTrafficPolicy: Local
# -- A fixed `spec.loadBalancerIP` for the DNS Service
loadBalancerIP: 192.168.1.3
# -- A fixed `spec.loadBalancerIP` for the IPv6 DNS Service
loadBalancerIPv6: ""
# -- Annotations for the DNS service
annotations:
# metallb.universe.tf/address-pool: network-services
metallb.universe.tf/allow-shared-ip: pihole-svc
# -- Configuration for the DHCP service on port 67
serviceDhcp:
# -- Generate a Service resource for DHCP traffic
enabled: false
# -- `spec.type` for the DHCP Service
type: NodePort
# -- `spec.externalTrafficPolicy` for the DHCP Service
externalTrafficPolicy: Local
# -- A fixed `spec.loadBalancerIP` for the DHCP Service
loadBalancerIP: ""
# -- A fixed `spec.loadBalancerIP` for the IPv6 DHCP Service
loadBalancerIPv6: ""
# -- Annotations for the DHCP service
annotations: {}
# metallb.universe.tf/address-pool: network-services
# metallb.universe.tf/allow-shared-ip: pihole-svc
# -- Configuration for the web interface service
serviceWeb:
# -- Configuration for the HTTP web interface listener
http:
# -- Generate a service for HTTP traffic
enabled: true
# -- The port of the web HTTP service
port: 80
# -- Configuration for the HTTPS web interface listener
https:
# -- Generate a service for HTTPS traffic
enabled: true
# -- The port of the web HTTPS service
port: 443
# -- `spec.type` for the web interface Service
type: ClusterIP
# -- `spec.externalTrafficPolicy` for the web interface Service
externalTrafficPolicy: Local
# -- A fixed `spec.loadBalancerIP` for the web interface Service
loadBalancerIP: ""
# -- A fixed `spec.loadBalancerIP` for the IPv6 web interface Service
loadBalancerIPv6: ""
# -- Annotations for the web interface service
annotations: {}
# metallb.universe.tf/address-pool: network-services
# metallb.universe.tf/allow-shared-ip: pihole-svc
virtualHost: pi.hole
# -- Configuration for the Ingress
ingress:
# -- Generate a Ingress resource
enabled: false # DONE EXTERNALLY
# -- Specify an ingressClassName
# ingressClassName: nginx
# -- Annotations for the ingress
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
# virtualHost (default value is pi.hole) will be appended to the hosts
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# #- virtualHost (default value is pi.hole) will be appended to the hosts
# - chart-example.local
# -- Probes configuration
probes:
# -- probes.liveness -- Configure the liveness healthcheck for the pihole container
liveness:
# -- Generate a liveness probe
enabled: true
initialDelaySeconds: 60
failureThreshold: 10
timeoutSeconds: 5
readiness:
# -- Generate a readiness probe
enabled: true
initialDelaySeconds: 60
failureThreshold: 3
timeoutSeconds: 5
# -- We usually recommend not to specify default resources and to leave this as a conscious
# -- choice for the user. This also increases chances charts run on environments with little
# -- resources, such as Minikube. If you do want to specify resources, uncomment the following
# -- lines, adjust them as necessary, and remove the curly braces after 'resources:'.
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- `spec.PersistentVolumeClaim` configuration
persistentVolumeClaim:
# -- set to true to use pvc
enabled: true
# -- specify an existing `PersistentVolumeClaim` to use
existingClaim: "pihole-nfs"
# -- Annotations for the `PersistentVolumeClaim`
annotations: {}
accessModes:
- ReadWriteOnce
size: "500Mi"
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
## subPath: "pihole"
nodeSelector: {}
tolerations: []
# -- Specify a priorityClassName
# priorityClassName: ""
# Reference: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# - maxSkew: <integer>
# topologyKey: <string>
# whenUnsatisfiable: <string>
# labelSelector: <object>
affinity: {}
# -- Administrator password when not using an existing secret (see below)
adminPassword: "admin"
# -- Use an existing secret for the admin password.
admin:
# -- Specify an existing secret to use as admin password
existingSecret: ""
# -- Specify the key inside the secret to use
passwordKey: ""
# -- extraEnvironmentVars is a list of extra environment variables to set for pihole to use
extraEnvVars: {}
# TZ: UTC
# -- extraEnvVarsSecret is a list of secrets to load in as environment variables.
extraEnvVarsSecret: {}
# env_var:
# name: secret-name
# key: secret-key
# -- default upstream DNS 1 server to use
DNS1: "8.8.8.8"
# -- default upstream DNS 2 server to use
DNS2: "8.8.4.4"
antiaff:
# -- set to true to enable antiaffinity (example: 2 pihole DNS in the same cluster)
enabled: false
# -- Here you can set the pihole release (you set in `helm install <releasename> ...`)
# you want to avoid
avoidRelease: pihole1
# -- Here you can choose between preferred or required
strict: true
doh:
# -- set to true to enable DNS over HTTPS via cloudflared
enabled: false
name: "cloudflared"
repository: "crazymax/cloudflared"
tag: latest
pullPolicy: IfNotPresent
# -- Here you can pass environment variables to the DoH container, for example:
envVars: {}
# TUNNEL_DNS_UPSTREAM: "https://1.1.1.2/dns-query,https://1.0.0.2/dns-query"
# -- Probes configuration
probes:
# -- Configure the healthcheck for the doh container
liveness:
# -- set to true to enable liveness probe
enabled: true
# -- defines the initial delay for the liveness probe
initialDelaySeconds: 60
# -- defines the failure threshold for the liveness probe
failureThreshold: 10
# -- defines the timeout in seconds for the liveness probe
timeoutSeconds: 5
dnsmasq:
# -- Add upstream dns servers. All lines will be added to the pihole dnsmasq configuration
upstreamServers: []
# - server=/foo.bar/192.168.178.10
# - server=/bar.foo/192.168.178.11
# -- Add custom dns entries to override the dns resolution. All lines will be added to the pihole dnsmasq configuration.
customDnsEntries: []
# - address=/foo.bar/192.168.178.10
# - address=/bar.foo/192.168.178.11
# -- Dnsmasq reads the /etc/hosts file to resolve ips. You can add additional entries if you like
additionalHostsEntries: []
# - 192.168.0.3 host4
# - 192.168.0.4 host5
# -- Static DHCP config
staticDhcpEntries: []
# staticDhcpEntries:
# - dhcp-host=MAC_ADDRESS,IP_ADDRESS,HOSTNAME
# -- Other options
customSettings:
# otherSettings:
# - rebind-domain-ok=/plex.direct/
# -- Here we specify custom cname entries that should point to `A` records or
# elements in customDnsEntries array.
# The format should be:
# - cname=cname.foo.bar,foo.bar
# - cname=cname.bar.foo,bar.foo
# - cname=cname record,dns record
customCnameEntries: []
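# Hypothetical illustration (not in the original values), using the dnsmasq cname=<alias>,<target> syntax:
# customCnameEntries:
# - cname=printer.home.lan,nas.home.lan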
# -- list of adlists to import during initial start of the container
adlists: {}
# If you want to provide blocklists, add them here.
# - https://hosts-file.net/grm.txt
# - https://reddestdream.github.io/Projects/MinimalHosts/etc/MinimalHostsBlocker/minimalhosts
# -- list of whitelisted domains to import during initial start of the container
whitelist: {}
# If you want to provide whitelisted domains, add them here.
# - clients4.google.com
# -- list of blacklisted domains to import during initial start of the container
blacklist: {}
# If you want to have special domains blacklisted, add them here
# - *.blacklist.com
# -- list of blacklisted regex expressions to import during initial start of the container
regex: {}
# Add regular expression blacklist items
# - (^|\.)facebook\.com$
# -- values that should be added to pihole-FTL.conf
ftl: {}
# Add values for pihole-FTL.conf
# MAXDBDAYS: 14
# -- port the container should use to expose HTTP traffic
webHttp: "80"
# -- port the container should use to expose HTTPS traffic
webHttps: "443"
# -- hostname of pod
hostname: ""
# -- should the container use host network
hostNetwork: "false"
# -- should container run in privileged mode
privileged: "false"
customVolumes:
# -- set this to true to enable custom volumes
enabled: false
# -- any volume type can be used here
config: {}
# hostPath:
# path: "/mnt/data"
# -- Additional annotations for pods
podAnnotations: {}
# Example below allows Prometheus to scrape on metric port (requires pihole-exporter sidecar enabled)
# prometheus.io/port: '9617'
# prometheus.io/scrape: 'true'
monitoring:
# -- Prefer adding Prometheus scrape annotations rather than enabling podMonitor.
podMonitor:
# -- set this to true to enable podMonitor
enabled: false
# -- Sidecar configuration
sidecar:
# -- set this to true to enable podMonitor as sidecar
enabled: false
port: 9617
image:
repository: ekofr/pihole-exporter
tag: 0.0.10
pullPolicy: IfNotPresent
resources:
limits:
memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
podDnsConfig:
enabled: true
policy: "None"
nameservers:
- 127.0.0.1
- 8.8.8.8

View File

@@ -1,68 +0,0 @@
# Default values for portainer.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# If enterpriseEdition is enabled, then use the values below _instead_ of those in .image
enterpriseEdition:
enabled: false
image:
repository: portainer/portainer-ee
tag: 2.12.2
pullPolicy: Always
image:
repository: portainer/portainer-ce
tag: latest
pullPolicy: Always
imagePullSecrets: []
nodeSelector: {}
serviceAccount:
annotations: {}
name: portainer-sa-clusteradmin
service:
# Set the httpNodePort and edgeNodePort only if the type is NodePort
# For Ingress, set the type to be ClusterIP and set ingress.enabled to true
# For Cloud Providers, set the type to be LoadBalancer
type: ClusterIP
httpPort: 9000
httpsPort: 9443
httpNodePort: 30777
httpsNodePort: 30779
edgePort: 8000
edgeNodePort: 30776
annotations: {}
tls:
# If set, Portainer will be configured to use TLS only
force: false
# If set, will mount the existing secret into the pod
existingSecret: ""
feature:
flags: ""
ingress:
enabled: false
ingressClassName: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# Only use below if tls.force=true
# nginx.ingress.kubernetes.io/backend-protocol: HTTPS
# Note: Hosts and paths are of type array
hosts:
- host:
paths: []
# - path: "/"
tls: []
resources: {}
persistence:
existingClaim: portainer-data

View File

@@ -1,17 +0,0 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
namespace: portainer
name: portainer-ingressroute
spec:
entryPoints:
- websecure
routes:
- match: Host(`portainer.kluster.moll.re`)
kind: Rule
services:
- name: portainer
port: 9000
tls:
certResolver: default-tls

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: portainer
name: portainer-data
labels:
directory: portainer
spec:
storageClassName: fast
capacity:
storage: "10Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /portainer
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: portainer
name: portainer-data
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
selector:
matchLabels:
directory: portainer

View File

@@ -1,19 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: monitoring
name: prometheus-data-nfs
labels:
directory: prometheus
spec:
storageClassName: slow
capacity:
storage: "50Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /prometheus
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---

File diff suppressed because it is too large

View File

@@ -1,79 +0,0 @@
# mkdir -p /var/lib/pufferpanel
# docker volume create pufferpanel-config
# docker create --name pufferpanel -p 8080:8080 -p 5657:5657 -v pufferpanel-config:/etc/pufferpanel -v /var/lib/pufferpanel:/var/lib/pufferpanel --restart=on-failure
# docker start pufferpanel
# docker exec -it pufferpanel /pufferpanel/pufferpanel user add
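# Illustrative only: once the Deployment below is running, the same admin-creation step could be
# executed in-cluster, assuming kubectl access:
# kubectl exec -it -n pufferpanel deploy/pufferpanel -- /pufferpanel/pufferpanel user add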
apiVersion: apps/v1
kind: Deployment
metadata:
name: pufferpanel
namespace: pufferpanel
labels:
app: pufferpanel
spec:
replicas: 1
selector:
matchLabels:
app: pufferpanel
template:
metadata:
labels:
app: pufferpanel
spec:
containers:
- name: pufferpanel
image: karyeet/pufferpanel:devel
tty: true
ports:
- containerPort: 8080
- containerPort: 5657
volumeMounts:
- mountPath: /var/lib/pufferpanel
name: pufferpanel-nfs
- mountPath: /etc/pufferpanel
name: pufferpanel-config-nfs
resources:
requests:
memory: "2Gi"
cpu: 1
volumes:
- name: pufferpanel-nfs
persistentVolumeClaim:
claimName: pufferpanel-nfs
- name: pufferpanel-config-nfs
persistentVolumeClaim:
claimName: pufferpanel-config-nfs
---
apiVersion: v1
kind: Service
metadata:
name: pufferpanel
namespace: pufferpanel
spec:
ports:
- name: http
port: 8080
selector:
app: pufferpanel
---
apiVersion: v1
kind: Service
metadata:
name: pufferpanel-mc
namespace: pufferpanel
spec:
# -- `spec.type` for the Minecraft Service
type: LoadBalancer
# -- The port of the Minecraft service
ports:
- name: mc-port
port: 25565
nodePort: 30005
protocol: TCP

View File

@@ -1,28 +0,0 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: pufferpanel
name: pufferpanel-ingress
annotations:
kubernetes.io/ingress.class: nginx
# traefik.ingress.kubernetes.io/router.middlewares: default-redirect@kubernetescrd
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- game.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: game.kluster.moll.re
http:
paths:
- backend:
service:
name: pufferpanel
port:
number: 8080
path: /
pathType: Prefix

View File

@@ -1,72 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: pufferpanel
name: pufferpanel-nfs
labels:
directory: pufferpanel
spec:
storageClassName: fast
capacity:
storage: "20Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /pufferpanel/data
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: pufferpanel
name: pufferpanel-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "20Gi"
selector:
matchLabels:
directory: pufferpanel
---
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: pufferpanel
name: pufferpanel-config-nfs
labels:
directory: pufferpanel
spec:
storageClassName: fast
capacity:
storage: "1Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /pufferpanel/config
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: pufferpanel
name: pufferpanel-config-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
selector:
matchLabels:
directory: pufferpanel
---

View File

@@ -1,130 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: raspap
labels:
app: raspap
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: raspap
name: raspap-nfs
labels:
directory: raspap
spec:
storageClassName: fast
capacity:
storage: "2Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /raspap
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: raspap
name: raspap-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "2Gi"
selector:
matchLabels:
directory: raspap
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: raspap
namespace: raspap
labels:
app: raspap
spec:
replicas: 1
selector:
matchLabels:
app: raspap
template:
metadata:
labels:
app: raspap
spec:
# `networkMode: "host"` is a Docker Compose option, not a valid Kubernetes field; host networking is set at the pod spec level
hostNetwork: true
containers:
- name: raspap
image: jrcichra/raspap-docker
tty: true
securityContext:
capabilities:
add:
- SYS_ADMIN
ports:
- containerPort: 8000
volumeMounts:
- mountPath: /data
name: raspap-nfs
- mountPath: /sys/fs/cgroup
name: cgroup
volumes:
- name: cgroup
hostPath:
path: /sys/fs/cgroup
readOnly: true
- name: raspap-nfs
persistentVolumeClaim:
claimName: raspap-nfs
---
apiVersion: v1
kind: Service
metadata:
name: archive
namespace: archive
spec:
type: ClusterIP
ports:
- name: http
port: 8000
selector:
app: archive
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: archive
name: archive-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- archive.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: archive.kluster.moll.re
http:
paths:
- backend:
service:
name: archive
port:
number: 8000
path: /
pathType: Prefix

View File

@@ -1,26 +0,0 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: rocketchat
name: rocketchat-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- chat.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: chat.kluster.moll.re
http:
paths:
- backend:
service:
name: rocketchat-rocketchat
port:
number: 80
path: /
pathType: Prefix

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: rocketchat
name: rocketchat-nfs
labels:
directory: rocketchat
spec:
storageClassName: slow
capacity:
storage: "8Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /rocketchat
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
## rocketchat.persistentvolumeclaim.yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: rocketchat
name: rocketchat-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "8Gi"
selector:
matchLabels:
directory: rocketchat
---

View File

@@ -1,247 +0,0 @@
## Rocket Chat image version
## ref: https://hub.docker.com/r/rocketchat/rocket.chat/tags
##
image:
## NOTE: for microservices, those two items get ignored,
## for now.
repository: rs1977rs/rocketchat
tag: arm64-4.3.3
pullPolicy: IfNotPresent
imagePullSecrets: []
## Host for the application
## set it to a domain pointing to your loadbalancer
# host:
replicaCount: 1
minAvailable: 1
smtp:
enabled: false
username:
password:
host:
port: 587
# Extra env vars for Rocket.Chat:
extraEnv:
# - name: MONGO_OPTIONS
# value: '{"ssl": "true"}'
# - name: MONGO_OPLOG_URL
# value: mongodb://oploguser:password@rocket-1:27017/local&replicaSet=rs0
## Specifies a Registration Token (obtainable at https://cloud.rocket.chat)
#registrationToken: ""
## Specifies an Enterprise License
# license: ""
## Pod anti-affinity can prevent the scheduler from placing RocketChat replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
##
podAntiAffinity: ''
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
##
podAntiAffinityTopologyKey: kubernetes.io/hostname
## Assign custom affinity rules to the RocketChat instance
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# mongodb://user:password@localhost:27017/local?replicaSet=rs0&authSource=admin
##
## MongoDB chart configuration
### ref https://github.com/helm/charts/tree/master/stable/mongodb#configuration
##
mongodb:
## Enable or disable MongoDB dependency completely.
enabled: true
auth:
rootPassword: blablabla
username: rocketchat
password: blablabla
database: rocketchat
architecture: replicaset
replicaCount: 1
arbiter:
enabled: false
pdb:
minAvailable: 0
pdb:
minAvailable: 0
# key:
persistence:
enabled: false
## mongodb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
volumePermissions: {enabled: true}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
existingClaim: rocketchat-nfs
## rocketchat data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources:
# requests:
# memory: 512Mi
# cpu: 300m
securityContext:
enabled: true
runAsUser: 999
fsGroup: 999
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Configure the ingress object to hook into existing infrastructure
### ref : http://kubernetes.io/docs/user-guide/ingress/
###
ingress:
enabled: false
pathType: Prefix
annotations: {}
# ingressClassName: "nginx"
# kubernetes.io/ingress.class: "nginx"
path: /
tls: {}
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
service:
annotations: {}
# service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
labels: {}
# key: value
## ServiceType
## ref: https://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
type: ClusterIP
## Optional static port assignment for service type NodePort.
# nodePort: 30000
port: 80
## Optional custom labels for the deployment resource.
deploymentLabels: {}
## Optional Pod Labels.
podLabels: {}
## Optional Pod Annotations.
podAnnotations:
{}
# prometheus.io/port: "9458"
# prometheus.io/path: "/metrics"
# prometheus.io/scrape: "true"
## Optional Prometheus scraping Settings
prometheusScraping:
enabled: true
port: 9458
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 15
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 15
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
# # # # # # # # # # # # # # # # #
# M I C R O S E R V I C E S #
# Only available to E.E users #
# # # # # # # # # # # # # # # # #
## Deploy as microservices?
# Monolithic architecture, by default
microservices:
enabled: false
## Parameters for each deployment:
presence:
replicas: 1
ddpStreamer:
replicas: 1
streamHub:
replicas: 1
accounts:
replicas: 1
authorization:
replicas: 1
nats:
replicas: 1
## Parameters for each Kubernetes service
# NOTE: reserved for future usage still
presenceService:
ddpStreamerService:
natsService:
streamHubService:
accountsService:
authorizationService:

View File

@@ -1,131 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: searx
labels:
app: searx
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: searx
name: searx-data-nfs
labels:
directory: searx
spec:
storageClassName: fast
capacity:
storage: "5Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /searx
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: searx
name: searx-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "5Gi"
selector:
matchLabels:
directory: searx
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: searx
namespace: searx
labels:
app: searx
spec:
replicas: 1
selector:
matchLabels:
app: searx
template:
metadata:
labels:
app: searx
spec:
containers:
- name: searx
image: searxng/searxng
tty: true
ports:
- containerPort: 8080
volumeMounts:
- mountPath: /etc/searxng
name: searx-data
env:
- name: BASE_URL
value: https://search.kluster.moll.re
- name: INSTANCE_NAME
value: searx
resources:
requests:
cpu: "100m"
memory: "100Mi"
limits:
cpu: "1"
memory: "500Mi"
volumes:
- name: searx-data
persistentVolumeClaim:
claimName: searx-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: searx
namespace: searx
spec:
type: ClusterIP
ports:
- name: http
port: 8080
selector:
app: searx
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
namespace: searx
name: searx-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- search.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: search.kluster.moll.re
http:
paths:
- backend:
service:
name: searx
port:
number: 8080
path: /
pathType: Prefix

View File

@@ -1,40 +0,0 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: skooner
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: skooner
template:
metadata:
labels:
k8s-app: skooner
spec:
containers:
- name: skooner
image: nerdinexile/skooner:latest
ports:
- containerPort: 4654
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 4654
initialDelaySeconds: 30
timeoutSeconds: 30
---
kind: Service
apiVersion: v1
metadata:
name: skooner
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 4654
selector:
k8s-app: skooner

View File

@@ -1,28 +0,0 @@
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: skooner
namespace: kube-system
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts:
- dashboard.kluster.moll.re
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: dashboard.kluster.moll.re
http:
paths:
- backend:
service:
name: skooner
port:
number: 80
path: /
pathType: Prefix

View File

@@ -1,115 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: storage
labels:
app: storage
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: local-pv
namespace: storage
spec:
capacity:
storage: 400Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: "/mnt/data/"
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: hdd # only one node will have a drive attached to it!
operator: In
values:
- enabled
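# Illustrative, not part of the manifest: the label matched above would be applied to the single
# node holding the drive, e.g. `kubectl label node <node-name> hdd=enabled`.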
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: local-claim
namespace: storage
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-storage
resources:
requests:
storage: 400Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-server
namespace: storage
labels:
app: nfs-server
spec:
replicas: 1
selector:
matchLabels:
app: nfs-server
template:
metadata:
labels:
app: nfs-server
name: nfs-server
spec:
containers:
- name: nfs-server
image: itsthenetwork/nfs-server-alpine:11-arm
env:
- name: SHARED_DIRECTORY
value: /exports
- name: PERMITTED
value: 10.42.*.*
# ,192.168.1.112
ports:
- name: nfs
containerPort: 2049
- name: mountd
containerPort: 20048
- name: rpcbind
containerPort: 111
securityContext:
privileged: true
volumeMounts:
- mountPath: /exports
name: mypvc
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: local-claim
nodeSelector:
hdd: enabled
---
kind: Service
apiVersion: v1
metadata:
name: nfs-server
namespace: storage
spec:
type: LoadBalancer
loadBalancerSourceRanges:
- 192.168.1.112/31
# restricted to archspectre's IP only!
- 192.168.1.134/31
# and amd node
- 192.168.1.150/24
# and more
ports:
- name: nfs
port: 2049
- name: mountd
port: 20048
- name: rpcbind
port: 111
selector:
app: nfs-server
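# Illustrative, not part of the manifest: the ClusterIP assigned to this Service is the address the
# NFS PersistentVolumes elsewhere in this repo point at (10.43.239.43); it can be verified with
# `kubectl -n storage get svc nfs-server`.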

View File

@@ -1,59 +0,0 @@
# apiVersion: traefik.containo.us/v1alpha1
# kind: IngressRoute
# metadata:
# name: syncthing-ingress
# namespace: syncthing
# spec:
# entryPoints:
# - websecure
# routes:
# - match: Host(`syncthing.kluster.moll.re`)
# kind: Rule
# services:
# - name: syncthing
# port: 8384
# tls:
# certResolver: default-tls
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: authentik-auth
namespace: syncthing
spec:
forwardAuth:
address: https://syncthing.kluster.moll.re/outpost.goauthentik.io/auth/traefik
trustForwardHeader: true
authResponseHeaders:
- X-authentik-username
- X-authentik-groups
- X-authentik-email
- X-authentik-name
- X-authentik-uid
- X-authentik-jwt
- X-authentik-meta-jwks
- X-authentik-meta-outpost
- X-authentik-meta-provider
- X-authentik-meta-app
- X-authentik-meta-version
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: syncthing-ingress
namespace: syncthing
spec:
entryPoints:
- websecure
routes:
- match: Host(`syncthing.kluster.moll.re`)
kind: Rule
middlewares:
- name: authentik-auth
services:
- name: syncthing
port: 8384
tls:
certResolver: default-tls

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: syncthing
name: syncthing-nfs
labels:
directory: syncthing
spec:
storageClassName: fast
capacity:
storage: "100Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /data-sync
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: syncthing
name: syncthing-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "100Gi"
selector:
matchLabels:
directory: syncthing

View File

@@ -1,56 +0,0 @@
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: syncthing/syncthing
# -- image tag
tag: 1.18.2
# -- image pull policy
pullPolicy: IfNotPresent
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
http:
port: 8384
listen:
enabled: true
type: NodePort
externalTrafficPolicy: Local
ports:
listen:
enabled: true
port: 22000
protocol: TCP
targetPort: 22000
discovery:
enabled: true
type: NodePort
externalTrafficPolicy: Local
ports:
discovery:
enabled: true
port: 21027
protocol: UDP
targetPort: 21027
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: false
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
data:
enabled: true
mountPath: /var/syncthing
existingClaim: syncthing-nfs

View File

@@ -1,116 +0,0 @@
## Default values.yaml for Telegraf
## This is a YAML-formatted file.
## ref: https://hub.docker.com/r/library/telegraf/tags/
replicaCount: 1
image:
repo: "telegraf"
tag: "1.23"
pullPolicy: IfNotPresent
podAnnotations: {}
podLabels: {}
imagePullSecrets: []
## Configure args passed to Telegraf containers
args: []
# The name of a secret in the same kubernetes namespace which contains values to
# be added to the environment (must be manually created)
# This can be useful for auth tokens, etc.
# envFromSecret: "telegraf-tokens"
env:
- name: HOSTNAME
value: "telegraf-polling-service"
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# requests:
# memory: 128Mi
# cpu: 100m
# limits:
# memory: 128Mi
# cpu: 100m
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: # launch on same node as nginx controller so that log file is readable
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- ingress-nginx
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
service:
enabled: false
type: ClusterIP
annotations: {}
## Exposed telegraf configuration
## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
config:
agent:
interval: "10s"
round_interval: true
metric_batch_size: 1000
metric_buffer_limit: 10000
collection_jitter: "0s"
flush_interval: "10s"
flush_jitter: "0s"
precision: ""
debug: false
quiet: false
logfile: ""
hostname: "$HOSTNAME"
omit_hostname: false
processors:
- enum:
mapping:
field: "status"
dest: "status_code"
value_mappings:
healthy: 1
problem: 2
critical: 3
outputs:
- influxdb_v2:
urls:
- "http://influxdb-influxdb2.monitoring:80"
token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
organization: "influxdata"
bucket: "kluster"
inputs:
- prometheus:
urls:
- "http://10.42.0.218:9113"

View File

@@ -1,25 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: trilium-ingress
namespace: trilium
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
spec:
tls:
- hosts: [ 'trilium.kluster.moll.re' ]
secretName: cloudflare-letsencrypt-issuer-account-key
rules:
- host: trilium.kluster.moll.re
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: trilium-trilium-notes
port:
number: 8080

View File

@@ -1,37 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: trilium
name: trilium-data-nfs
labels:
directory: trilium
spec:
storageClassName: slow
capacity:
storage: "2Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /trilium
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: trilium
name: trilium-data-nfs
spec:
storageClassName: slow
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "2Gi"
selector:
matchLabels:
directory: trilium

View File

@@ -1,78 +0,0 @@
# Default values for trilium-notes.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: zadam/trilium
tag: "latest"
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: true
annotations: {}
# If not set and create is true, a name is generated using the fullname template
name:
podSecurityContext:
fsGroup: 10000
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 10000
allowPrivilegeEscalation: false
service:
type: ClusterIP
port: 8080
dataDir: /srv/trilium-data
persistentVolume:
enabled: true
existingClaim: trilium-data-nfs
accessModes:
- ReadWriteOnce
annotations: {}
size: 2Gi
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -1,95 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: webtop
name: webtop-data-nfs
labels:
directory: webtop
spec:
storageClassName: fast
capacity:
storage: "15Gi"
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
nfs:
path: /webtop
server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: webtop
name: webtop-data-nfs
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "15Gi"
selector:
matchLabels:
directory: webtop
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: webtop
namespace: webtop
labels:
app: webtop
spec:
replicas: 1
selector:
matchLabels:
app: webtop
template:
metadata:
labels:
app: webtop
spec:
containers:
- name: webtop
image: lscr.io/linuxserver/webtop:ubuntu-mate
tty: true
volumeMounts:
- mountPath: /config
name: webtop-data-nfs
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: "Europe/Berlin"
ports:
- containerPort: 3000
volumes:
- name: webtop-data-nfs
persistentVolumeClaim:
claimName: webtop-data-nfs
---
apiVersion: v1
kind: Service
metadata:
name: webtop
namespace: webtop
spec:
ports:
- name: webtop
port: 3000
selector:
app: webtop