diff --git a/infrastructure/backup/base/cronjob.yaml b/infrastructure/backup/base/cronjob.yaml
index 8198575..524ec38 100644
--- a/infrastructure/backup/base/cronjob.yaml
+++ b/infrastructure/backup/base/cronjob.yaml
@@ -21,10 +21,14 @@ spec:
             command: ["curl"]
             args:
             - "-H"
-            - "Title: $(cat /proc/sys/kernel/hostname)"
+            - "Title: $(OPERATION)"
             - "-d"
-            - "Restic operation to gdrive finished"
+            - "Finished successfully"
            - "https://ntfy.kluster.moll.re/backup"
+            env:
+            - name: OPERATION
+              value: "PLACEHOLDER"
+
           initContainers:
           - name: restic-base-container
             image: restic/restic:latest
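Note that `curl` runs here without a shell, so the title relies on Kubernetes' `$(VAR_NAME)` substitution from the container's `env` block (plain `$OPERATION` would be passed through literally). Once an overlay replaces the `PLACEHOLDER`, the notification sidecar boils down to something like the following sketch, using the backup overlay's value:

```sh
# Illustrative only: the effective ntfy call after kustomize patching
curl -H "Title: Restic backup to gdrive" \
     -d "Finished successfully" \
     https://ntfy.kluster.moll.re/backup
```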
"AgApoTzU3zoGZPcDxXwtF8/ZsPlzZH+WvL2E3yImCwfJQm/mMp4cLWqupn6mJLIJhewduM/9EWI/Yfe194Y6oY1wFlZChADa17nZbaWrbSlVAe5pNnkWUs/oB8D11Hnbw6fom5TnVjDxejG7GuitIVyVbpjETDrS7tCE2zhR1Xt3CjlpcM6BOcfCA/GOySixWGhN8CQlx9ujiQkofFZdB7GomcNbBlwo1SOc61un7casWPfGUEORObIQGqAc7APcUovpY/66iJ0IzZG9y+LNnnKgp2Vr91Oek2/S02S5HU+L5J6UO1KtAcCJGL4N0fhoHgstUqvycV4Aup24J3qnruzz1D83doghuHJEWx52wlMGiTNyK4ZCqaA19d6dFgpVNKiR/g0Qd/hx65/K4+U0nqm0GXCMmjUi3E0rx5u3KI2hUTwiffAPcfRZR7YBgFLAB1Gm+vanh23Qp2kdEojM63I/E35XD5ibr+HiEP6zFLgZw8KI4uJMxvCRiDFlYyQ3dXqTsi8sg7AzMwTtLsTAAmTCZdoD54SYYeLJ6nVW6FPznGF6CmMG/2ZpApZS8hsBsPclf3yipxON30dFmrDXDax9OSBPseucpZsJN2Vjikpuv5Lst2b+vL7/Bq/cfho58zFRHyc42G4xe5N+Evzv6jb/MaFWKmV5mBkMYdLZ0ANgFjrTXhI0T9zZTcgj4ohmHg7Z8v+1DgOgvAI6jo/kQSp4zJi69Q==" } } } diff --git a/unused/aio.deployment.yaml b/unused/aio.deployment.yaml deleted file mode 100644 index ab6bcbd..0000000 --- a/unused/aio.deployment.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: aio - namespace: aio - labels: - app: aio -spec: - replicas: 1 - selector: - matchLabels: - app: aio - template: - metadata: - labels: - app: aio - spec: - containers: - - name: aio - image: mollre/aio:latest - tty: true - volumeMounts: - - mountPath: /keys/ - name: aio-nfs - resources: - requests: - memory: "250Mi" - cpu: 0.5 - - - volumes: - - name: aio-nfs - persistentVolumeClaim: - claimName: aio-nfs diff --git a/unused/aio.pvc.yaml b/unused/aio.pvc.yaml deleted file mode 100644 index aa4f30b..0000000 --- a/unused/aio.pvc.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - namespace: aio - name: "aio-nfs" - labels: - directory: "aio" -spec: - storageClassName: fast - capacity: - storage: "100Mi" - accessModes: - - ReadWriteOnce - nfs: - path: /aio - server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed - ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: aio - name: "aio-nfs" -spec: - storageClassName: "fast" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "100Mi" - selector: - matchLabels: - directory: "aio" diff --git a/unused/anki/deployment.yaml b/unused/anki/deployment.yaml deleted file mode 100644 index b2bed6e..0000000 --- a/unused/anki/deployment.yaml +++ /dev/null @@ -1,114 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: anki - ---- - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: anki - namespace: anki - labels: - app: anki -spec: - replicas: 1 - selector: - matchLabels: - app: anki - template: - metadata: - labels: - app: anki - spec: - containers: - - name: anki-server - image: ankicommunity/anki-sync-server:20220516 - tty: true - volumeMounts: - - mountPath: /app/data - name: anki-data-nfs - resources: - requests: - memory: "250Mi" - cpu: 0.5 - nodeSelector: - kubernetes.io/arch: amd64 - - - volumes: - - name: anki-data-nfs - persistentVolumeClaim: - claimName: anki-data-nfs - ---- -apiVersion: v1 -kind: Service -metadata: - name: anki-http - namespace: anki -spec: - selector: - app: anki - ports: - - protocol: TCP - port: 27701 - targetPort: 27701 - type: ClusterIP - ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - namespace: anki - name: "anki-data-nfs" - labels: - directory: "anki" -spec: - storageClassName: fast - capacity: - storage: "100Mi" - accessModes: - - ReadWriteOnce - nfs: - path: /anki - server: 10.43.239.43 # assigned to nfs-server service. 
diff --git a/unused/aio.deployment.yaml b/unused/aio.deployment.yaml
deleted file mode 100644
index ab6bcbd..0000000
--- a/unused/aio.deployment.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: aio
-  namespace: aio
-  labels:
-    app: aio
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: aio
-  template:
-    metadata:
-      labels:
-        app: aio
-    spec:
-      containers:
-        - name: aio
-          image: mollre/aio:latest
-          tty: true
-          volumeMounts:
-            - mountPath: /keys/
-              name: aio-nfs
-          resources:
-            requests:
-              memory: "250Mi"
-              cpu: 0.5
-
-
-      volumes:
-        - name: aio-nfs
-          persistentVolumeClaim:
-            claimName: aio-nfs
diff --git a/unused/aio.pvc.yaml b/unused/aio.pvc.yaml
deleted file mode 100644
index aa4f30b..0000000
--- a/unused/aio.pvc.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: aio
-  name: "aio-nfs"
-  labels:
-    directory: "aio"
-spec:
-  storageClassName: fast
-  capacity:
-    storage: "100Mi"
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /aio
-    server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
-
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: aio
-  name: "aio-nfs"
-spec:
-  storageClassName: "fast"
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: "100Mi"
-  selector:
-    matchLabels:
-      directory: "aio"
diff --git a/unused/anki/deployment.yaml b/unused/anki/deployment.yaml
deleted file mode 100644
index b2bed6e..0000000
--- a/unused/anki/deployment.yaml
+++ /dev/null
@@ -1,114 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: anki
-
----
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: anki
-  namespace: anki
-  labels:
-    app: anki
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: anki
-  template:
-    metadata:
-      labels:
-        app: anki
-    spec:
-      containers:
-        - name: anki-server
-          image: ankicommunity/anki-sync-server:20220516
-          tty: true
-          volumeMounts:
-            - mountPath: /app/data
-              name: anki-data-nfs
-          resources:
-            requests:
-              memory: "250Mi"
-              cpu: 0.5
-      nodeSelector:
-        kubernetes.io/arch: amd64
-
-
-      volumes:
-        - name: anki-data-nfs
-          persistentVolumeClaim:
-            claimName: anki-data-nfs
-
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: anki-http
-  namespace: anki
-spec:
-  selector:
-    app: anki
-  ports:
-    - protocol: TCP
-      port: 27701
-      targetPort: 27701
-  type: ClusterIP
-
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: anki
-  name: "anki-data-nfs"
-  labels:
-    directory: "anki"
-spec:
-  storageClassName: fast
-  capacity:
-    storage: "100Mi"
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /anki
-    server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
-
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: anki
-  name: "anki-data-nfs"
-spec:
-  storageClassName: "fast"
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: "100Mi"
-  selector:
-    matchLabels:
-      directory: "anki"
-
-
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: anki-ingress
-  namespace: anki
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`anki.kluster.moll.re`)
-      kind: Rule
-      services:
-        - name: anki-http
-          port: 27701
-  tls:
-    certResolver: default-tls
-
-
diff --git a/unused/anonaddy.values.yaml b/unused/anonaddy.values.yaml
deleted file mode 100644
index 50473bf..0000000
--- a/unused/anonaddy.values.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# IMPORTANT NOTE
-#
-# This chart inherits from our common library chart. You can check the default values/options here:
-# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
-#
-
-image:
-  # -- image repository
-  repository: anonaddy/anonaddy
-  # -- image tag
-  tag: 0.11.2
-  # -- image pull policy
-  pullPolicy: IfNotPresent
-
-strategy:
-  type: Recreate
-
-# -- environment variables. See more environment variables in the [anonaddy documentation](https://github.com/anonaddy/docker#environment-variables).
-# @default -- See below
-env:
-  TZ: "Europe/Berlin"
-  # -- Application key for encrypter service
-  # You can generate one through `anonaddy key:generate --show` or `echo "base64:$(openssl rand -base64 32)"`
-  APP_KEY:
-  # -- Root domain to receive email from
-  ANONADDY_DOMAIN: anonaddy.kluster.moll.re
-  # -- Long random string used when hashing data for the anonymous replies
-  ANONADDY_SECRET:
-
-# -- Configures service settings for the chart.
-# @default -- See values.yaml
-service:
-  main:
-    ports:
-      http:
-        port: 8000
-  smtp:
-    enabled: true
-    port: 25
-    type: LoadBalancer
-
-ingress:
-  # -- Enable and configure ingress settings for the chart under this key.
-  # @default -- See values.yaml
-  main:
-    enabled: true
-    annotations:
-      kubernetes.io/ingress.class: nginx
-      cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
-    hosts:
-      - host: anonaddy.kluster.moll.re
-        paths:
-          - path: /
-            pathType: Prefix
-            service:
-              port: 8000
-    tls:
-      - hosts:
-          - anonaddy.kluster.moll.re
-        secretName: cloudflare-letsencrypt-issuer-account-key
-
-# -- Configure persistence settings for the chart under this key.
-# @default -- See values.yaml
-persistence:
-  config:
-    enabled: false
-  emptydir:
-    enabled: false
-
-# https://github.com/bitnami/charts/tree/master/bitnami/mariadb/#installing-the-chart
-mariadb:
-  enabled: true
-  image:
-    name: arm64v8/mariadb:latest
-    pullSecrets: []
-  # primary:
-  #   persistence:
-  #     enabled: true
-  # auth:
-  #   username: "username"
-  #   password: "password"
-  #   database: database
-
-# -- Enable and configure redis subchart under this key.
-# For more options see [redis chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/redis)
-# @default -- See values.yaml
-redis:
-  enabled: false
-  # auth:
-  #   enabled: false
-
diff --git a/unused/archive.deployment.yaml b/unused/archive.deployment.yaml
deleted file mode 100644
index c108d49..0000000
--- a/unused/archive.deployment.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: archive
-  labels:
-    app: archive
-
----
-
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: archive
-  name: archive-data-nfs
-  labels:
-    directory: archive
-spec:
-  storageClassName: fast
-  capacity:
-    storage: "100Gi"
-  volumeMode: Filesystem
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /helbing_archive
-    server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: archive
-  name: archive-data-nfs
-spec:
-  storageClassName: fast
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: "100Gi"
-  selector:
-    matchLabels:
-      directory: archive
-
----
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: archive
-  namespace: archive
-  labels:
-    app: archive
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: archive
-  template:
-    metadata:
-      labels:
-        app: archive
-    spec:
-      containers:
-        - name: archive
-          image: archivebox/archivebox
-          tty: true
-          ports:
-            - containerPort: 8000
-          volumeMounts:
-            - mountPath: /data
-              name: archive-data
-
-
-      volumes:
-        - name: archive-data
-          persistentVolumeClaim:
-            claimName: archive-data-nfs
-
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: archive
-  namespace: archive
-
-spec:
-  type: ClusterIP
-  ports:
-    - name: http
-      port: 8000
-  selector:
-    app: archive
-
----
-
-kind: Ingress
-apiVersion: networking.k8s.io/v1
-metadata:
-  namespace: archive
-  name: archive-ingress
-  annotations:
-    kubernetes.io/ingress.class: nginx
-    cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
-spec:
-  tls:
-    - hosts:
-        - archive.kluster.moll.re
-      secretName: cloudflare-letsencrypt-issuer-account-key
-  rules:
-    - host: archive.kluster.moll.re
-      http:
-        paths:
-          - backend:
-              service:
-                name: archive
-                port:
-                  number: 8000
-            path: /
-            pathType: Prefix
\ No newline at end of file
diff --git a/unused/authelia/pvc.yaml b/unused/authelia/pvc.yaml
deleted file mode 100644
index 5a6ba53..0000000
--- a/unused/authelia/pvc.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: authelia
-  name: authelia-config-nfs
-  labels:
-    directory: authelia
-spec:
-  storageClassName: fast
-  capacity:
-    storage: "1Gi"
-  volumeMode: Filesystem
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /authelia
-    server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: authelia
-  name: authelia-config-nfs
-spec:
-  storageClassName: fast
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: "1Gi"
-  selector:
-    matchLabels:
-      directory: authelia
\ No newline at end of file
diff --git a/unused/authelia/values.yaml b/unused/authelia/values.yaml
deleted file mode 100644
index 2546247..0000000
--- a/unused/authelia/values.yaml
+++ /dev/null
@@ -1,1235 +0,0 @@
----
-## @formatter:off
-## values.yaml
-##
-## Repository: authelia https://charts.authelia.com
-## Chart: authelia
-##
-## This values file is designed for full deployment, eventually for in production once the chart makes it to 1.0.0.
-## It uses the following providers:
-## - authentication: LDAP
-## - storage: MySQL
-## - session: redis
-
-## Version Override allows changing some chart characteristics that render only on specific versions.
-## This does NOT affect the image used, please see the below image section instead for this.
-## If this value is not specified, it's assumed the appVersion of the chart is the version.
-## The format of this value is x.x.x, for example 4.100.0.
-##
-## Important Points:
-## - No guarantees of support for prior versions is given. The chart is intended to be used with the AppVersion.
-## - Does not and will not support any version prior to 4.30.0 due to a significant refactor of the configuration
-##   system.
-versionOverride: ""
-
-## Image Parameters
-## ref: https://hub.docker.com/r/authelia/authelia/tags/
-##
-image:
-  # registry: docker.io
-  registry: ghcr.io
-  repository: authelia/authelia
-  tag: ""
-  pullPolicy: IfNotPresent
-  pullSecrets: []
-  # pullSecrets:
-  #   - myPullSecretName
-
-# nameOverride: authelia-deployment-name
-# appNameOverride: authelia
-
-##
-## extra labels/annotations applied to all resources
-##
-annotations: {}
-# annotations:
-#   myAnnotation: myValue
-
-labels: {}
-# labels:
-#   myLabel: myValue
-
-##
-## RBAC Configuration.
-##
-rbac:
-
-  ## Enable RBAC. Turning this on associates Authelia with a service account.
-  ## If the vault injector is enabled, then RBAC must be enabled.
-  enabled: false
-
-  annotations: {}
-  labels: {}
-
-  serviceAccountName: authelia
-
-
-## Authelia Domain
-## Should be the root domain you want to protect.
-## For example if you have apps app1.example.com and app2.example.com it should be example.com
-## This affects the ingress (partially sets the domain used) and configMap.
-## Authelia must be served from the domain or a subdomain under it.
-domain: kluster.moll.re
-
-service:
-  annotations: {}
-  # annotations:
-  #   myAnnotation: myValue
-
-  labels: {}
-  # labels:
-  #   myLabel: myValue
-
-  port: 80
-
-  # clusterIP:
-
-
-ingress:
-  enabled: true
-
-  annotations:
-    kubernetes.io/ingress.class: nginx
-    cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
-
-  labels: {}
-  # labels:
-  #   myLabel: myValue
-
-  certManager: false
-  rewriteTarget: true
-
-  ## The Ingress Class Name.
-  # className: ingress-nginx
-
-  ## Subdomain is the only thing required since we specify the domain as part of the root values of the chart.
-  ## Example: To get Authelia to listen on https://auth.example.com specify 'auth' for ingress.subdomain,
-  ## and specify example.com for the domain.
-  subdomain: auth
-
-  tls:
-    enabled: true
-    secretName: cloudflare-letsencrypt-issuer-account-key
-    hosts:
-      - auth.kluster.moll.re
-
-    # hostNameOverride:
-
-  traefikCRD:
-    enabled: false
-
-    ## Use a standard Ingress object, not an IngressRoute.
-    disableIngressRoute: false
-
-    # matchOverride: Host(`auth.example.com`) && PathPrefix(`/`)
-
-    entryPoints: []
-    # entryPoints:
-    #   - http
-
-    # priority: 10
-
-    # weight: 10
-
-    sticky: false
-
-    # stickyCookieNameOverride: authelia_traefik_lb
-
-    # strategy: RoundRobin
-
-    # responseForwardingFlushInterval: 100ms
-
-    middlewares:
-      auth:
-        # nameOverride: authelia-auth
-        authResponseHeaders:
-          - Remote-User
-          - Remote-Name
-          - Remote-Email
-          - Remote-Groups
-
-      chains:
-        auth:
-          # nameOverride: authelia-auth-chain
-
-          # List of Middlewares to apply before the forwardAuth Middleware in the authentication chain.
-          before: []
-          # before:
-          #   - name: extra-middleware-name
-          #     namespace: default
-
-          # List of Middlewares to apply after the forwardAuth Middleware in the authentication chain.
-          after: []
-          # after:
-          #   - name: extra-middleware-name
-          #     namespace: default
-
-        ingressRoute:
-
-          # List of Middlewares to apply before the middleware in the IngressRoute chain.
-          before: []
-          # before:
-          #   - name: extra-middleware-name
-          #     namespace: default
-
-          # List of Middlewares to apply after the middleware in the IngressRoute chain.
-          after: []
-          # after:
-          #   - name: extra-middleware-name
-          #     namespace: default
-
-    # Specific options for the TraefikCRD TLS configuration. The above TLS section is still used.
-    tls:
-      ## Disables inclusion of the IngressRoute TLSOptions.
-      disableTLSOptions: false
-      # existingOptions:
-      #   name: default-traefik-options
-      #   namespace: default
-      # certResolver: default
-      # sans:
-      #   - *.example.com
-
-      options:
-        # nameOverride: authelia-tls-options
-        nameOverride: ""
-
-        minVersion: VersionTLS12
-        maxVersion: VersionTLS13
-        sniStrict: false
-        cipherSuites:
-          - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
-          - TLS_RSA_WITH_AES_256_GCM_SHA384
-        curvePreferences: []
-        # curvePreferences:
-        #   - CurveP521
-        #   - CurveP384
-
-pod:
-  # Must be Deployment, DaemonSet, or StatefulSet.
-  kind: DaemonSet
-
-  annotations: {}
-  # annotations:
-  #   myAnnotation: myValue
-
-  labels: {}
-  # labels:
-  #   myLabel: myValue
-
-  replicas: 1
-  revisionHistoryLimit: 5
-
-  strategy:
-    type: RollingUpdate
-    # rollingUpdate:
-    #   partition: 1
-    #   maxSurge: 25%
-    #   maxUnavailable: 25%
-
-  securityContext:
-    container: {}
-    # container:
-    #   runAsUser: 2000
-    #   runAsGroup: 2000
-    #   fsGroup: 2000
-    pod: {}
-    # pod:
-    #   readOnlyRootFilesystem: true
-    #   allowPrivilegeEscalation: false
-    #   privileged: false
-
-  tolerations: []
-  # tolerations:
-  #   - key: key1
-  #     operator: Equal
-  #     value: value1
-  #     effect: NoSchedule
-  #     tolerationSeconds: 3600
-
-  selectors:
-    # nodeName: worker-1
-
-    nodeSelector: {}
-    # nodeSelector:
-    #   disktype: ssd
-    #   kubernetes.io/hostname: worker-1
-
-    affinity:
-      nodeAffinity: {}
-      # nodeAffinity:
-      #   requiredDuringSchedulingIgnoredDuringExecution:
-      #     nodeSelectorTerms:
-      #       - matchExpressions:
-      #           - key: kubernetes.io/hostname
-      #             operator: In
-      #             values:
-      #               - worker-1
-      #               - worker-2
-      #   preferredDuringSchedulingIgnoredDuringExecution:
-      #     - weight: 1
-      #       preference:
-      #         matchExpressions:
-      #           - key: node-label-key
-      #             operator: NotIn
-      #             values:
-      #               - not-this
-      podAffinity: {}
-      # podAffinity:
-      #   requiredDuringSchedulingIgnoredDuringExecution:
-      #     - labelSelector:
-      #         matchExpressions:
-      #           - key: security
-      #             operator: In
-      #             values:
-      #               - S1
-      #       topologyKey: topology.kubernetes.io/zone
-      podAntiAffinity: {}
-      # podAntiAffinity:
-      #   preferredDuringSchedulingIgnoredDuringExecution:
-      #     - weight: 100
-      #       podAffinityTerm:
-      #         labelSelector:
-      #           matchExpressions:
-      #             - key: security
-      #               operator: In
-      #               values:
-      #                 - S2
-      #         topologyKey: topology.kubernetes.io/zone
-
-  env: []
-  # env:
-  #   - name: TZ
-  #     value: Australia/Melbourne
-
-  resources:
-    limits: {}
-    # limits:
-    #   cpu: "4.00"
-    #   memory: 125Mi
-    requests: {}
-    # requests:
-    #   cpu: "0.25"
-    #   memory: 50Mi
-
-  probes:
-    method:
-      httpGet:
-        path: /api/health
-        port: http
-        scheme: HTTP
-
-    liveness:
-      initialDelaySeconds: 0
-      periodSeconds: 30
-      timeoutSeconds: 5
-      successThreshold: 1
-      failureThreshold: 5
-
-    readiness:
-      initialDelaySeconds: 0
-      periodSeconds: 5
-      timeoutSeconds: 5
-      successThreshold: 1
-      failureThreshold: 5
-
-    ## Note: Startup Probes are a Kubernetes feature gate which must be manually enabled pre-1.18.
-    ## Ref: https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
-    startup:
-      initialDelaySeconds: 10
-      periodSeconds: 5
-      timeoutSeconds: 5
-      successThreshold: 1
-      failureThreshold: 6
-
-  extraVolumeMounts: []
-  extraVolumes: []
-
-
-##
-## Authelia Config Map Generator
-##
-configMap:
-
-  # Enable the configMap source for the Authelia config.
-  # If this is false you need to provide a volumeMount via PV/PVC or other means that mounts to /config.
-  enabled: true
-
-  annotations: {}
-  # annotations:
-  #   myAnnotation: myValue
-
-  labels: {}
-  # labels:
-  #   myLabel: myValue
-
-  key: configuration.yaml
-
-  existingConfigMap: ""
-
-  ##
-  ## Server Configuration
-  ##
-  server:
-    ##
-    ## Port sets the configured port for the daemon, service, and the probes.
-    ## Default is 9091 and should not need to be changed.
-    ##
-    port: 9091
-
-    ## Set the single level path Authelia listens on.
-    ## Must be alphanumeric chars and should not contain any slashes.
-    path: ""
-
-    ## Set the path on disk to Authelia assets.
-    ## Useful to allow overriding of specific static assets.
-    # asset_path: /config/assets/
-    asset_path: ""
-
-    ## Customize Authelia headers.
-    headers:
-      ## Read the Authelia docs before setting this advanced option.
-      ## https://www.authelia.com/configuration/miscellaneous/server/#csp_template.
-      csp_template: ""
-
-    ## Buffers usually should be configured to be the same value.
-    ## Explanation at https://www.authelia.com/configuration/miscellaneous/server/
-    ## Read buffer size adjusts the server's max incoming request size in bytes.
-    ## Write buffer size does the same for outgoing responses.
-    read_buffer_size: 4096
-    write_buffer_size: 4096
-
-  log:
-    ## Level of verbosity for logs: info, debug, trace.
-    level: info
-
-    ## Format the logs are written as: json, text.
-    format: text
-
-    ## TODO: Statefulness check should check if this is set, and the configMap should enable it.
-    ## File path where the logs will be written. If not set logs are written to stdout.
-    # file_path: /config/authelia.log
-    file_path: ""
-
-  ##
-  ## Telemetry Configuration
-  ##
-  telemetry:
-
-    ##
-    ## Metrics Configuration
-    ##
-    metrics:
-      ## Enable Metrics.
-      enabled: false
-
-      ## The port to listen on for metrics. This should be on a different port to the main server.port value.
-      port: 9959
-
-      serviceMonitor:
-        enabled: false
-        annotations: {}
-        labels: {}
-
-  ## Default redirection URL
-  ##
-  ## If user tries to authenticate without any referer, Authelia does not know where to redirect the user to at the end
-  ## of the authentication process. This parameter allows you to specify the default redirection URL Authelia will use
-  ## in such a case.
-  ##
-  ## Note: this parameter is optional. If not provided, user won't be redirected upon successful authentication.
-  ## Default is https://www. (value at the top of the values.yaml).
-  default_redirection_url: ""
-  # default_redirection_url: https://example.com
-
-  ## Set the default 2FA method for new users and for when a user has a preferred method configured that has been
-  ## disabled. This setting must be a method that is enabled.
-  ## Options are totp, webauthn, mobile_push.
-  default_2fa_method: "totp"
-
-  theme: light
-
-  ##
-  ## TOTP Configuration
-  ##
-  ## Parameters used for TOTP generation.
-  totp:
-    ## Disable TOTP.
-    disable: false
-
-    ## The issuer name displayed in the Authenticator application of your choice.
-    ## Defaults to .
-    issuer: ""
-
-    ## The TOTP algorithm to use.
-    ## It is CRITICAL you read the documentation before changing this option:
-    ## https://www.authelia.com/configuration/second-factor/time-based-one-time-password/#algorithm
-    algorithm: sha1
-
-    ## The number of digits a user has to input. Must either be 6 or 8.
-    ## Changing this option only affects newly generated TOTP configurations.
-    ## It is CRITICAL you read the documentation before changing this option:
-    ## https://www.authelia.com/configuration/second-factor/time-based-one-time-password/#digits
-    digits: 6
-
-    ## The period in seconds a one-time password is valid for.
-    ## Changing this option only affects newly generated TOTP configurations.
-    period: 30
-
-    ## The skew controls number of one-time passwords either side of the current one that are valid.
-    ## Warning: before changing skew read the docs link below.
-    ## See: https://www.authelia.com/configuration/second-factor/time-based-one-time-password/#input-validation to read the documentation.
-    skew: 1
-
-    ## The size of the generated shared secrets. Default is 32 and is sufficient in most use cases, minimum is 20.
-    secret_size: 32
-
-  ##
-  ## WebAuthn Configuration
-  ##
-  ## Parameters used for WebAuthn.
-  webauthn:
-    ## Disable Webauthn.
-    disable: false
-
-    ## Adjust the interaction timeout for Webauthn dialogues.
-    timeout: 60s
-
-    ## The display name the browser should show the user for when using Webauthn to login/register.
-    display_name: Authelia
-
-    ## Conveyance preference controls if we collect the attestation statement including the AAGUID from the device.
-    ## Options are none, indirect, direct.
-    attestation_conveyance_preference: indirect
-
-    ## User verification controls if the user must make a gesture or action to confirm they are present.
-    ## Options are required, preferred, discouraged.
-    user_verification: preferred
-
-
-
-  ##
-  ## Authentication Backend Provider Configuration
-  ##
-  ## Used for verifying user passwords and retrieve information such as email address and groups users belong to.
-  ##
-  ## The available providers are: `file`, `ldap`. You must use one and only one of these providers.
-  authentication_backend:
-
-    ## Password Reset Options.
-    password_reset:
-
-      ## Disable both the HTML element and the API for reset password functionality
-      disable: false
-
-      ## External reset password url that redirects the user to an external reset portal. This disables the internal reset
-      ## functionality.
-      custom_url: ""
-
-    ## The amount of time to wait before we refresh data from the authentication backend. Uses duration notation.
-    ## To disable this feature set it to 'disable', this will slightly reduce security because for Authelia, users will
-    ## always belong to groups they belonged to at the time of login even if they have been removed from them in LDAP.
-    ## To force update on every request you can set this to '0' or 'always', this will increase processor demand.
-    ## See the below documentation for more information.
-    ## Duration Notation docs: https://www.authelia.com/configuration/prologue/common/#duration-notation-format
-    ## Refresh Interval docs: https://www.authelia.com/configuration/first-factor/ldap/#refresh-interval
-    refresh_interval: 5m
-
-    ## LDAP backend configuration.
-    ##
-    ## This backend allows Authelia to be scaled to more
-    ## than one instance and therefore is recommended for
-    ## production.
-    ldap:
-      enabled: false
-
-    ## File (Authentication Provider)
-    ##
-    ## With this backend, the users database is stored in a file which is updated when users reset their passwords.
-    ## Therefore, this backend is meant to be used in a dev environment and not in production since it prevents Authelia
-    ## to be scaled to more than one instance. The options under 'password' have sane defaults, and as it has security
-    ## implications it is highly recommended you leave the default values. Before considering changing these settings
-    ## please read the docs page: https://www.authelia.com/reference/guides/passwords/#tuning
-    ##
-    ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/
-    ##
-    file:
-      enabled: true
-      path: /config/users_database.yml
-      password:
-        algorithm: argon2id
-        iterations: 1
-        key_length: 32
-        salt_length: 16
-        memory: 1024
-        parallelism: 8
-
-  ##
-  ## Password Policy Configuration.
-  ##
-  password_policy:
-
-    ## The standard policy allows you to tune individual settings manually.
-    standard:
-      enabled: false
-
-      ## Require a minimum length for passwords.
-      min_length: 8
-
-      ## Require a maximum length for passwords.
-      max_length: 0
-
-      ## Require uppercase characters.
-      require_uppercase: true
-
-      ## Require lowercase characters.
-      require_lowercase: true
-
-      ## Require numeric characters.
-      require_number: true
-
-      ## Require special characters.
-      require_special: true
-
-    ## zxcvbn is a well known and used password strength algorithm. It does not have tunable settings.
-    zxcvbn:
-      enabled: false
-
-      ## Configures the minimum score allowed.
-      min_score: 0
-
-  ##
-  ## Access Control Configuration
-  ##
-  ## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
-  ##
-  ## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
-  ## to anyone. Otherwise restrictions follow the rules defined.
-  ##
-  ## Note: One can use the wildcard * to match any subdomain.
-  ## It must stand at the beginning of the pattern. (example: *.mydomain.com)
-  ##
-  ## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
-  ##
-  ## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
-  ##
-  ## - 'domain' defines which domain or set of domains the rule applies to.
-  ##
-  ## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matching any user if not
-  ##   provided. If provided, the parameter represents either a user or a group. It should be of the form
-  ##   'user:' or 'group:'.
-  ##
-  ## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
-  ##
-  ## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
-  ##   is optional and matches any resource if not provided.
-  ##
-  ## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
-  access_control:
-
-    ## Configure the ACL as a Secret instead of part of the ConfigMap.
-    secret:
-
-      ## Enables the ACL section being generated as a secret.
-      enabled: false
-
-      ## The key in the secret which contains the file to mount.
-      key: configuration.acl.yaml
-
-      ## An existingSecret name, if configured this will force the secret to be mounted using the key above.
-      existingSecret: ""
-
-    ## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
-    ## resource if there is no policy to be applied to the user.
-    default_policy: deny
-
-    networks: []
-    # networks:
-    #   - name: private
-    #     networks:
-    #       - 10.0.0.0/8
-    #       - 172.16.0.0/12
-    #       - 192.168.0.0/16
-    #   - name: vpn
-    #     networks:
-    #       - 10.9.0.0/16
-
-    rules: []
-    # rules:
-    #   - domain_regex: '^.*\.example.com$'
-    #     policy: bypass
-    #   - domain: public.example.com
-    #     policy: bypass
-    #   - domain: "*.example.com"
-    #     policy: bypass
-    #     methods:
-    #       - OPTIONS
-    #   - domain: secure.example.com
-    #     policy: one_factor
-    #     networks:
-    #       - private
-    #       - vpn
-    #       - 192.168.1.0/24
-    #       - 10.0.0.1
-    #   - domain:
-    #       - secure.example.com
-    #       - private.example.com
-    #     policy: two_factor
-    #   - domain: singlefactor.example.com
-    #     policy: one_factor
-    #   - domain: "mx2.mail.example.com"
-    #     subject: "group:admins"
-    #     policy: deny
-    #   - domain: "*.example.com"
-    #     subject:
-    #       - "group:admins"
-    #       - "group:moderators"
-    #     policy: two_factor
-    #   - domain: dev.example.com
-    #     resources:
-    #       - "^/groups/dev/.*$"
-    #     subject: "group:dev"
-    #     policy: two_factor
-    #   - domain: dev.example.com
-    #     resources:
-    #       - "^/users/john/.*$"
-    #     subject:
-    #       - ["group:dev", "user:john"]
-    #       - "group:admins"
-    #     policy: two_factor
-    #   - domain: "{user}.example.com"
-    #     policy: bypass
-
-  ##
-  ## Session Provider Configuration
-  ##
-  ## The session cookies identify the user once logged in.
-  ## The available providers are: `memory`, `redis`. Memory is the provider unless redis is defined.
-  session:
-    ## The name of the session cookie. (default: authelia_session).
-    name: authelia_session
-
-    ## Sets the Cookie SameSite value. Possible options are none, lax, or strict.
-    ## Please read https://www.authelia.com/configuration/session/introduction/#same_site
-    same_site: lax
-
-    ## The time in seconds before the cookie expires and session is reset.
-    expiration: 1h
-
-    ## The inactivity time in seconds before the session is reset.
-    inactivity: 5m
-
-    ## The remember me duration.
-    ## Value is in seconds, or duration notation. Value of 0 disables remember me.
-    ## See: https://www.authelia.com/configuration/prologue/common/#duration-notation-format
-    ## Longer periods are considered less secure because a stolen cookie will last longer giving attackers more time to
-    ## spy or attack. Currently the default is 1M or 1 month.
-    remember_me_duration: 1M
-
-    ##
-    ## Redis Provider
-    ##
-    ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/
-    ##
-    ## The redis connection details
-    redis:
-      enabled: false
-
-
-  ##
-  ## Regulation Configuration
-  ##
-  ## This mechanism prevents attackers from brute forcing the first factor. It bans the user if too many attempts are done
-  ## in a short period of time.
-  regulation:
-    ## The number of failed login attempts before user is banned. Set it to 0 to disable regulation.
-    max_retries: 3
-
-    ## The time range during which the user can attempt login before being banned. The user is banned if the
-    ## authentication failed 'max_retries' times in a 'find_time' seconds window. Find Time accepts duration notation.
-    ## See: https://www.authelia.com/configuration/prologue/common/#duration-notation-format
-    find_time: 2m
-
-    ## The length of time before a banned user can login again. Ban Time accepts duration notation.
-    ## See: https://www.authelia.com/configuration/prologue/common/#duration-notation-format
-    ban_time: 5m
-
-
-  ##
-  ## Storage Provider Configuration
-  ##
-  ## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
-  storage:
-    ##
-    ## Local (Storage Provider)
-    ##
-    ## This stores the data in a SQLite3 Database.
-    ## This is only recommended for lightweight non-stateful installations.
-    ##
-    ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/
-    ##
-    local:
-      enabled: true
-      path: /config/db.sqlite3
-
-    ##
-    ## MySQL (Storage Provider)
-    ##
-    ## Also supports MariaDB
-    ##
-    mysql:
-      enabled: false
-    ##
-    ## PostgreSQL (Storage Provider)
-    ##
-    postgres:
-      enabled: false
-
-  ##
-  ## Notification Provider
-  ##
-  ##
-  ## Notifications are sent to users when they require a password reset, a u2f registration or a TOTP registration.
-  ## The available providers are: filesystem, smtp. You must use one and only one of these providers.
-  notifier:
-    ## You can disable the notifier startup check by setting this to true.
-    disable_startup_check: false
-
-    ##
-    ## File System (Notification Provider)
-    ##
-    ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/
-    ##
-    filesystem:
-      enabled: true
-      filename: /config/notification.txt
-
-    ##
-    ## SMTP (Notification Provider)
-    ##
-    ## Use a SMTP server for sending notifications. Authelia uses the PLAIN or LOGIN methods to authenticate.
-    ## [Security] By default Authelia will:
-    ##   - force all SMTP connections over TLS including unauthenticated connections
-    ##     - use the disable_require_tls boolean value to disable this requirement
-    ##       (only works for unauthenticated connections)
-    ##   - validate the SMTP server x509 certificate during the TLS handshake against the hosts trusted certificates
-    ##     (configure in tls section)
-    smtp:
-      enabled: false
-
-  identity_providers:
-    oidc:
-      ## Enables this in the config map. Currently in beta stage.
-      ## See https://www.authelia.com/r/openid-connect/
-      enabled: false
-
-      access_token_lifespan: 1h
-      authorize_code_lifespan: 1m
-      id_token_lifespan: 1h
-      refresh_token_lifespan: 90m
-
-      ## Adjusts the PKCE enforcement. Options are always, public_clients_only, never.
-      ## For security reasons it's recommended this option is public_clients_only or always, however always is not
-      ## compatible with all clients.
-      enforce_pkce: public_clients_only
-
-      ## Enables the plain PKCE challenge which is not recommended for security reasons but may be necessary for some clients.
-      enable_pkce_plain_challenge: false
-
-      ## SECURITY NOTICE: It's not recommended changing this option, and highly discouraged to have it below 8 for
-      ## security reasons.
-      minimum_parameter_entropy: 8
-
-      ## Enables additional debug messages.
-      enable_client_debug_messages: false
-
-      ## Cross-Origin Resource Sharing (CORS) settings.
-      cors:
-        ## List of endpoints in addition to the metadata endpoints to permit cross-origin requests on.
-        # endpoints:
-        #   - authorization
-        #   - token
-        #   - revocation
-        #   - introspection
-        #   - userinfo
-        endpoints: []
-
-        ## List of allowed origins.
-        ## Any origin with https is permitted unless this option is configured or the
-        ## allowed_origins_from_client_redirect_uris option is enabled.
-        # allowed_origins:
-        #   - https://example.com
-        allowed_origins: []
-
-        ## Automatically adds the origin portion of all redirect URI's on all clients to the list of allowed_origins,
-        ## provided they have the scheme http or https and do not have the hostname of localhost.
-        allowed_origins_from_client_redirect_uris: true
-
-      clients: []
-      # clients:
-      #   -
-          ## The ID is the OpenID Connect ClientID which is used to link an application to a configuration.
-      #     id: myapp
-
-          ## The description to show to users when they end up on the consent screen. Defaults to the ID above.
-      #     description: My Application
-
-          ## The client secret is a shared secret between Authelia and the consumer of this client.
-      #     secret: apple123
-
-          ## Sector Identifiers are occasionally used to generate pairwise subject identifiers. In most cases this is not
-          ## necessary. Read the documentation for more information.
-          ## The subject identifier must be the host component of a URL, which is a domain name with an optional port.
-      #     sector_identifier: example.com
-
-          ## Sets the client to public. This should typically not be set, please see the documentation for usage.
-      #     public: false
-
-          ## The policy to require for this client; one_factor or two_factor.
-      #     authorization_policy: two_factor
-
-          ## By default users cannot remember pre-configured consents. Setting this value to a period of time using a
-          ## duration notation will enable users to remember consent for this client. The time configured is the amount
-          ## of time the pre-configured consent is valid for granting new authorizations to the user.
-      #     pre_configured_consent_duration: 30d
-
-          ## Audience this client is allowed to request.
-      #     audience: []
-
-          ## Scopes this client is allowed to request.
-      #     scopes:
-      #       - openid
-      #       - profile
-      #       - email
-      #       - groups
-
-          ## Redirect URI's specifies a list of valid case-sensitive callbacks for this client.
-      #     redirect_uris:
-      #       - https://oidc.example.com/oauth2/callback
-
-          ## Grant Types configures which grants this client can obtain.
-          ## It's not recommended to configure this unless you know what you're doing.
-      #     grant_types:
-      #       - refresh_token
-      #       - authorization_code
-
-          ## Response Types configures which responses this client can be sent.
-          ## It's not recommended to configure this unless you know what you're doing.
-      #     response_types:
-      #       - code
-
-          ## Response Modes configures which response modes this client supports.
-          ## It's not recommended to configure this unless you know what you're doing.
-      #     response_modes:
-      #       - form_post
-      #       - query
-      #       - fragment
-
-          ## The algorithm used to sign userinfo endpoint responses for this client, either none or RS256.
-      #     userinfo_signing_algorithm: none
-
-##
-## Authelia Secret Generator.
-##
-## If both the values and existingSecret are not defined, this chart randomly generates a new secret on each
-## install. It is recommended that you use something like sealed-secrets (https://github.com/bitnami-labs/sealed-secrets)
-## and use the existingSecrets. All secrets can be stored in a single k8s secret if desired using the key option.
-##
-secret:
-  existingSecret: ""
-  # existingSecret: authelia
-
-  annotations: {}
-  # annotations:
-  #   myAnnotation: myValue
-
-  labels: {}
-  # labels:
-  #   myLabel: myValue
-
-  mountPath: /secrets
-
-  excludeVolumeAndMounts: false
-
-  ## Secrets.
-  jwt:
-    key: JWT_TOKEN
-    value: ""
-    filename: JWT_TOKEN
-  ldap:
-    key: LDAP_PASSWORD
-    value: ""
-    filename: LDAP_PASSWORD
-  storage:
-    key: STORAGE_PASSWORD
-    value: ""
-    filename: STORAGE_PASSWORD
-  storageEncryptionKey:
-    key: STORAGE_ENCRYPTION_KEY
-    value: ""
-    filename: STORAGE_ENCRYPTION_KEY
-  session:
-    key: SESSION_ENCRYPTION_KEY
-    value: ""
-    filename: SESSION_ENCRYPTION_KEY
-  duo:
-    key: DUO_API_KEY
-    value: ""
-    filename: DUO_API_KEY
-  redis:
-    key: REDIS_PASSWORD
-    value: ""
-    filename: REDIS_PASSWORD
-  redisSentinel:
-    key: REDIS_SENTINEL_PASSWORD
-    value: ""
-    filename: REDIS_SENTINEL_PASSWORD
-  smtp:
-    key: SMTP_PASSWORD
-    value: ""
-    filename: SMTP_PASSWORD
-  oidcPrivateKey:
-    key: OIDC_PRIVATE_KEY
-    value: ""
-    filename: OIDC_PRIVATE_KEY
-  oidcHMACSecret:
-    key: OIDC_HMAC_SECRET
-    value: ""
-    filename: OIDC_HMAC_SECRET
-
-  ## HashiCorp Vault Injector configuration.
-  vaultInjector:
-
-    ## Enable the vault injector annotations. This will disable secret injection via other means.
-    ## To see the annotations and what they do see: https://www.vaultproject.io/docs/platform/k8s/injector/annotations
-    ## Annotations with a blank string do not get configured at all.
-    ## Additional annotations can be configured via the secret.annotations: {} above.
-    ## Secrets are by default rendered in the /secrets directory. Changing this can be done via editing the
-    ## secret.mountPath value. You can alter the filenames with the secret..filename values.
-    ## Secrets are loaded from vault path specified below with secrets..path values. Its format should be
-    ## :.
-    ## Secrets are by default rendered by template suitable for vault KV v1 or database secrets engines. If other used,
-    ## it can be overriden per each secret by specifying secrets..templateValue. For example for KV v2
-    ## secrets engine would be '{{ with secret "" }}{{ .Data.data. }}{{ end }}'.
-    enabled: false
-
-    ## The vault role to assign via annotations.
-    ## Annotation: vault.hashicorp.com/role
-    role: authelia
-
-    agent:
-      ## Annotation: vault.hashicorp.com/agent-inject-status
-      status: update
-
-      ## Annotation: vault.hashicorp.com/agent-configmap
-      configMap: ""
-
-      ## Annotation: vault.hashicorp.com/agent-image
-      image: ""
-
-      ## Annotation: vault.hashicorp.com/agent-init-first
-      initFirst: "false"
-
-      ## Annotation: vault.hashicorp.com/agent-inject-command
-      command: "sh -c 'kill HUP $(pidof authelia)'"
-
-      ## Annotation: vault.hashicorp.com/agent-run-as-same-user
-      runAsSameUser: "true"
-
-    secrets:
-      jwt:
-        ## Vault Path to the Authelia JWT secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-jwt
-        path: secrets/authelia/jwt:token
-
-        ## Vault template specific to JWT.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-jwt
-        templateValue: ""
-
-        ## Vault after render command specific to JWT.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-jwt
-        command: ""
-      ldap:
-        ## Vault Path to the Authelia LDAP secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-ldap
-        path: secrets/authelia/ldap:password
-
-        ## Vault template specific to LDAP.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-ldap
-        templateValue: ""
-
-        ## Vault after render command specific to LDAP.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-ldap
-        command: ""
-      storage:
-        ## Vault Path to the Authelia storage password secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-storage
-        path: secrets/authelia/storage:password
-
-        ## Vault template specific to the storage password.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-storage
-        templateValue: ""
-
-        ## Vault after render command specific to the storage password.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-storage
-        command: ""
-      storageEncryptionKey:
-        ## Vault Path to the Authelia storage encryption key secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-storage-encryption-key
-        path: secrets/authelia/storage:encryption_key
-
-        ## Vault template specific to the storage encryption key.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-storage-encryption-key
-        templateValue: ""
-
-        ## Vault after render command specific to the storage encryption key.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-storage-encryption-key
-        command: ""
-      session:
-        ## Vault Path to the Authelia session secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-session
-        path: secrets/authelia/session:encryption_key
-
-        ## Vault template specific to session.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-session
-        templateValue: ""
-
-        ## Vault after render command specific to session.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-session
-        command: ""
-      duo:
-        ## Vault Path to the Authelia duo secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-duo
-        path: secrets/authelia/duo:api_key
-
-        ## Vault template specific to duo.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-duo
-        templateValue: ""
-
-        ## Vault after render command specific to duo.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-duo
-        command: ""
-      redis:
-        ## Vault Path to the Authelia redis secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-redis
-        path: secrets/authelia/redis:password
-
-        ## Vault template specific to redis.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-redis
-        templateValue: ""
-
-        ## Vault after render command specific to redis.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-redis
-        command: ""
-      redisSentinel:
-        ## Vault Path to the Authelia redis sentinel secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-redis-sentinel
-        path: secrets/authelia/redis_sentinel:password
-
-        ## Vault template specific to redis sentinel.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-redis-sentinel
-        templateValue: ""
-
-        ## Vault after render command specific to redis sentinel.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-redis-sentinel
-        command: ""
-      smtp:
-        ## Vault Path to the Authelia SMTP secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-smtp
-        path: secrets/authelia/smtp:password
-
-        ## Vault template specific to SMTP.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-smtp
-        templateValue: ""
-
-        ## Vault after render command specific to SMTP.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-smtp
-        command: ""
-      oidcPrivateKey:
-        ## Vault Path to the Authelia OIDC private key secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-oidc-private-key
-        path: secrets/authelia/oidc:private_key
-
-        ## Vault template specific to OIDC private key.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-oidc-private-key
-        templateValue: ""
-
-        ## Vault after render command specific to OIDC private key.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-oidc-private-key
-        command: ""
-      oidcHMACSecret:
-        ## Vault Path to the Authelia OIDC HMAC secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-secret-oidc-hmac-secret
-        path: secrets/authelia/oidc:hmac_secret
-
-        ## Vault template specific to OIDC HMAC secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-template-oidc-hmac-secret
-        templateValue: ""
-
-        ## Vault after render command specific to OIDC HMAC secret.
-        ## Annotation: vault.hashicorp.com/agent-inject-command-oidc-hmac-secret
-        command: ""
-
-certificates:
-  existingSecret: ""
-  # existingSecret: authelia
-
-  annotations: {}
-  # annotations:
-  #   myAnnotation: myValue
-
-  labels: {}
-  # labels:
-  #   myLabel: myValue
-
-  values: []
-  # values:
-  #   - name: Example_Com_Root_Certificate_Authority_B64.pem
-  #     secretValue: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURYekNDQWtlZ0F3SUJBZ0lMQkFBQUFBQUJJVmhUQ0tJd0RRWUpLb1pJaHZjTkFRRUxCUUF3VERFZ01CNEcKQTFVRUN4TVhSMnh2WW1Gc1UybG5iaUJTYjI5MElFTkJJQzBnVWpNeEV6QVJCZ05WQkFvVENrZHNiMkpoYkZOcApaMjR4RXpBUkJnTlZCQU1UQ2tkc2IySmhiRk5wWjI0d0hoY05NRGt3TXpFNE1UQXdNREF3V2hjTk1qa3dNekU0Ck1UQXdNREF3V2pCTU1TQXdIZ1lEVlFRTEV4ZEhiRzlpWVd4VGFXZHVJRkp2YjNRZ1EwRWdMU0JTTXpFVE1CRUcKQTFVRUNoTUtSMnh2WW1Gc1UybG5iakVUTUJFR0ExVUVBeE1LUjJ4dlltRnNVMmxuYmpDQ0FTSXdEUVlKS29aSQpodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU13bGRwQjVCbmdpRnZYQWc3YUV5aWllL1FWMkVjV3RpSEw4ClJnSkR4N0tLblFSZkpNc3VTK0ZnZ2tiaFVxc01nVWR3Yk4xazBldjFMS01QZ2owTUs2NlgxN1lVaGhCNXV6c1QKZ0hlTUNPRkowbXBpTHg5ZStwWm8zNGtubFRpZkJ0Yyt5Y3NtV1ExejNyREk2U1lPZ3hYRzcxdUwwZ1JneWttbQpLUFpwTy9iTHlDaVI1WjJLWVZjM3JIUVUzSFRnT3U1eUx5NmMrOUM3di9VOUFPRUdNK2lDSzY1VHBqb1djNHpkClFRNGdPc0MwcDZIcHNrK1FMakpnNlZmTHVRU1NhR2psT0NaZ2RiS2ZkLytSRk8rdUlFbjhyVUFWU05FQ01XRVoKWHJpWDc2MTN0MlNhZXI5ZndSUHZtMkw3RFd6Z1ZHa1dxUVBhYnVtRGszRjJ4bW1GZ2hjQ0F3RUFBYU5DTUVBdwpEZ1lEVlIwUEFRSC9CQVFEQWdFR01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZJL3dTMytvCkxrVWtyazFRK21PYWk5N2kzUnU4TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCTFFOdkFVS3IreUF6djk1WlUKUlVtN2xnQUpRYXl6RTRhR0tBY3p5bXZtZExtNkFDMnVwQXJUOWZIeEQ0cS9jMmRLZzhkRWUzamdyMjVzYndNcApqak01UmNPTzVMbFhiS3I4RXBic1U4WXQ1Q1JzdVpSais5eFRhR2RXUG9PNHp6VWh3OGxvL3M3YXdsT3F6SkNLCjZmQmRSb3lWM1hwWUtCb3ZIZDdOQURkQmorMUViZGRUS0pkKzgyY0VIaFhYaXBhMDA5NU1KNlJNRzNOemR2UVgKbWNJZmVnN2pMUWl0Q2h3cy96eXJWUTRQa1g0MjY4TlhTYjdoTGkxOFlJdkRRVkVUSTUzTzl6SnJsQUdvbWVjcwpNeDg2T3lYU2hrRE9PeXlHZU1saExYUzY3dHRWYjkrRTdnVUpUYjBvMkhMTzAySlFaUjdya3BlRE1kbXp0Y3BICldEOWYKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
-  #   - name: Example_Com_Root_Certificate_Authority.pem
-  #     value: |
-  #       -----BEGIN CERTIFICATE-----
-  #       MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
-  #       A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
-  #       Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
-  #       MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
-  #       A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
-  #       hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
-  #       RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
-  #       gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
-  #       KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
-  #       QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
-  #       XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
-  #       DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
-  #       LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
-  #       RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
-  #       jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
-  #       6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
-  #       mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
-  #       Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
-  #       WD9f
-  #       -----END CERTIFICATE-----
-
-##
-## Authelia Persistence Configuration.
-##
-## Useful in scenarios where you need persistent storage.
-## Auth Provider Use Case: file; we recommend you use the ldap provider instead.
-## Storage Provider Use Case: local; we recommend you use the mysql/mariadb or postgres provider instead.
-## Configuration Use Case: when you want to manually configure the configuration entirely (set configMap.enabled = false).
-##
-persistence:
-  enabled: true
-
-  annotations: {}
-  # annotations:
-  readOnly: false
-
-  existingClaim: "authelia-config-nfs"
-  # existingClaim: my-claim-name
-
-  storageClass: ""
-  # storageClass: "my-storage-class"
-
-  accessModes:
-    - ReadWriteOnce
-
diff --git a/unused/authentik/ingress.yml b/unused/authentik/ingress.yml
deleted file mode 100644
index decab01..0000000
--- a/unused/authentik/ingress.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: authentik-ingress
-  namespace: authentik
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`authentik.kluster.moll.re`)
-      kind: Rule
-      middlewares:
-        - name: authentik-websocket
-      services:
-        - name: authentik
-          port: 80
-  tls:
-    certResolver: default-tls
-
-
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: Middleware
-metadata:
-  name: authentik-websocket
-  namespace: authentik
-spec:
-  headers:
-    customRequestHeaders:
-      X-Forwarded-Proto: "https"
-      Upgrade: "websocket"
-
-
diff --git a/unused/authentik/pvc.yaml b/unused/authentik/pvc.yaml
deleted file mode 100644
index e867991..0000000
--- a/unused/authentik/pvc.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: authentik
-  name: authentik-postgres-nfs
-  labels:
-    directory: authentik
-spec:
-  storageClassName: slow
-  capacity:
-    storage: "5Gi"
-  volumeMode: Filesystem
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /export/kluster/authentik
-    server: 192.168.1.157
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: authentik
-  name: authentik-postgres-nfs
-spec:
-  storageClassName: slow
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: "5Gi"
-  selector:
-    matchLabels:
-      directory: authentik
-
-
-
diff --git a/unused/authentik/values.yaml b/unused/authentik/values.yaml
deleted file mode 100644
index 90cf0f8..0000000
--- a/unused/authentik/values.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-# -- Server replicas
-replicas: 1
-# -- Custom priority class for different treatment by the scheduler
-priorityClassName:
-# -- server securityContext
-securityContext: {}
-
-worker:
-  # -- worker replicas
-  replicas: 1
-  # -- Custom priority class for different treatment by the scheduler
-  priorityClassName:
-  # -- worker securityContext
-  securityContext: {}
-
-image:
-  repository: ghcr.io/goauthentik/server
-  tag: 2023.4.1
-  pullPolicy: IfNotPresent
-  pullSecrets: []
-
-# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
-initContainers: {}
-
-# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
-additionalContainers: {}
-
-authentik:
-  # -- Log level for server and worker
-  log_level: info
-  # -- Secret key used for cookie singing and unique user IDs,
-  # don't change this after the first install
-  secret_key: "K9F5uNx1gzsk3q5tnjwFabBYgjBJcAv0qM135QRgzL81hRg4"
-  # -- Path for the geoip database. If the file doesn't exist, GeoIP features are disabled.
-  geoip: /geoip/GeoLite2-City.mmdb
-  # -- Mode for the avatars. Defaults to gravatar. Possible options 'gravatar' and 'none'
-  avatars: gravatar
-
-  outposts:
-    # -- Template used for managed outposts. The following placeholders can be used
-    # %(type)s - the type of the outpost
-    # %(version)s - version of your authentik install
-    # %(build_hash)s - only for beta versions, the build hash of the image
-    container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s
-  error_reporting:
-    # -- This sends anonymous usage-data, stack traces on errors and
-    # performance data to sentry.beryju.org, and is fully opt-in
-    enabled: false
-    # -- This is a string that is sent to sentry with your error reports
-    environment: "k8s"
-    # -- Send PII (Personally identifiable information) data to sentry
-    send_pii: false
-
-
-  postgresql:
-    # -- set the postgresql hostname to talk to
-    # if unset and .Values.postgresql.enabled == true, will generate the default
-    # @default -- `{{ .Release.Name }}-postgresql`
-    host: 'postgres-postgresql.postgres'
-    # -- postgresql Database name
-    # @default -- `authentik`
-    name: "authentik"
-    # -- postgresql Username
-    # @default -- `authentik`
-    user: "authentik"
-    password: "authentik"
-    port: 5432
-
-
-  redis:
-    # -- set the redis hostname to talk to
-    # @default -- `{{ .Release.Name }}-redis-master`
-    host: '{{ .Release.Name }}-redis-master'
-    password: ""
-
-# -- see configuration options at https://goauthentik.io/docs/installation/configuration/
-env: {}
-# AUTHENTIK_VAR_NAME: VALUE
-
-envFrom: []
-# - configMapRef:
-#     name: special-config
-
-envValueFrom: {}
-# AUTHENTIK_VAR_NAME:
-#   secretKeyRef:
-#     key: password
-#     name: my-secret
-
-service:
-  # -- Service that is created to access authentik
-  enabled: true
-  type: ClusterIP
-  port: 80
-  name: http
-  protocol: TCP
-  labels: {}
-  annotations: {}
-
-volumes: []
-
-volumeMounts: []
-
-# -- affinity applied to the deployments
-affinity: {}
-
-# -- nodeSelector applied to the deployments
-
-resources:
-  server: {}
-  worker: {}
-
-# WARNING! When initially deploying, authentik has to do a few DB migrations. This may cause it to die from probe
You can disable this during deployment if this is not desired.
-livenessProbe:
-  # -- enables or disables the livenessProbe
-  enabled: true
-  httpGet:
-    # -- liveness probe url path
-    path: /-/health/live/
-    port: http
-  initialDelaySeconds: 50
-  periodSeconds: 10
-
-readinessProbe:
-  enabled: true
-  httpGet:
-    path: /-/health/ready/
-    port: http
-  initialDelaySeconds: 50
-  periodSeconds: 10
-
-serviceAccount:
-  # -- Service account is needed for managed outposts
-  create: true
-
-prometheus:
-  serviceMonitor:
-    create: false
-    interval: 30s
-    scrapeTimeout: 3s
-  rules:
-    create: false
-
-geoip:
-  # -- optional GeoIP, deploys a cronjob to download the maxmind database
-  enabled: false
-  # -- sign up under https://www.maxmind.com/en/geolite2/signup
-  accountId: ""
-  # -- sign up under https://www.maxmind.com/en/geolite2/signup
-  licenseKey: ""
-  editionIds: "GeoLite2-City"
-  image: maxmindinc/geoipupdate:v4.8
-  # -- number of hours between update runs
-  updateInterval: 8
-
-postgresql:
-  # -- enable the bundled bitnami postgresql chart
-  enabled: false
-  postgresqlUsername: "authentik"
-  postgresqlPassword: "authentik"
-  postgresqlDatabase: "authentik"
-  # persistence:
-  #   enabled: true
-  #   existingClaim: authentik-postgres-nfs
-redis:
-  # -- enable the bundled bitnami redis chart
-  enabled: true
-  architecture: standalone
-  auth:
-    enabled: false
-
diff --git a/unused/backup/backup.pvc.yaml b/unused/backup/backup.pvc.yaml
deleted file mode 100644
index d0f70b4..0000000
--- a/unused/backup/backup.pvc.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: backup
-  name: backup-nfs-access
-  labels:
-    directory: backup
-spec:
-  storageClassName: fast
-  volumeMode: Filesystem
-  accessModes:
-    - ReadOnlyMany
-  capacity:
-    storage: "5M"
-
-  nfs:
-    path: /export/kluster
-    server: 192.168.1.157
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: backup
-  name: backup-nfs-access
-spec:
-  resources:
-    requests:
-      storage: "5M"
-  storageClassName: fast
-  accessModes:
-    - ReadOnlyMany
-  selector:
-    matchLabels:
-      directory: backup
diff --git a/unused/backup/base/cronjob.yaml b/unused/backup/base/cronjob.yaml
deleted file mode 100644
index 998f246..0000000
--- a/unused/backup/base/cronjob.yaml
+++ /dev/null
@@ -1,64 +0,0 @@
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: restic-backblaze
-
-spec:
-  schedule: "0 2 * * *"
-  # at 2:00 every day
-  successfulJobsHistoryLimit: 2
-  failedJobsHistoryLimit: 2
-
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          # nodeSelector:
-          #   kubernetes.io/arch: arm64
-          # TODO no arm64 nodes anymore
-          restartPolicy: Never
-          hostname: restic-k3s-pod
-          # used by restic to identify the host
-          containers:
-          - name: restic-base-container
-            image: restic/restic:latest
-            command:
-              - /bin/sh
-              - -c
-            # >- strips newlines
-            # RESTIC_ARGS Can be for instance: --verbose --dry-run
-            args: []
-
-            volumeMounts:
-              - mountPath: /data
-                name: backup-nfs-access
-              - mountPath: /credentials
-                name: restic-credentials
-
-            env:
-              - name: RESTIC_REPOSITORY
-                valueFrom:
-                  secretKeyRef:
-                    name: restic-credentials
-                    key: RESTIC_REPOSITORY
-              - name: B2_ACCOUNT_ID
-                valueFrom:
-                  secretKeyRef:
-                    name: restic-credentials
-                    key: B2_ACCOUNT_ID
-              - name: B2_ACCOUNT_KEY
-                valueFrom:
-                  secretKeyRef:
-                    name: restic-credentials
-                    key: B2_ACCOUNT_KEY
-              - name: RESTIC_PASSWORD_FILE
-                value: /credentials/restic-password
-
-          volumes:
-            - name: backup-nfs-access
-              persistentVolumeClaim:
-                claimName: backup-nfs-access
-            - name: restic-credentials
-              secret:
-                secretName: restic-credentials
-                optional: false
\ No newline at end of file
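Note: the base CronJob above deliberately ships with empty `args`; each overlay further down supplies the actual restic command through a strategic-merge patch, which kustomize applies by matching containers by name. For orientation, `k kustomize backup/overlays/backup` would render roughly the following (a sketch of the merged result under standard strategic-merge semantics, trimmed to the fields the overlay changes; not a file from this repo):

```yaml
# Sketch of the rendered overlay output, assuming strategic-merge behaviour.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: restic-backblaze-backup   # base name plus the overlay's nameSuffix
  namespace: backup
spec:
  schedule: "0 2 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: restic-base-container   # matched by name; args replaced
              args:
                - >-
                  restic backup -r $(RESTIC_REPOSITORY) --verbose=2 /data --exclude=s3/
```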
diff --git a/unused/backup/base/kustomization.yaml b/unused/backup/base/kustomization.yaml
deleted file mode 100644
index b7c59b8..0000000
--- a/unused/backup/base/kustomization.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-- ./cronjob.yaml
-- ./restic-credentials.secret.yaml
\ No newline at end of file
diff --git a/unused/backup/overlays/applying.md b/unused/backup/overlays/applying.md
deleted file mode 100644
index e5c20c3..0000000
--- a/unused/backup/overlays/applying.md
+++ /dev/null
@@ -1,8 +0,0 @@
-```
-k kustomize backup/overlays/backup | k apply -f -
-> secret/restic-credentials-backup created
-> cronjob.batch/restic-backblaze-backup created
-k kustomize backup/overlays/prune | k apply -f -
-> secret/restic-credentials-prune created
-> cronjob.batch/restic-backblaze-prune created
-```
\ No newline at end of file
diff --git a/unused/backup/overlays/backup/kustomization.yaml b/unused/backup/overlays/backup/kustomization.yaml
deleted file mode 100644
index 903a955..0000000
--- a/unused/backup/overlays/backup/kustomization.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-namespace: backup
-nameSuffix: -backup
-resources:
-  - ../../base
-  # - ./restic-commands.yaml
-
-
-# patch the cronjob args field:
-patches:
-  - path: ./restic-commands.yaml
-    target:
-      kind: CronJob
-  
\ No newline at end of file
diff --git a/unused/backup/overlays/backup/restic-commands.yaml b/unused/backup/overlays/backup/restic-commands.yaml
deleted file mode 100644
index 6b895ed..0000000
--- a/unused/backup/overlays/backup/restic-commands.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: backup-patch
-spec:
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: restic-base-container
-            args:
-              # >- strips newlines
-              # RESTIC_ARGS Can be for instance: --verbose --dry-run
-              # RESTIC_REPOSITORY is set in the secret
-              - >-
-                restic backup
-                -r $(RESTIC_REPOSITORY)
-                --verbose=2
-                /data
-                --exclude=s3/
-              # &&
-              # restic
-              # -r $(RESTIC_REPOSITORY)
-              # list snapshots
-          # Add command to copy existing backups to here!
\ No newline at end of file
diff --git a/unused/backup/overlays/prune/kustomization.yaml b/unused/backup/overlays/prune/kustomization.yaml
deleted file mode 100644
index 1713faf..0000000
--- a/unused/backup/overlays/prune/kustomization.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-namespace: backup
-nameSuffix: -prune
-resources:
-  - ../../base
-  # - ./restic-commands.yaml
-
-
-# patch the cronjob args field:
-patches:
-  - path: ./restic-commands.yaml
-    target:
-      kind: CronJob
diff --git a/unused/backup/overlays/prune/restic-commands.yaml b/unused/backup/overlays/prune/restic-commands.yaml
deleted file mode 100644
index 8a085bd..0000000
--- a/unused/backup/overlays/prune/restic-commands.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: prune-patch
-spec:
-  schedule: "0 0 1/15 * *"
-  # at midnight, every 15 days starting on the 1st of the month (days 1, 16 and 31)
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: restic-base-container
-            args:
-              # >- strips newlines
-              # RESTIC_ARGS Can be for instance: --verbose --dry-run
-              # RESTIC_REPOSITORY is set in the secret
-              - >-
-                restic forget
-                -r $(RESTIC_REPOSITORY)
-                --verbose=2
-                --keep-daily 7 --keep-weekly 5
-                --prune
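Both overlay commands reference `$(RESTIC_REPOSITORY)` inside `args`. That substitution is performed by Kubernetes itself, from the container's `env`, before any shell runs. A minimal self-contained illustration of the same mechanism (hypothetical pod and values, not part of this repo):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: var-expansion-demo        # hypothetical name, for illustration only
spec:
  restartPolicy: Never
  containers:
    - name: demo
      image: busybox:1.36
      env:
        - name: RESTIC_REPOSITORY
          value: "rest:http://example:8000/demo"   # assumed; the real jobs read it from a Secret
      # The kubelet expands $(RESTIC_REPOSITORY) from `env` before starting
      # the container; undefined $(VARS) are passed through literally.
      command: ["/bin/sh", "-c"]
      args: ["echo repository is $(RESTIC_REPOSITORY)"]
```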
diff --git a/unused/cert-manager/cluster-issuer.yaml b/unused/cert-manager/cluster-issuer.yaml
deleted file mode 100644
index e472462..0000000
--- a/unused/cert-manager/cluster-issuer.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# apiVersion: v1
-# kind: Secret
-# metadata:
-#   name: cloudflare-api-token-secret
-#   namespace: cert-manager
-# type: Opaque
-# stringData:
-#   api-token: 
-
-
-# ---
-
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: cloudflare-letsencrypt-staging
-spec:
-  acme:
-    email: me@moll.re
-    server: https://acme-staging-v02.api.letsencrypt.org/directory
-    privateKeySecretRef:
-      # Secret resource that will be used to store the account's private key.
-      name: cloudflare-letsencrypt-issuer-account-key
-    solvers:
-      - dns01:
-          cloudflare:
-            email: mollator2@gmail.com
-            apiTokenSecretRef:
-              # Name of the secret created on the other resource
-              name: cloudflare-api-token-secret
-              key: api-token
-
----
-
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: cloudflare-letsencrypt-prod
-spec:
-  acme:
-    email: me@moll.re
-    server: https://acme-v02.api.letsencrypt.org/directory
-    privateKeySecretRef:
-      # Secret resource that will be used to store the account's private key.
-      name: cloudflare-letsencrypt-issuer-account-key
-    solvers:
-      - dns01:
-          cloudflare:
-            email: mollator2@gmail.com
-            apiTokenSecretRef:
-              # Name of the secret created on the other resource
-              name: cloudflare-api-token-secret
-              key: api-token
-
diff --git a/unused/cert-manager/values.yaml b/unused/cert-manager/values.yaml
deleted file mode 100644
index 06e2425..0000000
--- a/unused/cert-manager/values.yaml
+++ /dev/null
@@ -1,494 +0,0 @@
-# Default values for cert-manager.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-global:
-  ## Reference to one or more secrets to be used when pulling images
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-  ##
-  imagePullSecrets: []
-  # - name: "image-pull-secret"
-
-  # Optional priority class to be used for the cert-manager pods
-  priorityClassName: ""
-  rbac:
-    create: true
-
-  podSecurityPolicy:
-    enabled: false
-    useAppArmor: true
-
-  # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
-  logLevel: 2
-
-  leaderElection:
-    # Override the namespace used to store the ConfigMap for leader election
-    namespace: "kube-system"
-
-    # The duration that non-leader candidates will wait after observing a
-    # leadership renewal until attempting to acquire leadership of a led but
-    # unrenewed leader slot. This is effectively the maximum duration that a
-    # leader can be stopped before it is replaced by another candidate.
-    # leaseDuration: 60s
-
-    # The interval between attempts by the acting master to renew a leadership
-    # slot before it stops leading. This must be less than or equal to the
-    # lease duration.
-    # renewDeadline: 40s
-
-    # The duration the clients should wait between attempting acquisition and
-    # renewal of a leadership. 
- # retryPeriod: 15s - -installCRDs: false - -replicaCount: 1 - -strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - -# Comma separated list of feature gates that should be enabled on the -# controller pod. -featureGates: "" - -image: - repository: quay.io/jetstack/cert-manager-controller - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-controller - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - pullPolicy: IfNotPresent - -# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer -# resources. By default, the same namespace as cert-manager is deployed within is -# used. This namespace will not be automatically created by the Helm chart. -clusterResourceNamespace: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - # Optional additional annotations to add to the controller's ServiceAccount - # annotations: {} - # Automount API credentials for a Service Account. - automountServiceAccountToken: true - -# Optional additional arguments -extraArgs: [] - # Use this flag to set a namespace that cert-manager will use to store - # supporting resources required for each ClusterIssuer (default is kube-system) - # - --cluster-resource-namespace=kube-system - # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted - # - --enable-certificate-owner-ref=true - # Use this flag to enabled or disable arbitrary controllers, for example, disable the CertificiateRequests approver - # - --controllers=*,-certificaterequests-approver - -extraEnv: [] -# - name: SOME_VAR -# value: 'some value' - -resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - -# Pod Security Context -# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -securityContext: - runAsNonRoot: true -# legacy securityContext parameter format: if enabled is set to true, only fsGroup and runAsUser are supported -# securityContext: -# enabled: false -# fsGroup: 1001 -# runAsUser: 1001 -# to support additional securityContext parameters, omit the `enabled` parameter and simply specify the parameters -# you want to set, e.g. -# securityContext: -# fsGroup: 1000 -# runAsUser: 1000 -# runAsNonRoot: true - -# Container Security Context to be set on the controller component container -# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -containerSecurityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - - -volumes: [] - -volumeMounts: [] - -# Optional additional annotations to add to the controller Deployment -# deploymentAnnotations: {} - -# Optional additional annotations to add to the controller Pods -# podAnnotations: {} - -podLabels: {} - -# Optional additional labels to add to the controller Service -# serviceLabels: {} - -# Optional additional annotations to add to the controller service -# serviceAnnotations: {} - -# Optional DNS settings, useful if you have a public and private DNS zone for -# the same domain on Route 53. 
What follows is an example of ensuring -# cert-manager can access an ingress or DNS TXT records at all times. -# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for -# the cluster to work. -# podDnsPolicy: "None" -# podDnsConfig: -# nameservers: -# - "1.1.1.1" -# - "8.8.8.8" - -nodeSelector: {} - -ingressShim: {} - # defaultIssuerName: "" - # defaultIssuerKind: "" - # defaultIssuerGroup: "" - -prometheus: - enabled: true - servicemonitor: - enabled: false - prometheusInstance: default - targetPort: 9402 - path: /metrics - interval: 60s - scrapeTimeout: 30s - labels: {} - -# Use these variables to configure the HTTP_PROXY environment variables -# http_proxy: "http://proxy:8080" -# https_proxy: "https://proxy:8080" -# no_proxy: 127.0.0.1,localhost - -# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core -# for example: -# affinity: -# nodeAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# nodeSelectorTerms: -# - matchExpressions: -# - key: foo.bar.com/role -# operator: In -# values: -# - master -affinity: {} - -# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core -# for example: -# tolerations: -# - key: foo.bar.com/role -# operator: Equal -# value: master -# effect: NoSchedule -tolerations: [] - -webhook: - replicaCount: 1 - timeoutSeconds: 10 - - strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - # Pod Security Context to be set on the webhook component Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - securityContext: - runAsNonRoot: true - - # Container Security Context to be set on the webhook component container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - containerSecurityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - - # Optional additional annotations to add to the webhook Deployment - # deploymentAnnotations: {} - - # Optional additional annotations to add to the webhook Pods - # podAnnotations: {} - - # Optional additional annotations to add to the webhook MutatingWebhookConfiguration - # mutatingWebhookConfigurationAnnotations: {} - - # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration - # validatingWebhookConfigurationAnnotations: {} - - # Optional additional annotations to add to the webhook service - # serviceAnnotations: {} - - # Optional additional arguments for webhook - extraArgs: [] - - resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - - ## Liveness and readiness probe values - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 1 - - nodeSelector: {} - - affinity: {} - - tolerations: [] - - # Optional additional labels to add to the Webhook Pods - podLabels: {} - - # Optional additional labels to add to the Webhook Service - serviceLabels: {} - - image: - repository: quay.io/jetstack/cert-manager-webhook - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-webhook - - # Override the image tag to deploy by setting 
this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - pullPolicy: IfNotPresent - - serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - # Optional additional annotations to add to the controller's ServiceAccount - # annotations: {} - # Automount API credentials for a Service Account. - automountServiceAccountToken: true - - # The port that the webhook should listen on for requests. - # In GKE private clusters, by default kubernetes apiservers are allowed to - # talk to the cluster nodes only on 443 and 10250. so configuring - # securePort: 10250, will work out of the box without needing to add firewall - # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000 - securePort: 10250 - - # Specifies if the webhook should be started in hostNetwork mode. - # - # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom - # CNI (such as calico), because control-plane managed by AWS cannot communicate - # with pods' IP CIDR and admission webhooks are not working - # - # Since the default port for the webhook conflicts with kubelet on the host - # network, `webhook.securePort` should be changed to an available port if - # running in hostNetwork mode. - hostNetwork: false - - # Specifies how the service should be handled. Useful if you want to expose the - # webhook to outside of the cluster. In some cases, the control plane cannot - # reach internal services. - serviceType: ClusterIP - # loadBalancerIP: - - # Overrides the mutating webhook and validating webhook so they reach the webhook - # service using the `url` field instead of a service. - url: {} - # host: - -cainjector: - enabled: true - replicaCount: 1 - - strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - # Pod Security Context to be set on the cainjector component Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - securityContext: - runAsNonRoot: true - - # Container Security Context to be set on the cainjector component container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - containerSecurityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - - - # Optional additional annotations to add to the cainjector Deployment - # deploymentAnnotations: {} - - # Optional additional annotations to add to the cainjector Pods - # podAnnotations: {} - - # Optional additional arguments for cainjector - extraArgs: [] - - resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - - nodeSelector: {} - - affinity: {} - - tolerations: [] - - # Optional additional labels to add to the CA Injector Pods - podLabels: {} - - image: - repository: quay.io/jetstack/cert-manager-cainjector - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-cainjector - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. 
- # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - pullPolicy: IfNotPresent - - serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - # Optional additional annotations to add to the controller's ServiceAccount - # annotations: {} - # Automount API credentials for a Service Account. - automountServiceAccountToken: true - -# This startupapicheck is a Helm post-install hook that waits for the webhook -# endpoints to become available. -# The check is implemented using a Kubernetes Job- if you are injecting mesh -# sidecar proxies into cert-manager pods, you probably want to ensure that they -# are not injected into this Job's pod. Otherwise the installation may time out -# due to the Job never being completed because the sidecar proxy does not exit. -# See https://github.com/jetstack/cert-manager/pull/4414 for context. -startupapicheck: - enabled: true - - # Pod Security Context to be set on the startupapicheck component Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - securityContext: - runAsNonRoot: true - - # Timeout for 'kubectl check api' command - timeout: 1m - - # Job backoffLimit - backoffLimit: 4 - - # Optional additional annotations to add to the startupapicheck Job - jobAnnotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "1" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Optional additional annotations to add to the startupapicheck Pods - # podAnnotations: {} - - # Optional additional arguments for startupapicheck - extraArgs: [] - - resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - - nodeSelector: {} - - affinity: {} - - tolerations: [] - - # Optional additional labels to add to the startupapicheck Pods - podLabels: {} - - image: - repository: quay.io/jetstack/cert-manager-ctl - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-ctl - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - pullPolicy: IfNotPresent - - rbac: - # annotations for the startup API Check job RBAC and PSP resources - annotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - serviceAccount: - # Specifies whether a service account should be created - create: true - - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - - # Optional additional annotations to add to the Job's ServiceAccount - annotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Automount API credentials for a Service Account. 
-    automountServiceAccountToken: true
-
diff --git a/unused/crowdsec.ingress.yaml b/unused/crowdsec.ingress.yaml
deleted file mode 100644
index 099d312..0000000
--- a/unused/crowdsec.ingress.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-kind: Ingress
-apiVersion: networking.k8s.io/v1
-metadata:
-  namespace: crowdsec
-  name: crowdsec-ingress
-  annotations:
-    kubernetes.io/ingress.class: nginx
-    cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
-
-
-spec:
-  tls:
-  - hosts:
-    - crowdsec.kluster.moll.re
-    secretName: cloudflare-letsencrypt-issuer-account-key
-  rules:
-  - host: crowdsec.kluster.moll.re
-    http:
-      paths:
-      - backend:
-          service:
-            name: crowdsec-service
-            port:
-              number: 3000
-        path: /
-        pathType: Prefix
diff --git a/unused/crowdsec.nginx-bouncer.yaml b/unused/crowdsec.nginx-bouncer.yaml
deleted file mode 100644
index 14c6aab..0000000
--- a/unused/crowdsec.nginx-bouncer.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-controller:
-  extraVolumes:
-  - name: crowdsec-bouncer-plugin
-    emptyDir: {}
-  extraInitContainers:
-  - name: init-clone-crowdsec-bouncer
-    image: crowdsecurity/lua-bouncer-plugin
-    imagePullPolicy: IfNotPresent
-    env:
-      - name: API_URL
-        value: "http://crowdsec-service.crowdsec.svc.cluster.local:8080" # crowdsec lapi service-name
-      - name: API_KEY
-        value: "6cc4c975f123f4f24174e2d544e81282" # generated with `cscli bouncers add -n
-      - name: BOUNCER_CONFIG
-        value: "/crowdsec/crowdsec-bouncer.conf"
-      - name: BAN_TEMPLATE_PATH
-        value: /etc/nginx/lua/plugins/crowdsec/templates/ban.html
-      - name: CAPTCHA_TEMPLATE_PATH
-        value: /etc/nginx/lua/plugins/crowdsec/templates/captcha.html
-    command: ['sh', '-c', "sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/"]
-    volumeMounts:
-    - name: crowdsec-bouncer-plugin
-      mountPath: /lua_plugins
-  extraVolumeMounts:
-  - name: crowdsec-bouncer-plugin
-    mountPath: /etc/nginx/lua/plugins/crowdsec
-    subPath: crowdsec
-  config:
-    plugins: "crowdsec"
-    lua-shared-dicts: "crowdsec_cache: 50m"
diff --git a/unused/crowdsec.values.yaml b/unused/crowdsec.values.yaml
deleted file mode 100644
index f619088..0000000
--- a/unused/crowdsec.values.yaml
+++ /dev/null
@@ -1,178 +0,0 @@
-# Default values for crowdsec-chart.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-# -- for raw logs format: json or cri (docker|containerd)
-container_runtime: containerd
-
-image:
-  # -- docker image repository name
-  repository: crowdsecurity/crowdsec
-  # -- pullPolicy
-  pullPolicy: IfNotPresent
-  # -- docker image tag
-  tag: ""
-
-# If you want to specify secrets that will be used for all your crowdsec-agents
-# secrets can be provided via env variables
-secrets:
-  # -- agent username (default is generated randomly)
-  username: ""
-  # -- agent password (default is generated randomly)
-  password: ""
-
-# lapi will deploy pod with crowdsec lapi and dashboard as deployment
-lapi:
-  # -- environment variables from crowdsecurity/crowdsec docker image
-  env: []
-  # by default, disable the agent because it only runs the local API. 
- #- name: DISABLE_AGENT - # value: "true" - dashboard: - # -- Enable Metabase Dashboard (by default disabled) - enabled: true - image: - # -- docker image repository name - repository: loancrate/metabase - # -- pullPolicy - pullPolicy: IfNotPresent - # -- docker image tag - tag: "latest" - # -- Metabase SQLite static DB containing Dashboards - assetURL: https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase_sqlite.zip - - # -- Enable ingress object - ingress: - enabled: false - annotations: - # metabase only supports http so we need this annotation - nginx.ingress.kubernetes.io/backend-protocol: "HTTP" - # labels: {} - ingressClassName: "nginx" - host: "" # metabase.example.com - # tls: {} - - resources: - limits: - memory: 100Mi - requests: - cpu: 150m - memory: 100Mi - # -- Enable persistent volumes - persistentVolume: - # -- Persistent volume for data folder. Stores e.g. registered bouncer api keys - data: - enabled: true - accessModes: - - ReadWriteOnce - storageClassName: "" - size: 1Gi - # -- Persistent volume for config folder. Stores e.g. online api credentials - config: - enabled: true - accessModes: - - ReadWriteOnce - storageClassName: "" - size: 100Mi - - # -- nodeSelector for lapi - nodeSelector: {} - # -- tolerations for lapi - tolerations: {} - - # -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus) - metrics: - enabled: false - # -- Creates a ServiceMonitor so Prometheus will monitor this service - # -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors - # -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape - # -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774 - serviceMonitor: - enabled: false - - strategy: - type: RollingUpdate - -# agent will deploy pod on every node as daemonSet to read wanted pods logs -agent: - acquisition: - # -- Specify each pod you want to process it logs (namespace, podName and program) - - namespace: kube-system - # -- to select pod logs to process - podName: nginx-nginx-ingress-* - # -- program name related to specific parser you will use (see https://hub.crowdsec.net/author/crowdsecurity/configurations/docker-logs) - program: nginx - resources: - limits: - memory: 100Mi - requests: - cpu: 150m - memory: 100Mi - # -- Enable persistent volumes - persistentVolume: - # -- Persistent volume for config folder. Stores local config (parsers, scenarios etc.) 
-    config:
-      enabled: true
-      accessModes:
-        - ReadWriteOnce
-      storageClassName: ""
-      size: 100Mi
-  # -- environment variables from crowdsecurity/crowdsec docker image
-  env: []
-  # by default we use the docker-logs parser to be able to parse docker logs in k8s
-  # by default we disable local API on the agent pod
-  # - name: SCENARIOS
-  #   value: "scenario/name otherScenario/name"
-  # - name: PARSERS
-  #   value: "parser/name otherParser/name"
-  # - name: POSTOVERFLOWS
-  #   value: "postoverflow/name otherPostoverflow/name"
-  # - name: CONFIG_FILE
-  #   value: "/etc/crowdsec/config.yaml"
-  # - name: DSN
-  #   value: "file:///var/log/toto.log"
-  # - name: TYPE
-  #   value: "Labels.type_for_time-machine_mode"
-  # - name: TEST_MODE
-  #   value: "false"
-  # - name: TZ
-  #   value: ""
-  # - name: DISABLE_AGENT
-  #   value: "false"
-  # - name: DISABLE_ONLINE_API
-  #   value: "false"
-  # - name: LEVEL_TRACE
-  #   value: "false"
-  # - name: LEVEL_DEBUG
-  #   value: "false"
-  # - name: LEVEL_INFO
-  #   value: "false"
-
-  # -- nodeSelector for agent
-  nodeSelector: {}
-  # -- tolerations for agent
-  tolerations: {}
-
-  # -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus)
-  metrics:
-    enabled: false
-    # -- Creates a ServiceMonitor so Prometheus will monitor this service
-    # -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors
-    # -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
-    # -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774
-    serviceMonitor:
-      enabled: false
-
-  # -- wait-for-lapi init container
-  wait_for_lapi:
-    image:
-      # -- docker image repository name
-      repository: busybox
-      # -- pullPolicy
-      pullPolicy: IfNotPresent
-      # -- docker image tag
-      tag: "1.28"
-
-#service: {}
-
-
diff --git a/unused/ddns/deployment.yaml b/unused/ddns/deployment.yaml
deleted file mode 100644
index 81187c0..0000000
--- a/unused/ddns/deployment.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  namespace: ddns
-  name: cloudflare-ddns
-spec:
-  selector:
-    matchLabels:
-      app: cloudflare-ddns
-
-  template:
-    metadata:
-      labels:
-        app: cloudflare-ddns
-
-    spec:
-      containers:
-      - name: cloudflare-ddns
-        image: timothyjmiller/cloudflare-ddns:latest
-        resources:
-          limits:
-            memory: "32Mi"
-            cpu: "50m"
-        env:
-        - name: CONFIG_PATH
-          value: "/etc/cloudflare-ddns/"
-        volumeMounts:
-        - mountPath: "/etc/cloudflare-ddns"
-          name: config-cloudflare-ddns
-          readOnly: true
-      volumes:
-      - name: config-cloudflare-ddns
-        secret:
-          secretName: config-cloudflare-ddns
diff --git a/unused/focalboard/ingress.yaml b/unused/focalboard/ingress.yaml
deleted file mode 100644
index c468165..0000000
--- a/unused/focalboard/ingress.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: focalboard-ingress
-  namespace: focalboard
-spec:
-  entryPoints:
-    - websecure
-  routes:
-  - match: Host(`focalboard.kluster.moll.re`)
-    middlewares:
-      - name: focalboard-websocket
-    kind: Rule
-    services:
-    - name: focalboard
-      port: 8000
-  tls:
-    certResolver: default-tls
-
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: Middleware
-metadata:
-  name: focalboard-websocket
-  namespace: focalboard
-spec:
-  headers:
-    customRequestHeaders:
-      X-Forwarded-Proto: "https"
-      Upgrade: "websocket"
-
-
diff --git a/unused/focalboard/pvc.yaml b/unused/focalboard/pvc.yaml
deleted file mode 100644
index 
64b0d62..0000000 --- a/unused/focalboard/pvc.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - namespace: focalboard - name: focalboard-nfs - labels: - directory: focalboard -spec: - storageClassName: fast - capacity: - storage: "5Gi" - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - nfs: - path: /focalboard - server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: focalboard - name: focalboard-nfs -spec: - storageClassName: fast - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "5Gi" - selector: - matchLabels: - directory: focalboard - - - diff --git a/unused/focalboard/values.yaml b/unused/focalboard/values.yaml deleted file mode 100644 index 9a9727d..0000000 --- a/unused/focalboard/values.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# -# IMPORTANT NOTE -# -# This chart inherits from our common library chart. You can check the default values/options here: -# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml -# - -image: - # -- image repository - repository: flyskype2021/focalboard - # -- image pull policy - pullPolicy: IfNotPresent - # -- image tag - tag: latest - - -enableServiceLinks: false - -# -- environment variables. -# @default -- See below -env: {} - -# See the Administrator's Guide for config reference: https://www.focalboard.com/guide/admin/ -config: | - { - "serverRoot": "https://focalboard.kluster.moll.re", - "port": 8000, - "dbtype": "sqlite3", - "dbconfig": "/data/focalboard.db", - "postgres_dbconfig": "dbname=focalboard sslmode=disable", - "useSSL": false, - "webpath": "./pack", - "filespath": "/data/files", - "telemetry": false, - "session_expire_time": 2592000, - "session_refresh_time": 18000, - "localOnly": false, - "enableLocalMode": true, - "localModeSocketLocation": "/var/tmp/focalboard_local.socket" - } - - -# -- Configures service settings for the chart. -# @default -- See values.yaml -service: - main: - ports: - http: - port: 8000 - -ingress: - # -- Enable and configure ingress settings for the chart under this key. - # @default -- See values.yaml - main: - enabled: false - -# -- Configure persistence settings for the chart under this key. -# @default -- See values.yaml -persistence: - data: - enabled: true - existingClaim: focalboard-nfs - diff --git a/unused/freshrss/freshrss.values.yaml b/unused/freshrss/freshrss.values.yaml deleted file mode 100644 index 70fc9a7..0000000 --- a/unused/freshrss/freshrss.values.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# -# IMPORTANT NOTE -# -# This chart inherits from our common library chart. You can check the default values/options here: -# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml -# - -image: - # -- image repository - repository: linuxserver/freshrss - # -- image pull policy - pullPolicy: IfNotPresent - # -- image tag - tag: version-1.18.1 - -# -- environment variables. See more environment variables in the [freshrss documentation](https://github.com/linuxserver/docker-freshrss#parameters). -# @default -- See below -env: - # -- Set the container timezone - TZ: "Europe/Berlin" - # -- Set the container user id - PUID: "1001" - # -- Set the container group id - PGID: "1001" - -# -- Configures service settings for the chart. 
-# @default -- See values.yaml
-service:
-  main:
-    ports:
-      http:
-        port: 80
-
-ingress:
-  # -- Enable and configure ingress settings for the chart under this key.
-  # @default -- See values.yaml
-  main:
-    enabled: false
-
-# -- Configure persistence settings for the chart under this key.
-# @default -- See values.yaml
-persistence:
-  config:
-    enabled: true
-    useExisting: true
-    name: freshrss-nfs
-
diff --git a/unused/freshrss/ingress.yaml b/unused/freshrss/ingress.yaml
deleted file mode 100644
index 88f3394..0000000
--- a/unused/freshrss/ingress.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-kind: Ingress
-apiVersion: networking.k8s.io/v1
-metadata:
-  namespace: freshrss
-  name: freshrss-ingress
-  annotations:
-    kubernetes.io/ingress.class: nginx
-    cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
-spec:
-  tls:
-  - hosts:
-    - rss.kluster.moll.re
-    secretName: cloudflare-letsencrypt-issuer-account-key
-  rules:
-  - host: rss.kluster.moll.re
-    http:
-      paths:
-      - backend:
-          service:
-            name: freshrss
-            port:
-              number: 80
-        path: /
-        pathType: Prefix
\ No newline at end of file
diff --git a/unused/freshrss/pvc.yaml b/unused/freshrss/pvc.yaml
deleted file mode 100644
index ed00d8f..0000000
--- a/unused/freshrss/pvc.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  namespace: freshrss
-  name: freshrss-nfs
-  labels:
-    directory: freshrss
-spec:
-  storageClassName: slow
-  capacity:
-    storage: "1Gi"
-  volumeMode: Filesystem
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /freshrss
-    server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  namespace: freshrss
-  name: freshrss-nfs
-spec:
-  storageClassName: slow
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: "1Gi"
-  selector:
-    matchLabels:
-      directory: freshrss
-
-
-
diff --git a/unused/grsync.cronjob.yaml b/unused/grsync.cronjob.yaml
deleted file mode 100644
index 7ca482f..0000000
--- a/unused/grsync.cronjob.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: gcloud-backup
-  namespace: backup
-
-spec:
-  schedule: "0 2 15 * *"
-  # at 2:00 on the 15th of every month
-  successfulJobsHistoryLimit: 2
-  failedJobsHistoryLimit: 2
-
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          restartPolicy: Never
-          containers:
-          - name: gcloud-backup
-            image: shirakiya/gcloud-sdk:latest
-            command: ["/bin/bash", "-c", "--"]
-            args:
-              - |
-                ln -s /config/.boto /root/.boto &&
-                gsutil -m rsync -x "^(jellyfin|config|webtop|other root folder)/.*$" -U -r -e -d /data gs://kluster-backup
-            # command:
-            # -m  multithreaded
-            # -U  skip unsupported objects
-            # -e  don't follow symlinks
-            # -r  recursively follow folder structure
-            # -d  deletes files from dst if they are not in src anymore
-            # -n  dry runs
-            # This command runs with the knowledge that the gs-bucket is set up with versioning. 
Rsync therefore serves as an incremental backup whose individual stages can be recovered - volumeMounts: - - mountPath: /data - name: backup-nfs-access - - mountPath: /config - name: gcloud-credentials - # entry .boto in the secret is mounted as /root/.boto - - volumes: - - name: backup-nfs-access - persistentVolumeClaim: - claimName: backup-nfs-access - - name: gcloud-credentials - secret: - secretName: gcloud-credentials - optional: false - - - - - diff --git a/unused/ingress-nginx/values.yaml b/unused/ingress-nginx/values.yaml deleted file mode 100644 index de70dbb..0000000 --- a/unused/ingress-nginx/values.yaml +++ /dev/null @@ -1,749 +0,0 @@ -## nginx configuration -## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md -## - -## Overrides for generated resource names -# See templates/_helpers.tpl -# nameOverride: -# fullnameOverride: - -## Labels to apply to all resources -## -commonLabels: {} -# scmhash: abc123 -# myLabel: aakkmd - -controller: - name: controller - image: - ## Keep false as default for now! - chroot: false - registry: registry.k8s.io - image: ingress-nginx/controller - ## for backwards compatibility consider setting the full image url via the repository value below - ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail - ## repository: - tag: "v1.3.0" - digest: sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5 - digestChroot: sha256:0fcb91216a22aae43b374fc2e6a03b8afe9e8c78cbf07a09d75636dc4ea3c191 - pullPolicy: IfNotPresent - # www-data -> uid 101 - runAsUser: 101 - allowPrivilegeEscalation: true - - # -- Use an existing PSP instead of creating one - existingPsp: "" - - # -- Configures the controller container name - containerName: controller - - # -- Configures the ports that the nginx-controller listens on - containerPort: - http: 80 - https: 443 - - # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ - config: {} - - # -- Annotations to be added to the controller config configuration configmap. - configAnnotations: {} - - # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers - proxySetHeaders: {} - - # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers - addHeaders: {} - - # -- Optionally customize the pod dnsConfig. - dnsConfig: {} - - # -- Optionally customize the pod hostname. - hostname: {} - - # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. - # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller - # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
- dnsPolicy: ClusterFirst - - # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network - # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply - reportNodeInternalIp: false - - # -- Process Ingress objects without ingressClass annotation/ingressClassName field - # Overrides value for --watch-ingress-without-class flag of the controller binary - # Defaults to false - watchIngressWithoutClass: false - - # -- Process IngressClass per name (additionally as per spec.controller). - ingressClassByName: false - - # -- This configuration defines if Ingress Controller should allow users to set - # their own *-snippet annotations, otherwise this is forbidden / dropped - # when users add those annotations. - # Global snippets in ConfigMap are still respected - allowSnippetAnnotations: true - - # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), - # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 - # is merged - hostNetwork: false - - ## Use host ports 80 and 443 - ## Disabled by default - hostPort: - # -- Enable 'hostPort' or not - enabled: false - ports: - # -- 'hostPort' http port - http: 80 - # -- 'hostPort' https port - https: 443 - - # -- Election ID to use for status update - electionID: ingress-controller-leader - - ## This section refers to the creation of the IngressClass resource - ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 - ingressClassResource: - # -- Name of the ingressClass - name: nginx-new - # -- Is this ingressClass enabled or not - enabled: true - # -- Is this the default ingressClass for the cluster - default: false - # -- Controller-value of the controller that is processing this ingressClass - controllerValue: "k8s.io/ingress-nginx" - - # -- Parameters is a link to a custom resource containing additional - # configuration for the controller. This is optional if the controller - # does not require extra parameters. - parameters: {} - - # -- For backwards compatibility with ingress.class annotation, use ingressClass. - # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation - ingressClass: nginx - - # -- Labels to add to the pod container metadata - podLabels: {} - # key: value - - # -- Security Context policies for controller pods - podSecurityContext: {} - - # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls - sysctls: {} - # sysctls: - # "net.core.somaxconn": "8192" - - # -- Allows customization of the source of the IP address or FQDN to report - # in the ingress status field. By default, it reads the information provided - # by the service. If disable, the status field reports the IP address of the - # node or nodes where an ingress controller pod is running. 
- publishService: - # -- Enable 'publishService' or not - enabled: true - # -- Allows overriding of the publish service to bind to - # Must be / - pathOverride: "" - - # Limit the scope of the controller to a specific namespace - scope: - # -- Enable 'scope' or not - enabled: false - # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) - namespace: "" - # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels - # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. - namespaceSelector: "" - - # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) - configMapNamespace: "" - - tcp: - # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) - configMapNamespace: "" - # -- Annotations to be added to the tcp config configmap - annotations: {} - - udp: - # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) - configMapNamespace: "" - # -- Annotations to be added to the udp config configmap - annotations: {} - - # -- Maxmind license key to download GeoLite2 Databases. - ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases - maxmindLicenseKey: "" - - # -- Additional command line arguments to pass to nginx-ingress-controller - # E.g. to specify the default SSL certificate you can use - extraArgs: {} - ## extraArgs: - ## default-ssl-certificate: "/" - - # -- Additional environment variables to set - extraEnvs: [] - # extraEnvs: - # - name: FOO - # valueFrom: - # secretKeyRef: - # key: FOO - # name: secret-resource - - # -- Use a `DaemonSet` or `Deployment` - kind: Deployment - - # -- Annotations to be added to the controller Deployment or DaemonSet - ## - annotations: {} - # keel.sh/pollSchedule: "@every 60m" - - # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels - ## - labels: {} - # keel.sh/policy: patch - # keel.sh/trigger: poll - - - # -- The update strategy to apply to the Deployment or DaemonSet - ## - updateStrategy: {} - # rollingUpdate: - # maxUnavailable: 1 - # type: RollingUpdate - - # -- `minReadySeconds` to avoid killing pods before we are ready - ## - minReadySeconds: 0 - - - # -- Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - # -- Affinity and anti-affinity rules for server scheduling to nodes - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - # # An example of preferred pod anti-affinity, weight is in the range 1-100 - # podAntiAffinity: - # preferredDuringSchedulingIgnoredDuringExecution: - # - weight: 100 - # podAffinityTerm: - # labelSelector: - # matchExpressions: - # - key: app.kubernetes.io/name - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/instance - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/component - # operator: In - # values: - # - controller - # topologyKey: kubernetes.io/hostname - - # # An example of required pod anti-affinity - # podAntiAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # - labelSelector: - # matchExpressions: - # - 
key: app.kubernetes.io/name - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/instance - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/component - # operator: In - # values: - # - controller - # topologyKey: "kubernetes.io/hostname" - - # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - # labelSelector: - # matchLabels: - # app.kubernetes.io/instance: ingress-nginx-internal - - # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready - ## wait up to five minutes for the drain of connections - ## - terminationGracePeriodSeconds: 300 - - # -- Node labels for controller pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: - kubernetes.io/os: linux - - ## Liveness and readiness probe values - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - ## startupProbe: - ## httpGet: - ## # should match container.healthCheckPath - ## path: "/healthz" - ## port: 10254 - ## scheme: HTTP - ## initialDelaySeconds: 5 - ## periodSeconds: 5 - ## timeoutSeconds: 2 - ## successThreshold: 1 - ## failureThreshold: 5 - livenessProbe: - httpGet: - # should match container.healthCheckPath - path: "/healthz" - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - # should match container.healthCheckPath - path: "/healthz" - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - - - # -- Path of the health check endpoint. All requests received on the port defined by - # the healthz-port parameter are forwarded internally to this path. - healthCheckPath: "/healthz" - - # -- Address to bind the health check endpoint. - # It is better to set this option to the internal node address - # if the ingress nginx controller is running in the `hostNetwork: true` mode. - healthCheckHost: "" - - # -- Annotations to be added to controller pods - ## - podAnnotations: {} - - replicaCount: 1 - - minAvailable: 1 - - ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes - ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 - ## Ideally, there should be no limits. 
- ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ - resources: - ## limits: - ## cpu: 100m - ## memory: 90Mi - requests: - cpu: 100m - memory: 90Mi - - # Mutually exclusive with keda autoscaling - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 11 - targetCPUUtilizationPercentage: 50 - targetMemoryUtilizationPercentage: 50 - behavior: {} - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 2 - # periodSeconds: 60 - - autoscalingTemplate: [] - # Custom or additional autoscaling metrics - # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics - # - type: Pods - # pods: - # metric: - # name: nginx_ingress_controller_nginx_process_requests_total - # target: - # type: AverageValue - # averageValue: 10000m - - # Mutually exclusive with hpa autoscaling - - # -- Enable mimalloc as a drop-in replacement for malloc. - ## ref: https://github.com/microsoft/mimalloc - ## - enableMimalloc: true - - ## Override NGINX template - customTemplate: - configMapName: "" - configMapKey: "" - - service: - enabled: true - - # -- If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were - # using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http - # It allows choosing the protocol for each backend specified in the Kubernetes service. - # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 - # Will be ignored for Kubernetes versions older than 1.20 - ## - appProtocol: true - - annotations: {} - labels: {} - # clusterIP: "" - - # -- List of IP addresses at which the controller services are available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer - loadBalancerSourceRanges: [] - - enableHttp: true - enableHttps: true - - ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. - ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer - # externalTrafficPolicy: "" - - ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". - ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - # sessionAffinity: "" - - ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, - ## the service controller allocates a port from your cluster’s NodePort range. - ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - # healthCheckNodePort: 0 - - # -- Represents the dual-stack-ness requested or required by this Service. Possible values are - # SingleStack, PreferDualStack or RequireDualStack. - # The ipFamilies and clusterIPs fields depend on the value of this field. - ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ - ipFamilyPolicy: "SingleStack" - - # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. 
This field is usually assigned automatically - # based on cluster configuration and the ipFamilyPolicy field. - ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ - ipFamilies: - - IPv4 - - ports: - http: 80 - https: 443 - - targetPorts: - http: http - https: https - - type: LoadBalancer - loadBalancerIP: "192.168.1.4" - - ## type: NodePort - ## nodePorts: - ## http: 32080 - ## https: 32443 - ## tcp: - ## 8080: 32808 - - - # shareProcessNamespace enables process namespace sharing within the pod. - # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. - shareProcessNamespace: false - - - extraContainers: [] - # - name: my-sidecar - # image: nginx:latest - # - name: lemonldap-ng-controller - # image: lemonldapng/lemonldap-ng-controller:0.2.0 - # args: - # - /lemonldap-ng-controller - # - --alsologtostderr - # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration - # env: - # - name: POD_NAME - # valueFrom: - # fieldRef: - # fieldPath: metadata.name - # - name: POD_NAMESPACE - # valueFrom: - # fieldRef: - # fieldPath: metadata.namespace - # volumeMounts: - # - name: copy-portal-skins - # mountPath: /srv/var/lib/lemonldap-ng/portal/skins - - # -- Additional volumeMounts to the controller main container. - extraVolumeMounts: [] - - # - name: copy-portal-skins - # mountPath: /var/lib/lemonldap-ng/portal/skins - - # -- Additional volumes to the controller pod. - extraVolumes: [] - # - name: copy-portal-skins - # emptyDir: {} - - # -- Containers, which are run before the app containers are started. - extraInitContainers: [] - # - name: init-myservice - # image: busybox - # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] - - extraModules: [] - ## Modules, which are mounted into the core nginx image - # - name: opentelemetry - # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 - # - # The image must contain a `/usr/local/bin/init_module.sh` executable, which - # will be executed as initContainers, to move its config files within the - # mounted volume. - - admissionWebhooks: - annotations: {} - # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". - - ## Additional annotations to the admission webhooks. - ## These annotations will be added to the ValidatingWebhookConfiguration and - ## the Jobs Spec of the admission webhooks. 
- enabled: true - # -- Additional environment variables to set - extraEnvs: [] - # extraEnvs: - # - name: FOO - # valueFrom: - # secretKeyRef: - # key: FOO - # name: secret-resource - # -- Admission Webhook failure policy to use - failurePolicy: Fail - # timeoutSeconds: 10 - port: 8443 - certificate: "/usr/local/certificates/cert" - key: "/usr/local/certificates/key" - namespaceSelector: {} - objectSelector: {} - # -- Labels to be added to admission webhooks - labels: {} - - # -- Use an existing PSP instead of creating one - existingPsp: "" - networkPolicyEnabled: false - - service: - annotations: {} - # clusterIP: "" - externalIPs: [] - # loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 443 - type: ClusterIP - - createSecretJob: - resources: {} - # limits: - # cpu: 10m - # memory: 20Mi - # requests: - # cpu: 10m - # memory: 20Mi - - patchWebhookJob: - resources: {} - - patch: - enabled: true - image: - registry: registry.k8s.io - image: ingress-nginx/kube-webhook-certgen - ## for backwards compatibility consider setting the full image url via the repository value below - ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail - ## repository: - tag: v1.3.0 - digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 - pullPolicy: IfNotPresent - # -- Provide a priority class name to the webhook patching job - ## - priorityClassName: "" - podAnnotations: {} - nodeSelector: - kubernetes.io/os: linux - tolerations: [] - # -- Labels to be added to patch job resources - labels: {} - securityContext: - runAsNonRoot: true - runAsUser: 2000 - fsGroup: 2000 - - - metrics: - port: 10254 - # if this port is changed, change healthz-port: in extraArgs: accordingly - enabled: false - - service: - annotations: {} - # prometheus.io/scrape: "true" - # prometheus.io/port: "10254" - - # clusterIP: "" - - # -- List of IP addresses at which the stats-exporter service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - # loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 10254 - type: ClusterIP - # externalTrafficPolicy: "" - # nodePort: "" - - serviceMonitor: - enabled: false - additionalLabels: {} - ## The label to use to retrieve the job name from. 
-      ## jobLabel: "app.kubernetes.io/name"
-      namespace: ""
-      namespaceSelector: {}
-      ## Default: scrape .Release.Namespace only
-      ## To scrape all, use the following:
-      ## namespaceSelector:
-      ##   any: true
-      scrapeInterval: 30s
-      # honorLabels: true
-      targetLabels: []
-      relabelings: []
-      metricRelabelings: []
-
-    prometheusRule:
-      enabled: false
-      additionalLabels: {}
-      # namespace: ""
-      rules: []
-      # # These are just example rules, please adapt them to your needs
-      # - alert: NGINXConfigFailed
-      #   expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
-      #   for: 1s
-      #   labels:
-      #     severity: critical
-      #   annotations:
-      #     description: bad ingress config - nginx config test failed
-      #     summary: uninstall the latest ingress changes to allow config reloads to resume
-      # - alert: NGINXCertificateExpiry
-      #   expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
-      #   for: 1s
-      #   labels:
-      #     severity: critical
-      #   annotations:
-      #     description: ssl certificate(s) will expire in less than a week
-      #     summary: renew expiring certificates to avoid downtime
-      # - alert: NGINXTooMany500s
-      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
-      #   for: 1m
-      #   labels:
-      #     severity: warning
-      #   annotations:
-      #     description: Too many 5XXs
-      #     summary: More than 5% of all requests returned 5XX, this requires your attention
-      # - alert: NGINXTooMany400s
-      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
-      #   for: 1m
-      #   labels:
-      #     severity: warning
-      #   annotations:
-      #     description: Too many 4XXs
-      #     summary: More than 5% of all requests returned 4XX, this requires your attention
-
-  # -- Improve connection draining when the ingress controller pod is deleted, using a lifecycle hook:
-  # with this hook, the default terminationGracePeriodSeconds is increased from 30 seconds
-  # to 300, allowing connections to drain for up to five minutes.
-  # If the active connections end before that, the pod will terminate gracefully at that time.
-  # To take full advantage of this feature, the ConfigMap option
-  # worker-shutdown-timeout is set to 240s instead of the default 10s.
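For reference, a sketch of what the draining setup described above amounts to in the rendered controller Pod. This is an illustration of the chart defaults, not a file from this repo; the container name is an assumption:

  spec:
    terminationGracePeriodSeconds: 300   # raised from the Kubernetes default of 30
    containers:
      - name: controller                 # assumed container name
        lifecycle:
          preStop:
            exec:
              command: ["/wait-shutdown"]  # lets NGINX drain connections before exiting

The chart's own lifecycle block follows.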
- ## - lifecycle: - preStop: - exec: - command: - - /wait-shutdown - - priorityClassName: "" - -# -- Rollback limit -## -revisionHistoryLimit: 10 - - -## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 -rbac: - create: true - scope: false - -## If true, create & use Pod Security Policy resources -## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -podSecurityPolicy: - enabled: false - -serviceAccount: - create: true - name: "" - automountServiceAccountToken: true - # -- Annotations for the controller service account - annotations: {} - -# -- Optional array of imagePullSecrets containing private registry credentials -## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# - name: secretName - -# -- TCP service key-value pairs -## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md -## -tcp: {} -# 8080: "default/example-tcp-svc:9000" - -# -- UDP service key-value pairs -## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md -## -udp: {} -# 53: "kube-system/kube-dns:53" - -# -- Prefix for TCP and UDP ports names in ingress controller service -## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration -portNamePrefix: "" - -# -- (string) A base64-encoded Diffie-Hellman parameter. -# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` -## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param -dhParam: - diff --git a/unused/jenkins.pvc.yaml b/unused/jenkins.pvc.yaml deleted file mode 100644 index 083da3b..0000000 --- a/unused/jenkins.pvc.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- - apiVersion: v1 - kind: PersistentVolume - metadata: - namespace: gitea - name: jenkins-data-nfs - labels: - directory: jenkins - spec: - storageClassName: fast - capacity: - storage: "10Gi" - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - nfs: - path: /jenkins - server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: gitea - name: jenkins-data-nfs -spec: - storageClassName: fast - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "10Gi" - selector: - matchLabels: - directory: jenkins \ No newline at end of file diff --git a/unused/jenkins.values.yaml b/unused/jenkins.values.yaml deleted file mode 100644 index d89f190..0000000 --- a/unused/jenkins.values.yaml +++ /dev/null @@ -1,669 +0,0 @@ -# Default values for jenkins. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. -# name: value - -## Overrides for generated resource names -# See templates/_helpers.tpl -# nameOverride: -# fullnameOverride: -# namespaceOverride: - -# For FQDN resolving of the controller service. Change this value to match your existing configuration. 
-# ref: https://github.com/kubernetes/dns/blob/master/docs/specification.md
-clusterZone: "cluster.local"
-
-renderHelmLabels: true
-
-controller:
-  # Used for label app.kubernetes.io/component
-  componentName: "jenkins-controller"
-  image: "jenkins/jenkins"
-  # tag: "2.346.1-jdk11"
-  tagLabel: jdk11
-  imagePullPolicy: "Always"
-  imagePullSecretName:
-  # Optionally configure lifecycle hooks for the controller container
-  lifecycle:
-  #  postStart:
-  #    exec:
-  #      command:
-  #        - "uname"
-  #        - "-a"
-  disableRememberMe: false
-  numExecutors: 0
-  # configures the executor mode of the Jenkins node. Possible values are: NORMAL or EXCLUSIVE
-  executorMode: "NORMAL"
-  # This is ignored if enableRawHtmlMarkupFormatter is true
-  markupFormatter: plainText
-  customJenkinsLabels: []
-  # The default configuration uses this secret to configure an admin user
-  # If you don't need that user or use a different security realm, you can disable it
-  adminSecret: true
-
-  hostNetworking: false
-  # When enabling LDAP or another non-Jenkins identity source, the built-in admin account will no longer exist.
-  # If you disable the non-Jenkins identity store and instead use the Jenkins internal one,
-  # you should revert controller.adminUser to your preferred admin user:
-  adminUser: "admin"
-  # adminPassword:
-  admin:
-    existingSecret: ""
-    userKey: jenkins-admin-user
-    passwordKey: jenkins-admin-password
-  # This value should not be changed unless you use a custom Jenkins image or one derived from it. If you want to use
-  # the Cloudbees Jenkins Distribution docker image, you should set jenkinsHome: "/var/cloudbees-jenkins-distribution"
-  jenkinsHome: "/var/jenkins_home"
-  # This value should not be changed unless you use a custom Jenkins image or one derived from it. If you want to use
-  # the Cloudbees Jenkins Distribution docker image, you should set jenkinsRef: "/usr/share/cloudbees-jenkins-distribution/ref"
-  jenkinsRef: "/usr/share/jenkins/ref"
-  # Path to the jenkins war file which is used by jenkins-plugin-cli.
-  jenkinsWar: "/usr/share/jenkins/jenkins.war"
-  # Overrides the default arguments passed to the war
-  # overrideArgs:
-  #   - --httpPort=8080
-  resources:
-    requests:
-      cpu: "50m"
-      memory: "256Mi"
-    limits:
-      cpu: "2000m"
-      memory: "4096Mi"
-  # Overrides the init container default values
-  # initContainerResources:
-  #   requests:
-  #     cpu: "50m"
-  #     memory: "256Mi"
-  #   limits:
-  #     cpu: "2000m"
-  #     memory: "4096Mi"
-  # Environment variables that get added to the init container (useful for e.g. http_proxy)
-  # initContainerEnv:
-  #   - name: http_proxy
-  #     value: "http://192.168.64.1:3128"
-  # containerEnv:
-  #   - name: http_proxy
-  #     value: "http://192.168.64.1:3128"
-  # Set min/max heap here if needed with:
-  # javaOpts: "-Xms512m -Xmx512m"
-  # jenkinsOpts: ""
-  # If you are using the ingress definitions provided by this chart via the `controller.ingress` block, the configured hostname will be the ingress hostname starting with `https://` or `http://` depending on the `tls` configuration.
-  # The protocol can be overwritten by specifying `controller.jenkinsUrlProtocol`.
-  # jenkinsUrlProtocol: "https"
-  # If you are not using the provided ingress you can specify `controller.jenkinsUrl` to change the url definition.
-  # jenkinsUrl: ""
-  # If you set this prefix and use the ingress controller then you might want to set the ingress path below
-  # jenkinsUriPrefix: "/jenkins"
-  # Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set)
-  usePodSecurityContext: true
-  # Note that `runAsUser`, `fsGroup`, and `securityContextCapabilities` are
-  # being deprecated and replaced by `podSecurityContextOverride`.
-  # Set runAsUser to 1000 to let Jenkins run as the non-root user 'jenkins', which exists in the 'jenkins/jenkins' docker image.
-  # When setting runAsUser to a different value than 0, also set fsGroup to the same value:
-  runAsUser: 1000
-  fsGroup: 1000
-  # If you have PodSecurityPolicies that require dropping of capabilities as suggested by the CIS K8s benchmark, put them here
-  securityContextCapabilities: {}
-  #   drop:
-  #     - NET_RAW
-  # Completely overwrites the contents of the `securityContext`, ignoring the
-  # values provided for the deprecated fields: `runAsUser`, `fsGroup`, and
-  # `securityContextCapabilities`. In the case of mounting an ext4 filesystem,
-  # it might be desirable to use `supplementalGroups` instead of `fsGroup` in
-  # the `securityContext` block: https://github.com/kubernetes/kubernetes/issues/67014#issuecomment-589915496
-  # podSecurityContextOverride:
-  #   runAsUser: 1000
-  #   runAsNonRoot: true
-  #   supplementalGroups: [1000]
-  #   # capabilities: {}
-  # Container securityContext
-  containerSecurityContext:
-    runAsUser: 1000
-    runAsGroup: 1000
-    readOnlyRootFilesystem: true
-    allowPrivilegeEscalation: false
-  servicePort: 8080
-  targetPort: 8080
-  # For minikube, set this to NodePort, elsewhere use LoadBalancer
-  # Use ClusterIP if your setup includes an ingress controller
-  serviceType: ClusterIP
-  # Use Local to preserve the client source IP and avoid a second hop for LoadBalancer and NodePort type services,
-  # but it risks potentially imbalanced traffic spreading.
-  serviceExternalTrafficPolicy:
-  # Jenkins controller service annotations
-  serviceAnnotations: {}
-  # Jenkins controller custom labels
-  statefulSetLabels: {}
-  #   foo: bar
-  #   bar: foo
-  # Jenkins controller service labels
-  serviceLabels: {}
-  #   service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https
-  # Put labels on Jenkins controller pod
-  podLabels: {}
-  # Used to create an Ingress record (should be used with ServiceType: ClusterIP)
-  # nodePort:
-  # -Dcom.sun.management.jmxremote.port=4000
-  # -Dcom.sun.management.jmxremote.authenticate=false
-  # -Dcom.sun.management.jmxremote.ssl=false
-  # jmxPort: 4000
-  # Optionally configure other ports to expose in the controller container
-  extraPorts: []
-  #   - name: BuildInfoProxy
-  #     port: 9000
-
-  # List of plugins to be installed during Jenkins controller start
-  installPlugins:
-    - kubernetes:3600.v144b_cd192ca_a_
-    - workflow-aggregator:581.v0c46fa_697ffd
-    - git:4.11.3
-    - gitea:1.4.3
-    - configuration-as-code:1429.v09b_044a_c93de
-
-  # Set to false to download the minimum required version of all dependencies.
-  installLatestPlugins: true
-
-  # Set to true to download the latest dependencies of any plugin that is requested to have the latest version.
-  installLatestSpecifiedPlugins: false
-
-  # List of plugins to install in addition to those listed in controller.installPlugins
-  additionalPlugins: []
-
-  # Enable to initialize the Jenkins controller only once on initial installation.
-  # Without this, whenever the controller gets restarted (Evicted, etc.) it will fetch plugin updates, which has the potential to cause breakage.
-  # Note that for this to work, `persistence.enabled` needs to be set to `true`
-  initializeOnce: false
-
-  # Enable to always override the installed plugins with the values of 'controller.installPlugins' on upgrade or redeployment.
-  # overwritePlugins: true
-
-  # Configures if plugins bundled with `controller.image` should be overwritten with the values of 'controller.installPlugins' on upgrade or redeployment.
-  overwritePluginsFromImage: true
-
-  # Enable HTML parsing using the OWASP Markup Formatter Plugin (antisamy-markup-formatter), useful with the ghprb plugin.
-  # The plugin is not installed by default, please update controller.installPlugins.
-  enableRawHtmlMarkupFormatter: false
-  # Used to approve a list of groovy functions in pipelines using the script-security plugin. Can be viewed under /scriptApproval
-  scriptApproval: []
-  #   - "method groovy.json.JsonSlurperClassic parseText java.lang.String"
-  #   - "new groovy.json.JsonSlurperClassic"
-  # List of groovy init scripts to be executed during Jenkins controller start
-  initScripts: []
-  #  - |
-  #    print 'adding global pipeline libraries, register properties, bootstrap jobs...'
-
-  # 'name' is the name of an existing secret in the same namespace as jenkins,
-  # 'keyName' is the name of one of the keys inside the current secret.
-  # the 'name' and 'keyName' are concatenated with a '-' in between, so for example:
-  # an existing secret "secret-credentials" and a key inside it named "github-password" should be used in JCasC as ${secret-credentials-github-password}
-  # 'name' and 'keyName' must each be a lowercase RFC 1123 label: they may only consist of lowercase alphanumeric characters or '-',
-  # and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc')
-  additionalExistingSecrets: []
-  #  - name: secret-name-1
-  #    keyName: username
-  #  - name: secret-name-1
-  #    keyName: password
-
-  additionalSecrets: []
-  #  - name: nameOfSecret
-  #    value: secretText
-
-  # Generate SecretClaim resources in order to create Kubernetes secrets from HashiCorp Vault using kube-vault-controller.
-  # 'name' is the name of the secret that will be created in Kubernetes. The Jenkins fullname is prepended to this value.
-  # 'path' is the fully qualified path to the secret in Vault
-  # 'type' is an optional Kubernetes secret type. Defaults to 'Opaque'
-  # 'renew' is an optional secret renewal time in seconds
-  secretClaims: []
-  # - name: secretName        # required
-  #   path: testPath          # required
-  #   type: kubernetes.io/tls # optional
-  #   renew: 60               # optional
-
-  # Name of the default cloud configuration.
-  cloudName: "kubernetes"
-
-  # Below is the implementation of Jenkins Configuration as Code. Add a key under configScripts for each configuration area,
-  # where each corresponds to a plugin or section of the UI. Each key (prior to the | character) is just a label, and can be any value.
-  # Keys are only used to give the section a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label
-  # characters: lowercase letters, numbers, and hyphens. The keys become the name of a configuration yaml file on the controller in
-  # /var/jenkins_home/casc_configs (by default) and will be processed by the Configuration as Code Plugin. The lines after each |
-  # become the content of the configuration yaml file. The first line after this is a JCasC root element, e.g. jenkins, credentials,
-  # etc. Best reference is https://<jenkins_url>/configuration-as-code/reference. The example below creates a welcome message:
-  JCasC:
-    defaultConfig: true
-    configScripts: {}
-    #  welcome-message: |
-    #    jenkins:
-    #      systemMessage: Welcome to our CI\CD server. This Jenkins is configured and managed 'as code'.
-    # Ignored if securityRealm is defined in controller.JCasC.configScripts and
-    securityRealm: |-
-      local:
-        allowsSignup: false
-        enableCaptcha: false
-        users:
-          - id: "${chart-admin-username}"
-            name: "Jenkins Admin"
-            password: "${chart-admin-password}"
-    # Ignored if authorizationStrategy is defined in controller.JCasC.configScripts
-    authorizationStrategy: |-
-      loggedInUsersCanDoAnything:
-        allowAnonymousRead: false
-  # Optionally specify additional init-containers
-  customInitContainers: []
-  # - name: custom-init
-  #   image: "alpine:3.7"
-  #   imagePullPolicy: Always
-  #   command: [ "uname", "-a" ]
-
-  sidecars:
-    configAutoReload:
-      # If enabled: true, Jenkins Configuration as Code will be reloaded on-the-fly without a reboot. If false or not specified,
-      # JCasC changes will cause a reboot and will only be applied at the subsequent start-up. Auto-reload uses the
-      # http://<jenkins_url>/reload-configuration-as-code endpoint to reapply config when changes to the configScripts are detected.
-      enabled: true
-      image: kiwigrid/k8s-sidecar:1.15.0
-      imagePullPolicy: IfNotPresent
-      resources: {}
-      #   limits:
-      #     cpu: 100m
-      #     memory: 100Mi
-      #   requests:
-      #     cpu: 50m
-      #     memory: 50Mi
-      # How many connection-related errors to retry on
-      reqRetryConnect: 10
-      # env:
-      #   - name: REQ_TIMEOUT
-      #     value: "30"
-      # The SSH port value can be set to any unused TCP port. The default, 1044, is a non-standard SSH port that has been chosen at random.
-      # It is only used to reload the JCasC config from the sidecar container running in the Jenkins controller pod.
-      # This TCP port will not be open in the pod (unless you specifically configure this), so Jenkins will not be
-      # accessible via SSH from outside of the pod. Note if you use non-root pod privileges (runAsUser & fsGroup),
-      # this must be > 1024:
-      sshTcpPort: 1044
-      # folder in the pod that should hold the collected JCasC configs:
-      folder: "/var/jenkins_home/casc_configs"
-      # If specified, the sidecar will search for JCasC config-maps inside this namespace.
-      # Otherwise the namespace in which the sidecar is running will be used.
-      # It's also possible to specify ALL to search in all namespaces:
-      # searchNamespace:
-      containerSecurityContext:
-        readOnlyRootFilesystem: true
-        allowPrivilegeEscalation: false
-
-    # Allows you to inject additional/other sidecars
-    other: []
-    ## The example below runs the client for https://smee.io as a sidecar container next to Jenkins,
-    ## which allows triggering builds behind a secure firewall.
-    ## https://jenkins.io/blog/2019/01/07/webhook-firewalls/#triggering-builds-with-webhooks-behind-a-secure-firewall
-    ##
-    ## Note: To use it you should go to https://smee.io/new and update the url to the generated one.
-    # - name: smee
-    #   image: docker.io/twalter/smee-client:1.0.2
-    #   args: ["--port", "{{ .Values.controller.servicePort }}", "--path", "/github-webhook/", "--url", "https://smee.io/new"]
-    #   resources:
-    #     limits:
-    #       cpu: 50m
-    #       memory: 128Mi
-    #     requests:
-    #       cpu: 10m
-    #       memory: 32Mi
-  # Name of the Kubernetes scheduler to use
-  schedulerName: ""
-  # Node labels and tolerations for pod assignment
-  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
-  nodeSelector: {}
-
-  terminationGracePeriodSeconds:
-
-  terminationMessagePath:
-  terminationMessagePolicy:
-
-  tolerations: []
-
-  affinity: {}
-  # Leverage a priorityClass to ensure your pods survive resource shortages
-  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
-  priorityClassName:
-
-  podAnnotations: {}
-  # Add StatefulSet annotations
-  statefulSetAnnotations: {}
-
-  # StatefulSet updateStrategy
-  # ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
-  updateStrategy: {}
-
-  ingress:
-    enabled: true
-    # Override for the default paths that map requests to the backend
-    paths: []
-    # - backend:
-    #     serviceName: >-
-    #       {{ template "jenkins.fullname" . }}
-    #     # Don't use string here, use only integer value!
-    #     servicePort: 8080
-    # For Kubernetes v1.19+, use 'networking.k8s.io/v1'
-    apiVersion: "networking.k8s.io/v1"
-    labels: {}
-    annotations:
-      kubernetes.io/ingress.class: nginx
-      cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
-
-    hostName: jenkins.kluster.moll.re
-    tls:
-      - secretName: cloudflare-letsencrypt-issuer-account-key
-        hosts:
-          - jenkins.kluster.moll.re
-
-  # Often you want to have your controller all locked down and private,
-  # but you still want to get webhooks from your SCM.
-  # A secondary ingress will let you expose different urls
-  # with a different configuration
-  secondaryingress:
-    enabled: false
-    # paths you want forwarded to the backend
-    # ex /github-webhook
-    paths: []
-    # For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1'
-    # For Kubernetes v1.19+, use 'networking.k8s.io/v1'
-    apiVersion: "extensions/v1beta1"
-    labels: {}
-    annotations: {}
-    #   kubernetes.io/ingress.class: nginx
-    #   kubernetes.io/tls-acme: "true"
-    # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
-    # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
-    # ingressClassName: nginx
-    # configures the hostname, e.g. jenkins-external.example.com
-    hostName:
-    tls:
-    # - secretName: jenkins-external.example.com
-    #   hosts:
-    #     - jenkins-external.example.com
-
-
-agent:
-  enabled: true
-  defaultsProviderTemplate: ""
-  # URL for connecting to the Jenkins controller
-  jenkinsUrl:
-  # connect to the specified host and port, instead of connecting directly to the Jenkins controller
-  jenkinsTunnel:
-  kubernetesConnectTimeout: 5
-  kubernetesReadTimeout: 15
-  maxRequestsPerHostStr: "32"
-  namespace:
-  image: "jenkins/inbound-agent"
-  tag: "4.11.2-4"
-  workingDir: "/home/jenkins/agent"
-  nodeUsageMode: "NORMAL"
-  customJenkinsLabels: []
-  # name of the secret to be used for image pulling
-  imagePullSecretName:
-  componentName: "jenkins-agent"
-  websocket: false
-  privileged: false
-  runAsUser:
-  runAsGroup:
-  resources:
-    requests:
-      cpu: "512m"
-      memory: "512Mi"
-    limits:
-      cpu: "512m"
-      memory: "512Mi"
-  # You may want to change this to true while testing a new image
-  alwaysPullImage: false
-  # Controls how agent pods are retained after the Jenkins build completes
-  # Possible values: Always, Never, OnFailure
-  podRetention: "Never"
-  # Disable if you do not want the Yaml of the agent pod template to show up
-  # in the job Console Output. This can be helpful for either security reasons
-  # or simply to clean up the output to make it easier to read.
-  showRawYaml: true
-  # You can define the volumes that you want to mount for this container
-  # Allowed types are: ConfigMap, EmptyDir, HostPath, Nfs, PVC, Secret
-  # Configure the attributes as they appear in the corresponding Java class for that type
-  # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes
-  volumes: []
-  # - type: ConfigMap
-  #   configMapName: myconfigmap
-  #   mountPath: /var/myapp/myconfigmap
-  # - type: EmptyDir
-  #   mountPath: /var/myapp/myemptydir
-  #   memory: false
-  # - type: HostPath
-  #   hostPath: /var/lib/containers
-  #   mountPath: /var/myapp/myhostpath
-  # - type: Nfs
-  #   mountPath: /var/myapp/mynfs
-  #   readOnly: false
-  #   serverAddress: "192.0.2.0"
-  #   serverPath: /var/lib/containers
-  # - type: PVC
-  #   claimName: mypvc
-  #   mountPath: /var/myapp/mypvc
-  #   readOnly: false
-  # - type: Secret
-  #   defaultMode: "600"
-  #   mountPath: /var/myapp/mysecret
-  #   secretName: mysecret
-  # Pod-wide environment, these vars are visible to any container in the agent pod
-
-  # You can define the workspaceVolume that you want to mount for this container
-  # Allowed types are: DynamicPVC, EmptyDir, HostPath, Nfs, PVC
-  # Configure the attributes as they appear in the corresponding Java class for that type
-  # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes/workspace
-  workspaceVolume: {}
-  ## DynamicPVC example
-  # type: DynamicPVC
-  # configMapName: myconfigmap
-  ## EmptyDir example
-  # type: EmptyDir
-  # memory: false
-  ## HostPath example
-  # type: HostPath
-  # hostPath: /var/lib/containers
-  ## NFS example
-  # type: Nfs
-  # readOnly: false
-  # serverAddress: "192.0.2.0"
-  # serverPath: /var/lib/containers
-  ## PVC example
-  # type: PVC
-  # claimName: mypvc
-  # readOnly: false
-  #
-  # Pod-wide environment, these vars are visible to any container in the agent pod
-  envVars: []
-  # - name: PATH
-  #   value: /usr/local/bin
-  nodeSelector: {}
-  # Key Value selectors. Ex:
-  #   jenkins-agent: v1
-
-  # Executed command when the side container gets started
-  command:
-  args: "${computer.jnlpmac} ${computer.name}"
-  # Side container name
-  sideContainerName: "jnlp"
-  # Doesn't allocate pseudo TTY by default
-  TTYEnabled: false
-  # Max number of spawned agents
-  containerCap: 10
-  # Pod name
-  podName: "default"
-  # Allows the Pod to remain active for reuse until the configured number of
-  # minutes has passed since the last step was executed on it.
-  idleMinutes: 0
-  # Raw yaml template for the Pod. For example this allows usage of tolerations for agent pods.
-  # https://github.com/jenkinsci/kubernetes-plugin#using-yaml-to-define-pod-templates
-  # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-  yamlTemplate: ""
-  # yamlTemplate: |-
-  #   apiVersion: v1
-  #   kind: Pod
-  #   spec:
-  #     tolerations:
-  #       - key: "key"
-  #         operator: "Equal"
-  #         value: "value"
-  # Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates: merge or override
-  yamlMergeStrategy: "override"
-  # Timeout in seconds for an agent to be online
-  connectTimeout: 100
-  # Annotations to apply to the pod.
-  annotations: {}
-
-  # Disable the default Jenkins Agent configuration.
-  # Useful when configuring agents only with the podTemplates value, since the default podTemplate populated by the values mentioned above will be excluded in the rendered template.
-  disableDefaultAgent: false
-
-  # Below is the implementation of custom pod templates for the default configured kubernetes cloud.
-  # Add a key under podTemplates for each pod template. Each key (prior to the | character) is just a label, and can be any value.
-  # Keys are only used to give the pod template a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label
-  # characters: lowercase letters, numbers, and hyphens. Each pod template can contain multiple containers.
-  # For this pod templates configuration to be loaded, the following values must be set:
-  # controller.JCasC.defaultConfig: true
-  # Best reference is https://<jenkins_url>/configuration-as-code/reference#Cloud-kubernetes. The example below creates a python pod template.
- podTemplates: {} - # python: | - # - name: python - # label: jenkins-python - # serviceAccount: jenkins - # containers: - # - name: python - # image: python:3 - # command: "/bin/sh -c" - # args: "cat" - # ttyEnabled: true - # privileged: true - # resourceRequestCpu: "400m" - # resourceRequestMemory: "512Mi" - # resourceLimitCpu: "1" - # resourceLimitMemory: "1024Mi" - -# Here you can add additional agents -# They inherit all values from `agent` so you only need to specify values which differ -additionalAgents: {} -# maven: -# podName: maven -# customJenkinsLabels: maven -# # An example of overriding the jnlp container -# # sideContainerName: jnlp -# image: jenkins/jnlp-agent-maven -# tag: latest -# python: -# podName: python -# customJenkinsLabels: python -# sideContainerName: python -# image: python -# tag: "3" -# command: "/bin/sh -c" -# args: "cat" -# TTYEnabled: true - -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: jenkins-data-nfs - -## Install Default RBAC roles and bindings -rbac: - create: true - readSecrets: false - -serviceAccount: - create: true - # The name of the service account is autogenerated by default - name: - annotations: {} - imagePullSecretName: diff --git a/unused/mathieu_ghost/mathieu.pvc.yaml b/unused/mathieu_ghost/mathieu.pvc.yaml deleted file mode 100644 index e72dc29..0000000 --- a/unused/mathieu_ghost/mathieu.pvc.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - namespace: mathieu - name: mathieu-nfs - labels: - directory: mathieu -spec: - storageClassName: fast - capacity: - storage: "10Gi" - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - nfs: - path: /mathieu - server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: mathieu - name: mathieu-nfs -spec: - storageClassName: fast - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "10Gi" - selector: - matchLabels: - directory: mathieu \ No newline at end of file diff --git a/unused/mathieu_ghost/mathieu.values.yaml b/unused/mathieu_ghost/mathieu.values.yaml deleted file mode 100644 index a1283ba..0000000 --- a/unused/mathieu_ghost/mathieu.values.yaml +++ /dev/null @@ -1,72 +0,0 @@ -# -# IMPORTANT NOTE -# -# This chart inherits from our common library chart. You can check the default values/options here: -# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml -# - -image: - # -- image repository - repository: ghost - # -- image tag - # @default -- chart.appVersion - tag: - # -- image pull policy - pullPolicy: IfNotPresent - -# See https://ghost.org/docs/config/#running-ghost-with-config-env-variables -env: - url: "https://cinema.kluster.moll.re" - database__client: sqlite3 - database__connection__filename: "content/data/ghost-data.db" - database__useNullAsDefault: true, - database__debug: false - NODE_ENV: production - -# -- Configures service settings for the chart. -# @default -- See values.yaml -service: - main: - ports: - http: - port: 2368 - - - -ingress: - # -- Enable and configure ingress settings for the chart under this key. 
- # @default -- See values.yaml - main: - enabled: true - annotations: - kubernetes.io/ingress.class: nginx - cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod - hosts: - - host: cinema.kluster.moll.re - paths: - - path: / - pathType: Prefix - tls: - - hosts: - - cinema.kluster.moll.re - secretName: cloudflare-letsencrypt-issuer-account-key - -# -- Configure persistence settings for the chart under this key. -# @default -- See values.yaml -persistence: - content: - enabled: true - existingClaim: mathieu-nfs - -mariadb: - enabled: false - architecture: standalone - auth: - database: ghost - username: ghost - password: ghost - rootPassword: ghost-rootpass - primary: - persistance: - enabled: false - diff --git a/unused/mc-forwarding.deployment.yaml b/unused/mc-forwarding.deployment.yaml deleted file mode 100644 index 28edb40..0000000 --- a/unused/mc-forwarding.deployment.yaml +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: minecraft - labels: - app: minecraft - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: forwarding - namespace: minecraft - labels: - app: forwarding -spec: - replicas: 1 - selector: - matchLabels: - app: forwarding - template: - metadata: - labels: - app: forwarding - spec: - containers: - - name: forwarding - image: simonrupf/socat - tty: true - ports: - - containerPort: 25565 - args: ["TCP4-LISTEN:25565,fork", "TCP6:mc.game.moll.re:25565"] - hostNetwork: true - nodeSelector: - hdd: enabled - # ensures we are running on 192.168.1.122, ie pi node 0 ---- -apiVersion: v1 -kind: Service -metadata: - name: forwarding - namespace: minecraft - -spec: - type: NodePort - ipFamilyPolicy: PreferDualStack - ports: - - name: mc - port: 25565 - selector: - app: forwarding - diff --git a/unused/nginx.values.yaml b/unused/nginx.values.yaml deleted file mode 100644 index 73b9326..0000000 --- a/unused/nginx.values.yaml +++ /dev/null @@ -1,351 +0,0 @@ -controller: - ## The name of the Ingress Controller daemonset or deployment. - ## Autogenerated if not set or set to "". - # name: nginx-ingress - - ## The kind of the Ingress Controller installation - deployment or daemonset. - kind: deployment - - ## Deploys the Ingress Controller for NGINX Plus. - nginxplus: false - - # Timeout in milliseconds which the Ingress Controller will wait for a successful NGINX reload after a change or at the initial start. - nginxReloadTimeout: 60000 - - ## Support for App Protect - appprotect: - ## Enable the App Protect module in the Ingress Controller. - enable: false - ## Sets log level for App Protect. Allowed values: fatal, error, warn, info, debug, trace - # logLevel: fatal - - ## Support for App Protect Dos - appprotectdos: - ## Enable the App Protect Dos module in the Ingress Controller. - enable: false - ## Enable debugging for App Protect Dos. - debug: false - ## Max number of nginx processes to support. - maxWorkers: 0 - ## Max number of ADMD instances. - maxDaemons: 0 - ## RAM memory size to consume in MB. - memory: 0 - - ## Enables the Ingress Controller pods to use the host's network namespace. - hostNetwork: false - - ## Enables debugging for NGINX. Uses the nginx-debug binary. Requires error-log-level: debug in the ConfigMap via `controller.config.entries`. - nginxDebug: false - - ## The log level of the Ingress Controller. - logLevel: 1 - - ## A list of custom ports to expose on the NGINX ingress controller pod. Follows the conventional Kubernetes yaml syntax for container ports. 
- customPorts: [] - - image: - ## The image repository of the Ingress Controller. - repository: nginx/nginx-ingress - - ## The tag of the Ingress Controller image. - tag: "2.2.0" - - ## The pull policy for the Ingress Controller image. - pullPolicy: IfNotPresent - - config: - ## The name of the ConfigMap used by the Ingress Controller. - ## Autogenerated if not set or set to "". - # name: nginx-config - - ## The annotations of the Ingress Controller configmap. - annotations: {} - - ## The entries of the ConfigMap for customizing NGINX configuration. - entries: {} - - ## It is recommended to use your own TLS certificates and keys - defaultTLS: - ## The base64-encoded TLS certificate for the default HTTPS server. If not specified, a pre-generated self-signed certificate is used. - ## Note: It is recommended that you specify your own certificate. - cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN2akNDQWFZQ0NRREFPRjl0THNhWFhEQU5CZ2txaGtpRzl3MEJBUXNGQURBaE1SOHdIUVlEVlFRRERCWk8KUjBsT1dFbHVaM0psYzNORGIyNTBjbTlzYkdWeU1CNFhEVEU0TURreE1qRTRNRE16TlZvWERUSXpNRGt4TVRFNApNRE16TlZvd0lURWZNQjBHQTFVRUF3d1dUa2RKVGxoSmJtZHlaWE56UTI5dWRISnZiR3hsY2pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUwvN2hIUEtFWGRMdjNyaUM3QlBrMTNpWkt5eTlyQ08KR2xZUXYyK2EzUDF0azIrS3YwVGF5aGRCbDRrcnNUcTZzZm8vWUk1Y2Vhbkw4WGM3U1pyQkVRYm9EN2REbWs1Qgo4eDZLS2xHWU5IWlg0Rm5UZ0VPaStlM2ptTFFxRlBSY1kzVnNPazFFeUZBL0JnWlJVbkNHZUtGeERSN0tQdGhyCmtqSXVuektURXUyaDU4Tlp0S21ScUJHdDEwcTNRYzhZT3ExM2FnbmovUWRjc0ZYYTJnMjB1K1lYZDdoZ3krZksKWk4vVUkxQUQ0YzZyM1lma1ZWUmVHd1lxQVp1WXN2V0RKbW1GNWRwdEMzN011cDBPRUxVTExSakZJOTZXNXIwSAo1TmdPc25NWFJNV1hYVlpiNWRxT3R0SmRtS3FhZ25TZ1JQQVpQN2MwQjFQU2FqYzZjNGZRVXpNQ0F3RUFBVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQWpLb2tRdGRPcEsrTzhibWVPc3lySmdJSXJycVFVY2ZOUitjb0hZVUoKdGhrYnhITFMzR3VBTWI5dm15VExPY2xxeC9aYzJPblEwMEJCLzlTb0swcitFZ1U2UlVrRWtWcitTTFA3NTdUWgozZWI4dmdPdEduMS9ienM3bzNBaS9kclkrcUI5Q2k1S3lPc3FHTG1US2xFaUtOYkcyR1ZyTWxjS0ZYQU80YTY3Cklnc1hzYktNbTQwV1U3cG9mcGltU1ZmaXFSdkV5YmN3N0NYODF6cFErUyt1eHRYK2VBZ3V0NHh3VlI5d2IyVXYKelhuZk9HbWhWNThDd1dIQnNKa0kxNXhaa2VUWXdSN0diaEFMSkZUUkk3dkhvQXprTWIzbjAxQjQyWjNrN3RXNQpJUDFmTlpIOFUvOWxiUHNoT21FRFZkdjF5ZytVRVJxbStGSis2R0oxeFJGcGZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= - - ## The base64-encoded TLS key for the default HTTPS server. Note: If not specified, a pre-generated key is used. - ## Note: It is recommended that you specify your own key. 
- key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdi91RWM4b1JkMHUvZXVJTHNFK1RYZUprckxMMnNJNGFWaEMvYjVyYy9XMlRiNHEvClJOcktGMEdYaVN1eE9ycXgrajlnamx4NXFjdnhkenRKbXNFUkJ1Z1B0ME9hVGtIekhvb3FVWmcwZGxmZ1dkT0EKUTZMNTdlT1l0Q29VOUZ4amRXdzZUVVRJVUQ4R0JsRlNjSVo0b1hFTkhzbysyR3VTTWk2Zk1wTVM3YUhudzFtMApxWkdvRWEzWFNyZEJ6eGc2clhkcUNlUDlCMXl3VmRyYURiUzc1aGQzdUdETDU4cGszOVFqVUFQaHpxdmRoK1JWClZGNGJCaW9CbTVpeTlZTW1hWVhsMm0wTGZzeTZuUTRRdFFzdEdNVWozcGJtdlFmazJBNnljeGRFeFpkZFZsdmwKMm82MjBsMllxcHFDZEtCRThCay90elFIVTlKcU56cHpoOUJUTXdJREFRQUJBb0lCQVFDZklHbXowOHhRVmorNwpLZnZJUXQwQ0YzR2MxNld6eDhVNml4MHg4Mm15d1kxUUNlL3BzWE9LZlRxT1h1SENyUlp5TnUvZ2IvUUQ4bUFOCmxOMjRZTWl0TWRJODg5TEZoTkp3QU5OODJDeTczckM5bzVvUDlkazAvYzRIbjAzSkVYNzZ5QjgzQm9rR1FvYksKMjhMNk0rdHUzUmFqNjd6Vmc2d2szaEhrU0pXSzBwV1YrSjdrUkRWYmhDYUZhNk5nMUZNRWxhTlozVDhhUUtyQgpDUDNDeEFTdjYxWTk5TEI4KzNXWVFIK3NYaTVGM01pYVNBZ1BkQUk3WEh1dXFET1lvMU5PL0JoSGt1aVg2QnRtCnorNTZud2pZMy8yUytSRmNBc3JMTnIwMDJZZi9oY0IraVlDNzVWYmcydVd6WTY3TWdOTGQ5VW9RU3BDRkYrVm4KM0cyUnhybnhBb0dCQU40U3M0ZVlPU2huMVpQQjdhTUZsY0k2RHR2S2ErTGZTTXFyY2pOZjJlSEpZNnhubmxKdgpGenpGL2RiVWVTbWxSekR0WkdlcXZXaHFISy9iTjIyeWJhOU1WMDlRQ0JFTk5jNmtWajJTVHpUWkJVbEx4QzYrCk93Z0wyZHhKendWelU0VC84ajdHalRUN05BZVpFS2FvRHFyRG5BYWkyaW5oZU1JVWZHRXFGKzJyQW9HQkFOMVAKK0tZL0lsS3RWRzRKSklQNzBjUis3RmpyeXJpY05iWCtQVzUvOXFHaWxnY2grZ3l4b25BWlBpd2NpeDN3QVpGdwpaZC96ZFB2aTBkWEppc1BSZjRMazg5b2pCUmpiRmRmc2l5UmJYbyt3TFU4NUhRU2NGMnN5aUFPaTVBRHdVU0FkCm45YWFweUNweEFkREtERHdObit3ZFhtaTZ0OHRpSFRkK3RoVDhkaVpBb0dCQUt6Wis1bG9OOTBtYlF4VVh5YUwKMjFSUm9tMGJjcndsTmVCaWNFSmlzaEhYa2xpSVVxZ3hSZklNM2hhUVRUcklKZENFaHFsV01aV0xPb2I2NTNyZgo3aFlMSXM1ZUtka3o0aFRVdnpldm9TMHVXcm9CV2xOVHlGanIrSWhKZnZUc0hpOGdsU3FkbXgySkJhZUFVWUNXCndNdlQ4NmNLclNyNkQrZG8wS05FZzFsL0FvR0FlMkFVdHVFbFNqLzBmRzgrV3hHc1RFV1JqclRNUzRSUjhRWXQKeXdjdFA4aDZxTGxKUTRCWGxQU05rMXZLTmtOUkxIb2pZT2pCQTViYjhibXNVU1BlV09NNENoaFJ4QnlHbmR2eAphYkJDRkFwY0IvbEg4d1R0alVZYlN5T294ZGt5OEp0ek90ajJhS0FiZHd6NlArWDZDODhjZmxYVFo5MWpYL3RMCjF3TmRKS2tDZ1lCbyt0UzB5TzJ2SWFmK2UwSkN5TGhzVDQ5cTN3Zis2QWVqWGx2WDJ1VnRYejN5QTZnbXo5aCsKcDNlK2JMRUxwb3B0WFhNdUFRR0xhUkcrYlNNcjR5dERYbE5ZSndUeThXczNKY3dlSTdqZVp2b0ZpbmNvVlVIMwphdmxoTUVCRGYxSjltSDB5cDBwWUNaS2ROdHNvZEZtQktzVEtQMjJhTmtsVVhCS3gyZzR6cFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= - - ## The secret with a TLS certificate and key for the default HTTPS server. - ## The value must follow the following format: `/`. - ## Used as an alternative to specifying a certificate and key using `controller.defaultTLS.cert` and `controller.defaultTLS.key` parameters. - ## Format: / - secret: - - wildcardTLS: - ## The base64-encoded TLS certificate for every Ingress/VirtualServer host that has TLS enabled but no secret specified. - ## If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. - cert: "" - - ## The base64-encoded TLS key for every Ingress/VirtualServer host that has TLS enabled but no secret specified. - ## If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. - key: "" - - ## The secret with a TLS certificate and key for every Ingress/VirtualServer host that has TLS enabled but no secret specified. - ## The value must follow the following format: `/`. - ## Used as an alternative to specifying a certificate and key using `controller.wildcardTLS.cert` and `controller.wildcardTLS.key` parameters. - ## Format: / - secret: - - ## The node selector for pod assignment for the Ingress Controller pods. 
- nodeSelector: {} - - ## The termination grace period of the Ingress Controller pod. - terminationGracePeriodSeconds: 30 - - ## The resources of the Ingress Controller pods. - resources: {} - # limits: - # cpu: 100m - # memory: 64Mi - # requests: - # cpu: 100m - # memory: 64Mi - - ## The tolerations of the Ingress Controller pods. - tolerations: [] - - ## The affinity of the Ingress Controller pods. - affinity: {} - - ## The volumes of the Ingress Controller pods. - volumes: [] - # - name: extra-conf - # configMap: - # name: extra-conf - - ## The volumeMounts of the Ingress Controller pods. - volumeMounts: [] - # - name: extra-conf - # mountPath: /etc/nginx/conf.d/extra.conf - # subPath: extra.conf - - ## InitContainers for the Ingress Controller pods. - initContainers: [] - # - name: init-container - # image: busybox:1.34 - # command: ['sh', '-c', 'echo this is initial setup!'] - - ## Extra containers for the Ingress Controller pods. - extraContainers: [] - # - name: container - # image: busybox:1.34 - # command: ['sh', '-c', 'echo this is a sidecar!'] - - ## The number of replicas of the Ingress Controller deployment. - replicaCount: 1 - - ## A class of the Ingress Controller. - - ## IngressClass resource with the name equal to the class must be deployed. Otherwise, - ## the Ingress Controller will fail to start. - ## The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field resource equal to the class. - - ## The Ingress Controller processes all the resources that do not have the "ingressClassName" field for all versions of kubernetes. - ingressClass: nginx - - ## New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`. - setAsDefaultIngress: false - - ## Namespace to watch for Ingress resources. By default the Ingress Controller watches all namespaces. - watchNamespace: "" - - ## Enable the custom resources. - enableCustomResources: true - - ## Enable preview policies. This parameter is deprecated. To enable OIDC Policies please use controller.enableOIDC instead. - enablePreviewPolicies: false - - ## Enable OIDC policies. - enableOIDC: false - - ## Enable TLS Passthrough on port 443. Requires controller.enableCustomResources. - enableTLSPassthrough: false - - ## Enable cert manager for Virtual Server resources. Requires controller.enableCustomResources. - enableCertManager: false - - globalConfiguration: - ## Creates the GlobalConfiguration custom resource. Requires controller.enableCustomResources. - create: false - - ## The spec of the GlobalConfiguration for defining the global configuration parameters of the Ingress Controller. - spec: {} - # listeners: - # - name: dns-udp - # port: 5353 - # protocol: UDP - # - name: dns-tcp - # port: 5353 - # protocol: TCP - - ## Enable custom NGINX configuration snippets in Ingress, VirtualServer, VirtualServerRoute and TransportServer resources. - enableSnippets: false - - ## Add a location based on the value of health-status-uri to the default server. The location responds with the 200 status code for any request. - ## Useful for external health-checking of the Ingress Controller. - healthStatus: false - - ## Sets the URI of health status location in the default server. Requires controller.healthStatus. - healthStatusURI: "/nginx-health" - - nginxStatus: - ## Enable the NGINX stub_status, or the NGINX Plus API. - enable: true - - ## Set the port where the NGINX stub_status or the NGINX Plus API is exposed. 
- port: 8080 - - ## Add IPv4 IP/CIDR blocks to the allow list for NGINX stub_status or the NGINX Plus API. Separate multiple IP/CIDR by commas. - allowCidrs: "127.0.0.1" - - service: - ## Creates a service to expose the Ingress Controller pods. - create: true - - ## The type of service to create for the Ingress Controller. - type: LoadBalancer - - ## The externalTrafficPolicy of the service. The value Local preserves the client source IP. - externalTrafficPolicy: Local - - ## The annotations of the Ingress Controller service. - annotations: {} - - ## The extra labels of the service. - extraLabels: {} - - ## The static IP address for the load balancer. Requires controller.service.type set to LoadBalancer. The cloud provider must support this feature. - loadBalancerIP: "" - - ## The list of external IPs for the Ingress Controller service. - externalIPs: [] - - ## The IP ranges (CIDR) that are allowed to access the load balancer. Requires controller.service.type set to LoadBalancer. The cloud provider must support this feature. - loadBalancerSourceRanges: [] - - ## The name of the service - ## Autogenerated if not set or set to "". - # name: nginx-ingress - - httpPort: - ## Enables the HTTP port for the Ingress Controller service. - enable: true - - ## The HTTP port of the Ingress Controller service. - port: 80 - - ## The custom NodePort for the HTTP port. Requires controller.service.type set to NodePort. - nodePort: "" - - ## The HTTP port on the POD where the Ingress Controller service is running. - targetPort: 80 - - httpsPort: - ## Enables the HTTPS port for the Ingress Controller service. - enable: true - - ## The HTTPS port of the Ingress Controller service. - port: 443 - - ## The custom NodePort for the HTTPS port. Requires controller.service.type set to NodePort. - nodePort: "" - - ## The HTTPS port on the POD where the Ingress Controller service is running. - targetPort: 443 - - ## A list of custom ports to expose through the Ingress Controller service. Follows the conventional Kubernetes yaml syntax for service ports. - customPorts: [] - - serviceAccount: - ## The name of the service account of the Ingress Controller pods. Used for RBAC. - ## Autogenerated if not set or set to "". - # name: nginx-ingress - - ## The name of the secret containing docker registry credentials. - ## Secret must exist in the same namespace as the helm release. - imagePullSecretName: "" - - reportIngressStatus: - ## Updates the address field in the status of Ingress resources with an external address of the Ingress Controller. - ## You must also specify the source of the external address either through an external service via controller.reportIngressStatus.externalService, - ## controller.reportIngressStatus.ingressLink or the external-status-address entry in the ConfigMap via controller.config.entries. - ## Note: controller.config.entries.external-status-address takes precedence over the others. - enable: true - - ## Specifies the name of the service with the type LoadBalancer through which the Ingress Controller is exposed externally. - ## The external address of the service is used when reporting the status of Ingress, VirtualServer and VirtualServerRoute resources. - ## controller.reportIngressStatus.enable must be set to true. - ## The default is autogenerated and matches the created service (see controller.service.create). - # externalService: nginx-ingress - - ## Specifies the name of the IngressLink resource, which exposes the Ingress Controller pods via a BIG-IP system. 
- ## The IP of the BIG-IP system is used when reporting the status of Ingress, VirtualServer and VirtualServerRoute resources. - ## controller.reportIngressStatus.enable must be set to true. - ingressLink: "" - - ## Enable Leader election to avoid multiple replicas of the controller reporting the status of Ingress resources. controller.reportIngressStatus.enable must be set to true. - enableLeaderElection: true - - ## Specifies the name of the ConfigMap, within the same namespace as the controller, used as the lock for leader election. controller.reportIngressStatus.enableLeaderElection must be set to true. - ## Autogenerated if not set or set to "". - # leaderElectionLockName: "nginx-ingress-leader-election" - - ## The annotations of the leader election configmap. - annotations: {} - - pod: - ## The annotations of the Ingress Controller pod. - annotations: {} - - ## The additional extra labels of the Ingress Controller pod. - extraLabels: {} - - ## The PriorityClass of the ingress controller pods. - priorityClassName: - - readyStatus: - ## Enables readiness endpoint "/nginx-ready". The endpoint returns a success code when NGINX has loaded all the config after startup. - enable: true - - ## Set the port where the readiness endpoint is exposed. - port: 8081 - - ## Enable collection of latency metrics for upstreams. Requires prometheus.create. - enableLatencyMetrics: false - -rbac: - ## Configures RBAC. - create: true - -prometheus: - ## Expose NGINX or NGINX Plus metrics in the Prometheus format. - create: true - - ## Configures the port to scrape the metrics. - port: 9113 - - ## Specifies the namespace/name of a Kubernetes TLS Secret which will be used to protect the Prometheus endpoint. - secret: "" - - ## Configures the HTTP scheme used. - scheme: http - -nginxServiceMesh: - ## Enables integration with NGINX Service Mesh. - ## Requires controller.nginxplus - enable: false - - ## Enables NGINX Service Mesh workload to route egress traffic through the Ingress Controller. 
- ## Requires nginxServiceMesh.enable - enableEgress: false - diff --git a/unused/nocodb.deployment.yaml b/unused/nocodb.deployment.yaml deleted file mode 100644 index 5a33f39..0000000 --- a/unused/nocodb.deployment.yaml +++ /dev/null @@ -1,75 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: nocodb - labels: - app: nocodb - - - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nocodb - namespace: nocodb - labels: - app: nocodb -spec: - replicas: 1 - selector: - matchLabels: - app: nocodb - template: - metadata: - labels: - app: nocodb - spec: - containers: - - name: nocodb - image: nocodb/nocodb - tty: true - ports: - - containerPort: 8080 - ---- -apiVersion: v1 -kind: Service -metadata: - name: nocodb - namespace: nocodb - -spec: - type: ClusterIP - ports: - - name: http - port: 8080 - selector: - app: nocodb - ---- - -kind: Ingress -apiVersion: networking.k8s.io/v1 -metadata: - namespace: nocodb - name: nocodb-ingress - annotations: - kubernetes.io/ingress.class: nginx - cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod -spec: - tls: - - hosts: - - nocodb.kluster.moll.re - secretName: cloudflare-letsencrypt-issuer-account-key - rules: - - host: nocodb.kluster.moll.re - http: - paths: - - backend: - service: - name: nocodb - port: - number: 8080 - path: / - pathType: Prefix \ No newline at end of file diff --git a/unused/pihole.ingress.yaml b/unused/pihole.ingress.yaml deleted file mode 100644 index 4badd26..0000000 --- a/unused/pihole.ingress.yaml +++ /dev/null @@ -1,26 +0,0 @@ -kind: Ingress -apiVersion: networking.k8s.io/v1 -metadata: - namespace: pihole - name: pihole-ingress - annotations: - kubernetes.io/ingress.class: nginx - cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod - - -spec: - tls: - - hosts: - - pihole.kluster.moll.re - secretName: cloudflare-letsencrypt-issuer-account-key - rules: - - host: pihole.kluster.moll.re - http: - paths: - - backend: - service: - name: pihole-web - port: - number: 80 - path: / - pathType: Prefix diff --git a/unused/pihole.persistentvolume.yml b/unused/pihole.persistentvolume.yml deleted file mode 100644 index 23e9d19..0000000 --- a/unused/pihole.persistentvolume.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - namespace: pihole - name: pihole-nfs - labels: - directory: pihole -spec: - storageClassName: slow - capacity: - storage: "500Mi" - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - nfs: - path: /pihole - server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed ---- -## pihole.persistentvolumeclaim.yml ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: pihole - name: pihole-nfs -spec: - storageClassName: slow - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "500Mi" - selector: - matchLabels: - directory: pihole ---- \ No newline at end of file diff --git a/unused/pihole.values.yml b/unused/pihole.values.yml deleted file mode 100644 index 9a4f098..0000000 --- a/unused/pihole.values.yml +++ /dev/null @@ -1,397 +0,0 @@ -# Default values for pihole. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# -- The number of replicas -replicaCount: 1 - -# -- The `spec.strategyTpye` for updates -strategyType: RollingUpdate - -# -- The maximum number of Pods that can be created over the desired number of `ReplicaSet` during updating. 
-maxSurge: 1 - -# -- The maximum number of Pods that can be unavailable during updating -maxUnavailable: 1 - -image: - # -- the repostory to pull the image from - repository: "pihole/pihole" - # -- the docker tag, if left empty it will get it from the chart's appVersion - tag: "" - # -- the pull policy - pullPolicy: IfNotPresent - -dualStack: - # -- set this to true to enable creation of DualStack services or creation of separate IPv6 services if `serviceDns.type` is set to `"LoadBalancer"` - enabled: false - -dnsHostPort: - # -- set this to true to enable dnsHostPort - enabled: false - # -- default port for this pod - port: 53 - -# -- Configuration for the DNS service on port 53 -serviceDns: - - # -- deploys a mixed (TCP + UDP) Service instead of separate ones - mixedService: false - - # -- `spec.type` for the DNS Service - type: LoadBalancer - - # -- The port of the DNS service - port: 53 - - # -- Optional node port for the DNS service - nodePort: "" - - # -- `spec.externalTrafficPolicy` for the DHCP Service - externalTrafficPolicy: Local - - # -- A fixed `spec.loadBalancerIP` for the DNS Service - loadBalancerIP: 192.168.1.3 - # -- A fixed `spec.loadBalancerIP` for the IPv6 DNS Service - loadBalancerIPv6: "" - - # -- Annotations for the DNS service - annotations: - # metallb.universe.tf/address-pool: network-services - metallb.universe.tf/allow-shared-ip: pihole-svc - -# -- Configuration for the DHCP service on port 67 -serviceDhcp: - - # -- Generate a Service resource for DHCP traffic - enabled: false - - # -- `spec.type` for the DHCP Service - type: NodePort - - # -- `spec.externalTrafficPolicy` for the DHCP Service - externalTrafficPolicy: Local - - # -- A fixed `spec.loadBalancerIP` for the DHCP Service - loadBalancerIP: "" - # -- A fixed `spec.loadBalancerIP` for the IPv6 DHCP Service - loadBalancerIPv6: "" - - # -- Annotations for the DHCP service - annotations: {} - # metallb.universe.tf/address-pool: network-services - # metallb.universe.tf/allow-shared-ip: pihole-svc - -# -- Configuration for the web interface service -serviceWeb: - # -- Configuration for the HTTP web interface listener - http: - - # -- Generate a service for HTTP traffic - enabled: true - - # -- The port of the web HTTP service - port: 80 - - # -- Configuration for the HTTPS web interface listener - https: - # -- Generate a service for HTTPS traffic - enabled: true - - # -- The port of the web HTTPS service - port: 443 - - # -- `spec.type` for the web interface Service - type: ClusterIP - - # -- `spec.externalTrafficPolicy` for the web interface Service - externalTrafficPolicy: Local - - # -- A fixed `spec.loadBalancerIP` for the web interface Service - loadBalancerIP: "" - # -- A fixed `spec.loadBalancerIP` for the IPv6 web interface Service - loadBalancerIPv6: "" - - # -- Annotations for the DHCP service - annotations: {} - # metallb.universe.tf/address-pool: network-services - # metallb.universe.tf/allow-shared-ip: pihole-svc - -virtualHost: pi.hole - -# -- Configuration for the Ingress -ingress: - # -- Generate a Ingress resource - enabled: false # DONE EXTERNALLY - - # -- Specify an ingressClassName - # ingressClassName: nginx - - # -- Annotations for the ingress - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - path: / - hosts: - # virtualHost (default value is pi.hole) will be appended to the hosts - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # #- virtualHost (default value is pi.hole) will be appended to the hosts - # - 
chart-example.local - -# -- Probes configuration -probes: - # -- probes.liveness -- Configure the healthcheck for the ingress controller - liveness: - # -- Generate a liveness probe - enabled: true - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 5 - readiness: - # -- Generate a readiness probe - enabled: true - initialDelaySeconds: 60 - failureThreshold: 3 - timeoutSeconds: 5 - -# -- We usually recommend not to specify default resources and to leave this as a conscious -# -- choice for the user. This also increases chances charts run on environments with little -# -- resources, such as Minikube. If you do want to specify resources, uncomment the following -# -- lines, adjust them as necessary, and remove the curly braces after 'resources:'. -resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -# -- `spec.PersistentVolumeClaim` configuration -persistentVolumeClaim: - # -- set to true to use pvc - enabled: true - - # -- specify an existing `PersistentVolumeClaim` to use - existingClaim: "pihole-nfs" - - # -- Annotations for the `PersistentVolumeClaim` - annotations: {} - - accessModes: - - ReadWriteOnce - - size: "500Mi" - - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## If subPath is set, mount a sub folder of a volume instead of the root of the volume. - ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). - - ## subPath: "pihole" - -nodeSelector: {} - -tolerations: [] - -# -- Specify a priorityClassName - # priorityClassName: "" - -# Reference: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -topologySpreadConstraints: [] -# - maxSkew: -# topologyKey: -# whenUnsatisfiable: -# labelSelector: - -affinity: {} - -# -- Administrator password when not using an existing secret (see below) -adminPassword: "admin" - -# -- Use an existing secret for the admin password. -admin: - # -- Specify an existing secret to use as admin password - existingSecret: "" - # -- Specify the key inside the secret to use - passwordKey: "" - -# -- extraEnvironmentVars is a list of extra environment variables to set for pihole to use -extraEnvVars: {} - # TZ: UTC - -# -- extraEnvVarsSecret is a list of secrets to load in as environment variables.
-extraEnvVarsSecret: {} - # env_var: - # name: secret-name - # key: secret-key - -# -- default upstream DNS 1 server to use -DNS1: "8.8.8.8" -# -- default upstream DNS 2 server to use -DNS2: "8.8.4.4" - -antiaff: - # -- set to true to enable antiaffinity (example: 2 pihole DNS in the same cluster) - enabled: false - # -- Here you can set the pihole release (you set in `helm install ...`) - # you want to avoid - avoidRelease: pihole1 - # -- Here you can choose between preferred or required - strict: true - -doh: - # -- set to true to enable DNS over HTTPS via cloudflared - enabled: false - name: "cloudflared" - repository: "crazymax/cloudflared" - tag: latest - pullPolicy: IfNotPresent - # -- Here you can pass environment variables to the DoH container, for example: - envVars: {} - # TUNNEL_DNS_UPSTREAM: "https://1.1.1.2/dns-query,https://1.0.0.2/dns-query" - - # -- Probes configuration - probes: - # -- Configure the healthcheck for the doh container - liveness: - # -- set to true to enable liveness probe - enabled: true - # -- defines the initial delay for the liveness probe - initialDelaySeconds: 60 - # -- defines the failure threshold for the liveness probe - failureThreshold: 10 - # -- defines the timeout in seconds for the liveness probe - timeoutSeconds: 5 - -dnsmasq: - # -- Add upstream dns servers. All lines will be added to the pihole dnsmasq configuration - upstreamServers: [] - # - server=/foo.bar/192.168.178.10 - # - server=/bar.foo/192.168.178.11 - - # -- Add custom dns entries to override the dns resolution. All lines will be added to the pihole dnsmasq configuration. - customDnsEntries: [] - # - address=/foo.bar/192.168.178.10 - # - address=/bar.foo/192.168.178.11 - - # -- Dnsmasq reads the /etc/hosts file to resolve IPs. You can add additional entries if you like - additionalHostsEntries: [] - # - 192.168.0.3 host4 - # - 192.168.0.4 host5 - - # -- Static DHCP config - staticDhcpEntries: [] - # staticDhcpEntries: - # - dhcp-host=MAC_ADDRESS,IP_ADDRESS,HOSTNAME - - # -- Other options - customSettings: - # otherSettings: - # - rebind-domain-ok=/plex.direct/ - - # -- Here we specify custom cname entries that should point to `A` records or - # elements in customDnsEntries array. - # The format should be: - # - cname=cname.foo.bar,foo.bar - # - cname=cname.bar.foo,bar.foo - # - cname=cname record,dns record - customCnameEntries: [] - -# -- list of adlists to import during initial start of the container -adlists: {} - # If you want to provide blocklists, add them here. - # - https://hosts-file.net/grm.txt - # - https://reddestdream.github.io/Projects/MinimalHosts/etc/MinimalHostsBlocker/minimalhosts - -# -- list of whitelisted domains to import during initial start of the container -whitelist: {} - # If you want to provide whitelisted domains, add them here.
- # - clients4.google.com - -# -- list of blacklisted domains to import during initial start of the container -blacklist: {} - # If you want to have special domains blacklisted, add them here - # - *.blacklist.com - -# -- list of blacklisted regex expressions to import during initial start of the container -regex: {} - # Add regular expression blacklist items - # - (^|\.)facebook\.com$ - -# -- values that should be added to pihole-FTL.conf -ftl: {} - # Add values for pihole-FTL.conf - # MAXDBDAYS: 14 - -# -- port the container should use to expose HTTP traffic -webHttp: "80" - -# -- port the container should use to expose HTTPS traffic -webHttps: "443" - -# -- hostname of pod -hostname: "" - -# -- should the container use host network -hostNetwork: "false" - -# -- should container run in privileged mode -privileged: "false" - -customVolumes: - # -- set this to true to enable custom volumes - enabled: false - # -- any volume type can be used here - config: {} - # hostPath: - # path: "/mnt/data" - -# -- Additional annotations for pods -podAnnotations: {} - # Example below allows Prometheus to scrape on metric port (requires pihole-exporter sidecar enabled) - # prometheus.io/port: '9617' - # prometheus.io/scrape: 'true' - -monitoring: - # -- Prefer adding Prometheus scrape annotations rather than enabling podMonitor. - podMonitor: - # -- set this to true to enable podMonitor - enabled: false - # -- Sidecar configuration - sidecar: - # -- set this to true to enable podMonitor as sidecar - enabled: false - port: 9617 - image: - repository: ekofr/pihole-exporter - tag: 0.0.10 - pullPolicy: IfNotPresent - resources: - limits: - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -podDnsConfig: - enabled: true - policy: "None" - nameservers: - - 127.0.0.1 - - 8.8.8.8 - diff --git a/unused/portainer/deployment.yaml b/unused/portainer/deployment.yaml deleted file mode 100644 index 0fd8921..0000000 --- a/unused/portainer/deployment.yaml +++ /dev/null @@ -1,68 +0,0 @@ -# Default values for portainer. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates.
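# (A hedged usage sketch, not from this repo: values files like this one are
# typically trimmed to the keys being overridden, e.g. a hypothetical override
#   service:
#     type: NodePort
#   persistence:
#     existingClaim: portainer-data
# passed at install time via `helm upgrade --install portainer portainer/portainer -f values.yaml`.)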
- -replicaCount: 1 - -# If enterpriseEdition is enabled, then use the values below _instead_ of those in .image -enterpriseEdition: - enabled: false - image: - repository: portainer/portainer-ee - tag: 2.12.2 - pullPolicy: Always - -image: - repository: portainer/portainer-ce - tag: latest - pullPolicy: Always - -imagePullSecrets: [] - -nodeSelector: {} - -serviceAccount: - annotations: {} - name: portainer-sa-clusteradmin - -service: - # Set the httpNodePort and edgeNodePort only if the type is NodePort - # For Ingress, set the type to be ClusterIP and set ingress.enabled to true - # For Cloud Providers, set the type to be LoadBalancer - type: ClusterIP - httpPort: 9000 - httpsPort: 9443 - httpNodePort: 30777 - httpsNodePort: 30779 - edgePort: 8000 - edgeNodePort: 30776 - annotations: {} - -tls: - # If set, Portainer will be configured to use TLS only - force: false - # If set, will mount the existing secret into the pod - existingSecret: "" - -feature: - flags: "" - -ingress: - enabled: false - ingressClassName: "" - annotations: {} - # kubernetes.io/ingress.class: nginx - # Only use below if tls.force=true - # nginx.ingress.kubernetes.io/backend-protocol: HTTPS - # Note: Hosts and paths are of type array - hosts: - - host: - paths: [] - # - path: "/" - tls: [] - -resources: {} - -persistence: - existingClaim: portainer-data - diff --git a/unused/portainer/ingress.yaml b/unused/portainer/ingress.yaml deleted file mode 100644 index d7e2509..0000000 --- a/unused/portainer/ingress.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: traefik.containo.us/v1alpha1 -kind: IngressRoute -metadata: - namespace: portainer - name: portainer-ingressroute - -spec: - entryPoints: - - websecure - routes: - - match: Host(`portainer.kluster.moll.re`) - kind: Rule - services: - - name: portainer - port: 9000 - tls: - certResolver: default-tls \ No newline at end of file diff --git a/unused/portainer/pvc.yaml b/unused/portainer/pvc.yaml deleted file mode 100644 index 12e7e65..0000000 --- a/unused/portainer/pvc.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - namespace: portainer - name: portainer-data - labels: - directory: portainer -spec: - storageClassName: fast - capacity: - storage: "10Gi" - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - nfs: - path: /portainer - server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: portainer - name: portainer-data -spec: - storageClassName: fast - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "10Gi" - selector: - matchLabels: - directory: portainer - - - diff --git a/unused/prometheus.pv.yml b/unused/prometheus.pv.yml deleted file mode 100644 index d0d3b49..0000000 --- a/unused/prometheus.pv.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - namespace: monitoring - name: prometheus-data-nfs - labels: - directory: prometheus -spec: - storageClassName: slow - capacity: - storage: "50Gi" - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - nfs: - path: /prometheus - server: 10.43.239.43 # assigned to nfs-server service. 
Won't change as long as service is not redeployed ---- \ No newline at end of file diff --git a/unused/prometheus.values.yaml b/unused/prometheus.values.yaml deleted file mode 100644 index 54fdc72..0000000 --- a/unused/prometheus.values.yaml +++ /dev/null @@ -1,2154 +0,0 @@ -# Default values for kube-prometheus-stack. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -## Provide a name in place of kube-prometheus-stack for `app:` labels -## -nameOverride: "" - -## Override the deployment namespace -## -namespaceOverride: "" - -## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 -## -kubeTargetVersionOverride: "" - -## Allow kubeVersion to be overridden while creating the ingress -## -kubeVersionOverride: "" - -## Provide a name to substitute for the full names of resources -## -fullnameOverride: "" - -## Labels to apply to all resources -## -commonLabels: {} -# scmhash: abc123 -# myLabel: aakkmd - -## Create default rules for monitoring the cluster -## -defaultRules: - create: true - rules: - alertmanager: true - etcd: true - configReloaders: true - general: true - k8s: true - kubeApiserverAvailability: true - kubeApiserverBurnrate: true - kubeApiserverHistogram: true - kubeApiserverSlos: true - kubelet: true - kubeProxy: true - kubePrometheusGeneral: true - kubePrometheusNodeRecording: true - kubernetesApps: true - kubernetesResources: true - kubernetesStorage: true - kubernetesSystem: true - kubeScheduler: true - kubeStateMetrics: true - network: true - node: true - nodeExporterAlerting: true - nodeExporterRecording: true - prometheus: true - prometheusOperator: true - - ## Reduce app namespace alert scope - appNamespacesTarget: ".*" - - ## Labels for default rules - labels: {} - ## Annotations for default rules - annotations: {} - - ## Additional labels for PrometheusRule alerts - additionalRuleLabels: {} - - ## Additional annotations for PrometheusRule alerts - additionalRuleAnnotations: {} - - ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules. - runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks" - - ## Disabled PrometheusRule alerts - disabled: {} - # KubeAPIDown: true - # NodeRAIDDegraded: true - -## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. -## -# additionalPrometheusRules: [] -# - name: my-rule-file -# groups: -# - name: my_group -# rules: -# - record: my_record -# expr: 100 * my_record - -## Provide custom recording or alerting rules to be deployed into the cluster. 
-## -additionalPrometheusRulesMap: {} -# rule-name: -# groups: -# - name: my_group -# rules: -# - record: my_record -# expr: 100 * my_record - -## -global: - rbac: - create: true - - ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs - ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles - createAggregateClusterRoles: false - pspEnabled: false - pspAnnotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - # or - # - "image-pull-secret" - -## Configuration for alertmanager -## ref: https://prometheus.io/docs/alerting/alertmanager/ -## - -## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml -## -grafana: - enabled: true - -## Component scraping the kube api server -## -kubeApiServer: - enabled: true - tlsConfig: - serverName: kubernetes - insecureSkipVerify: false - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - jobLabel: component - selector: - matchLabels: - component: apiserver - provider: kubernetes - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: - # - __meta_kubernetes_namespace - # - __meta_kubernetes_service_name - # - __meta_kubernetes_endpoint_port_name - # action: keep - # regex: default;kubernetes;https - # - targetLabel: __address__ - # replacement: kubernetes.default.svc:443 - -## Component scraping the kubelet and kubelet-hosted cAdvisor -## -kubelet: - enabled: true - namespace: kube-system - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## Enable scraping the kubelet over https. 
For requirements to enable this see - ## https://github.com/prometheus-operator/prometheus-operator/issues/926 - ## - https: true - - ## Enable scraping /metrics/cadvisor from kubelet's service - ## - cAdvisor: true - - ## Enable scraping /metrics/probes from kubelet's service - ## - probes: true - - ## Enable scraping /metrics/resource from kubelet's service - ## This is disabled by default because container metrics are already exposed by cAdvisor - ## - resource: false - # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource - resourcePath: "/metrics/resource/v1alpha1" - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - cAdvisorMetricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - probesMetricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - ## metrics_path is required to match upstream rules and charts - cAdvisorRelabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - probesRelabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - resourceRelabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - ## metrics_path is required to match upstream rules and charts - relabelings: - - sourceLabels: [__metrics_path__] - targetLabel: metrics_path - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping the kube controller manager -## -kubeControllerManager: - enabled: true - - ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeControllerManager.endpoints only the port and targetPort are used - ## - service: - enabled: true - ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change - ## of default port in Kubernetes 1.22. - ## - port: null - targetPort: null - # selector: - # component: kube-controller-manager - - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## Enable scraping kube-controller-manager over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. - ## If null or unset, the value is determined dynamically based on target Kubernetes version. - ## - https: null - - # Skip TLS certificate validation when scraping - insecureSkipVerify: null - - # Name of the server to use when validating TLS certificate - serverName: null - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping coreDns. Use either this or kubeDns -## -coreDns: - enabled: true - service: - port: 9153 - targetPort: 9153 - # selector: - # k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping kubeDns. Use either this or coreDns -## -kubeDns: - enabled: false - service: - dnsmasq: - port: 10054 - targetPort: 10054 - skydns: - port: 10055 - targetPort: 10055 - # selector: - # k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - dnsmasqMetricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - dnsmasqRelabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - -## Component scraping etcd -## -kubeEtcd: - enabled: true - - ## If your etcd is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used - ## - service: - enabled: true - port: 2379 - targetPort: 2379 - # selector: - # component: etcd - - ## Configure secure access to the etcd cluster by loading a secret into prometheus and - ## specifying security configuration below. For example, with a secret named etcd-client-cert - ## - ## serviceMonitor: - ## scheme: https - ## insecureSkipVerify: false - ## serverName: localhost - ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca - ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client - ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key - ## - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
- ## - interval: "" - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - scheme: http - insecureSkipVerify: false - serverName: "" - caFile: "" - certFile: "" - keyFile: "" - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube scheduler -## -kubeScheduler: - enabled: true - - ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeScheduler.endpoints only the port and targetPort are used - ## - service: - enabled: true - ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change - ## of default port in Kubernetes 1.23. - ## - port: null - targetPort: null - # selector: - # component: kube-scheduler - - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - ## Enable scraping kube-scheduler over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. - ## If null or unset, the value is determined dynamically based on target Kubernetes version. - ## - https: null - - ## Skip TLS certificate validation when scraping - insecureSkipVerify: null - - ## Name of the server to use when validating TLS certificate - serverName: null - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube proxy -## -kubeProxy: - enabled: true - - ## If your kube proxy is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - service: - enabled: true - port: 10249 - targetPort: 10249 - # selector: - # k8s-app: kube-proxy - - serviceMonitor: - enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## Enable scraping kube-proxy over https. 
- ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - -## Component scraping kube state metrics -## -kubeStateMetrics: - enabled: true - -## Configuration for kube-state-metrics subchart -## -kube-state-metrics: - namespaceOverride: "" - rbac: - create: true - releaseLabel: true - prometheus: - monitor: - enabled: true - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. - ## - scrapeTimeout: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - # Keep labels from scraped data, overriding server-side labels - ## - honorLabels: true - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - selfMonitor: - enabled: false - -## Deploy node exporter as a daemonset to all nodes -## -nodeExporter: - enabled: true - -## Configuration for prometheus-node-exporter subchart -## -prometheus-node-exporter: - namespaceOverride: "" - podLabels: - ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards - ## - jobLabel: node-exporter - extraArgs: - - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) - - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ - service: - portName: http-metrics - prometheus: - monitor: - enabled: true - - jobLabel: jobLabel - - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used. - ## - scrapeTimeout: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. - ## - proxyUrl: "" - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
- ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - # - sourceLabels: [__name__] - # separator: ; - # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ - # replacement: $1 - # action: drop - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - rbac: - ## If true, create PSPs for node-exporter - ## - pspEnabled: false - -## Manages Prometheus and Alertmanager components -## -prometheusOperator: - enabled: true - - ## Prometheus-Operator v0.39.0 and later support TLS natively. - ## - tls: - enabled: true - # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants - tlsMinVersion: VersionTLS13 - # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. - internalPort: 10250 - - ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted - ## rules from making their way into prometheus and potentially preventing the container from starting - admissionWebhooks: - failurePolicy: Fail - enabled: true - ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. - ## If unspecified, system trust roots on the apiserver are used. - caBundle: "" - ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. - ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own - ## certs ahead of time if you wish. - ## - patch: - enabled: true - image: - repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen - tag: v1.1.1 - sha: "" - pullPolicy: IfNotPresent - resources: {} - ## Provide a priority class name to the webhook patching job - ## - priorityClassName: "" - podAnnotations: {} - nodeSelector: {} - affinity: {} - tolerations: [] - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - runAsGroup: 2000 - runAsNonRoot: true - runAsUser: 2000 - - # Use certmanager to generate webhook certs - certManager: - enabled: false - # self-signed root certificate - rootCert: - duration: "" # default to be 5y - admissionCert: - duration: "" # default to be 1y - # issuerRef: - # name: "issuer" - # kind: "ClusterIssuer" - - ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). - ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration - ## - namespaces: {} - # releaseNamespace: true - # additional: - # - kube-system - - ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). 
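# (For example — namespace names are hypothetical — a deny list that keeps the
# operator out of scratch namespaces could look like:
#   denyNamespaces:
#     - playground
#     - ci-smoke-tests
# This is mutually exclusive with the allow list in `namespaces` above.)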
- ## - denyNamespaces: [] - - ## Filter namespaces to look for prometheus-operator custom resources - ## - alertmanagerInstanceNamespaces: [] - prometheusInstanceNamespaces: [] - thanosRulerInstanceNamespaces: [] - - ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. - ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) - ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 - ## - # clusterDomain: "cluster.local" - - ## Service account for Alertmanager to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configuration for Prometheus operator service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30080 - - nodePortTls: 30443 - - ## Additional ports to open for Prometheus service - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - ## - additionalPorts: [] - - ## Loadbalancer IP - ## Only use if service.type is "LoadBalancer" - ## - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## NodePort, ClusterIP, LoadBalancer - ## - type: ClusterIP - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - ## Labels to add to the operator pod - ## - podLabels: {} - - ## Annotations to add to the operator pod - ## - podAnnotations: {} - - ## Assign a PriorityClassName to pods if set - # priorityClassName: "" - - ## Define Log Format - # Use logfmt (default) or json logging - # logFormat: logfmt - - ## Decrease log verbosity to errors only - # logLevel: error - - ## If true, the operator will create and maintain a service for scraping kubelets - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md - ## - kubeletService: - enabled: true - namespace: kube-system - ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default - name: "" - - ## Create a servicemonitor for the operator - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. - scrapeTimeout: "" - selfMonitor: true - - ## Metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Resource limits & requests - ## - resources: {} - # limits: - # cpu: 200m - # memory: 200Mi - # requests: - # cpu: 100m - # memory: 100Mi - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - ## - hostNetwork: false - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for use with node taints - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## Assign custom affinity rules to the prometheus operator - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - securityContext: - fsGroup: 65534 - runAsGroup: 65534 - runAsNonRoot: true - runAsUser: 65534 - - ## Container-specific security context configuration - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - containerSecurityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - - ## Prometheus-operator image - ## - image: - repository: quay.io/prometheus-operator/prometheus-operator - tag: v0.57.0 - sha: "" - pullPolicy: IfNotPresent - - ## Prometheus image to use for prometheuses managed by the operator - ## - # prometheusDefaultBaseImage: quay.io/prometheus/prometheus - - ## Alertmanager image to use for alertmanagers managed by the operator - ## - # alertmanagerDefaultBaseImage: quay.io/prometheus/alertmanager - - ## Prometheus-config-reloader - ## - prometheusConfigReloader: - # image to use for config and rule reloading - image: - repository: quay.io/prometheus-operator/prometheus-config-reloader - tag: v0.57.0 - sha: "" - - # resource config for prometheusConfigReloader - resources: - requests: - cpu: 200m - memory: 50Mi - limits: - cpu: 200m - memory: 50Mi - - ## Thanos side-car image when configured - ## - thanosImage: - repository: quay.io/thanos/thanos - tag: v0.25.2 - sha: "" - - ## Set a Field Selector to filter watched secrets - ## - secretFieldSelector: "" - -## Deploy a Prometheus instance -## -prometheus: - - enabled: true - - ## Annotations for Prometheus - ## - annotations: {} - - ## Service account for Prometheuses to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - annotations: {} - - # Service for Thanos service discovery on the sidecar - # Enabling this lets Thanos Query use - # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover - # the Thanos sidecar on Prometheus nodes - # (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}.
Not just copy and paste!) - thanosService: - enabled: false - annotations: {} - labels: {} - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: ClusterIP - - ## gRPC port config - portName: grpc - port: 10901 - targetPort: "grpc" - - ## HTTP port config (for metrics) - httpPortName: http - httpPort: 10902 - targetHttpPort: "http" - - ## ClusterIP to assign - # Default is to make this a headless service ("None") - clusterIP: "None" - - ## Port to expose on each node, if service type is NodePort - ## - nodePort: 30901 - httpNodePort: 30902 - - # ServiceMonitor to scrape Sidecar metrics - # Needs thanosService to be enabled as well - thanosServiceMonitor: - enabled: false - interval: "" - - ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. - scheme: "" - - ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. - ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig - tlsConfig: {} - - bearerTokenFile: - - ## Metric relabel configs to apply to samples before ingestion. - metricRelabelings: [] - - ## relabel configs to apply to samples before ingestion. - relabelings: [] - - # Service for external access to sidecar - # Enabling this creates a service to expose thanos-sidecar outside the cluster. - thanosServiceExternal: - enabled: false - annotations: {} - labels: {} - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## gRPC port config - portName: grpc - port: 10901 - targetPort: "grpc" - - ## HTTP port config (for metrics) - httpPortName: http - httpPort: 10902 - targetHttpPort: "http" - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: LoadBalancer - - ## Port to expose on each node - ## - nodePort: 30901 - httpNodePort: 30902 - - ## Configuration for Prometheus service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Port for Prometheus Service to listen on - ## - port: 9090 - - ## To be used with a proxy extraContainer port - targetPort: 9090 - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30090 - - ## Loadbalancer IP - ## Only use if service.type is "LoadBalancer" - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: ClusterIP - - ## Additional port to define in the Service - additionalPorts: [] - - ## Consider that all endpoints are considered "ready" even if the Pods themselves are not - ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec - publishNotReadyAddresses: false - - sessionAffinity: "" - - ## Configuration for creating a separate Service for each statefulset Prometheus replica - ## - servicePerReplica: - enabled: false - annotations: {} - - ## Port for Prometheus Service per replica to listen on - ## - port: 9090 - - ## To be used with a proxy extraContainer port - targetPort: 9090 - - ## Port to expose on each node - ## Only 
used if servicePerReplica.type is 'NodePort' - ## - nodePort: 30091 - - ## Loadbalancer source IP ranges - ## Only used if servicePerReplica.type is "LoadBalancer" - loadBalancerSourceRanges: [] - - ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints - ## - externalTrafficPolicy: Cluster - - ## Service type - ## - type: ClusterIP - - ## Configure pod disruption budgets for Prometheus - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## This configuration is immutable once created and will require the PDB to be deleted to be changed - ## https://github.com/kubernetes/kubernetes/issues/45398 - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - # Ingress exposes thanos sidecar outside the cluster - thanosIngress: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - labels: {} - servicePort: 10901 - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30901 - - ## Hosts must be provided if Ingress is enabled. - ## - hosts: [] - # - thanos-gateway.domain.com - - ## Paths to use for ingress rules - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## TLS configuration for Thanos Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: thanos-gateway-tls - # hosts: - # - thanos-gateway.domain.com - # - - ## ExtraSecret can be used to store various data in an extra secret - ## (use it for example to store hashed basic auth credentials) - extraSecret: - ## if not set, name will be auto generated - # name: "" - annotations: {} - data: {} - # auth: | - # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 - # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. - - ingress: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - labels: {} - - ## Hostnames. - ## Must be provided if Ingress is enabled. 
- ## - # hosts: - # - prometheus.domain.com - hosts: [] - - ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## TLS configuration for Prometheus Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-general-tls - # hosts: - # - prometheus.example.com - - ## Configuration for creating an Ingress that will map to each Prometheus replica service - ## prometheus.servicePerReplica must be enabled - ## - ingressPerReplica: - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} - labels: {} - - ## Final form of the hostname for each per replica ingress is - ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} - ## - ## Prefix for the per replica ingress that will have `-$replicaNumber` - ## appended to the end - hostPrefix: "" - ## Domain that will be used for the per replica ingress - hostDomain: "" - - ## Paths to use for ingress rules - ## - paths: [] - # - / - - ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) - ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types - # pathType: ImplementationSpecific - - ## Secret name containing the TLS certificate for Prometheus per replica ingress - ## Secret must be manually created in the namespace - tlsSecretName: "" - - ## Separated secret for each per replica Ingress. Can be used together with cert-manager - ## - tlsSecretPerReplica: - enabled: false - ## Final form of the secret for each per replica ingress is - ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} - ## - prefix: "prometheus" - - ## Configure additional options for default pod security policy for Prometheus - ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - podSecurityPolicy: - allowedCapabilities: [] - allowedHostPaths: [] - volumes: [] - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. - scheme: "" - - ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. - ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig - tlsConfig: {} - - bearerTokenFile: - - ## Metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - ## Settings affecting prometheusSpec - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec - ## - prometheusSpec: - ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos - ## - disableCompaction: false - ## APIServerConfig - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig - ## - apiserverConfig: {} - - ## Interval between consecutive scrapes. - ## Defaults to 30s. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183 - ## - scrapeInterval: "" - - ## Number of seconds to wait for target to respond before erroring - ## - scrapeTimeout: "" - - ## Interval between consecutive evaluations. - ## - evaluationInterval: "" - - ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. - ## - listenLocal: false - - ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series. - ## This is disabled by default. - ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis - ## - enableAdminAPI: false - - ## WebTLSConfig defines the TLS parameters for HTTPS - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig - web: {} - - # EnableFeatures enables access to disabled Prometheus features. - # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ - enableFeatures: [] - # - exemplar-storage - - ## Image of Prometheus. - ## - image: - repository: quay.io/prometheus/prometheus - tag: v2.36.1 - sha: "" - - ## Tolerations for use with node taints - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## If specified, the pod's topology spread constraints.
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - # labelSelector: - # matchLabels: - # app: prometheus - - ## Alertmanagers to which alerts will be sent - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints - ## - ## Default configuration will connect to the alertmanager deployed as part of this release - ## - alertingEndpoints: [] - # - name: "" - # namespace: "" - # port: http - # scheme: http - # pathPrefix: "" - # tlsConfig: {} - # bearerTokenFile: "" - # apiVersion: v2 - - ## External labels to add to any time series or alerts when communicating with external systems - ## - externalLabels: {} - - ## enable --web.enable-remote-write-receiver flag on prometheus-server - ## - enableRemoteWriteReceiver: false - - ## Name of the external label used to denote replica name - ## - replicaExternalLabelName: "" - - ## If true, the Operator won't add the external label used to denote replica name - ## - replicaExternalLabelNameClear: false - - ## Name of the external label used to denote Prometheus instance name - ## - prometheusExternalLabelName: "" - - ## If true, the Operator won't add the external label used to denote Prometheus instance name - ## - prometheusExternalLabelNameClear: false - - ## External URL at which Prometheus will be reachable. - ## - externalUrl: "" - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. - ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not - ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated - ## with the new list of secrets. - ## - secrets: [] - - ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. - ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. - ## - configMaps: [] - - ## QuerySpec defines the query command line flags when starting Prometheus. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec - ## - query: {} - - ## Namespaces to be selected for PrometheusRules discovery. - ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery. - ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage - ## - ruleNamespaceSelector: {} - - ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the PrometheusRule resources created - ## - ruleSelectorNilUsesHelmValues: true - - ## PrometheusRules to be selected for target discovery. 
-    ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the servicemonitors created
-    ##
-    serviceMonitorSelectorNilUsesHelmValues: true
-
-    ## ServiceMonitors to be selected for target discovery.
-    ## If {}, select all ServiceMonitors
-    ##
-    serviceMonitorSelector: {}
-    ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
-    # serviceMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for ServiceMonitor discovery.
-    ##
-    serviceMonitorNamespaceSelector: {}
-    ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
-    # serviceMonitorNamespaceSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the podmonitors created
-    ##
-    podMonitorSelectorNilUsesHelmValues: true
-
-    ## PodMonitors to be selected for target discovery.
-    ## If {}, select all PodMonitors
-    ##
-    podMonitorSelector: {}
-    ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
-    # podMonitorSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for PodMonitor discovery.
-    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage
-    ##
-    podMonitorNamespaceSelector: {}
-
-    ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
-    ## prometheus resource to be created with selectors based on values in the helm deployment,
-    ## which will also match the probes created
-    ##
-    probeSelectorNilUsesHelmValues: true
-
-    ## Probes to be selected for target discovery.
-    ## If {}, select all Probes
-    ##
-    probeSelector: {}
-    ## Example which selects Probes with label "prometheus" set to "somelabel"
-    # probeSelector:
-    #   matchLabels:
-    #     prometheus: somelabel
-
-    ## Namespaces to be selected for Probe discovery.
-    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage
-    ##
-    probeNamespaceSelector: {}
-
-    ## How long to retain metrics
-    ##
-    retention: 10d
-
-    ## Maximum size of metrics storage; once exceeded, the oldest blocks are deleted first
-    ##
-    retentionSize: ""
-
-    ## Enable compression of the write-ahead log using Snappy.
-    ##
-    walCompression: false
-
-    ## If true, the Operator won't process any Prometheus configuration changes
-    ##
-    paused: false
-
-    ## Number of replicas of each shard to deploy for a Prometheus deployment.
-    ## Number of replicas multiplied by shards is the total number of Pods created.
-    ##
-    replicas: 1
-
-    ## EXPERIMENTAL: Number of shards to distribute targets onto.
-    ## Number of replicas multiplied by shards is the total number of Pods created.
-    ## Note that scaling down shards will not reshard data onto the remaining instances; it must be moved manually.
-    ## Increasing shards will not reshard data either, but it will continue to be available from the same instances.
-    ## To query globally, use the Thanos sidecar and Thanos querier, or remote-write data to a central location.
-    ## Sharding is done on the content of the `__address__` target meta-label.
-    ##
-    shards: 1
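-    ## (editor's sketch) since total pods = replicas x shards, a sharded HA
-    ## setup would run 4 pods: targets split across 2 shards, each shard
-    ## duplicated once for availability:
-    # replicas: 2
-    # shards: 2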
-    ## Log level to be configured for Prometheus
-    ##
-    logLevel: info
-
-    ## Log format to be configured for Prometheus
-    ##
-    logFormat: logfmt
-
-    ## Prefix used to register routes, overriding externalUrl route.
-    ## Useful for proxies that rewrite URLs.
-    ##
-    routePrefix: /
-
-    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-    ## Metadata labels and annotations get propagated to the prometheus pods.
-    ##
-    podMetadata: {}
-    # labels:
-    #   app: prometheus
-    #   k8s-app: prometheus
-
-    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
-    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
-    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
-    podAntiAffinity: ""
-
-    ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
-    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
-    ##
-    podAntiAffinityTopologyKey: kubernetes.io/hostname
-
-    ## Assign custom affinity rules to the prometheus instance
-    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-    ##
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #     - matchExpressions:
-    #       - key: kubernetes.io/e2e-az-name
-    #         operator: In
-    #         values:
-    #         - e2e-az1
-    #         - e2e-az2
-
-    ## The remote_read spec configuration for Prometheus.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec
-    remoteRead: []
-    # - url: http://remote1/read
-    ## additionalRemoteRead is appended to remoteRead
-    additionalRemoteRead: []
-
-    ## The remote_write spec configuration for Prometheus.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec
-    remoteWrite: []
-    # - url: http://remote1/push
-    ## additionalRemoteWrite is appended to remoteWrite
-    additionalRemoteWrite: []
-
-    ## Enable/Disable Grafana dashboards provisioning for the prometheus remote write feature
-    remoteWriteDashboards: false
-
-    ## Resource limits & requests
-    ##
-    resources: {}
-    # requests:
-    #   memory: 400Mi
-
-    ## Prometheus StorageSpec for persistent data
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
-    ##
-    ## Using PersistentVolumeClaim
-    ##
-    storageSpec:
-      volumeClaimTemplate:
-        spec:
-          resources:
-            requests:
-              storage: 50Gi
-          selector:
-            matchLabels:
-              directory: prometheus
-
-    ## Using tmpfs volume
-    ##
-    # storageSpec:
-    #   emptyDir:
-    #     medium: Memory
-
-    # Additional volumes on the output StatefulSet definition.
-    volumes: []
-
-    # Additional VolumeMounts on the output StatefulSet definition.
-    volumeMounts: []
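-    ## (editor's sketch -- hypothetical names) volumes and volumeMounts pair up
-    ## as on any pod spec, e.g. to mount extra CA certificates from a ConfigMap:
-    # volumes:
-    # - name: extra-ca
-    #   configMap:
-    #     name: extra-ca-certs
-    # volumeMounts:
-    # - name: extra-ca
-    #   mountPath: /etc/prometheus/extra-ca
-    #   readOnly: true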
-    ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
-    ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
-    ## as specified in the official Prometheus documentation:
-    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
-    ## appended, the user is responsible for making sure they are valid. Note that using this feature may break
-    ## Prometheus upgrades. It is advised to review the Prometheus release notes to ensure that no incompatible
-    ## scrape configs are going to break Prometheus after the upgrade.
-    ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
-    ##
-    ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
-    ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
-    ##
-    additionalScrapeConfigs: []
-    # - job_name: kube-etcd
-    #   kubernetes_sd_configs:
-    #   - role: node
-    #   scheme: https
-    #   tls_config:
-    #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
-    #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
-    #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
-    #   relabel_configs:
-    #   - action: labelmap
-    #     regex: __meta_kubernetes_node_label_(.+)
-    #   - source_labels: [__address__]
-    #     action: replace
-    #     target_label: __address__
-    #     regex: ([^:;]+):(\d+)
-    #     replacement: ${1}:2379
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: keep
-    #     regex: .*mst.*
-    #   - source_labels: [__meta_kubernetes_node_name]
-    #     action: replace
-    #     target_label: node
-    #     regex: (.*)
-    #     replacement: ${1}
-    #   metric_relabel_configs:
-    #   - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
-    #     action: labeldrop
-    #
-    ## If a scrape config contains a repetitive section, you may want to use a template.
-    ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
-    # additionalScrapeConfigs: |
-    #   - job_name: "node-exporter"
-    #     gce_sd_configs:
-    #     {{range $zone := .Values.gcp_zones}}
-    #     - project: "project1"
-    #       zone: "{{$zone}}"
-    #       port: 9100
-    #     {{end}}
-    #     relabel_configs:
-    #     ...
-
-    ## If additional scrape configurations are already deployed in a single secret file you can use this section.
-    ## Expected values are the secret name and key
-    ## Cannot be used with additionalScrapeConfigs
-    additionalScrapeConfigsSecret: {}
-    # enabled: false
-    # name:
-    # key:
-
-    ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful
-    ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
-    additionalPrometheusSecretsAnnotations: {}
-
-    ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
-    ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
-    ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
-    ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
-    ## feature may break Prometheus upgrades. It is advised to review the Prometheus release
-    ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
-    ##
-    additionalAlertManagerConfigs: []
-    # - consul_sd_configs:
-    #   - server: consul.dev.test:8500
-    #     scheme: http
-    #     datacenter: dev
-    #     tag_separator: ','
-    #     services:
-    #       - metrics-prometheus-alertmanager
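-    ## (editor's sketch -- hypothetical endpoint) the most common shape here is
-    ## a plain static target rather than the consul example above:
-    # additionalAlertManagerConfigs:
-    # - static_configs:
-    #   - targets:
-    #     - "alertmanager.example.com:9093"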
-    ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
-    ## them separately from the helm deployment, you can use this section.
-    ## Expected values are the secret name and key
-    ## Cannot be used with additionalAlertManagerConfigs
-    additionalAlertManagerConfigsSecret: {}
-    # name:
-    # key:
-
-    ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
-    ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
-    ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
-    ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature may
-    ## break Prometheus upgrades. It is advised to review the Prometheus release notes to ensure that no incompatible alert relabel
-    ## configs are going to break Prometheus after the upgrade.
-    ##
-    additionalAlertRelabelConfigs: []
-    # - separator: ;
-    #   regex: prometheus_replica
-    #   replacement: $1
-    #   action: labeldrop
-
-    ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
-    ## them separately from the helm deployment, you can use this section.
-    ## Expected values are the secret name and key
-    ## Cannot be used with additionalAlertRelabelConfigs
-    additionalAlertRelabelConfigsSecret: {}
-    # name:
-    # key:
-
-    ## SecurityContext holds pod-level security attributes and common container settings.
-    ## This defaults to a non-root user with uid 1000 and gid 2000.
-    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
-    ##
-    securityContext:
-      runAsGroup: 2000
-      runAsNonRoot: true
-      runAsUser: 1000
-      fsGroup: 2000
-
-    ## Priority class assigned to the Pods
-    ##
-    priorityClassName: ""
-
-    ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
-    ## This section is experimental and may change significantly without deprecation notice or backward compatibility in any release.
-    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec
-    ##
-    thanos: {}
-    # secretProviderClass:
-    #   provider: gcp
-    #   parameters:
-    #     secrets: |
-    #       - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
-    #         fileName: "objstore.yaml"
-    # objectStorageConfigFile: /var/secrets/object-store.yaml
-
-    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
-    ## If using a proxy extra container, update targetPort with the proxy container port.
-    containers: []
-
-    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
-    ## (permissions, dir tree) on mounted volumes before starting prometheus
-    initContainers: []
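-    ## (editor's sketch -- hypothetical; the data volume name depends on the
-    ## Prometheus object name) e.g. fixing ownership on an NFS-backed volume to
-    ## match the uid/gid from securityContext above; note such a container must
-    ## itself run as root:
-    # initContainers:
-    # - name: fix-permissions
-    #   image: busybox:1.35
-    #   command: ["sh", "-c", "chown -R 1000:2000 /prometheus"]
-    #   securityContext:
-    #     runAsUser: 0
-    #     runAsNonRoot: false
-    #   volumeMounts:
-    #   - name: prometheus-<prometheus-name>-db
-    #     mountPath: /prometheus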
-    ## PortName to use for Prometheus.
-    ##
-    portName: "http-web"
-
-    ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
-    ## on the file system of the Prometheus container, e.g. bearer token files.
-    arbitraryFSAccessThroughSMs: false
-
-    ## OverrideHonorLabels, if set to true, overrides all user-configured honor_labels. If HonorLabels is set to true in a ServiceMonitor
-    ## or PodMonitor, this overrides honor_labels to false.
-    overrideHonorLabels: false
-
-    ## OverrideHonorTimestamps allows globally enforcing honoring timestamps in all scrape configs.
-    overrideHonorTimestamps: false
-
-    ## IgnoreNamespaceSelectors, if set to true, will ignore NamespaceSelector settings from the podmonitor and servicemonitor
-    ## configs, and they will only discover endpoints within their current namespace. Defaults to false.
-    ignoreNamespaceSelectors: false
-
-    ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
-    ## The label value will always be the namespace of the object that is being created.
-    ## Disabled by default
-    enforcedNamespaceLabel: ""
-
-    ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
-    ## Works only if enforcedNamespaceLabel is set. Make sure both ruleNamespace and ruleName are set for each pair.
-    ## Deprecated, use `excludedFromEnforcement` instead
-    prometheusRulesExcludedFromEnforce: []
-
-    ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
-    ## to be excluded from enforcing a namespace label of origin.
-    ## Works only if enforcedNamespaceLabel is set.
-    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference
-    excludedFromEnforcement: []
-
-    ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
-    ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
-    ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
-    ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
-    queryLogFile: false
-
-    ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This overrides any SampleLimit
-    ## set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
-    ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
-    enforcedSampleLimit: false
-
-    ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
-    ## per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
-    ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
-    ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
-    enforcedTargetLimit: false
-
-    ## Per-scrape limit on the number of labels that will be accepted for a sample. If more than this number of labels are present
-    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
-    ## 2.27.0 and newer.
-    enforcedLabelLimit: false
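-    ## (editor's sketch -- values are illustrative, and passing integers here is
-    ## assumed to map straight onto the corresponding CRD fields) e.g. to cap
-    ## any single scrape at 50k samples and 30 labels per series:
-    # enforcedSampleLimit: 50000
-    # enforcedLabelLimit: 30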
-    ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number
-    ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
-    ## 2.27.0 and newer.
-    enforcedLabelNameLengthLimit: false
-
-    ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this
-    ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
-    ## versions 2.27.0 and newer.
-    enforcedLabelValueLengthLimit: false
-
-    ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
-    ## in Prometheus, so it may change in any upcoming release.
-    allowOverlappingBlocks: false
-
-  additionalRulesForClusterRole: []
-  # - apiGroups: [ "" ]
-  #   resources:
-  #     - nodes/proxy
-  #   verbs: [ "get", "list", "watch" ]
-
-  additionalServiceMonitors: []
-  ## Name of the ServiceMonitor to create
-  ##
-  # - name: ""
-
-  ## Additional labels to set, used for the ServiceMonitorSelector, together with the standard labels from
-  ## the chart
-  ##
-  # additionalLabels: {}
-
-  ## Service label for use in assembling a job name of the form