diff --git a/apps/adguard/configmap.yaml b/apps/adguard/configmap.yaml new file mode 100644 index 0000000..43faca5 --- /dev/null +++ b/apps/adguard/configmap.yaml @@ -0,0 +1,150 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: adguard-home-config + namespace: adguard +data: + AdGuardHome.yaml: |- + bind_host: 0.0.0.0 + bind_port: 3000 + beta_bind_port: 0 + users: [] + auth_attempts: 5 + block_auth_min: 15 + http_proxy: "" + language: "" + debug_pprof: false + web_session_ttl: 720 + dns: + bind_hosts: + - 0.0.0.0 + port: 53 + statistics_interval: 1 + querylog_enabled: true + querylog_file_enabled: true + querylog_interval: 2160h + querylog_size_memory: 1000 + anonymize_client_ip: false + protection_enabled: true + blocking_mode: default + blocking_ipv4: "" + blocking_ipv6: "" + blocked_response_ttl: 10 + parental_block_host: family-block.dns.adguard.com + safebrowsing_block_host: standard-block.dns.adguard.com + ratelimit: 20 + ratelimit_whitelist: [] + refuse_any: true + upstream_dns: + - https://dns10.quad9.net/dns-query + upstream_dns_file: "" + bootstrap_dns: + - 9.9.9.10 + - 149.112.112.10 + - 2620:fe::10 + - 2620:fe::fe:10 + all_servers: false + fastest_addr: false + fastest_timeout: 1s + allowed_clients: [] + disallowed_clients: [] + blocked_hosts: + - version.bind + - id.server + - hostname.bind + trusted_proxies: + - 127.0.0.0/8 + - ::1/128 + cache_size: 4194304 + cache_ttl_min: 0 + cache_ttl_max: 0 + cache_optimistic: false + bogus_nxdomain: [] + aaaa_disabled: false + enable_dnssec: false + edns_client_subnet: false + max_goroutines: 300 + ipset: [] + filtering_enabled: true + filters_update_interval: 24 + parental_enabled: false + safesearch_enabled: false + safebrowsing_enabled: false + safebrowsing_cache_size: 1048576 + safesearch_cache_size: 1048576 + parental_cache_size: 1048576 + cache_time: 30 + rewrites: [] + blocked_services: [] + upstream_timeout: 10s + private_networks: [] + use_private_ptr_resolvers: true + local_ptr_upstreams: + - 192.168.1.1 + tls: + enabled: false + server_name: "" + force_https: false + port_https: 443 + port_dns_over_tls: 853 + port_dns_over_quic: 853 + port_dnscrypt: 0 + dnscrypt_config_file: "" + allow_unencrypted_doh: false + strict_sni_check: false + certificate_chain: "" + private_key: "" + certificate_path: "" + private_key_path: "" + filters: + - enabled: true + url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt + name: AdGuard DNS filter + id: 1 + - enabled: true + url: https://adaway.org/hosts.txt + name: AdAway Default Blocklist + id: 2 + - enabled: true + url: https://someonewhocares.org/hosts/zero/hosts + name: Dan Pollock's List + id: 1684963532 + whitelist_filters: [] + user_rules: [] + dhcp: + enabled: false + interface_name: "" + local_domain_name: lan + dhcpv4: + gateway_ip: "" + subnet_mask: "" + range_start: "" + range_end: "" + lease_duration: 86400 + icmp_timeout_msec: 1000 + options: [] + dhcpv6: + range_start: "" + lease_duration: 86400 + ra_slaac_only: false + ra_allow_slaac: false + clients: + runtime_sources: + whois: true + arp: true + rdns: true + dhcp: true + hosts: true + persistent: [] + log_compress: false + log_localtime: false + log_max_backups: 0 + log_max_size: 100 + log_max_age: 3 + log_file: "" + verbose: false + os: + group: "" + user: "" + rlimit_nofile: 0 + schema_version: 14 diff --git a/apps/adguard/deployment.yaml b/apps/adguard/deployment.yaml new file mode 100644 index 0000000..454774d --- /dev/null +++ b/apps/adguard/deployment.yaml @@ -0,0 +1,80 @@ +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: adguard-home + namespace: adguard +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/instance: adguard + app.kubernetes.io/name: adguard-home + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/instance: adguard + app.kubernetes.io/name: adguard-home + spec: + containers: + - args: + - --config + - /opt/adguardhome/conf/AdGuardHome.yaml + - --work-dir + - /opt/adguardhome/work + - --no-check-update + env: + - name: TZ + value: Europe/Berlin + image: adguard/adguardhome:v0.107.7 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + name: adguard-home + ports: + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 53 + name: dns-udp + protocol: UDP + - containerPort: 3000 + name: http + protocol: TCP + readinessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: {} + startupProbe: + failureThreshold: 30 + periodSeconds: 5 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /opt/adguardhome/conf/ + name: adguard-home-config + dnsPolicy: ClusterFirst + + restartPolicy: Always + + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 0777 + name: adguard-home-config + name: adguard-home-config diff --git a/apps/adguard/ingress.yaml b/apps/adguard/ingress.yaml new file mode 100644 index 0000000..d0ed6b6 --- /dev/null +++ b/apps/adguard/ingress.yaml @@ -0,0 +1,42 @@ +# apiVersion: traefik.containo.us/v1alpha1 +# kind: Middleware +# metadata: +# name: authentik-auth +# namespace: adguard +# spec: +# forwardAuth: +# address: https://adguard.kluster.moll.re/outpost.goauthentik.io/auth/traefik +# trustForwardHeader: true +# authResponseHeaders: +# - X-authentik-username +# - X-authentik-groups +# - X-authentik-email +# - X-authentik-name +# - X-authentik-uid +# - X-authentik-jwt +# - X-authentik-meta-jwks +# - X-authentik-meta-outpost +# - X-authentik-meta-provider +# - X-authentik-meta-app +# - X-authentik-meta-version + +# --- + +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: adguard-ingress + namespace: adguard +spec: + entryPoints: + - websecure + routes: + - match: Host(`adguard.kluster.moll.re`) + kind: Rule + # middlewares: + # - name: authentik-auth + services: + - name: adguard-home + port: 3000 + tls: + certResolver: default-tls diff --git a/apps/adguard/service.yaml b/apps/adguard/service.yaml new file mode 100644 index 0000000..f6aa339 --- /dev/null +++ b/apps/adguard/service.yaml @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Service +metadata: + name: adguard-home + namespace: adguard +spec: + ports: + - name: http + port: 3000 + protocol: TCP + targetPort: http + selector: + app.kubernetes.io/instance: adguard + app.kubernetes.io/name: adguard-home + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + metallb.universe.tf/ip-allocated-from-pool: default + metallb.universe.tf/allow-shared-ip: adguard-svc + + name: adguard-home-dns-tcp + namespace: adguard +spec: + allocateLoadBalancerNodePorts: true + loadBalancerIP: 192.168.3.2 + ports: + - name: dns-tcp + nodePort: 31306 + port: 53 + protocol: TCP + targetPort: 53 + selector: + app.kubernetes.io/instance: adguard + 
+    app.kubernetes.io/name: adguard-home
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    metallb.universe.tf/ip-allocated-from-pool: default
+    metallb.universe.tf/allow-shared-ip: adguard-svc
+
+  name: adguard-home-dns-udp
+  namespace: adguard
+spec:
+  allocateLoadBalancerNodePorts: true
+  loadBalancerIP: 192.168.3.2
+  ports:
+  - name: dns-udp
+    nodePort: 30547
+    port: 53
+    protocol: UDP
+    targetPort: 53
+  selector:
+    app.kubernetes.io/instance: adguard
+    app.kubernetes.io/name: adguard-home
+  type: LoadBalancer
\ No newline at end of file
diff --git a/apps/adguard/values.yaml b/apps/adguard/values.yaml
new file mode 100644
index 0000000..3a7d7b0
--- /dev/null
+++ b/apps/adguard/values.yaml
@@ -0,0 +1,365 @@
+#
+# IMPORTANT NOTE
+#
+# This chart inherits from our common library chart. You can check the default values/options here:
+# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
+#
+
+controller:
+  # -- Number of pods to load balance between
+  replicas: 1
+
+initContainers:
+  # -- Configures an initContainer that copies the configmap to the AdGuardHome conf directory.
+  # It does NOT overwrite when the file already exists.
+  # @default -- See values.yaml
+  copy-configmap:
+    image: busybox
+    imagePullPolicy: IfNotPresent
+    command:
+      - "sh"
+      - "-c"
+      - |
+        if [ ! -f /opt/adguardhome/conf/AdGuardHome.yaml ]; then
+          mkdir -p /opt/adguardhome/conf
+          cp /tmp/AdGuardHome.yaml /opt/adguardhome/conf/AdGuardHome.yaml
+        fi
+    volumeMounts:
+      - name: adguard-home-config
+        mountPath: /tmp/AdGuardHome.yaml
+        subPath: AdGuardHome.yaml
+      - name: config
+        mountPath: /opt/adguardhome/conf
+    securityContext:
+      runAsUser: 0
+
+image:
+  # -- image repository
+  repository: adguard/adguardhome
+  # @default -- chart.appVersion
+  tag:
+  # -- image pull policy
+  pullPolicy: IfNotPresent
+
+# -- environment variables.
+# @default -- See below
+env:
+  # -- Set the container timezone
+  TZ: Europe/Berlin
+
+# -- arguments passed to the adguard-home command line.
+args:
+  - "--config"
+  - "/opt/adguardhome/conf/AdGuardHome.yaml"
+  - "--work-dir"
+  - "/opt/adguardhome/work"
+  - "--no-check-update"
+
+# -- Configures service settings for the chart.
+# @default -- See values.yaml
+service:
+  main:
+    primary: true
+    ports:
+      http:
+        port: 3000
+  dns-tcp:
+    enabled: true
+    type: LoadBalancer
+    loadBalancerIP: 192.168.3.2
+    annotations:
+      metallb.universe.tf/allow-shared-ip: adguard-svc
+    ports:
+      dns-tcp:
+        enabled: true
+        port: 53
+        protocol: TCP
+        targetPort: 53
+  dns-udp:
+    enabled: true
+    type: LoadBalancer
+    loadBalancerIP: 192.168.3.2
+    annotations:
+      metallb.universe.tf/allow-shared-ip: adguard-svc
+    ports:
+      dns-udp:
+        enabled: true
+        port: 53
+        protocol: UDP
+        targetPort: 53
+  dns-tls-udp:
+    enabled: true
+    type: LoadBalancer
+    loadBalancerIP: 192.168.3.5
+    annotations:
+      metallb.universe.tf/allow-shared-ip: adguard-svc
+    ports:
+      dns-tls-udp:
+        enabled: true
+        port: 853
+        protocol: UDP
+        targetPort: 853
+  dns-tls-tcp:
+    enabled: true
+    type: LoadBalancer
+    loadBalancerIP: 192.168.3.5
+    annotations:
+      metallb.universe.tf/allow-shared-ip: adguard-svc
+    ports:
+      dns-tls-tcp:
+        enabled: true
+        port: 853
+        protocol: TCP
+        targetPort: 853
+
+# -- Configure persistence settings for the chart under this key.
+# @default -- See values.yaml
+persistence:
+  config:
+    enabled: true
+    mountPath: /opt/adguardhome/conf
+  data:
+    enabled: false
+    mountPath: /opt/adguardhome/work
+
+# config -- AdGuard Home configuration.
For a full list of options see https://github.com/AdguardTeam/AdGuardHome/wiki/Configuration. +# @default -- See values.yaml +config: | + bind_host: 0.0.0.0 + bind_port: 3000 + beta_bind_port: 0 + users: [] + auth_attempts: 5 + block_auth_min: 15 + http_proxy: "" + language: "" + debug_pprof: false + web_session_ttl: 720 + dns: + bind_hosts: + - 0.0.0.0 + port: 53 + statistics_interval: 1 + querylog_enabled: true + querylog_file_enabled: true + querylog_interval: 2160h + querylog_size_memory: 1000 + anonymize_client_ip: false + protection_enabled: true + blocking_mode: default + blocking_ipv4: "" + blocking_ipv6: "" + blocked_response_ttl: 10 + parental_block_host: family-block.dns.adguard.com + safebrowsing_block_host: standard-block.dns.adguard.com + ratelimit: 20 + ratelimit_whitelist: [] + refuse_any: true + upstream_dns: + - https://dns10.quad9.net/dns-query + upstream_dns_file: "" + bootstrap_dns: + - 9.9.9.10 + - 149.112.112.10 + - 2620:fe::10 + - 2620:fe::fe:10 + all_servers: false + fastest_addr: false + fastest_timeout: 1s + allowed_clients: [] + disallowed_clients: [] + blocked_hosts: + - version.bind + - id.server + - hostname.bind + trusted_proxies: + - 127.0.0.0/8 + - ::1/128 + cache_size: 4194304 + cache_ttl_min: 0 + cache_ttl_max: 0 + cache_optimistic: false + bogus_nxdomain: [] + aaaa_disabled: false + enable_dnssec: false + edns_client_subnet: false + max_goroutines: 300 + ipset: [] + filtering_enabled: true + filters_update_interval: 24 + parental_enabled: false + safesearch_enabled: false + safebrowsing_enabled: false + safebrowsing_cache_size: 1048576 + safesearch_cache_size: 1048576 + parental_cache_size: 1048576 + cache_time: 30 + rewrites: [] + blocked_services: [] + upstream_timeout: 10s + private_networks: [] + use_private_ptr_resolvers: true + local_ptr_upstreams: + - 192.168.1.1 + tls: + enabled: true + server_name: "dns.moll.re" + force_https: false + port_https: 443 + port_dns_over_tls: 853 + port_dns_over_quic: 853 + port_dnscrypt: 0 + dnscrypt_config_file: "" + allow_unencrypted_doh: false + strict_sni_check: false + certificate_chain: |- + -----BEGIN CERTIFICATE----- + MIIFyzCCA7OgAwIBAgIUEvyI5bCa56vvyQgTbLyR7+c7vQMwDQYJKoZIhvcNAQEL + BQAwdTELMAkGA1UEBhMCREUxCzAJBgNVBAgMAkJXMREwDwYDVQQHDAhGcmVpYnVy + ZzENMAsGA1UECgwEUmVteTEKMAgGA1UECwwBTTEQMA4GA1UEAwwHbW9sbC5yZTEZ + MBcGCSqGSIb3DQEJARYKbWVAbW9sbC5yZTAeFw0yMzA3MTUxNzQ0MTVaFw0yNDA3 + MTQxNzQ0MTVaMHUxCzAJBgNVBAYTAkRFMQswCQYDVQQIDAJCVzERMA8GA1UEBwwI + RnJlaWJ1cmcxDTALBgNVBAoMBFJlbXkxCjAIBgNVBAsMAU0xEDAOBgNVBAMMB21v + bGwucmUxGTAXBgkqhkiG9w0BCQEWCm1lQG1vbGwucmUwggIiMA0GCSqGSIb3DQEB + AQUAA4ICDwAwggIKAoICAQDpS0Xtii0VITKFr9XFLcWchI6//I7iMeKkYi7uEq60 + 1YZQ8/Zppg1M15BhD8ZEQ0JZ42ufi0p4B0LYMGHYF+2kKsbFxcEPQTUeXCLcjYVA + ueZ+GTh+FrUrSQvHSevUbVXytAwiqAN/eAvXBMdOKisPUM9Cmk/KHA+W+anw4Uxq + ZvHq5GG9Z0IksTHI2oEMp/8cZ8lRXzHmOUYQGveBX6PBPvcttP8GwCU6vsPVSphZ + 7XF2LPqeMnBGgmOz51QTRpS7NBHMsSDR20VgSTjI+F8nJnQsGO5Iq9IpQzlDlAsL + jgPOT3W/pdeZD1mX/c9EpYEKf/0ubEBiWc+kJqkrdmsUX6cZ06qEUa08yCMSzkao + mHrMzw22kjICG9h+0sZvTetPvpYZsBqQRejDS/cu+buAaDNchGNhl1YPp8iAlKUT + YB4gbcNqceCGUmbQX06B/OwJiYIoN5ghh2wmqNrFXYltfALBVhWFtU2DTAS9k399 + W2hd4u77uJngK0WLoKQuV/wi81dbk0kAI7eRUI1H/Y4hC1MCI5M6zewrJ7QgOYBi + qkYydYQGFu1ToDt6maDVBX05PcoBPwbUfrmZBjR5kzBawvH6reDuANkEXfJ0+2hA + JBAxXPKyQVc9Y87nDATvkl7qWOKjfJairKAd03lvJlesr6+7GwMMnE/6h91QF4Vq + OQIDAQABo1MwUTAdBgNVHQ4EFgQUunr29QozKy+AlTrq+PAoSjPFOQIwHwYDVR0j + BBgwFoAUunr29QozKy+AlTrq+PAoSjPFOQIwDwYDVR0TAQH/BAUwAwEB/zANBgkq + hkiG9w0BAQsFAAOCAgEAPeczDC1OScGZ6UVjFUF+BqI1Am9TwUNVD2cRnbXvQ2g7 + 
nU8vYSfWx00bhRTpuDEG997HkCCvaUYIArbGtgplB+bCk6GMnQQfnRWIyFz/cy+Y + yuftUY0PufXzCe33J2Q0SQCNKdEvOsfiPCkyrgMSlomoIDPhs4wQ8SOE0Lnl4fNw + i1uVDd6pTxwwfpfsvN5lBwXN+RDr1Awe07f9SJmYklqQAIP5Kthq7QJsN1QHvmtW + JL7AYlltDTUYvE2kBnQKjkNYv9Qj4PGUvipVlCKA4cEVAZXHam01RqPXEFj5I9B4 + Q9S+oT7htoXWuz9kAwsSCZVEW1QBzRL7UNIckMWsc1jRSiCT5Nc/sOtPyIc9in+i + J/XGPjSBvQZrnitLhR4qByG/dY+istQkcEERjElwhzucEyNkgtENJfJEevdJsrBf + oGaaK5ljemYsk1e+QHB3FWmNbIysKBMn44bHgu7DeQediLCjvwdasjVorDW1mv5Z + 8Aoe075vxTmHGSjfMPiAzJnYMy0zCT1VcR+AtPKUtr11z2xgOrAqZqlTaR/ud6ce + B11n3oIs5Kwarvhwx2Qw7XvcGOa2PBGZW4kcoDRn9GNFcP5K2AAuRJD9FLTbr8ZO + 6a0bv0KUksQYX+U/r3+qSn87TXyIJ1IbKY2jQYu/+KEpeyFnviXw+IoM/YHDqdw= + -----END CERTIFICATE----- + private_key: |- + -----BEGIN PRIVATE KEY----- + MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDpS0Xtii0VITKF + r9XFLcWchI6//I7iMeKkYi7uEq601YZQ8/Zppg1M15BhD8ZEQ0JZ42ufi0p4B0LY + MGHYF+2kKsbFxcEPQTUeXCLcjYVAueZ+GTh+FrUrSQvHSevUbVXytAwiqAN/eAvX + BMdOKisPUM9Cmk/KHA+W+anw4UxqZvHq5GG9Z0IksTHI2oEMp/8cZ8lRXzHmOUYQ + GveBX6PBPvcttP8GwCU6vsPVSphZ7XF2LPqeMnBGgmOz51QTRpS7NBHMsSDR20Vg + STjI+F8nJnQsGO5Iq9IpQzlDlAsLjgPOT3W/pdeZD1mX/c9EpYEKf/0ubEBiWc+k + JqkrdmsUX6cZ06qEUa08yCMSzkaomHrMzw22kjICG9h+0sZvTetPvpYZsBqQRejD + S/cu+buAaDNchGNhl1YPp8iAlKUTYB4gbcNqceCGUmbQX06B/OwJiYIoN5ghh2wm + qNrFXYltfALBVhWFtU2DTAS9k399W2hd4u77uJngK0WLoKQuV/wi81dbk0kAI7eR + UI1H/Y4hC1MCI5M6zewrJ7QgOYBiqkYydYQGFu1ToDt6maDVBX05PcoBPwbUfrmZ + BjR5kzBawvH6reDuANkEXfJ0+2hAJBAxXPKyQVc9Y87nDATvkl7qWOKjfJairKAd + 03lvJlesr6+7GwMMnE/6h91QF4VqOQIDAQABAoICAFXdtDe5X12DEf7dmJ9R+QVi + Ts5ADXEYrlQVpTNQIgiB/MVn/d6l1Qhe4Q+wiCeQ3+eIypB26qph9crvh9vK9tcx + PWcGocfVFtF9VQF7fzuzELCB5OaXwgfUA2dPAGN3+KXzefH5iAwPKcByzE6rO50P + /7ECbfK0QFKvwspbik4xZMIxW/4j9tbddzb3oX8AiGeylYkDMjEMDIsZ+dYe1v1m + CQFEOIeKCknkc9zZ71hOCjBWXsoCQ4vYKw1IzAuqM0zx3clKuoszGwZU/PcPX6pf + v2uJo46Q2zH/waBraWNP2nvBiFPJHSEDYtUMAJFCH0w3jn7bLhlk+AVxi1tpYwBx + SOFQKmKbJgTWpmX7o8bhyNmSg6gLTquKKYuOeUsJTe4SERnhKNVen/mf1BdV5S1A + iLj9mg5tFL1O+f8wl8q0QA5aM3o1G/YMlG28Na6X8l89BiDvfdG4YALzeJs5k1yn + VnpZElikhx63HQjaLE+u4nSBwr0s79Hnq4Xge+rEPCRVpHhfZ1T/Ka3NwqcflcM7 + GvvRnXfLLyfS3DOQg9BCwE94hzJgh7V4BqEQInzkAR3/wF83xTT0LaWLBsJXTsWr + rHcdPxpMVXNUfelBmA3Blu1d07lDw8kMzYXzCJ4AE9gjdgN9ltwjg7ZDQ3w6Tnc1 + 09aLmIUeRx6r7vs8pBMPAoIBAQD3epVeC4Urpmop21Jzop7nqvQqmHwDvUPIHKWZ + a1e9YmHfNR6Vibzw8jqjd7IJMd5mzlcot+bTjfFGxfZ/KidE5MB8rvwS0MVQnamZ + dnl1OX9c/+G4jW8xCzNQlkAXT2xcaMPO/ged6smdtZkvvnjfyX0L78fbKG+4fsc8 + PoIB5gXjApVVN4ujeaKUud2jr2uHueQqI8taZlhlIojxc1w/a9r0iiLK+sY/HvWH + gERxDFWQjg8kkFGXC3KFOz0UJiolDus9sK9cLcDI4IavOotVaxEoz778u9644+GM + wfRJCN8OBT3RQjPy77L1VOCjrbd1TtknDDG+kAN4ZLLEPCO/AoIBAQDxU6gqjGDy + SC1mSgl8x6ODkmCs2a9UvZeg9/KA/UzTGCLeSgftPwgCeGV6d6dpqFxsvqhVDVtp + pkqFa2+X0rsIG4JFl6qZTbXpJIqbdkTeWjjimg809fTqZnSJSchUiuIWzqvGlOSL + cM5c7+WNteLVHjldiNT0+jReXPtxAJD9jIV3LubmWZ5qs3tYXKGgQvCItLo6REYE + SKUZAsX/T6O6HAypv89AcS+UZxc2pq4htFRJY5XarLbs8BuDJAYWm3chMwwGIDEx + J7cCXWWWQkU7W1GOckU4oo6FPGzjREPwyeiYcvias2/nm4tOc5t0gRJHIR8W6tQF + 5An7lLSHe5AHAoIBAADiNSpSzDTtsS9ZEyBKklqtZ5XHWZoB0P4j7AtyMKwCb+sG + G4fZKA2ML91pjf8uaGbhkboZff9/YD8qccjec6lxT6aiUVAX4rx486QSojhi7it8 + 1md8SctZCOPexXfP1sk1ro1MpuZPckzX2yYqfe/+ni2uu33y1QNJoJh8eKZdFeRL + nBDj0+HPi18QktQEylN/vGrSGeXGu8YQq4CBMvEfB3ccDye+YXrUN3g2YwgsTRnp + B/DPexsY9V24am1p/XiIZxqfSOEBYNDWzGRPxzOU4EjPBRWN7ium1KVWA/NGztUT + +7aFj/3sES2DEhJDioYms+vJxVuy0/BYG7NLq60CggEAZCxZre+/flK/paot7gHg + ugjU4GssAH0Cp+rEWw7KCQYH00XfrHdxl7TqSr/IWm9sjidGMKfuvhgs7tz94YOz + 51Wj6cdfJWvAixqD/qxFQhcpbcaNcWp3U6Vb0nEyGwXbe6QmYbQEem1E/AcIvp41 + 
nkmBfnYCD/6cJl9qcCnQBa+C50osxomE3L3MAY3R+XhP6C887lrQxY5yGcOw9J3W + VLa3+u6H1TQmj++LD0B5H7x/EEeqOK9g71Fr2i/l5xR5iuppn1FVmhXmPbEPLiQs + IMtzOzHr0eqIRn4ipOP9X8IwLrfqwiyh0v4aAWKzsNSzBZuWEClCAX/7NNcxaNu9 + mQKCAQEA3dk8ScY8bVPgFg2x7oqujVZbrNizhw2+BXYuH6HRVINPDYzIapur9uiw + I+STHoUod8aRNvwDLfhkI+MabmEbt/eDsBpRrJYYLi2uTed5gIiLqPS8MPuKr++7 + UwJz4OPZu1xOjbFapvKvPSbPhS254tozQyi5Xbl8W268SCQhF+hEb+AT5JTcoPlI + ZNN5hp0Ooq6EouX8heyeG7le9V2G+HFHR9aWniD9kRRirO+oqWTXcG+9zHRhkdbF + 4vRGwZ8+mj/0fKAHlFpeDRiKNbma7rTNDyEDR9jQ+GOC1QmOYeiei6FDKYEPcHxh + UBWqdlD+gUjtzQvD3yMo7JN9DIO5Eg== + -----END PRIVATE KEY----- + certificate_path: "" + private_key_path: "" + filters: + - enabled: true + url: https://adguardteam.github.io/AdGuardSDNSFilter/Filters/filter.txt + name: AdGuard DNS filter + id: 1 + - enabled: true + url: https://adaway.org/hosts.txt + name: AdAway Default Blocklist + id: 2 + - enabled: true + url: https://someonewhocares.org/hosts/zero/hosts + name: Dan Pollock's List + id: 1684963532 + whitelist_filters: [] + user_rules: [] + dhcp: + enabled: false + interface_name: "" + local_domain_name: lan + dhcpv4: + gateway_ip: "" + subnet_mask: "" + range_start: "" + range_end: "" + lease_duration: 86400 + icmp_timeout_msec: 1000 + options: [] + dhcpv6: + range_start: "" + lease_duration: 86400 + ra_slaac_only: false + ra_allow_slaac: false + clients: + runtime_sources: + whois: true + arp: true + rdns: true + dhcp: true + hosts: true + persistent: [] + log_compress: false + log_localtime: false + log_max_backups: 0 + log_max_size: 100 + log_max_age: 3 + log_file: "" + verbose: false + os: + group: "" + user: "" + rlimit_nofile: 0 + schema_version: 14 diff --git a/apps/codeserver/deployment.yaml b/apps/codeserver/deployment.yaml new file mode 100644 index 0000000..dd1a6da --- /dev/null +++ b/apps/codeserver/deployment.yaml @@ -0,0 +1,126 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: codeserver +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: codeserver + name: codeserver-data-nfs + labels: + directory: codeserver +spec: + storageClassName: fast + capacity: + storage: "10Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /codeserver + server: nfs-server.storage.svc.cluster.local +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: codeserver + name: codeserver-data-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Gi" + selector: + matchLabels: + directory: codeserver + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: codeserver + name: codeserver + labels: + app: codeserver +spec: + replicas: 1 + selector: + matchLabels: + app: codeserver + template: + metadata: + labels: + app: codeserver + spec: + containers: + - name: codeserver + image: gitpod/openvscode-server + ports: + - containerPort: 3000 + volumeMounts: + - mountPath: /home/workspace + name: codeserver-data + + + volumes: + - name: codeserver-data + persistentVolumeClaim: + claimName: codeserver-data-nfs + +--- +apiVersion: v1 +kind: Service +metadata: + namespace: codeserver + name: codeserver + +spec: + type: ClusterIP + ports: + - name: http + port: 3000 + selector: + app: codeserver + +--- + + +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: codeserver-ingress + namespace: codeserver +spec: + entryPoints: + - websecure + routes: + - match: Host(`code.kluster.moll.re`) + middlewares: + - name: codeserver-websocket + kind: Rule + services: + - name: codeserver 
+ port: 3000 + tls: + certResolver: default-tls + +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: codeserver-websocket + namespace: codeserver +spec: + headers: + customRequestHeaders: + X-Forwarded-Proto: "https" + # enable websockets + Upgrade: "websocket" + + + diff --git a/apps/dendrite/ingressroute.yaml b/apps/dendrite/ingressroute.yaml new file mode 100644 index 0000000..369faf5 --- /dev/null +++ b/apps/dendrite/ingressroute.yaml @@ -0,0 +1,17 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + namespace: dendrite + name: dendrite-ingressroute + +spec: + entryPoints: + - websecure + routes: + - match: Host(`dendrite.kluster.moll.re`) + kind: Rule + services: + - name: dendrite + port: 8008 + tls: + certResolver: default-tls \ No newline at end of file diff --git a/apps/dendrite/values.yaml b/apps/dendrite/values.yaml new file mode 100644 index 0000000..a655edd --- /dev/null +++ b/apps/dendrite/values.yaml @@ -0,0 +1,407 @@ +image: + # -- Docker repository/image to use + repository: "ghcr.io/matrix-org/dendrite-monolith" + # -- Kubernetes pullPolicy + pullPolicy: IfNotPresent + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + + +# signing key to use +signing_key: + # -- Create a new signing key, if not exists + create: true + # -- Use an existing secret + existingSecret: "" + +# -- Default resource requests/limits. +# @default -- sets some sane default values +resources: + requests: + memory: "512Mi" + limits: + memory: "4096Mi" + +persistence: + # -- The storage class to use for volume claims. + # Used unless specified at the specific component. + # Defaults to the cluster default storage class. + storageClass: "nfs-client" + jetstream: + # -- Use an existing volume claim for jetstream + existingClaim: "" + # -- PVC Storage Request for the jetstream volume + capacity: "1Gi" + # -- The storage class to use for volume claims. + # Defaults to persistence.storageClass + storageClass: "" + media: + # -- Use an existing volume claim for media files + existingClaim: "" + # -- PVC Storage Request for the media volume + capacity: "1Gi" + # -- The storage class to use for volume claims. + # Defaults to persistence.storageClass + storageClass: "" + search: + # -- Use an existing volume claim for the fulltext search index + existingClaim: "" + # -- PVC Storage Request for the search volume + capacity: "1Gi" + # -- The storage class to use for volume claims. + # Defaults to persistence.storageClass + storageClass: "" + +# -- Add additional volumes to the Dendrite Pod +extraVolumes: [] +# ex. +# - name: extra-config +# secret: +# secretName: extra-config + +# -- Configure additional mount points volumes in the Dendrite Pod +extraVolumeMounts: [] +# ex. +# - mountPath: /etc/dendrite/extra-config +# name: extra-config + +strategy: + # -- Strategy to use for rolling updates (e.g. Recreate, RollingUpdate) + # If you are using ReadWriteOnce volumes, you should probably use Recreate + type: RollingUpdate + rollingUpdate: + # -- Maximum number of pods that can be unavailable during the update process + maxUnavailable: 25% + # -- Maximum number of pods that can be scheduled above the desired number of pods + maxSurge: 25% + +dendrite_config: + version: 2 + global: + # -- **REQUIRED** Servername for this Dendrite deployment. + server_name: "dendrite.kluster.moll.re" + + # -- The private key to use. 
(**NOTE**: This is overridden in Helm)
+    private_key: /etc/dendrite/secrets/signing.key
+
+    # -- The server name to delegate server-server communications to, with optional port
+    # e.g. localhost:443
+    well_known_server_name: "dendrite.kluster.moll.re:443"
+
+    # -- The server name to delegate client-server communications to, with optional port
+    # e.g. localhost:443
+    well_known_client_name: "dendrite.kluster.moll.re:443"
+
+    # -- Lists of domains that the server will trust as identity servers to verify third
+    # party identifiers such as phone numbers and email addresses.
+    trusted_third_party_id_servers:
+      - matrix.org
+      - vector.im
+
+    # -- The paths and expiry timestamps (as a UNIX timestamp in millisecond precision)
+    # to old signing keys that were formerly in use on this domain name. These
+    # keys will not be used for federation request or event signing, but will be
+    # provided to any other homeserver that asks when trying to verify old events.
+    old_private_keys:
+    # If the old private key file is available:
+    #   - private_key: old_matrix_key.pem
+    #     expired_at: 1601024554498
+    # If only the public key (in base64 format) and key ID are known:
+    #   - public_key: mn59Kxfdq9VziYHSBzI7+EDPDcBS2Xl7jeUdiiQcOnM=
+    #     key_id: ed25519:mykeyid
+    #     expired_at: 1601024554498
+
+    # -- Disable federation. Dendrite will not be able to make any outbound HTTP requests
+    # to other servers and the federation API will not be exposed.
+    disable_federation: false
+
+    key_validity_period: 168h0m0s
+
+    database:
+      # -- The connection string for connections to Postgres.
+      # This will be set automatically if using the Postgres dependency
+      connection_string: ""
+
+      # -- Default database maximum open connections
+      max_open_conns: 90
+      # -- Default database maximum idle connections
+      max_idle_conns: 5
+      # -- Default database maximum lifetime
+      conn_max_lifetime: -1
+
+    jetstream:
+      # -- Persistent directory to store JetStream streams in.
+      storage_path: "/data/jetstream"
+      # -- NATS JetStream server addresses if not using internal NATS.
+      addresses: []
+      # -- The prefix for JetStream streams
+      topic_prefix: "Dendrite"
+      # -- Keep all data in memory. (**NOTE**: This is overridden in Helm to `false`)
+      in_memory: false
+      # -- Disables TLS validation. This should **NOT** be used in production.
+      disable_tls_validation: true
+
+    cache:
+      # -- The estimated maximum size for the global cache in bytes, or in terabytes,
+      # gigabytes, megabytes or kilobytes when the appropriate 'tb', 'gb', 'mb' or
+      # 'kb' suffix is specified. Note that this is not a hard limit, nor is it a
+      # memory limit for the entire process. A cache that is too small may ultimately
+      # provide little or no benefit.
+      max_size_estimated: 1gb
+      # -- The maximum amount of time that a cache entry can live for in memory before
+      # it will be evicted and/or refreshed from the database. Lower values result in
+      # easier admission of new cache entries but may also increase database load in
+      # comparison to higher values, so adjust conservatively. Higher values may make
+      # it harder for new items to make it into the cache, e.g. if new rooms suddenly
+      # become popular.
+      max_age: 1h
+
+    report_stats:
+      # -- Configures phone-home statistics reporting. These statistics contain the server
+      # name, number of active users and some information on your deployment config.
+      # We use this information to understand how Dendrite is being used in the wild.
+      enabled: false
+      # -- Endpoint to report statistics to.
+      endpoint: https://matrix.org/report-usage-stats/push
+
+    presence:
+      # -- Controls whether we receive presence events from other servers
+      enable_inbound: false
+      # -- Controls whether we send presence events for our local users to other servers.
+      # (_May increase CPU/memory usage_)
+      enable_outbound: false
+
+    server_notices:
+      # -- Server notices allow server admins to send messages to all users on the server.
+      enabled: false
+      # -- The local part for the user sending server notices.
+      local_part: "_server"
+      # -- The display name for the user sending server notices.
+      display_name: "Server Alerts"
+      # -- The avatar (as a mxc:// URL) for the user sending server notices.
+      avatar_url: ""
+      # The room name to be used when sending server notices. This room name will
+      # appear in user clients.
+      room_name: "Server Alerts"
+
+    # prometheus metrics
+    metrics:
+      # -- Whether or not Prometheus metrics are enabled.
+      enabled: false
+      # HTTP basic authentication to protect access to monitoring.
+      basic_auth:
+        # -- HTTP basic authentication username
+        user: "metrics"
+        # -- HTTP basic authentication password
+        password: metrics
+
+    dns_cache:
+      # -- Whether or not the DNS cache is enabled.
+      enabled: false
+      # -- Maximum number of entries to hold in the DNS cache
+      cache_size: 256
+      # -- Duration for how long DNS cache items should be considered valid ([see time.ParseDuration](https://pkg.go.dev/time#ParseDuration) for more)
+      cache_lifetime: "10m"
+
+    profiling:
+      # -- Enable pprof. You will need to manually create a port forwarding to the deployment to access pprof,
+      # as it will only listen on localhost and the defined port.
+      # e.g. `kubectl port-forward deployments/dendrite 65432:65432`
+      enabled: false
+      # -- pprof port, if enabled
+      port: 65432
+
+    # -- Configuration for experimental MSCs. (Valid values are: msc2836)
+    mscs:
+      mscs: []
+      # A list of enabled MSCs
+      # Currently valid values are:
+      # - msc2836 (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
+
+  app_service_api:
+    # -- Disable the validation of TLS certificates of appservices. This is
+    # not recommended in production since it may allow appservice traffic
+    # to be sent to an insecure endpoint.
+    disable_tls_validation: false
+    # -- Appservice config files to load on startup. (**NOTE**: This is overridden by Helm if a folder `./appservices/` exists)
+    config_files: []
+
+  client_api:
+    # -- Prevents new users from being able to register on this homeserver, except when
+    # using the registration shared secret below.
+    registration_disabled: true
+
+    # Prevents new guest accounts from being created. Guest registration is also
+    # disabled implicitly by setting 'registration_disabled' above.
+    guests_disabled: true
+
+    # -- If set, allows registration by anyone who knows the shared secret, regardless of
+    # whether registration is otherwise disabled.
+    registration_shared_secret: "this is the shared secret"
+
+    # -- enable reCAPTCHA registration
+    enable_registration_captcha: false
+    # -- reCAPTCHA public key
+    recaptcha_public_key: ""
+    # -- reCAPTCHA private key
+    recaptcha_private_key: ""
+    # -- reCAPTCHA bypass secret
+    recaptcha_bypass_secret: ""
+    recaptcha_siteverify_api: ""
+
+    # TURN server information that this homeserver should send to clients.
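+    # A commented-out sketch of what a populated block could look like if a TURN
+    # server is added later (the hostname and secret are placeholders, not values
+    # from this cluster):
+    #   turn:
+    #     turn_uris:
+    #       - turn:turn.example.com:3478?transport=udp
+    #       - turn:turn.example.com:3478?transport=tcp
+    #     turn_shared_secret: "<shared-secret>"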
+    turn:
+      # -- Duration for how long users should be considered valid ([see time.ParseDuration](https://pkg.go.dev/time#ParseDuration) for more)
+      turn_user_lifetime: "24h"
+      turn_uris: []
+      turn_shared_secret: ""
+      # -- The TURN username
+      turn_username: ""
+      # -- The TURN password
+      turn_password: ""
+
+    rate_limiting:
+      # -- Enable rate limiting
+      enabled: true
+      # -- After how many requests a rate limit should be activated
+      threshold: 20
+      # -- Cooloff time in milliseconds
+      cooloff_ms: 500
+      # -- Users which should be exempt from rate limiting
+      exempt_user_ids:
+
+  federation_api:
+    # -- Federation failure threshold. How many consecutive failures that we should
+    # tolerate when sending federation requests to a specific server. The backoff
+    # is 2**x seconds, so 1 = 2 seconds, 2 = 4 seconds, 3 = 8 seconds, etc.
+    # The default value is 16 if not specified, which is circa 18 hours.
+    send_max_retries: 16
+    # -- Disable TLS validation. This should **NOT** be used in production.
+    disable_tls_validation: false
+    prefer_direct_fetch: false
+    # -- Prevents Dendrite from keeping HTTP connections
+    # open for reuse for future requests. Connections will be closed quicker
+    # but we may spend more time on TLS handshakes instead.
+    disable_http_keepalives: false
+    # -- Perspective keyservers, to use as a backup when direct key fetch
+    # requests don't succeed.
+    # @default -- See value.yaml
+    key_perspectives:
+      - server_name: matrix.org
+        keys:
+          - key_id: ed25519:auto
+            public_key: Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw
+          - key_id: ed25519:a_RXGa
+            public_key: l8Hft5qXKn1vfHrg3p4+W8gELQVo8N13JkluMfmn2sQ
+
+  media_api:
+    # -- The path to store media files (e.g. avatars) in
+    base_path: "/data/media_store"
+    # -- The max file size for uploaded media files
+    max_file_size_bytes: 10485760
+    # Whether to dynamically generate thumbnails if needed.
+    dynamic_thumbnails: false
+    # -- The maximum number of simultaneous thumbnail generators to run.
+    max_thumbnail_generators: 10
+    # -- A list of thumbnail sizes to be generated for media content.
+    # @default -- See value.yaml
+    thumbnail_sizes:
+      - width: 32
+        height: 32
+        method: crop
+      - width: 96
+        height: 96
+        method: crop
+      - width: 640
+        height: 480
+        method: scale
+
+  sync_api:
+    # -- This option controls which HTTP header to inspect to find the real remote IP
+    # address of the client. This is likely required if Dendrite is running behind
+    # a reverse proxy server.
+    real_ip_header: X-Real-IP
+    # -- Configuration for the full-text search engine.
+    search:
+      # -- Whether fulltext search is enabled.
+      enabled: true
+      # -- The path to store the search index in.
+      index_path: "/data/search"
+      # -- The language most likely to be used on the server - used when indexing, to
+      # ensure the returned results match expectations. A full list of possible languages
+      # can be found [here](https://github.com/matrix-org/dendrite/blob/76db8e90defdfb9e61f6caea8a312c5d60bcc005/internal/fulltext/bleve.go#L25-L46)
+      language: "en"
+
+  user_api:
+    # -- bcrypt cost to use when hashing passwords.
+    # (ranges from 4-31; 4 being least secure, 31 being most secure; _NOTE: Using a too high value can cause clients to timeout and uses more CPU._)
+    bcrypt_cost: 10
+    # -- OpenID Token lifetime in milliseconds.
+    openid_token_lifetime_ms: 3600000
+    # -- Disable TLS validation when hitting push gateways. This should **NOT** be used in production.
+ push_gateway_disable_tls_validation: false + # -- Rooms to join users to after registration + auto_join_rooms: [] + + # -- Default logging configuration + logging: + - type: std + level: info + +postgresql: + # -- Enable and configure postgres as the database for dendrite. + # @default -- See value.yaml + enabled: true + image: + repository: bitnami/postgresql + tag: "15.1.0" + auth: + username: dendrite + password: changeme + database: dendrite + + persistence: + enabled: true + +ingress: + # -- Create an ingress for the deployment + enabled: false + # -- The ingressClass to use. Will be converted to annotation if not yet supported. + className: "" + # -- Extra, custom annotations + annotations: {} + # -- The ingress hostname for your matrix server. + # Should align with the server_name and well_known_* hosts. + # If not set, generated from the dendrite_config values. + hostName: "" + # -- TLS configuration. Should contain information for the server_name and well-known hosts. + # Alternatively, set tls.generate=true to generate defaults based on the dendrite_config. + tls: [] + +service: + type: ClusterIP + port: 8008 + +prometheus: + servicemonitor: + # -- Enable ServiceMonitor for Prometheus-Operator for scrape metric-endpoint + enabled: false + # -- Extra Labels on ServiceMonitor for selector of Prometheus Instance + labels: {} + rules: + # -- Enable PrometheusRules for Prometheus-Operator for setup alerting + enabled: false + # -- Extra Labels on PrometheusRules for selector of Prometheus Instance + labels: {} + # -- additional alertrules (no default alertrules are provided) + additionalRules: [] + +grafana: + dashboards: + enabled: false + # -- Extra Labels on ConfigMap for selector of grafana sidecar + labels: + grafana_dashboard: "1" + # -- Extra Annotations on ConfigMap additional config in grafana sidecar + annotations: {} + diff --git a/apps/finance/actualbudget.deployment.yaml b/apps/finance/actualbudget.deployment.yaml new file mode 100644 index 0000000..461b8b0 --- /dev/null +++ b/apps/finance/actualbudget.deployment.yaml @@ -0,0 +1,100 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: finance + name: actualbudget + labels: + app: actualbudget +spec: +# deployment running a single container + selector: + matchLabels: + app: actualbudget + replicas: 1 + template: + metadata: + labels: + app: actualbudget + spec: + containers: + - name: actualbudget + image: actualbudget/actual-server:latest + imagePullPolicy: Always + env: + - name: TZ + value: Europe/Berlin + volumeMounts: + - name: actualbudget-data-nfs + mountPath: /data + ports: + - containerPort: 5006 + name: http + protocol: TCP + volumes: + - name: actualbudget-data-nfs + persistentVolumeClaim: + claimName: actualbudget-data-nfs +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: finance + name: "actualbudget-data-nfs" +spec: + storageClassName: fast + capacity: + storage: "5Gi" + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/actualbudget + server: 192.168.1.157 + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: finance + name: "actualbudget-data-nfs" +spec: + storageClassName: "fast" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "5Gi" + # selector: + # matchLabels: + # directory: "journal-data" + +--- +apiVersion: v1 +kind: Service +metadata: + namespace: finance + name: actualbudget +spec: + selector: + app: actualbudget + ports: + - protocol: TCP + port: 5006 + targetPort: 5006 + type: ClusterIP +--- +apiVersion: 
traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + namespace: finance + name: actualbudget +spec: + entryPoints: + - websecure + routes: + - match: Host(`actualbudget.kluster.moll.re`) + kind: Rule + services: + - name: actualbudget + port: 5006 + tls: + certResolver: default-tls diff --git a/apps/finance/firefly-importer.deployment.yaml b/apps/finance/firefly-importer.deployment.yaml new file mode 100644 index 0000000..03872e0 --- /dev/null +++ b/apps/finance/firefly-importer.deployment.yaml @@ -0,0 +1,66 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: firefly-importer + name: firefly-importer + namespace: finance +spec: + selector: + matchLabels: + app: firefly-importer + template: + metadata: + labels: + app: firefly-importer + spec: + containers: + - image: fireflyiii/data-importer:latest + imagePullPolicy: Always + name: firefly-importer + resources: {} + ports: + - containerPort: 8080 + env: + - name: FIREFLY_III_ACCESS_TOKEN + value: redacted + - name: FIREFLY_III_URL + value: firefly-http:8080 + # - name: APP_URL + # value: https://finance.kluster.moll.re + - name: TRUSTED_PROXIES + value: "**" + +--- +apiVersion: v1 +kind: Service +metadata: + name: firefly-importer-http + namespace: finance + labels: + app: firefly-importer-http + +spec: + type: ClusterIP + ports: + - port: 8080 + # name: http + selector: + app: firefly-importer +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: firefly-importer-ingress + namespace: finance +spec: + entryPoints: + - websecure + routes: + - match: Host(`importer.finance.kluster.moll.re`) + kind: Rule + services: + - name: firefly-importer-http + port: 8080 + tls: + certResolver: default-tls \ No newline at end of file diff --git a/apps/finance/firefly.deployment.yaml b/apps/finance/firefly.deployment.yaml new file mode 100644 index 0000000..e109a72 --- /dev/null +++ b/apps/finance/firefly.deployment.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: firefly + name: firefly + namespace: finance +spec: + selector: + matchLabels: + app: firefly + template: + metadata: + labels: + app: firefly + spec: + containers: + - image: fireflyiii/core:latest + imagePullPolicy: Always + name: firefly + resources: {} + ports: + - containerPort: 8080 + env: + - name: APP_ENV + value: "local" + - name: APP_KEY + value: iKejRAlgwx2Y/fxdosXjABbNxNzEuJdl + - name: DB_CONNECTION + value: sqlite + - name: APP_URL + value: https://finance.kluster.moll.re + - name: TRUSTED_PROXIES + value: "**" + volumeMounts: + - mountPath: /var/www/html/storage/database + name: firefly-database + + + volumes: + - name: firefly-database + persistentVolumeClaim: + claimName: firefly-database-nfs + +--- +apiVersion: v1 +kind: Service +metadata: + name: firefly-http + namespace: finance + labels: + app: firefly-http + +spec: + type: ClusterIP + ports: + - port: 8080 + # name: http + selector: + app: firefly +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: firefly-ingress + namespace: finance +spec: + entryPoints: + - websecure + routes: + - match: Host(`finance.kluster.moll.re`) + kind: Rule + services: + - name: firefly-http + port: 8080 + tls: + certResolver: default-tls + + diff --git a/apps/finance/firefly.pvc.yaml b/apps/finance/firefly.pvc.yaml new file mode 100644 index 0000000..b06faf9 --- /dev/null +++ b/apps/finance/firefly.pvc.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: finance + name: 
firefly-database-nfs
+  labels:
+    directory: firefly
+spec:
+  storageClassName: fast
+  volumeMode: Filesystem
+  accessModes:
+    - ReadOnlyMany
+  capacity:
+    storage: "1G"
+
+  nfs:
+    path: /firefly # path inside the NFS export
+    server: 10.43.239.43 # ClusterIP assigned to the nfs-server service; stable as long as the service is not redeployed
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  namespace: finance
+  name: firefly-database-nfs
+spec:
+  resources:
+    requests:
+      storage: "1G"
+  storageClassName: fast
+  accessModes:
+    - ReadOnlyMany
+  selector:
+    matchLabels:
+      directory: firefly
+---
diff --git a/apps/gitea/drone-kube-runner.deployment.yaml b/apps/gitea/drone-kube-runner.deployment.yaml
new file mode 100644
index 0000000..6dd8591
--- /dev/null
+++ b/apps/gitea/drone-kube-runner.deployment.yaml
@@ -0,0 +1,89 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: drone-runner
+  namespace: gitea
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: gitea
+  name: drone-runner
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - pods/log
+  verbs:
+  - get
+  - create
+  - delete
+  - list
+  - watch
+  - update
+
+---
+
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: drone-runner
+  namespace: gitea
+subjects:
+- kind: ServiceAccount
+  name: drone-runner
+  namespace: gitea
+roleRef:
+  kind: Role
+  name: drone-runner
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  namespace: gitea
+  name: drone-runner
+  labels:
+    app.kubernetes.io/name: drone-runner
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: drone-runner
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: drone-runner
+    spec:
+      serviceAccountName: drone-runner
+      containers:
+      - name: runner
+        image: drone/drone-runner-kube:latest
+        ports:
+        - containerPort: 3000
+        env:
+        - name: DRONE_RPC_HOST
+          value: drone-server:80
+        - name: DRONE_RPC_PROTO
+          value: http
+        - name: DRONE_RPC_SECRET
+          valueFrom:
+            secretKeyRef:
+              name: drone-server-secret
+              key: rpc_secret
+        - name: DRONE_NAMESPACE_DEFAULT
+          value: gitea
+        # - name: DRONE_NAMESPACE_RULES
+        #   value: "drone-runner:*"
+        - name: DRONE_SERVICE_ACCOUNT_DEFAULT
+          value: drone-runner
\ No newline at end of file
diff --git a/apps/gitea/drone-server.deployment.yaml b/apps/gitea/drone-server.deployment.yaml
new file mode 100644
index 0000000..a12a73f
--- /dev/null
+++ b/apps/gitea/drone-server.deployment.yaml
@@ -0,0 +1,129 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: drone-server
+  namespace: gitea
+  labels:
+    app: drone-server
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: drone-server
+  template:
+    metadata:
+      labels:
+        app: drone-server
+    spec:
+      containers:
+      - name: drone
+        image: drone/drone:latest
+        env:
+        - name: DRONE_SERVER_PORT # because the deployment is called drone-server, override this var again!
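+          # (Kubernetes injects DRONE_SERVER_PORT=tcp://<cluster-ip>:80 as a service
+          # link for the drone-server Service, which would otherwise shadow Drone's
+          # own variable of the same name, so it is pinned to ":80" below.)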
+ value: ":80" + - name: DRONE_GITEA_SERVER + value: https://git.kluster.moll.re + - name: DRONE_GITEA_CLIENT_ID + valueFrom: + secretKeyRef: + name: drone-server-secret + key: client_id + - name: DRONE_GITEA_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: drone-server-secret + key: client_secret + - name: DRONE_RPC_SECRET + valueFrom: + secretKeyRef: + name: drone-server-secret + key: rpc_secret + - name: DRONE_SERVER_HOST + value: drone.kluster.moll.re + - name: DRONE_SERVER_PROTO + value: https + resources: + requests: + memory: "1Gi" + cpu: 1.5 + volumeMounts: + - mountPath: /data + name: drone-data-nfs + volumes: + - name: drone-data-nfs + persistentVolumeClaim: + claimName: drone-data-nfs + +--- +apiVersion: v1 +kind: Service +metadata: + name: drone-server + namespace: gitea + labels: + app: drone-server + +spec: + type: ClusterIP + ports: + - port: 80 + name: http + selector: + app: drone-server +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: drone-server-ingress + namespace: gitea + +spec: + entryPoints: + - websecure + routes: + - match: Host(`drone.kluster.moll.re`) + kind: Rule + services: + - name: drone-server + port: 80 + tls: + certResolver: default-tls + + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: gitea + name: drone-data-nfs + labels: + directory: drone +spec: + storageClassName: fast + capacity: + storage: "1Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/drone + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: gitea + name: drone-data-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + selector: + matchLabels: + directory: drone + + + diff --git a/apps/gitea/drone-servers.sealedsecret.yaml b/apps/gitea/drone-servers.sealedsecret.yaml new file mode 100644 index 0000000..0395567 --- /dev/null +++ b/apps/gitea/drone-servers.sealedsecret.yaml @@ -0,0 +1,23 @@ +{ + "kind": "SealedSecret", + "apiVersion": "bitnami.com/v1alpha1", + "metadata": { + "name": "drone-server-secret", + "namespace": "gitea", + "creationTimestamp": null + }, + "spec": { + "template": { + "metadata": { + "name": "drone-server-secret", + "namespace": "gitea", + "creationTimestamp": null + } + }, + "encryptedData": { + "client_id": "AgA53a7kGJ6zZcx2ooTvTNwxaW2FvfzHJnxg6co54+HXinTJKsc4+GJ1PtdIbsZ7Dgu/sLi/4X90fT+PT2sgEx9jIilmHPdJeRtwV1UID3Y46A7cJlfcAKwNOFzp2PWvBvizbNp7tbJwxeAYnVX8GfN6fi700QxBGqAI3u8qQvLpU6UGW2RM96gCXI7s1QhE1Le6TgoESy5HX95pB7csDRNSwVE02OWfDHKEjH8QD8UvBB9xct6uwDfu7KrsJiNJvWMP6arvpfhy/X+UtCTFmj5wmFYL7oc6vSiCkq+QyHgQTEHTmGpEjEGKcQxPQaus3KhbhcxQBYLMEMYRlLPH0AEAA4dzbSpoVXM3LuIe9FppgrTCknK1uRB8wyrHUeInWO8mG7UraV6m5PUS+UYODMvfjwY3PyiGhTSf6LgMlhMl8e+2rb+OsWphT8Pbeom33PucrYaRFr9RpQkJSwE6HU3JEh25YLfIJ7caqRND8C/p8kD679C8UMcNpBN8WS4Cswn5jzmwbeJNM5DGp9yQVZNx7Bv3dHzx9i3ShjJ6QQnR/zWJZ/dWLy6weGYmdZMMXRAO8CCdruvcX5YyeieXZfchSIlZ/GqqBHptdcLpwLiZsfmyTWeBvk5pMAsZaKJ1tfWpQ84s4epzMoieTfhTueGXmeRKX+DJBBcriU+5YoqNxpU1lPL+LoInorJSKN7c3ouFx78N3GDOCq7mlWI94lY0bIs5zhrfUN137ITCcED62AJ7vks=", + "client_secret": 
"AgDQXU7x6RLhE9Hc+goeR2+3rW316SLLLA8tfqx3tsykL+vxhRkY5UCEaak3Rgei0k14jB/Rmme+/O/D1/5tc/i885+sGn0yjU7Jo4L5nkIssUOHlmRSGkRJDb9ABPauFXAjap9KLix9bd8ewI7R0lS3tOK9ZhThYhcfDUqV9qkkbSHzwNptkH7gYWt9qzG/rqqqpFP+PCtjzKVve4LCBgaxetcnh1t+d5oh7VAFnSI9Bt1G/DRzi+K3YZ+YG5+XKevBp06GMiLUMiv/eUvmOfAB/KO79LnNVbOcRsAHfnqLbXgNjFzspr5xDiGMC/ma1245LavywqXDp0S9jjNEe48i51PPQMwHWV8XEovsM6LHcteluNogt+VkL4mOnmP+sba/V3NO51rt1WXl+ca+U4kBq4dLMsdpWUKemz9BlIRC4etEXjwKJ5DznT7u6GUTrXx2RCm1j0OYWM++P10SdyD6tGjKnZf88a33Wrwm8Y7c47JrPTlP4PqLq9gzvD310uVfs1vGYGULaToGy+D/th8qiWWlu7BIfwqlIj8lruVnOhQ4GeEZmUAsqYf8JfsBwuDc0Y+8qbwjFrr2z+5x+2XBL8KGZVopyme45SHijlBZs7YsJqTBsg5oW09grM8/oO731GtzSYmpat2VZlaILuTjALqo/cu//kxwmqh7UX+jnTJ/2N3bKKSAfHWbHDeHeS2XJ+eKaI4onNYW9J70EfAP3vOpU+zmQ8rOzJuJjRt0HarLwzc5CXb1Xhlgsaoj7zKXPQMnqIDngg==", + "rpc_secret": "AgAcJNCFtOhK28vnLredkTgsVpnMPwaXss5NT5ysc0IbVid2vWRk2CTjBZc5DzjxxLwI1Ok88MFXHP08ZGCYy4rIbwoi7Ei1OEevGWfaI4n5CvAxr4ZamQHSfIX9dVAm9BSSx2M/mDtCKqVEGJEzyHCedrxf6LXM/YTNgjD43BuCZZMu35mRsHItpYFZQSttlHiUvR8y2YKrhV2P7fiWRD3cCVao8ldzKfGuvRfal8ByGoxpsYLj2D9CdtPvRF/TQsWUJJWwzbI9DmbW1MMI4/b26Jfa5TBvHxS1MQxFJpSXuMIengO+b0bi7WaR36y/FrKSNxIrQDHI7XCb00yYaSfj3RkSBVoAD0a2p8vNupHCqsKBoaWd8tMv/wGP8wbBk4DgGeQiTIvfhbQZU/Q2/LVDDficjXVn3IuKP/cqgGVf6lUh5YsUSs8qwpMil7XySiHvaZn+iFAnsXoejd4S2e/pbRvyaxP1aa7TCxnINjpU7IrnUEUiI4glQmAte3MqZWLXcc0Uk3Qz9PP0cD+V8qCOryrPMP2kTAI8LT/K4DgcEMAEGes4Vx1l0oBMF0xJvhM2kZXcEcf0NzuQJvYTgZpQF5xp0TchezLshmEUSIkII9NvAvn+iEYJeHsJUDijjmBloSYe4+QTgdYh6FakVUwYI5U4ztDNrvgqhWjExfbn8HxaFzsNTsuzGoYs+jwXH8Wk2z1Q1oQjDdO5YTjmdqvkSTdin/5CiuCDHaQX6a4gNQ==" + } + } +} diff --git a/apps/gitea/gitea.ingress.yaml b/apps/gitea/gitea.ingress.yaml new file mode 100644 index 0000000..47d7697 --- /dev/null +++ b/apps/gitea/gitea.ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: gitea-ingress + namespace: gitea +spec: + entryPoints: + - websecure + routes: + - match: Host(`git.kluster.moll.re`) + kind: Rule + services: + - name: gitea-http + port: 3000 + tls: + certResolver: default-tls + + diff --git a/apps/gitea/gitea.pvc.yaml b/apps/gitea/gitea.pvc.yaml new file mode 100644 index 0000000..fe05361 --- /dev/null +++ b/apps/gitea/gitea.pvc.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: gitea + name: gitea-data-nfs + labels: + directory: gitea +spec: + storageClassName: fast + capacity: + storage: "10Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/gitea/data + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: gitea + name: gitea-data-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Gi" + selector: + matchLabels: + directory: gitea +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: gitea + name: gitea-postgresql-data-nfs + labels: + directory: gitea +spec: + storageClassName: fast + capacity: + storage: "5Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/gitea/postgres + server: 192.168.1.157 + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: gitea + name: gitea-postgresql-data-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "5Gi" + selector: + matchLabels: + directory: gitea +--- \ No newline at end of file diff --git a/apps/gitea/gitea.values.yaml b/apps/gitea/gitea.values.yaml new file mode 100644 index 0000000..5a2b94a --- 
/dev/null
+++ b/apps/gitea/gitea.values.yaml
@@ -0,0 +1,497 @@
+# Default values for gitea.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+## @section Global
+#
+## @param global.imageRegistry global image registry override
+## @param global.imagePullSecrets global image pull secrets override; can be extended by `imagePullSecrets`
+## @param global.storageClass global storage class override
+## @param global.hostAliases global hostAliases which will be added to the pod's hosts files
+global:
+  imageRegistry: ""
+  ## E.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: ""
+  hostAliases: []
+  # - ip: 192.168.137.2
+  #   hostnames:
+  #     - example.com
+
+## @param replicaCount number of replicas for the statefulset
+replicaCount: 1
+
+## @param clusterDomain cluster domain
+clusterDomain: cluster.local
+
+## @section Image
+## @param image.registry image registry, e.g. gcr.io,docker.io
+## @param image.repository Image to start for this pod
+## @param image.tag Visit: [Image tag](https://hub.docker.com/r/gitea/gitea/tags?page=1&ordering=last_updated). Defaults to `appVersion` within Chart.yaml.
+## @param image.pullPolicy Image pull policy
+## @param image.rootless Whether or not to pull the rootless version of Gitea, only works on Gitea 1.14.x or higher
+image:
+  registry: ""
+  repository: gitea/gitea
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
+  pullPolicy: Always
+  rootless: false # only possible when running 1.14 or later
+
+## @param imagePullSecrets Secret to use for pulling the image
+imagePullSecrets: []
+
+## @section Security
+# Security context is only usable with rootless image due to image design
+## @param podSecurityContext.fsGroup Set the shared file system group for all containers in the pod.
+podSecurityContext:
+  fsGroup: 1000
+
+## @param containerSecurityContext Security context
+containerSecurityContext: {}
+#   allowPrivilegeEscalation: false
+#   capabilities:
+#     drop:
+#       - ALL
+#   # Add the SYS_CHROOT capability for root and rootless images if you intend to
+#   # run pods on nodes that use the container runtime cri-o. Otherwise, you will
+#   # get an error message from the SSH server that it is not possible to read from
+#   # the repository.
+#   # https://gitea.com/gitea/helm-chart/issues/161
+#   add:
+#     - SYS_CHROOT
+#   privileged: false
+#   readOnlyRootFilesystem: true
+#   runAsGroup: 1000
+#   runAsNonRoot: true
+#   runAsUser: 1000
+
+## @deprecated The securityContext variable has been split in two:
+## - containerSecurityContext
+## - podSecurityContext.
+## @param securityContext Run init and Gitea containers as a specific securityContext
+securityContext: {}
+
+## @section Service
+service:
+  ## @param service.http.type Kubernetes service type for web traffic
+  ## @param service.http.port Port number for web traffic
+  ## @param service.http.clusterIP ClusterIP setting for http autosetup for statefulset is None
+  ## @param service.http.loadBalancerIP LoadBalancer IP setting
+  ## @param service.http.nodePort NodePort for http service
+  ## @param service.http.externalTrafficPolicy If `service.http.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
+  ## @param service.http.externalIPs External IPs for service
+  ## @param service.http.ipFamilyPolicy HTTP service dual-stack policy
+  ## @param service.http.ipFamilies HTTP service dual-stack family selection, for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
+  ## @param service.http.loadBalancerSourceRanges Source range filter for http loadbalancer
+  ## @param service.http.annotations HTTP service annotations
+  http:
+    type: ClusterIP
+    port: 3000
+    clusterIP: None
+    nodePort:
+    externalTrafficPolicy:
+    externalIPs:
+    ipFamilyPolicy:
+    ipFamilies:
+    loadBalancerSourceRanges: []
+    annotations: {}
+  ## @param service.ssh.type Kubernetes service type for ssh traffic
+  ## @param service.ssh.port Port number for ssh traffic
+  ## @param service.ssh.clusterIP ClusterIP setting for ssh autosetup for statefulset is None
+  ## @param service.ssh.loadBalancerIP LoadBalancer IP setting
+  ## @param service.ssh.nodePort NodePort for ssh service
+  ## @param service.ssh.externalTrafficPolicy If `service.ssh.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
+  ## @param service.ssh.externalIPs External IPs for service
+  ## @param service.ssh.ipFamilyPolicy SSH service dual-stack policy
+  ## @param service.ssh.ipFamilies SSH service dual-stack family selection, for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
+  ## @param service.ssh.hostPort HostPort for ssh service
+  ## @param service.ssh.loadBalancerSourceRanges Source range filter for ssh loadbalancer
+  ## @param service.ssh.annotations SSH service annotations
+  ssh:
+    type: LoadBalancer
+    port: 2222
+    loadBalancerIP: 192.168.3.3
+    nodePort:
+    externalTrafficPolicy:
+    externalIPs:
+    ipFamilyPolicy:
+    ipFamilies:
+    hostPort:
+    loadBalancerSourceRanges: []
+    annotations: {}
+
+## @section Ingress
+## @param ingress.enabled Enable ingress
+## @param ingress.className Ingress class name
+## @param ingress.annotations Ingress annotations
+## @param ingress.hosts[0].host Default Ingress host
+## @param ingress.hosts[0].paths[0].path Default Ingress path
+## @param ingress.hosts[0].paths[0].pathType Ingress path type
+## @param ingress.tls Ingress tls settings
+## @extra ingress.apiVersion Specify APIVersion of ingress object. Mostly would only be used for argocd.
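+# NOTE: the chart ingress below stays disabled; external access is handled by the
+# Traefik IngressRoute defined in apps/gitea/gitea.ingress.yaml instead.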
+ingress: + enabled: false + # className: nginx + className: + annotations: + {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: git.example.com + paths: + - path: / + pathType: Prefix + tls: [] + # - secretName: chart-example-tls + # hosts: + # - git.example.com + # Mostly for argocd or any other CI that uses `helm template | kubectl apply` or similar + # If helm doesn't correctly detect your ingress API version you can set it here. + # apiVersion: networking.k8s.io/v1 + +## @section StatefulSet +# +## @param resources Kubernetes resources +resources: + {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +## @param schedulerName Use an alternate scheduler, e.g. "stork" +schedulerName: "" + +## @param nodeSelector NodeSelector for the statefulset +nodeSelector: {} + +## @param tolerations Tolerations for the statefulset +tolerations: [] + +## @param affinity Affinity for the statefulset +affinity: {} + +## @param dnsConfig dnsConfig for the statefulset +dnsConfig: {} + +## @param priorityClassName priorityClassName for the statefulset +priorityClassName: "" + +## @param statefulset.env Additional environment variables to pass to containers +## @param statefulset.terminationGracePeriodSeconds How long to wait until forcefully kill the pod +## @param statefulset.labels Labels for the statefulset +## @param statefulset.annotations Annotations for the Gitea StatefulSet to be created +statefulset: + env: + [] + # - name: VARIABLE + # value: my-value + terminationGracePeriodSeconds: 60 + labels: {} + annotations: {} + +## @section Persistence +# +## @param persistence.enabled Enable persistent storage +## @param persistence.existingClaim Use an existing claim to store repository information +## @param persistence.size Size for persistence to store repo information +## @param persistence.accessModes AccessMode for persistence +## @param persistence.labels Labels for the persistence volume claim to be created +## @param persistence.annotations Annotations for the persistence volume claim to be created +## @param persistence.storageClass Name of the storage class to use +## @param persistence.subPath Subdirectory of the volume to mount at +persistence: + enabled: true + existingClaim: gitea-data-nfs + + size: 10Gi + accessModes: + - ReadWriteOnce + labels: {} + annotations: {} + storageClass: + subPath: + +## @param extraVolumes Additional volumes to mount to the Gitea statefulset +extraVolumes: [] +# - name: postgres-ssl-vol +# secret: +# secretName: gitea-postgres-ssl + +## @param extraContainerVolumeMounts Mounts that are only mapped into the Gitea runtime/main container, to e.g. override custom templates. +extraContainerVolumeMounts: [] + +## @param extraInitVolumeMounts Mounts that are only mapped into the init-containers. Can be used for additional preconfiguration. 
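+## An init-only mount could, for instance, stage a client certificate for the
+## initPreScript example further below (illustrative sketch; the volume name is
+## hypothetical and mirrors the deprecated example underneath):
+# extraInitVolumeMounts:
+#   - name: postgres-ssl-vol
+#     readOnly: true
+#     mountPath: "/pg-ssl"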
+extraInitVolumeMounts: []
+
+## @deprecated The extraVolumeMounts variable has been split in two:
+##   - extraContainerVolumeMounts
+##   - extraInitVolumeMounts
+## As an example, can be used to mount a client cert when connecting to an external Postgres server.
+## @param extraVolumeMounts **DEPRECATED** Additional volume mounts for init containers and the Gitea main container
+extraVolumeMounts: []
+# - name: postgres-ssl-vol
+#   readOnly: true
+#   mountPath: "/pg-ssl"
+
+## @section Init
+## @param initPreScript Bash shell script copied verbatim to the start of the init-container.
+initPreScript: ""
+#
+# initPreScript: |
+#   mkdir -p /data/git/.postgresql
+#   cp /pg-ssl/* /data/git/.postgresql/
+#   chown -R git:git /data/git/.postgresql/
+#   chmod 400 /data/git/.postgresql/postgresql.key
+
+## @param initContainers.resources.limits initContainers.limits Kubernetes resource limits for init containers
+## @param initContainers.resources.requests.cpu initContainers.requests.cpu Kubernetes cpu resource limits for init containers
+## @param initContainers.resources.requests.memory initContainers.requests.memory Kubernetes memory resource limits for init containers
+initContainers:
+  resources:
+    limits: {}
+    requests:
+      cpu: 100m
+      memory: 128Mi
+
+# Configure commit/action signing prerequisites
+## @section Signing
+#
+## @param signing.enabled Enable commit/action signing
+## @param signing.gpgHome GPG home directory
+## @param signing.privateKey Inline private gpg key for signed Gitea actions
+## @param signing.existingSecret Use an existing secret to store the value of `signing.privateKey`
+signing:
+  enabled: false
+  gpgHome: /data/git/.gnupg
+  privateKey: ""
+  # privateKey: |-
+  #   -----BEGIN PGP PRIVATE KEY BLOCK-----
+  #   ...
+  #   -----END PGP PRIVATE KEY BLOCK-----
+  existingSecret: ""
+
+## @section Gitea
+#
+gitea:
+  ## @param gitea.admin.username Username for the Gitea admin user
+  ## @param gitea.admin.existingSecret Use an existing secret to store admin user credentials
+  ## @param gitea.admin.password Password for the Gitea admin user
+  ## @param gitea.admin.email Email for the Gitea admin user
+  admin:
+    # existingSecret: gitea-admin-secret
+    existingSecret:
+    username: gitea_admin
+    password: r8sA8CPHD9!bt6d
+    email: "gitea@local.domain"
+
+  ## @param gitea.metrics.enabled Enable Gitea metrics
+  ## @param gitea.metrics.serviceMonitor.enabled Enable Gitea metrics service monitor
+  metrics:
+    enabled: false
+    serviceMonitor:
+      enabled: false
+      # additionalLabels:
+      #   prometheus-release: prom1
+
+  ## @param gitea.ldap LDAP configuration
+  ldap:
+    []
+    # - name: "LDAP 1"
+    #   existingSecret:
+    #   securityProtocol:
+    #   host:
+    #   port:
+    #   userSearchBase:
+    #   userFilter:
+    #   adminFilter:
+    #   emailAttribute:
+    #   bindDn:
+    #   bindPassword:
+    #   usernameAttribute:
+    #   publicSSHKeyAttribute:
+
+  # Either specify inline `key` and `secret` or refer to them via `existingSecret`
+  ## @param gitea.oauth OAuth configuration
+  oauth:
+    []
+    # - name: 'OAuth 1'
+    #   provider:
+    #   key:
+    #   secret:
+    #   existingSecret:
+    #   autoDiscoverUrl:
+    #   useCustomUrls:
+    #   customAuthUrl:
+    #   customTokenUrl:
+    #   customProfileUrl:
+    #   customEmailUrl:
+
+  ## @param gitea.config Configuration for the Gitea server, ref: [config-cheat-sheet](https://docs.gitea.io/en-us/config-cheat-sheet/)
+  config:
+    APP_NAME: "Remy's personal git hosting"
+    server:
+      DOMAIN: git.kluster.moll.re
+      ROOT_URL: https://git.kluster.moll.re
+      SSH_LISTEN_PORT: 2222
+    actions:
+      ENABLED: true
+
+
+  ## @param gitea.additionalConfigSources Additional
configuration from secret or configmap + additionalConfigSources: [] + # - secret: + # secretName: gitea-app-ini-oauth + # - configMap: + # name: gitea-app-ini-plaintext + + ## @param gitea.additionalConfigFromEnvs Additional configuration sources from environment variables + additionalConfigFromEnvs: [] + + ## @param gitea.podAnnotations Annotations for the Gitea pod + podAnnotations: {} + + ## @param gitea.ssh.logLevel Configure OpenSSH's log level. Only available for root-based Gitea image. + ssh: + logLevel: "INFO" + + ## @section LivenessProbe + # + ## @param gitea.livenessProbe.enabled Enable liveness probe + ## @param gitea.livenessProbe.tcpSocket.port Port to probe for liveness + ## @param gitea.livenessProbe.initialDelaySeconds Initial delay before liveness probe is initiated + ## @param gitea.livenessProbe.timeoutSeconds Timeout for liveness probe + ## @param gitea.livenessProbe.periodSeconds Period for liveness probe + ## @param gitea.livenessProbe.successThreshold Success threshold for liveness probe + ## @param gitea.livenessProbe.failureThreshold Failure threshold for liveness probe + # Modify the liveness probe for your needs or completely disable it by commenting out. + livenessProbe: + enabled: true + tcpSocket: + port: http + initialDelaySeconds: 200 + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 10 + + ## @section ReadinessProbe + # + ## @param gitea.readinessProbe.enabled Enable readiness probe + ## @param gitea.readinessProbe.tcpSocket.port Port to probe for readiness + ## @param gitea.readinessProbe.initialDelaySeconds Initial delay before readiness probe is initiated + ## @param gitea.readinessProbe.timeoutSeconds Timeout for readiness probe + ## @param gitea.readinessProbe.periodSeconds Period for readiness probe + ## @param gitea.readinessProbe.successThreshold Success threshold for readiness probe + ## @param gitea.readinessProbe.failureThreshold Failure threshold for readiness probe + # Modify the readiness probe for your needs or completely disable it by commenting out. + readinessProbe: + enabled: true + tcpSocket: + port: http + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + + # # Uncomment the startup probe to enable and modify it for your needs. + ## @section StartupProbe + # + ## @param gitea.startupProbe.enabled Enable startup probe + ## @param gitea.startupProbe.tcpSocket.port Port to probe for startup + ## @param gitea.startupProbe.initialDelaySeconds Initial delay before startup probe is initiated + ## @param gitea.startupProbe.timeoutSeconds Timeout for startup probe + ## @param gitea.startupProbe.periodSeconds Period for startup probe + ## @param gitea.startupProbe.successThreshold Success threshold for startup probe + ## @param gitea.startupProbe.failureThreshold Failure threshold for startup probe + startupProbe: + enabled: false + tcpSocket: + port: http + initialDelaySeconds: 60 + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 10 + +## @section Memcached +# +## @param memcached.enabled Memcached is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/memcached) if enabled in the values. Complete Configuration can be taken from their website. 
+## ref: https://hub.docker.com/r/bitnami/memcached/tags/ +## @param memcached.service.ports.memcached Port for Memcached +memcached: + enabled: true + # image: + # registry: docker.io + # repository: bitnami/memcached + # tag: "" + # digest: "" + # pullPolicy: IfNotPresent + # pullSecrets: [] + service: + ports: + memcached: 11211 + +## @section PostgreSQL +# +## @param postgresql.enabled Enable PostgreSQL +## @param postgresql.global.postgresql.auth.password Password for the `gitea` user (overrides `auth.password`) +## @param postgresql.global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) +## @param postgresql.global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) +## @param postgresql.global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) +## @param postgresql.primary.persistence.size PVC Storage Request for PostgreSQL volume +postgresql: + enabled: true + image: + tag: 11 + # diagnosticMode: + # enabled: true + # containerSecurityContext: + # runAsUser: 0 + global: + postgresql: + auth: + password: gitea + database: gitea + username: gitea + service: + ports: + postgresql: 5432 + primary: + persistence: + size: 10Gi + existingClaim: gitea-postgresql-data-nfs + mountPath: /bitnami/postgresql/data + + +# By default, removed or moved settings that still remain in a user defined values.yaml will cause Helm to fail running the install/update. +# Set it to false to skip this basic validation check. +## @section Advanced +## @param checkDeprecation Set it to false to skip this basic validation check. +## @param test.enabled Set it to false to disable test-connection Pod. +## @param test.image.name Image name for the wget container used in the test-connection Pod. +## @param test.image.tag Image tag for the wget container used in the test-connection Pod. 
+checkDeprecation: true
+test:
+  enabled: true
+  image:
+    name: busybox
+    tag: latest
+
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
diff --git a/apps/homeassistant/ingress.yaml b/apps/homeassistant/ingress.yaml
new file mode 100644
index 0000000..501796b
--- /dev/null
+++ b/apps/homeassistant/ingress.yaml
@@ -0,0 +1,34 @@
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+  name: homeassistant-ingress
+  namespace: homeassistant
+spec:
+  entryPoints:
+    - websecure
+  routes:
+  - match: Host(`home.kluster.moll.re`)
+    middlewares:
+      - name: homeassistant-websocket
+    kind: Rule
+    services:
+      - name: homeassistant-home-assistant
+        port: 8123
+  tls:
+    certResolver: default-tls
+
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: Middleware
+metadata:
+  name: homeassistant-websocket
+  namespace: homeassistant
+spec:
+  headers:
+    customRequestHeaders:
+      X-Forwarded-Proto: "https"
+      # enable websockets
+      Upgrade: "websocket"
+
+
+
diff --git a/apps/homeassistant/pvc.yaml b/apps/homeassistant/pvc.yaml
new file mode 100644
index 0000000..4470983
--- /dev/null
+++ b/apps/homeassistant/pvc.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  namespace: homeassistant
+  name: homeassistant-nfs
+  labels:
+    directory: homeassistant
+spec:
+  storageClassName: slow
+  capacity:
+    storage: "1Gi"
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteOnce
+  nfs:
+    path: /export/kluster/homeassistant
+    server: 192.168.1.157
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  namespace: homeassistant
+  name: homeassistant-nfs
+spec:
+  storageClassName: slow
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: "1Gi"
+  selector:
+    matchLabels:
+      directory: homeassistant
+
+
+
diff --git a/apps/homeassistant/values.yaml b/apps/homeassistant/values.yaml
new file mode 100644
index 0000000..5eb8220
--- /dev/null
+++ b/apps/homeassistant/values.yaml
@@ -0,0 +1,136 @@
+#
+# IMPORTANT NOTE
+#
+# This chart inherits from our common library chart. You can check the default values/options here:
+# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
+#
+
+image:
+  # -- image repository
+  repository: homeassistant/home-assistant
+  # -- image tag
+  tag: "2023.3"
+  # -- image pull policy
+  pullPolicy: IfNotPresent
+
+# -- environment variables.
+# @default -- See below
+env:
+  # -- Set the container timezone
+  TZ: Europe/Berlin
+
+# -- Configures service settings for the chart. Normally this does not need to be modified.
+# @default -- See values.yaml
+service:
+  main:
+    ports:
+      http:
+        port: 8123
+
+ingress:
+  # -- Enable and configure ingress settings for the chart under this key.
+  # @default -- See values.yaml
+  main:
+    enabled: false
+
+# -- Enable devices to be discoverable
+# hostNetwork: true
+
+# -- When hostNetwork is true set dnsPolicy to ClusterFirstWithHostNet
+# dnsPolicy: ClusterFirstWithHostNet
+
+securityContext:
+  # -- (bool) Privileged securityContext may be required if USB devices are accessed directly through the host machine
+  privileged: # true
+
+
+resources:
+  requests:
+    cpu: "100m"
+    memory: "200Mi"
+  limits:
+    cpu: "2"
+    memory: "1Gi"
+
+# -- Configure persistence settings for the chart under this key.
+# @default -- See values.yaml
+persistence:
+  config:
+    enabled: true
+    existingClaim: homeassistant-nfs
+
+  # -- Configure a hostPathMount to mount a USB device in the container.
+ # @default -- See values.yaml + usb: + enabled: false + type: hostPath + hostPath: /path/to/device + +# -- Enable and configure mariadb database subchart under this key. +# For more options see [mariadb chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/mariadb) +# @default -- See values.yaml +mariadb: + enabled: false + architecture: standalone + auth: + database: home-assistant + username: home-assistant + password: home-assistant-pass + rootPassword: home-assistantrootpass + primary: + persistence: + enabled: false + # storageClass: "" + +# -- Enable and configure postgresql database subchart under this key. +# For more options see [postgresql chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) +# @default -- See values.yaml +postgresql: + enabled: false + image: +# -- Enable and configure influxdb database subchart under this key. +# For more options see [influxdb chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/influxdb) +# @default -- See values.yaml +influxdb: + enabled: false + architecture: standalone + database: home_assistant + authEnabled: false + persistence: + enabled: false + # storageClass: "" + # size: 8Gi + +metrics: + # -- Enable and configure a Prometheus serviceMonitor for the chart under this key. + # @default -- See values.yaml + enabled: false + serviceMonitor: + interval: 1m + scrapeTimeout: 30s + labels: {} + ## See https://www.home-assistant.io/docs/authentication/ for where to find + ## long lived access token creation under your account profile, which is + ## needed to monitor Home Assistant + # bearerTokenSecret: + # name: "" + # key: "" + + # -- Enable and configure Prometheus Rules for the chart under this key. + # @default -- See values.yaml + prometheusRule: + enabled: false + labels: {} + # -- Configure additionial rules for the chart under this key. + # @default -- See prometheusrules.yaml + rules: [] + # - alert: HomeAssistantAbsent + # annotations: + # description: Home Assistant has disappeared from Prometheus service discovery. + # summary: Home Assistant is down. 
+ # expr: | + # absent(up{job=~".*home-assistant.*"} == 1) + # for: 5m + # labels: + # severity: critical + diff --git a/apps/immich/ingress.yaml b/apps/immich/ingress.yaml new file mode 100644 index 0000000..6bb845d --- /dev/null +++ b/apps/immich/ingress.yaml @@ -0,0 +1,51 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: stripprefix +spec: + stripPrefix: + prefixes: + - /api +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: websocket +spec: + headers: + customRequestHeaders: + X-Forwarded-Proto: "https" + # enable websockets + Upgrade: "websocket" +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: immich-ingressroute + +spec: + entryPoints: + - websecure + routes: + - match: Host(`immich.kluster.moll.re`) && PathPrefix(`/api/`) + kind: Rule + services: + - name: immich-server + port: 3001 + passHostHeader: true + middlewares: + - name: stripprefix + - name: websocket + + - match: Host(`immich.kluster.moll.re`) && PathPrefix(`/`) + kind: Rule + services: + - name: immich-web + port: 3000 + passHostHeader: true + middlewares: + - name: websocket + + tls: + certResolver: default-tls + \ No newline at end of file diff --git a/apps/immich/pvc.yaml b/apps/immich/pvc.yaml new file mode 100644 index 0000000..3fc029c --- /dev/null +++ b/apps/immich/pvc.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: immich-nfs + labels: + directory: immich +spec: + storageClassName: fast + capacity: + storage: "50Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /kluster/immich + # path: /kluster/immich + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: immich-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "50Gi" + selector: + matchLabels: + directory: immich + + + diff --git a/apps/immich/values.yaml b/apps/immich/values.yaml new file mode 100644 index 0000000..0e0797e --- /dev/null +++ b/apps/immich/values.yaml @@ -0,0 +1,136 @@ +## This chart relies on the common library chart from bjw-s +## You can find it at https://github.com/bjw-s/helm-charts/tree/main/charts/library/common +## Refer there for more detail about the supported values + +# These entries are shared between all the Immich components + +env: + REDIS_HOSTNAME: '{{ printf "%s-redis-master" .Release.Name }}' + DB_HOSTNAME: "{{ .Release.Name }}-postgresql" + DB_USERNAME: "{{ .Values.postgresql.global.postgresql.auth.username }}" + DB_DATABASE_NAME: "{{ .Values.postgresql.global.postgresql.auth.database }}" + # -- You should provide your own secret outside of this helm-chart and use `postgresql.global.postgresql.auth.existingSecret` to provide credentials to the postgresql instance + DB_PASSWORD: "{{ .Values.postgresql.global.postgresql.auth.password }}" + TYPESENSE_ENABLED: "{{ .Values.typesense.enabled }}" + TYPESENSE_API_KEY: "{{ .Values.typesense.env.TYPESENSE_API_KEY }}" + TYPESENSE_HOST: '{{ printf "%s-typesense" .Release.Name }}' + IMMICH_WEB_URL: '{{ printf "http://%s-web:3000" .Release.Name }}' + IMMICH_SERVER_URL: '{{ printf "http://%s-server:3001" .Release.Name }}' + IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}' + +image: + tag: v1.80.0 + +immich: + persistence: + # Main data store for all photos shared between different components. 
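+    # (In this deployment the claim below is backed by the immich-nfs
+    # PersistentVolume defined in apps/immich/pvc.yaml.)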
+ library: + # Automatically creating the library volume is not supported by this chart + # You have to specify an existing PVC to use + existingClaim: immich-nfs + +# Dependencies + +postgresql: + enabled: true + global: + postgresql: + auth: + username: immich + database: immich + password: immich + +redis: + enabled: true + architecture: standalone + auth: + enabled: false + +typesense: + enabled: true + env: + TYPESENSE_DATA_DIR: /tsdata + TYPESENSE_API_KEY: typesense + persistence: + tsdata: + # Enabling typesense persistence is recommended to avoid slow reindexing + enabled: true + accessMode: ReadWriteOnce + size: 1Gi + # storageClass: storage-class + image: + repository: docker.io/typesense/typesense + tag: 0.24.0 + pullPolicy: IfNotPresent + +# Immich components + +server: + enabled: true + image: + repository: ghcr.io/immich-app/immich-server + pullPolicy: IfNotPresent + +microservices: + enabled: true + env: + REVERSE_GEOCODING_DUMP_DIRECTORY: /geodata-cache + persistence: + geodata-cache: + enabled: true + size: 1Gi + # Optional: Set this to pvc to avoid downloading the geodata every start. + type: emptyDir + accessMode: ReadWriteMany + # storageClass: your-class + image: + repository: ghcr.io/immich-app/immich-server + pullPolicy: IfNotPresent + +machine-learning: + enabled: true + image: + repository: ghcr.io/immich-app/immich-machine-learning + pullPolicy: IfNotPresent + env: + TRANSFORMERS_CACHE: /cache + persistence: + cache: + enabled: true + size: 10Gi + # Optional: Set this to pvc to avoid downloading the ML models every start. + type: emptyDir + accessMode: ReadWriteMany + # storageClass: your-class + +web: + enabled: true + image: + repository: ghcr.io/immich-app/immich-web + pullPolicy: IfNotPresent + persistence: + library: + enabled: false + +proxy: + enabled: true + image: + repository: ghcr.io/immich-app/immich-proxy + pullPolicy: IfNotPresent + + persistence: + library: + enabled: false + + ingress: + main: + enabled: false + annotations: + # proxy-body-size is set to 0 to remove the body limit on file uploads + nginx.ingress.kubernetes.io/proxy-body-size: "0" + hosts: + - host: immich.local + paths: + - path: "/" + tls: [] + diff --git a/apps/media/jellyfin.ingress.yaml b/apps/media/jellyfin.ingress.yaml new file mode 100644 index 0000000..3393c7b --- /dev/null +++ b/apps/media/jellyfin.ingress.yaml @@ -0,0 +1,32 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: jellyfin-ingress + namespace: media +spec: + entryPoints: + - websecure + routes: + - match: Host(`media.kluster.moll.re`) + middlewares: + - name: jellyfin-websocket + kind: Rule + services: + - name: jellyfin + port: 8096 + tls: + certResolver: default-tls + +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: jellyfin-websocket + namespace: media +spec: + headers: + customRequestHeaders: + X-Forwarded-Proto: "https" + Upgrade: "websocket" + + diff --git a/apps/media/jellyfin.pvc.yaml b/apps/media/jellyfin.pvc.yaml new file mode 100644 index 0000000..d4f6594 --- /dev/null +++ b/apps/media/jellyfin.pvc.yaml @@ -0,0 +1,72 @@ + +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: media + name: jellyfin-config-nfs + labels: + directory: jellyfin + +spec: + storageClassName: slow + capacity: + storage: "1Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/jellyfin-config + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: media + name: 
jellyfin-config-nfs +spec: + storageClassName: slow + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + selector: + matchLabels: + directory: jellyfin +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: media + name: jellyfin-data-nfs + labels: + directory: jellyfin + +spec: + storageClassName: slow + capacity: + storage: "1Ti" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/jellyfin-media + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: media + name: jellyfin-data-nfs +spec: + storageClassName: slow + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Ti" + selector: + matchLabels: + directory: jellyfin +--- diff --git a/apps/media/jellyfin.values.yaml b/apps/media/jellyfin.values.yaml new file mode 100644 index 0000000..b8d0832 --- /dev/null +++ b/apps/media/jellyfin.values.yaml @@ -0,0 +1,108 @@ +image: + # -- image repository + repository: jellyfin/jellyfin + # -- image tag + tag: 10.8.9 + + # -- image pull policy + pullPolicy: IfNotPresent + + +# -- environment variables. See [image docs](https://jellyfin.org/docs/general/administration/configuration.html) for more details. +# @default -- See below +env: + # -- Set the container timezone + TZ: Europe/Berlin + +# -- Configures service settings for the chart. +# @default -- See values.yaml +service: + main: + ports: + http: + port: 8096 + +ingress: + # -- Enable and configure ingress settings for the chart under this key. + # @default -- See values.yaml + main: + enabled: false + +# -- Configure persistence settings for the chart under this key. +# @default -- See values.yaml +persistence: + config: + enabled: true + type: pvc + existingClaim: jellyfin-config-nfs + accessMode: + - ReadWriteOnce + + # Cache does NOT contain temporary transcoding data. + cache: + enabled: false + mountPath: /cache + + media: + enabled: true + # use local storage + type: pvc + existingClaim: jellyfin-data-nfs + accessMode: + - ReadWriteOnce + mountPath: /media + + # encoder: + # enabled: true + # type: hostPath + # hostPath: /dev/dri/renderD128 + + +# # -- Configure the Security Context for the Pod +# podSecurityContext: +# runAsUser: 0 # root user -> access to /dev/video* +# runAsUser: 568 +# runAsGroup: 568 +# fsGroup: 568 +# # Hardware acceleration using an Intel iGPU w/ QuickSync +# # These IDs below should be matched to your `video` and `render` group on the host +# # To obtain those IDs run the following grep statement on the host: +# # $ cat /etc/group | grep "video\|render" +# # video:x:44: +# # render:x:109: +# supplementalGroups: +# - 44 +# - 109 + +# resources: +# requests: +# # Hardware acceleration using an Intel iGPU w/ QuickSync and +# # using intel-gpu-plugin (https://github.com/intel/intel-device-plugins-for-kubernetes) +# gpu.intel.com/i915: 1 +# cpu: 200m +# memory: 256Mi +# limits: +# # Hardware acceleration using an Intel iGPU w/ QuickSync and +# # using intel-gpu-plugin (https://github.com/intel/intel-device-plugins-for-kubernetes) +# gpu.intel.com/i915: 1 +# memory: 4096Mi + +probes: + # -- Liveness probe configuration + # @default -- See below + liveness: + # -- Enable the liveness probe + enabled: true + # -- Set this to `true` if you wish to specify your own livenessProbe + custom: true + # -- The spec field contains the values for the default livenessProbe. + # If you selected `custom: true`, this field holds the definition of the livenessProbe. 
+ # @default -- See below + spec: + initialDelaySeconds: 100 + periodSeconds: 100 + timeoutSeconds: 5 + failureThreshold: 3 + httpGet: + path: /health + port: 8096 diff --git a/apps/monitoring/grafana.ingress.yaml b/apps/monitoring/grafana.ingress.yaml new file mode 100644 index 0000000..3c416dd --- /dev/null +++ b/apps/monitoring/grafana.ingress.yaml @@ -0,0 +1,17 @@ +kind: IngressRoute +apiVersion: traefik.containo.us/v1alpha1 +metadata: + name: grafana-ingress +spec: + entryPoints: + - websecure + routes: + - match: Host(`grafana.kluster.moll.re`) + kind: Rule + services: + - name: grafana + port: 80 + tls: + certResolver: default-tls + + diff --git a/apps/monitoring/grafana.pvc.yaml b/apps/monitoring/grafana.pvc.yaml new file mode 100644 index 0000000..548a10c --- /dev/null +++ b/apps/monitoring/grafana.pvc.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: grafana-nfs + labels: + directory: grafana +spec: + storageClassName: slow + capacity: + storage: "1Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/grafana + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: grafana-nfs +spec: + storageClassName: slow + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + selector: + matchLabels: + directory: grafana + + + diff --git a/apps/monitoring/grafana.values.yaml b/apps/monitoring/grafana.values.yaml new file mode 100644 index 0000000..9e5bc44 --- /dev/null +++ b/apps/monitoring/grafana.values.yaml @@ -0,0 +1,873 @@ +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-(cluster)role + pspEnabled: true + pspUseAppArmor: true + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: +## Service account annotations. Can be templated. +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + autoMount: true + +replicas: 1 + +## Create a headless service for the deployment +headlessService: false + +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# - type: Resource +# resource: +# name: cpu +# targetAverageUtilization: 60 +# - type: Resource +# resource: +# name: memory +# targetAverageUtilization: 60 + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + repository: grafana/grafana + tag: 9.0.2 + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Can be templated. + ## + # pullSecrets: + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: "bats/bats" + tag: "v1.4.1" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + {} + +# Extra configmaps to mount in grafana pods +# Values are templated. +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. +extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + repository: curlimages/curl + tag: 7.73.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana + +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + enabled: true + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + annotations: {} + labels: {} + portName: service + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + # type: ClusterIP + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: true + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: { + kubernetes.io/ingress.class: nginx, + cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod + } + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s >= 1.1= + pathType: Prefix + + hosts: + - grafana.kluster.moll.re + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + + tls: + - hosts: + - grafana.kluster.moll.re + secretName: cloudflare-letsencrypt-issuer-account-key + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Additional init containers (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: "" +# extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: true + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + ## Sub-directory of the PV to mount. Can be templated. + # subPath: "" + ## Name of an existing PVC. Can be templated. + existingClaim: grafana-nfs + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the prometheus-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + repository: busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + ## Name of the secret. Can be templated. 
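+  ## An illustrative Secret for this (the name is hypothetical; the key names
+  ## match userKey/passwordKey below):
+  #   apiVersion: v1
+  #   kind: Secret
+  #   metadata:
+  #     name: grafana-admin-credentials
+  #   stringData:
+  #     admin-user: admin
+  #     admin-password: <strong password>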
+  existingSecret: ""
+  userKey: admin-user
+  passwordKey: admin-password
+
+## Define command to be executed at startup by grafana container
+## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
+## Default is "run.sh" as defined in grafana's Dockerfile
+# command:
+# - "sh"
+# - "/run.sh"
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Extra environment variables that will be passed onto deployment pods
+##
+## to provide grafana with access to CloudWatch on AWS EKS:
+## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
+## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
+##    same oidc eks provider as noted before (same as the existing line)
+##    also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
+##
+##  "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
+##
+## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
+## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
+##
+## env:
+##   AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
+##   AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+##   AWS_REGION: us-east-1
+##
+## 5. uncomment the EKS section in extraSecretMounts: below
+## 6. uncomment the annotation section in the serviceAccount: above
+##    make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
+
+env: {}
+
+## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
+## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
+## Renders in container spec as:
+##   env:
+##     ...
+##     - name: <key>
+##       valueFrom:
+##         <value>
+envValueFrom: {}
+  # ENV_NAME:
+  #   configMapKeyRef:
+  #     name: configmap-name
+  #     key: value_key
+
+## The name of a secret in the same kubernetes namespace which contains values to be added to the environment
+## This can be useful for auth tokens, etc. Value is templated.
+envFromSecret: ""
+
+## Sensitive environment variables that will be rendered as a new secret object
+## This can be useful for auth tokens, etc
+envRenderSecret: {}
+
+## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
+## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
+## Name is templated.
+envFromSecrets: []
+## - name: secret-name
+##   optional: true
+
+## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
+## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key.
+## Name is templated.
+## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
+envFromConfigMaps: []
+## - name: configmap-name
+##   optional: true
+
+# Inject Kubernetes services as environment variables.
+# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables +enableServiceLinks: true + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ + +## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request +lifecycleHooks: {} + # postStart: + # exec: + # command: [] + +## Pass the plugins you want installed as a list. +## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
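+  ## Such a secret could be created like this (the secret name is hypothetical;
+  ## the key names match userKey/passwordKey below):
+  #   kubectl create secret generic grafana-smtp \
+  #     --from-literal=user=<smtp user> --from-literal=password=<smtp password>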
+  existingSecret: ""
+  userKey: "user"
+  passwordKey: "password"
+
+## Sidecars that collect the configmaps with the specified label and store the included files in the respective folders
+## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
+sidecar:
+  image:
+    repository: quay.io/kiwigrid/k8s-sidecar
+    tag: 1.15.6
+    sha: ""
+  imagePullPolicy: IfNotPresent
+  resources: {}
+#   limits:
+#     cpu: 100m
+#     memory: 100Mi
+#   requests:
+#     cpu: 50m
+#     memory: 50Mi
+  securityContext: {}
+  # skipTlsVerify Set to true to skip tls verification for kube api calls
+  # skipTlsVerify: true
+  enableUniqueFilenames: false
+  readinessProbe: {}
+  livenessProbe: {}
+  dashboards:
+    enabled: false
+    SCProvider: true
+    # label that the configmaps with dashboards are marked with
+    label: grafana_dashboard
+    # value of label that the configmaps with dashboards are set to
+    labelValue: null
+    # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
+    folder: /tmp/dashboards
+    # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
+    defaultFolderName: null
+    # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces.
+    searchNamespace: null
+    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+    watchMethod: WATCH
+    # search in configmap, secret or both
+    resource: both
+    # If specified, the sidecar will look for an annotation with this name to create the folder and put the graph there.
+    # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create the folder structure.
+    folderAnnotation: null
+    # Absolute path to a shell script to execute after a configmap got reloaded
+    script: null
+    # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+    # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+    # watchServerTimeout: 3600
+    #
+    # watchClientTimeout: is a client-side timeout, configuring your local socket.
+    # If you have a network outage dropping all packets with no RST/FIN,
+    # this is how long your client waits before realizing & dropping the connection.
+    # defaults to 66sec (sic!)
+ # watchClientTimeout: 60 + # + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + # Additional dashboard sidecar volume mounts + extraMounts: [] + # Sets the size limit of the dashboard sidecar emptyDir volume + sizeLimit: {} + datasources: + enabled: false + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: null + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # Endpoint to send request to reload datasources + reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any datasources defined at startup time. + initDatasources: false + # Sets the size limit of the datasource sidecar emptyDir volume + sizeLimit: {} + plugins: + enabled: false + # label that the configmaps with plugins are marked with + label: grafana_plugin + # value of label that the configmaps with plugins are set to + labelValue: null + # If specified, the sidecar will search for plugin config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # Endpoint to send request to reload plugins + reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any plugins defined at startup time. + initPlugins: false + # Sets the size limit of the plugin sidecar emptyDir volume + sizeLimit: {} + notifiers: + enabled: false + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. 
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # search in configmap, secret or both
+    resource: both
+    # Sets the size limit of the notifier sidecar emptyDir volume
+    sizeLimit: {}
+
+## Override the deployment namespace
+##
+namespaceOverride: ""
+
+## Number of old ReplicaSets to retain
+##
+revisionHistoryLimit: 10
+
+## Add a separate remote image renderer deployment/service
+imageRenderer:
+  # Enable the image-renderer deployment & service
+  enabled: false
+  replicas: 1
+  image:
+    # image-renderer Image repository
+    repository: grafana/grafana-image-renderer
+    # image-renderer Image tag
+    tag: latest
+    # image-renderer Image sha (optional)
+    sha: ""
+    # image-renderer ImagePullPolicy
+    pullPolicy: Always
+  # extra environment variables
+  env:
+    HTTP_HOST: "0.0.0.0"
+    # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
+    # RENDERING_MODE: clustered
+    # IGNORE_HTTPS_ERRORS: true
+  # image-renderer deployment serviceAccount
+  serviceAccountName: ""
+  # image-renderer deployment securityContext
+  securityContext: {}
+  # image-renderer deployment Host Aliases
+  hostAliases: []
+  # image-renderer deployment priority class
+  priorityClassName: ''
+  service:
+    # Enable the image-renderer service
+    enabled: true
+    # image-renderer service port name
+    portName: 'http'
+    # image-renderer service port used by both service and deployment
+    port: 8081
+    targetPort: 8081
+  # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
+  grafanaProtocol: http
+  # In case a sub_path is used this needs to be added to the image renderer callback
+  grafanaSubPath: ""
+  # name of the image-renderer port on the pod
+  podPortName: http
+  # number of image-renderer replica sets to keep
+  revisionHistoryLimit: 10
+  networkPolicy:
+    # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
+    limitIngress: true
+    # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
+    limitEgress: false
+  resources: {}
+#   limits:
+#     cpu: 100m
+#     memory: 100Mi
+#   requests:
+#     cpu: 50m
+#     memory: 50Mi
+  ## Node labels for pod assignment
+  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+  #
+  nodeSelector: {}
+
+  ## Tolerations for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+
+  ## Affinity for pod assignment (evaluated as template)
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+
+# Create dynamic manifests via values:
+extraObjects: []
+  # - apiVersion: "kubernetes-client.io/v1"
+  #   kind: ExternalSecret
+  #   metadata:
+  #     name: grafana-secrets
+  #   spec:
+  #     backendType: gcpSecretsManager
+  #     data:
+  #       - key: grafana-admin-password
+  #         name: adminPassword
+
diff --git a/apps/monitoring/influxdb-telegraf.values.yaml b/apps/monitoring/influxdb-telegraf.values.yaml
new file mode 100644
index 0000000..65488e0
--- /dev/null
+++ b/apps/monitoring/influxdb-telegraf.values.yaml
@@ -0,0 +1,157 @@
+## Default values.yaml for Telegraf
+## This is a YAML-formatted file.
+## ref: https://hub.docker.com/r/library/telegraf/tags/ + +image: + repo: "telegraf" + tag: "1.22" + pullPolicy: IfNotPresent + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +resources: + requests: + memory: 256Mi + cpu: 0.1 + limits: + memory: 1Gi + cpu: 1 + +## Pod annotations +podAnnotations: {} + +## Pod labels +podLabels: {} + +## Configure args passed to Telegraf containers +args: [] + +## The name of a secret in the same kubernetes namespace which contains values to +## be added to the environment (must be manually created) +## This can be useful for auth tokens, etc. +# envFromSecret: "telegraf-tokens" + +## Environment +env: + # This pulls HOSTNAME from the node, not the pod. + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # In test clusters where hostnames are resolved in /etc/hosts on each node, + # the HOSTNAME is not resolvable from inside containers + # So inject the host IP as well + - name: HOSTIP + valueFrom: + fieldRef: + fieldPath: status.hostIP + # Mount the host filesystem and set the appropriate env variables. + # ref: https://github.com/influxdata/telegraf/blob/master/docs/FAQ.md + # HOST_PROC is required by the cpu, disk, diskio, kernel and processes input plugins + - name: "HOST_PROC" + value: "/hostfs/proc" + # HOST_SYS is required by the diskio plugin + - name: "HOST_SYS" + value: "/hostfs/sys" + - name: "HOST_MOUNT_PREFIX" + value: "/hostfs" + +## Add custom volumes and mounts +# volumes: +# - name: telegraf-output-influxdb2 +# configMap: +# name: "telegraf-output-influxdb2" +# mountPoints: +# - name: telegraf-output-influxdb2 +# mountPath: /etc/telegraf/conf.d +# subPath: influxdb2.conf + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## If the DaemonSet should run on the host's network namespace +## hostNetwork: true + +## If using hostNetwork=true, set dnsPolicy to ClusterFirstWithHostNet +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/# +## dnsPolicy: ClusterFirstWithHostNet + +## If using dnsPolicy=None, set dnsConfig +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config +## dnsConfig: +## nameservers: +## - 1.2.3.4 +## searches: +## - ns1.svc.cluster-domain.example +## - my.dns.search.suffix +## options: +## - name: ndots +## value: "2" +## - name: edns0 + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + # name: + # Annotations for the ServiceAccount + annotations: {} + +## Specify priorityClassName +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: system-node-critical + +# Specify the pod's SecurityContext, including the OS user and group to run the pod +podSecurityContext: {} + +override_config: + toml: ~ + # Provide a literal TOML config + # toml: |+ + # [global_tags] + # foo = "bar" + # [agent] + # interval = "10s" + # [[inputs.mem]] + # [[outputs.influxdb_v2]] + # urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] + # bucket = "data" + # organization = "OurCompany" + # token = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + +## Exposed telegraf configuration +## ref: https://docs.influxdata.com/telegraf/v1.13/administration/configuration/ +config: + # global_tags: + # cluster: "mycluster" + agent: + interval: "10s" + round_interval: true + metric_batch_size: 1000 + metric_buffer_limit: 10000 + collection_jitter: "0s" + flush_interval: "10s" + flush_jitter: "0s" + precision: "" + debug: false + quiet: false + logfile: "" + hostname: "$HOSTNAME" + omit_hostname: false + outputs: + - influxdb_v2: + urls: + - "http://influxdb-influxdb2.monitoring:80" + token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ== + organization: "influxdata" + bucket: "kluster" + monitor_self: false + docker_endpoint: "unix:///run/k3s/containerd/containerd.sock" + diff --git a/apps/monitoring/influxdb.pvc.yaml b/apps/monitoring/influxdb.pvc.yaml new file mode 100644 index 0000000..7d32f18 --- /dev/null +++ b/apps/monitoring/influxdb.pvc.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: influxdb-nfs + labels: + directory: influxdb +spec: + storageClassName: slow + capacity: + storage: "10Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/influxdb + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: influxdb-nfs +spec: + storageClassName: slow + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Gi" + selector: + matchLabels: + directory: influxdb + + + diff --git a/apps/monitoring/influxdb.values.yaml b/apps/monitoring/influxdb.values.yaml new file mode 100644 index 0000000..66e4515 --- /dev/null +++ b/apps/monitoring/influxdb.values.yaml @@ -0,0 +1,195 @@ +image: + repository: influxdb + tag: 2.3.0-alpine + pullPolicy: IfNotPresent + +## Annotations to be added to InfluxDB pods +## +podAnnotations: {} + +## Labels to be added to InfluxDB pods +## +podLabels: {} + +nameOverride: "" +fullnameOverride: "" + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +securityContext: {} + +## Customize liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +## +livenessProbe: {} + # path: "/health" + # scheme: "HTTP" + # initialDelaySeconds: 0 + # periodSeconds: 10 + # timeoutSeconds: 1 + # failureThreshold: 3 + +readinessProbe: {} + # path: "/health" + # scheme: "HTTP" + # initialDelaySeconds: 0 + # periodSeconds: 10 + # timeoutSeconds: 1 + # successThreshold: 1 + # failureThreshold: 3 + +startupProbe: + enabled: false + # path: "/health" + # scheme: "HTTP" + # initialDelaySeconds: 30 + # periodSeconds: 5 + # timeoutSeconds: 1 + # failureThreshold: 6 + +## Extra environment variables to configure influxdb +## e.g. +# env: +# - name: FOO +# value: BAR +# - name: BAZ +# valueFrom: +# secretKeyRef: +# name: my-secret +# key: my-key +env: {} + +## Create default user through docker entrypoint +## Defaults indicated below +## +adminUser: + organization: "influxdata" + bucket: "default" + user: "admin" + retention_policy: "0s" + ## Leave empty to generate a random password and token. + ## Or fill any of these values to use fixed values. + password: "" + token: "" + + ## The password and token are obtained from an existing secret. The expected + ## keys are `admin-password` and `admin-token`. + ## If set, the password and token values above are ignored. + # existingSecret: influxdb-auth + +## Persist data to a persistent volume +## +persistence: + enabled: true + ## If true will use an existing PVC instead of creating one + useExisting: true + ## Name of existing PVC to be used in the influx deployment + name: influxdb-nfs + ## influxdb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + size: 10Gi + mountPath: /var/lib/influxdb2 + subPath: "" + +## Add custom volume and volumeMounts +## +# volumes: +# - name: influxdb2-templates +# hostPath: +# path: /data/influxdb2-templates +# type: Directory +# mountPoints: +# - name: influxdb2-templates +# mountPath: /influxdb2-templates +# readOnly: true + +## Allow executing custom init scripts +## If the container finds any files with the .sh extension inside of the +## /docker-entrypoint-initdb.d folder, it will execute them. +## When multiple scripts are present, they will be executed in lexical sort order by name. 
+## For more details see Custom Initialization Scripts in https://hub.docker.com/_/influxdb +initScripts: + enabled: false + scripts: + init.sh: |+ + #!/bin/bash + influx apply --force yes -u https://raw.githubusercontent.com/influxdata/community-templates/master/influxdb2_operational_monitoring/influxdb2_operational_monitoring.yml + +## Specify a service type +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + type: LoadBalancer + loadBalancerIP: 192.168.3.4 + port: 80 + targetPort: 8086 + annotations: {} + labels: {} + portName: http + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Annotations for the ServiceAccount + annotations: {} + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # className: nginx + tls: false + # secretName: my-tls-cert # only needed if tls above is true or default certificate is not configured for Nginx + hostname: influxdb.foobar.com + annotations: {} + # kubernetes.io/ingress.class: "nginx" + # kubernetes.io/tls-acme: "true" + path: / + +## Pod disruption budget configuration +## +pdb: + ## Specifies whether a Pod disruption budget should be created + ## + create: true + minAvailable: 1 + # maxUnavailable: 1 + diff --git a/apps/monitoring/telegraf-adguard.values.yaml b/apps/monitoring/telegraf-adguard.values.yaml new file mode 100644 index 0000000..de7290a --- /dev/null +++ b/apps/monitoring/telegraf-adguard.values.yaml @@ -0,0 +1,167 @@ +## Default values.yaml for Telegraf +## This is a YAML-formatted file. +## ref: https://hub.docker.com/r/library/telegraf/tags/ + +replicaCount: 1 +image: + repo: "telegraf" + tag: "1.25" + pullPolicy: IfNotPresent +podAnnotations: {} +podLabels: {} +imagePullSecrets: [] +## Configure args passed to Telegraf containers +args: [] +# The name of a secret in the same kubernetes namespace which contains values to +# be added to the environment (must be manually created) +# This can be useful for auth tokens, etc. + +# envFromSecret: "telegraf-tokens" +env: + - name: HOSTNAME + value: "telegraf-polling-service" +# An older "volumeMounts" key was previously added which will likely +# NOT WORK as you expect. Please use this newer configuration. 
+ +# volumes: +# - name: telegraf-output-influxdb2 +# configMap: +# name: "telegraf-output-influxdb2" +# mountPoints: +# - name: telegraf-output-influxdb2 +# mountPath: /etc/telegraf/conf.d +# subPath: influxdb2.conf + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +resources: {} +# requests: +# memory: 128Mi +# cpu: 100m +# limits: +# memory: 128Mi +# cpu: 100m + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +# - key: "key" +# operator: "Equal|Exists" +# value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +service: + enabled: false + type: ClusterIP + annotations: {} +rbac: + # Specifies whether RBAC resources should be created + create: true + # Create only for the release namespace or cluster wide (Role vs ClusterRole) + clusterWide: false + # Rules for the created rule + rules: [] +# When using the prometheus input to scrape all pods you need extra rules set to the ClusterRole to be +# able to scan the pods for scraping labels. The following rules have been taken from: +# https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46 +# - apiGroups: +# - "" +# resources: +# - nodes +# - nodes/proxy +# - nodes/metrics +# - services +# - endpoints +# - pods +# - ingresses +# - configmaps +# verbs: +# - get +# - list +# - watch +# - apiGroups: +# - "extensions" +# resources: +# - ingresses/status +# - ingresses +# verbs: +# - get +# - list +# - watch +# - nonResourceURLs: +# - "/metrics" +# verbs: +# - get + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: false +## Exposed telegraf configuration +## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml` +## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/ +config: + agent: + interval: "2m" + round_interval: true + metric_batch_size: 1000 + metric_buffer_limit: 10000 + collection_jitter: "0s" + flush_interval: "10s" + flush_jitter: "0s" + precision: "" + debug: false + quiet: false + logfile: "" + hostname: "$HOSTNAME" + omit_hostname: false + processors: + - enum: + mapping: + field: "status" + dest: "status_code" + value_mappings: + healthy: 1 + problem: 2 + critical: 3 + outputs: + - influxdb_v2: + urls: + - "http://influxdb-influxdb2.monitoring:80" + token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA== + organization: "influxdata" + bucket: "homeassistant" + inputs: + - http: + urls: + - "http://adguard-home.adguard:3000/control/stats" + data_format: "json" +metrics: + health: + enabled: false + service_address: "http://:8888" + threshold: 5000.0 + internal: + enabled: true + collect_memstats: false +# Lifecycle hooks +# hooks: +# postStart: ["/bin/sh", "-c", "echo Telegraf started"] +# preStop: ["/bin/sh", "-c", "sleep 60"] + +## Pod disruption budget configuration +## +pdb: + ## Specifies whether a Pod disruption budget should be created + ## + create: true + minAvailable: 1 + # maxUnavailable: 1 + diff --git a/apps/monitoring/telegraf-speedtest.values.yaml b/apps/monitoring/telegraf-speedtest.values.yaml 
new file mode 100644 index 0000000..69345bc --- /dev/null +++ b/apps/monitoring/telegraf-speedtest.values.yaml @@ -0,0 +1,110 @@ +## Default values.yaml for Telegraf +## This is a YAML-formatted file. +## ref: https://hub.docker.com/r/library/telegraf/tags/ + +replicaCount: 1 +image: + repo: "telegraf" + tag: "1.25" + pullPolicy: IfNotPresent +podAnnotations: {} +podLabels: {} +imagePullSecrets: [] +## Configure args passed to Telegraf containers +args: [] +# The name of a secret in the same kubernetes namespace which contains values to +# be added to the environment (must be manually created) +# This can be useful for auth tokens, etc. + +# envFromSecret: "telegraf-tokens" +env: + - name: HOSTNAME + value: "telegraf-speedtest" + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +resources: {} +# requests: +# memory: 128Mi +# cpu: 100m +# limits: +# memory: 128Mi +# cpu: 100m + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +# - key: "key" +# operator: "Equal|Exists" +# value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +service: + enabled: false +rbac: + # Specifies whether RBAC resources should be created + create: false + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: false + + +## Exposed telegraf configuration +## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml` +## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/ +config: + agent: + interval: "2h" + round_interval: true + metric_batch_size: 1000 + metric_buffer_limit: 10000 + collection_jitter: "0s" + flush_interval: "10s" + flush_jitter: "0s" + precision: "" + debug: false + quiet: false + logfile: "" + hostname: "$HOSTNAME" + omit_hostname: false + processors: + - enum: + mapping: + field: "status" + dest: "status_code" + value_mappings: + healthy: 1 + problem: 2 + critical: 3 + outputs: + - influxdb_v2: + urls: + - "http://influxdb-influxdb2.monitoring:80" + token: We64mk4L4bqYCL77x3fAUSYfOse9Kktyf2eBLyrryG9c3-y8PQFiKPIh9EvSWuq78QSQz6hUcsm7XSFR2Zj1MA== + organization: "influxdata" + bucket: "homeassistant" + inputs: + - internet_speed: + enable_file_download: false + +# Lifecycle hooks +# hooks: +# postStart: ["/bin/sh", "-c", "echo Telegraf started"] +# preStop: ["/bin/sh", "-c", "sleep 60"] + +## Pod disruption budget configuration +## +pdb: + ## Specifies whether a Pod disruption budget should be created + ## + create: true + minAvailable: 1 + # maxUnavailable: 1 diff --git a/apps/nextcloud/ingress.yaml b/apps/nextcloud/ingress.yaml new file mode 100644 index 0000000..990ba5b --- /dev/null +++ b/apps/nextcloud/ingress.yaml @@ -0,0 +1,17 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + namespace: nextcloud + name: nextcloud-ingressroute + +spec: + entryPoints: + - websecure + routes: + - match: Host(`nextcloud.kluster.moll.re`) + kind: Rule + services: + - name: nextcloud + port: 8080 + tls: + certResolver: default-tls \ No newline at end of file diff --git a/apps/nextcloud/pvc.yaml b/apps/nextcloud/pvc.yaml new file mode 100644 index 
0000000..f63c21c --- /dev/null +++ b/apps/nextcloud/pvc.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: nextcloud + name: nextcloud-nfs + labels: + directory: nextcloud +spec: + storageClassName: fast + capacity: + storage: "150Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /kluster/nextcloud + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: nextcloud + name: nextcloud-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "150Gi" + selector: + matchLabels: + directory: nextcloud diff --git a/apps/nextcloud/values.yaml b/apps/nextcloud/values.yaml new file mode 100644 index 0000000..6fdf3a2 --- /dev/null +++ b/apps/nextcloud/values.yaml @@ -0,0 +1,294 @@ +## Official nextcloud image version +## ref: https://hub.docker.com/r/library/nextcloud/tags/ +## +image: + repository: nextcloud + tag: "27" # needs to be a string because of the template + pullPolicy: IfNotPresent + +nameOverride: "" +fullnameOverride: "" +podAnnotations: {} +deploymentAnnotations: {} + +# Number of replicas to be deployed +replicaCount: 1 + +## Allowing use of ingress controllers +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## +ingress: + enabled: false + + +# Allow configuration of lifecycle hooks +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ +lifecycle: {} + # postStartCommand: [] + # preStopCommand: [] + +nextcloud: + host: nextcloud.kluster.moll.re + username: admin + password: changeme + ## Use an existing secret + existingSecret: + enabled: false + update: 0 + # If web server is not binding default port, you can define it + # containerPort: 8080 + datadir: /var/www/html/data + persistence: + subPath: + mail: + enabled: false + # PHP Configuration files + # Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true + phpConfigs: {} + # Default config files + # IMPORTANT: Will be used only if you put extra configs, otherwise defaults will come from nextcloud itself + # Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config + defaultConfigs: + # To protect /var/www/html/config + .htaccess: true + # Redis default configuration + redis.config.php: true + # Apache configuration for rewrite urls + apache-pretty-urls.config.php: true + # Define APCu as local cache + apcu.config.php: true + # Apps directory configs + apps.config.php: true + # Used for auto configure database + autoconfig.php: true + # SMTP default configuration + smtp.config.php: true + # Extra config files created in /var/www/html/config/ + # ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file + configs: {} + + # For example, to use S3 as primary storage + # ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3 + # + # configs: + # s3.config.php: |- + # <?php + # $CONFIG = array( + # 'objectstore' => array( + # 'class' => '\\OC\\Files\\ObjectStore\\S3', + # 'arguments' => array( + # 'bucket' => 'my-bucket', + # 'autocreate' => true, + # 'key' => 'xxx', + # 'secret' => 'xxx', + # 'region' => 'us-east-1', + # 'use_ssl' => true + # ) + # ) + # );
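The commented S3 block is just one use of the `configs:` hook: any file placed there ends up in /var/www/html/config/ and is merged into Nextcloud's configuration. A minimal sketch of a custom override (the file name and the `default_phone_region` setting are illustrative, not part of this commit):

```
configs:
  custom.config.php: |-
    <?php
    $CONFIG = array (
      'default_phone_region' => 'DE',
    );
```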
+ ## Strategy used to replace old pods + ## IMPORTANT: use with care, it is suggested to leave it as is for upgrade purposes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + strategy: + type: Recreate + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + + ## + ## Extra environment variables + extraEnv: + # - name: SOME_SECRET_ENV + # valueFrom: + # secretKeyRef: + # name: nextcloud + # key: secret_key + + # Extra mounts for the pods. Example shown is for connecting a legacy NFS volume + # to NextCloud pods in Kubernetes. This can then be configured in External Storage + extraVolumes: + # - name: nfs + # nfs: + # server: "10.0.0.1" + # path: "/nextcloud_data" + # readOnly: false + extraVolumeMounts: + # - name: nfs + # mountPath: "/legacy_data" + + # Extra securityContext parameters. For example you may need to define runAsNonRoot directive + # extraSecurityContext: + # runAsUser: "33" + # runAsGroup: "33" + # runAsNonRoot: true + # readOnlyRootFilesystem: true + +nginx: + ## You need to set an fpm version of the image for nextcloud if you want to use nginx! + enabled: false + resources: {} + +internalDatabase: + enabled: true + name: nextcloud + +## +## External database configuration +## +externalDatabase: + enabled: true + + ## Supported database engines: mysql or postgresql + type: postgresql + + ## Database host + host: postgres-postgresql.postgres + + ## Database user + user: nextcloud + + ## Database password + password: test + + ## Database name + database: nextcloud + + ## Use an existing secret + existingSecret: + enabled: false + # secretName: nameofsecret + # usernameKey: username + # passwordKey: password + +## +## MariaDB chart configuration +## +mariadb: + ## Whether to deploy a mariadb server to satisfy the application's database requirements. To use an external database set this to false and configure the externalDatabase parameters + enabled: false + +postgresql: + enabled: false +## +## Redis chart configuration +## for more options see https://github.com/bitnami/charts/tree/master/bitnami/redis +## + +redis: + enabled: false + auth: + enabled: true + password: 'changeme' + +## Cronjob to execute Nextcloud background tasks +## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron +## +cronjob: + enabled: false + +service: + type: ClusterIP + port: 8080 + loadBalancerIP: nil + nodePort: nil + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + # Nextcloud Data (/var/www/html) + enabled: true + annotations: {} + ## nextcloud data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: nextcloud-nfs + + accessMode: ReadWriteOnce + size: 150Gi + + ## Use an additional pvc for the data directory rather than a subpath of the default PVC + ## Useful to store data on a different storageClass (e.g.
on slower disks) + nextcloudData: + enabled: false + subPath: + annotations: {} + # storageClass: "-" + # existingClaim: + accessMode: ReadWriteOnce + size: 8Gi + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 100m + memory: 128Mi + +## Liveness and readiness probe values +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 250 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 250 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 +startupProbe: + enabled: false + initialDelaySeconds: 250 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 30 + successThreshold: 1 + + +## Enable pod autoscaling using HorizontalPodAutoscaler +## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ +## +hpa: + enabled: false + +nodeSelector: {} + +tolerations: [] + +affinity: {} + + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + +rbac: + enabled: false + serviceaccount: + create: true + name: nextcloud-serviceaccount + diff --git a/apps/pix2tex/deployment.yaml b/apps/pix2tex/deployment.yaml new file mode 100644 index 0000000..6acff22 --- /dev/null +++ b/apps/pix2tex/deployment.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pix2tex + labels: + app: pix2tex +spec: + replicas: 1 + selector: + matchLabels: + app: pix2tex + template: + metadata: + labels: + app: pix2tex + spec: + containers: + - name: pix2tex + image: lukasblecher/pix2tex:api + tty: true + resources: + requests: + memory: "250M" + cpu: 500m + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "4Gi" + memory: "500M" + cpu: 1000m + ports: + - containerPort: 8501 + command: ["python", "pix2tex/api/run.py"] + nodeSelector: + kubernetes.io/arch: amd64 + +--- +apiVersion: v1 +kind: Service +metadata: + name: pix2tex-http + namespace: pix2tex + labels: + app: pix2tex +spec: + ports: + - name: http + port: 8501 + targetPort: 8501 + selector: + app: pix2tex + +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: pix2tex-ingress +spec: + entryPoints: + - websecure + routes: + - match: Host(`pix2tex.kluster.moll.re`) + kind: Rule + middlewares: + - name: pix2tex-websocket + services: + - name: pix2tex-http + port: 8501 + tls: + certResolver: default-tls +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: pix2tex-websocket +spec: + headers: + customRequestHeaders: + X-Forwarded-Proto: "https" + # enable websockets + Upgrade: "websocket" + diff --git a/apps/whoami/base/deployment.yaml b/apps/whoami/base/deployment.yaml new file mode 100644 index 0000000..670df6b --- /dev/null +++ b/apps/whoami/base/deployment.yaml @@ -0,0 +1,53 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: whoami + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: whoami + namespace: whoami + labels: + app: traefiklabs + name: whoami + +spec: + selector: + matchLabels: + app: traefiklabs + task: whoami + template: + 
metadata: + labels: + app: traefiklabs + task: whoami + spec: + containers: + - name: whoami + image: traefik/whoami + ports: + - containerPort: 80 + resources: + requests: + cpu: "5m" + memory: "5Mi" + limits: + cpu: "10m" + memory: "10Mi" + +--- +apiVersion: v1 +kind: Service +metadata: + name: whoami + namespace: whoami + +spec: + ports: + - name: http + port: 80 + selector: + app: traefiklabs + task: whoami diff --git a/apps/whoami/base/ingress.yaml b/apps/whoami/base/ingress.yaml new file mode 100644 index 0000000..002dbc3 --- /dev/null +++ b/apps/whoami/base/ingress.yaml @@ -0,0 +1,16 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + namespace: whoami + name: whoami-ingressroute +spec: + entryPoints: + - websecure + routes: + - match: Host(`whoami.kluster.moll.re`) + kind: Rule + services: + - name: whoami + port: 80 + tls: + certResolver: default-tls diff --git a/apps/whoami/base/kustomization.yaml b/apps/whoami/base/kustomization.yaml new file mode 100644 index 0000000..7a76cc5 --- /dev/null +++ b/apps/whoami/base/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./deployment.yaml +- ./ingress.yaml \ No newline at end of file diff --git a/apps/whoami/overlays/main/kustomization.yaml b/apps/whoami/overlays/main/kustomization.yaml new file mode 100644 index 0000000..403486d --- /dev/null +++ b/apps/whoami/overlays/main/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + diff --git a/infrastructure/backup/base/cronjob.yaml b/infrastructure/backup/base/cronjob.yaml new file mode 100644 index 0000000..9c341e0 --- /dev/null +++ b/infrastructure/backup/base/cronjob.yaml @@ -0,0 +1,43 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: restic-rclone-gdrive + +spec: + successfulJobsHistoryLimit: 2 + failedJobsHistoryLimit: 2 + + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + hostname: restic-k3s-pod + # used by restic to identify the host + containers: + - name: restic-base-container + image: restic/restic:latest + command: + - /bin/sh + - -c + # >- strips newlines + # RESTIC_ARGS Can be for instance: --verbose --dry-run + args: [] + + volumeMounts: + - mountPath: /data + name: backup-nfs-access + + env: + - name: RESTIC_REPOSITORY + value: rest:http://rclone-gcloud:8000/kluster + # lives in the same namespace + - name: RESTIC_PASSWORD + valueFrom: + secretKeyRef: + name: restic-gdrive-credentials + key: restic-password + volumes: + - name: backup-nfs-access + persistentVolumeClaim: + claimName: backup-nfs-access diff --git a/infrastructure/backup/base/kustomization.yaml b/infrastructure/backup/base/kustomization.yaml new file mode 100644 index 0000000..c79cfb4 --- /dev/null +++ b/infrastructure/backup/base/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./cronjob.yaml +- ./restic-password.secret.yaml \ No newline at end of file diff --git a/infrastructure/backup/overlays/applying.md b/infrastructure/backup/overlays/applying.md new file mode 100644 index 0000000..e5c20c3 --- /dev/null +++ b/infrastructure/backup/overlays/applying.md @@ -0,0 +1,8 @@ +``` +k kustomize backup/overlays/backup | k apply -f - +> secret/restic-credentials-backup created +> cronjob.batch/restic-backblaze-backup created +k kustomize backup/overlays/prune | k apply -f - +> secret/restic-credentials-prune created +> cronjob.batch/restic-backblaze-prune 
created +``` \ No newline at end of file diff --git a/infrastructure/backup/overlays/backup/kustomization.yaml b/infrastructure/backup/overlays/backup/kustomization.yaml new file mode 100644 index 0000000..3da5d5d --- /dev/null +++ b/infrastructure/backup/overlays/backup/kustomization.yaml @@ -0,0 +1,16 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +# namespace: backup +nameSuffix: -backup +resources: + - ../../base + # - ./restic-commands.yaml + + +# patch the cronjob args field: +patches: + - path: ./restic-commands.yaml + target: + kind: CronJob + \ No newline at end of file diff --git a/infrastructure/backup/overlays/backup/restic-commands.yaml b/infrastructure/backup/overlays/backup/restic-commands.yaml new file mode 100644 index 0000000..6b9a3bb --- /dev/null +++ b/infrastructure/backup/overlays/backup/restic-commands.yaml @@ -0,0 +1,25 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: backup-patch +spec: + schedule: "0 2 * * *" + # at 2:00, every day + + jobTemplate: + spec: + template: + spec: + containers: + - name: restic-base-container + args: + # >- folds newlines into spaces, so the lines below render to a single shell command: + # restic backup --verbose=2 /data --exclude=s3/ && restic list snapshots + # -r $(RESTIC_REPOSITORY) not needed, because it is set as an env var + - >- + restic backup + --verbose=2 + /data + --exclude=s3/ + && + restic + list snapshots diff --git a/infrastructure/backup/overlays/prune/kustomization.yaml b/infrastructure/backup/overlays/prune/kustomization.yaml new file mode 100644 index 0000000..a2079af --- /dev/null +++ b/infrastructure/backup/overlays/prune/kustomization.yaml @@ -0,0 +1,15 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +# namespace: backup +nameSuffix: -prune +resources: + - ../../base + # - ./restic-commands.yaml + + +# patch the cronjob args field: +patches: + - path: ./restic-commands.yaml + target: + kind: CronJob diff --git a/infrastructure/backup/overlays/prune/restic-commands.yaml b/infrastructure/backup/overlays/prune/restic-commands.yaml new file mode 100644 index 0000000..c6f4fbd --- /dev/null +++ b/infrastructure/backup/overlays/prune/restic-commands.yaml @@ -0,0 +1,24 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: prune-patch +spec: + schedule: "0 0 1/15 * *" + # at midnight, every 15 days starting from day 1
of every month + + jobTemplate: + spec: + template: + spec: + containers: + - name: restic-base-container + args: + # >- strips newlines + # RESTIC_ARGS Can be for instance: --verbose --dry-run + # RESTIC_REPOSITORY is set in the secret + - >- + restic forget + -r $(RESTIC_REPOSITORY) + --verbose=2 + --keep-daily 7 --keep-weekly 5 + --prune diff --git a/infrastructure/backup/rclone-config.sealedsecret.yaml b/infrastructure/backup/rclone-config.sealedsecret.yaml new file mode 100644 index 0000000..727d3f1 --- /dev/null +++ b/infrastructure/backup/rclone-config.sealedsecret.yaml @@ -0,0 +1,22 @@ +{ + "kind": "SealedSecret", + "apiVersion": "bitnami.com/v1alpha1", + "metadata": { + "name": "rclone-config-files", + "namespace": "backup", + "creationTimestamp": null + }, + "spec": { + "template": { + "metadata": { + "name": "rclone-config-files", + "namespace": "backup", + "creationTimestamp": null + }, + "type": "Opaque" + }, + "encryptedData": { + "rclone.conf": "AgCQ13IG+R6bs+nAxe1xuDXttYlvGlLfV3oQ6c0qtoF2jXB8hN3LftydHn+Se3LjghmQKAIErfsA7ZRhJoWfFuSm2AIc3w2mMonsga5gjBx/56/tZSvnT2Bzn/5UXktTVxwEINSBP0dYiMcn4/G+5hO3bngmG+lCZXeI7yWoTW8H+8NKYxDHUzdoBBhPPPLTERTRZHB8EzOPUlefHq/2y/NpUfkxyLSjYk0/X45W6XNzH6MfdA2x6omxd4giDQSEwJGdXqIXu1rPnPjV7WVcA8qJzkQbxhzjqpUcFgM12YsLGVVW8HSSdAy+ZNdTXmhCIu2+pI+AVuol4QY9r/gU3xlGhFmc3asW5k4iOfn7/ZEr3Yk8JplAYM+GWQ07s59MqYdGOhqFUpVmkjO97Z29iaeReQZCwxzl/PmxUtfI20eTmtUlFKE3fObMr27sZcXgeJS3ktHOONGoqvHHeuqd4hfTaVAGwVOAEoBY8Xnkq3ECN5ld8V4zR8e52QHtANflN4IJgjnGO5pMQyAW+XASAJDxG48q7ruu9i0mI4vuM0rVuoWi2v9I30/M7Mv2xAYnmKC7NIao1mDya3paidHwkIu12480oBDdHZpm5NSqHtQr/HKMQWnbu6CrufrDmTqoVe/ew5uaqjbfrBBys35k5ObUUPlhU3putgfmsR3YZXDaAqOwIoXQ30wm02gCA5z/WNEY3EaKP6RhgsowwkrPPniQfz4EaxQQjmZ/toe/xpwzSZjmoVnJtJabiuqL/B/eY6WpNOTjOzsc7Z69EOyhZMs41gNoA32RRUbFO1ppOu8518cE12KpsGbH6K6NcucSrKh2Gd3xNGwjaGQVT2vLTVi9YwByiwvrsVpNU06f2v0fcZWeRgoFoUkKMj746lw0E+X7oF0+PmfPT2IeTRszHECkbStSvFZNDivcdJyDFutocAZKNjDoAnVPlTNVYwKKcmHvw3sOOXhVN7NOj/+9UxSNyRvip7GPZKtRF1u9ftlD6OaLCCVSip7MJ41a7TugBTUUaMJbQUTmidWKZn6A0nctAvdrPbBatPI2BZQ4amwdXa2bWyE7DI13WaCm6kAVJijsAmfVrVX3C+Ft5p8unbjsVQ/ErdpKTjlq9mJsie3TQdME5r74GlcURiVXdLc7KcV7vpf6yy88XS6ee+Y9WmlYDAwRX+taMilRDlunMeF5Zmh12DCXMzsradEifEOZ/Mg5BMznxvrZv3iHDArm/j4QW7Bi0To3+f2826IAaXMlI4ze7e9Ny3NUbgy85yE+RNYiio0+wvWRKraxpqI0EODy/juBed3VcoWlOfch0hKU4BZTVrU5rDEmwYcp6oWnXE92fhVH7wjy4IV3WUSubYg=" + } + } +} diff --git a/infrastructure/backup/rclone-gcloud.deployment.yaml b/infrastructure/backup/rclone-gcloud.deployment.yaml new file mode 100644 index 0000000..6bab8e5 --- /dev/null +++ b/infrastructure/backup/rclone-gcloud.deployment.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rclone-gcloud + +spec: + selector: + matchLabels: + app: rclone-gcloud + template: + metadata: + labels: + app: rclone-gcloud + spec: + containers: + - name: rclone + image: rclone/rclone:latest + command: ["/bin/sh", "-c"] + args: # mounted as a secret + # >- strips newlines + # sleep infinity + - >- + rclone + --config /config/rclone.conf + serve restic + --addr :8000 + -v + ETHZ-gdrive:backup + + volumeMounts: + # from secret + - name: rclone-config + mountPath: /config + readOnly: true + volumes: + - name: rclone-config + secret: + secretName: rclone-config-files + +--- +apiVersion: v1 +kind: Service +metadata: + name: rclone-gcloud + +spec: + selector: + app: rclone-gcloud + ports: + - protocol: TCP + port: 8000 + targetPort: 8000 + + diff --git a/infrastructure/backup/restic-rclone.env b/infrastructure/backup/restic-rclone.env new file mode 100644 index 0000000..730298b 
--- /dev/null +++ b/infrastructure/backup/restic-rclone.env @@ -0,0 +1,2 @@ +export RESTIC_REPOSITORY=rest:http://127.0.0.1:8000/kluster +export RESTIC_PASSWORD="2r,TE0.,U@gni3e%xr)_LC64" \ No newline at end of file diff --git a/infrastructure/external/omv-s3.ingress.yaml b/infrastructure/external/omv-s3.ingress.yaml new file mode 100644 index 0000000..f767c67 --- /dev/null +++ b/infrastructure/external/omv-s3.ingress.yaml @@ -0,0 +1,38 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: omv-s3-ingressroute + namespace: external +spec: + entryPoints: + - websecure + routes: + - match: Host(`s3.kluster.moll.re`) + kind: Rule + services: + - name: omv-s3 + port: 9000 + # scheme: https + tls: + certResolver: default-tls +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: omv-s3 + namespace: external +subsets: + - addresses: + - ip: 192.168.1.157 + ports: + - port: 9000 +--- +apiVersion: v1 +kind: Service +metadata: + name: omv-s3 + namespace: external +spec: + ports: + - port: 9000 + targetPort: 9000 \ No newline at end of file diff --git a/infrastructure/external/openmediavault.ingress.yaml b/infrastructure/external/openmediavault.ingress.yaml new file mode 100644 index 0000000..1f83cfe --- /dev/null +++ b/infrastructure/external/openmediavault.ingress.yaml @@ -0,0 +1,38 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: omv-ingressroute + namespace: external +spec: + entryPoints: + - websecure + routes: + - match: Host(`omv.kluster.moll.re`) + kind: Rule + services: + - name: omv + port: 443 + scheme: https + tls: + certResolver: default-tls +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: omv + namespace: external +subsets: + - addresses: + - ip: 192.168.1.157 + ports: + - port: 443 +--- +apiVersion: v1 +kind: Service +metadata: + name: omv + namespace: external +spec: + ports: + - port: 443 + targetPort: 443 \ No newline at end of file diff --git a/infrastructure/external/proxmox.ingress.yaml b/infrastructure/external/proxmox.ingress.yaml new file mode 100644 index 0000000..0c81199 --- /dev/null +++ b/infrastructure/external/proxmox.ingress.yaml @@ -0,0 +1,55 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: proxmox-ingressroute + namespace: external +spec: + entryPoints: + - websecure + routes: + - match: Host(`proxmox.kluster.moll.re`) + middlewares: + - name: proxmox-websocket + + kind: Rule + services: + - name: proxmox + port: 8006 + scheme: https + tls: + certResolver: default-tls +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: proxmox-websocket + namespace: external +spec: + headers: + customRequestHeaders: + X-Forwarded-Proto: "https" + # enable websockets + Upgrade: "websocket" + + +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: proxmox + namespace: external +subsets: + - addresses: + - ip: 192.168.1.150 + ports: + - port: 8006 +--- +apiVersion: v1 +kind: Service +metadata: + name: proxmox + namespace: external +spec: + ports: + - port: 8006 + targetPort: 8006 \ No newline at end of file diff --git a/infrastructure/metallb-system/config.values.yaml b/infrastructure/metallb-system/config.values.yaml new file mode 100644 index 0000000..fcd16bc --- /dev/null +++ b/infrastructure/metallb-system/config.values.yaml @@ -0,0 +1,2 @@ +name: metallb +chart: metallb/metallb \ No newline at end of file diff --git a/infrastructure/metallb-system/configmap.yaml b/infrastructure/metallb-system/configmap.yaml new file mode 100644 index 
0000000..c6f22b0 --- /dev/null +++ b/infrastructure/metallb-system/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config + namespace: metallb-system +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - 192.168.3.0/24 \ No newline at end of file diff --git a/infrastructure/metallb-system/ipaddresspool.yaml b/infrastructure/metallb-system/ipaddresspool.yaml new file mode 100644 index 0000000..2b0dd17 --- /dev/null +++ b/infrastructure/metallb-system/ipaddresspool.yaml @@ -0,0 +1,14 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: default + namespace: metallb-system +spec: + addresses: + - 192.168.3.0/24 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system \ No newline at end of file
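An L2Advertisement with an empty spec announces every pool in the namespace. A sketch of the more explicit, equivalent form that pins the advertisement to the `default` pool (illustrative, not part of this commit):

```
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
    - default
```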
diff --git a/infrastructure/metallb-system/values.yaml b/infrastructure/metallb-system/values.yaml new file mode 100644 index 0000000..2110e28 --- /dev/null +++ b/infrastructure/metallb-system/values.yaml @@ -0,0 +1,337 @@ +# Default values for metallb. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +loadBalancerClass: "" + +# existingConfigMap: "config" + +rbac: + # create specifies whether to install and use RBAC rules. + create: true + +prometheus: + # scrape annotations specifies whether to add Prometheus metric + # auto-collection annotations to pods. See + # https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml + # for a corresponding Prometheus configuration. Alternatively, you + # may want to use the Prometheus Operator + # (https://github.com/coreos/prometheus-operator) for more powerful + # monitoring configuration. If you use the Prometheus operator, this + # can be left at false. + scrapeAnnotations: false + + # port both controller and speaker will listen on for metrics + metricsPort: 7472 + + # if set, enables rbac proxy on the controller and speaker to expose + # the metrics via tls. + # secureMetricsPort: 9120 + + # the name of the secret to be mounted in the speaker pod + # to expose the metrics securely. If not present, a self-signed + # certificate will be used. + speakerMetricsTLSSecret: "" + + # the name of the secret to be mounted in the controller pod + # to expose the metrics securely. If not present, a self-signed + # certificate will be used. + controllerMetricsTLSSecret: "" + + # prometheus doesn't have permission to scrape all namespaces, so we grant it permission to scrape metallb's + rbacPrometheus: true + + # the service account used by prometheus + # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " + serviceAccount: "" + + # the namespace where prometheus is deployed + # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " + namespace: "" + + # the image to be used for the kube-rbac-proxy container + rbacProxy: + repository: gcr.io/kubebuilder/kube-rbac-proxy + tag: v0.12.0 + pullPolicy: + + # Prometheus Operator PodMonitors + podMonitor: + # enable support for Prometheus Operator + enabled: false + + # optional additional labels for podMonitors + additionalLabels: {} + + # optional annotations for podMonitors + annotations: {} + + # Job label for scrape target + jobLabel: "app.kubernetes.io/name" + + # Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: + + # metric relabel configs to apply to samples before ingestion. + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # target_label: nodename + # replacement: $1 + # action: replace + + # Prometheus Operator ServiceMonitors. To be used as an alternative + # to podMonitor, supports secure metrics. + serviceMonitor: + # enable support for Prometheus Operator + enabled: false + + speaker: + # optional additional labels for the speaker serviceMonitor + additionalLabels: {} + # optional additional annotations for the speaker serviceMonitor + annotations: {} + # optional tls configuration for the speaker serviceMonitor, in case + # secure metrics are enabled. + tlsConfig: + insecureSkipVerify: true + + controller: + # optional additional labels for the controller serviceMonitor + additionalLabels: {} + # optional additional annotations for the controller serviceMonitor + annotations: {} + # optional tls configuration for the controller serviceMonitor, in case + # secure metrics are enabled. + tlsConfig: + insecureSkipVerify: true + + # Job label for scrape target + jobLabel: "app.kubernetes.io/name" + + # Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: + + # metric relabel configs to apply to samples before ingestion. + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion.
+ relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # target_label: nodename + # replacement: $1 + # action: replace + + # Prometheus Operator alertmanager alerts + prometheusRule: + # enable alertmanager alerts + enabled: false + + # optional additionnal labels for prometheusRules + additionalLabels: {} + + # optional annotations for prometheusRules + annotations: {} + + # MetalLBStaleConfig + staleConfig: + enabled: true + labels: + severity: warning + + # MetalLBConfigNotLoaded + configNotLoaded: + enabled: true + labels: + severity: warning + + # MetalLBAddressPoolExhausted + addressPoolExhausted: + enabled: true + labels: + severity: alert + + addressPoolUsage: + enabled: true + thresholds: + - percent: 75 + labels: + severity: warning + - percent: 85 + labels: + severity: warning + - percent: 95 + labels: + severity: alert + + # MetalLBBGPSessionDown + bgpSessionDown: + enabled: true + labels: + severity: alert + + extraAlerts: [] + +# controller contains configuration specific to the MetalLB cluster +# controller. +controller: + enabled: true + # -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` + logLevel: info + # command: /controller + # webhookMode: enabled + image: + repository: quay.io/metallb/controller + tag: + pullPolicy: + ## @param controller.updateStrategy.type Metallb controller deployment strategy type. + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## e.g: + ## strategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + strategy: + type: RollingUpdate + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. If not set and create is + # true, a name is generated using the fullname template + name: "" + annotations: {} + securityContext: + runAsNonRoot: true + # nobody + runAsUser: 65534 + fsGroup: 65534 + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + nodeSelector: {} + tolerations: [] + priorityClassName: "" + runtimeClassName: "" + affinity: {} + podAnnotations: {} + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + +# speaker contains configuration specific to the MetalLB speaker +# daemonset. +speaker: + enabled: true + # command: /speaker + # -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` + logLevel: info + tolerateMaster: true + memberlist: + enabled: true + mlBindPort: 7946 + mlSecretKeyPath: "/etc/ml_secret_key" + image: + repository: quay.io/metallb/speaker + tag: + pullPolicy: + ## @param speaker.updateStrategy.type Speaker daemonset strategy type + ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
If not set and create is + # true, a name is generated using the fullname template + name: "" + annotations: {} + ## Defines a secret name for the controller to generate a memberlist encryption secret + ## By default secretName: {{ "metallb.fullname" }}-memberlist + ## + # secretName: + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + nodeSelector: {} + tolerations: [] + priorityClassName: "" + affinity: {} + ## Selects which runtime class will be used by the pod. + runtimeClassName: "" + podAnnotations: {} + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + startupProbe: + enabled: true + failureThreshold: 30 + periodSeconds: 5 + # frr contains configuration specific to the MetalLB FRR container, + # for speaker running alongside FRR. + frr: + enabled: false + image: + repository: quay.io/frrouting/frr + tag: 7.5.1 + pullPolicy: + metricsPort: 7473 + resources: {} + + # if set, enables a rbac proxy sidecar container on the speaker to + # expose the frr metrics via tls. + # secureMetricsPort: 9121 + + reloader: + resources: {} + + frrMetrics: + resources: {} + +crds: + enabled: true + validationFailurePolicy: Fail diff --git a/infrastructure/nfs/USAGE.md b/infrastructure/nfs/USAGE.md new file mode 100644 index 0000000..0a7eb4a --- /dev/null +++ b/infrastructure/nfs/USAGE.md @@ -0,0 +1,13 @@ +``` +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: test-claim +spec: + storageClassName: nfs-client + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Mi +``` \ No newline at end of file diff --git a/infrastructure/nfs/kustomization.yaml b/infrastructure/nfs/kustomization.yaml new file mode 100644 index 0000000..29f190b --- /dev/null +++ b/infrastructure/nfs/kustomization.yaml @@ -0,0 +1,9 @@ +namespace: nfs-provisioner +bases: + - github.com/kubernetes-sigs/nfs-subdir-external-provisioner//deploy + + +resources: + - namespace.yaml +patchesStrategicMerge: + - nfs_values.yaml \ No newline at end of file diff --git a/infrastructure/nfs/namespace.yaml b/infrastructure/nfs/namespace.yaml new file mode 100644 index 0000000..ff60d7d --- /dev/null +++ b/infrastructure/nfs/namespace.yaml @@ -0,0 +1,5 @@ +# namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: nfs-provisioner \ No newline at end of file diff --git a/infrastructure/nfs/nfs_values.yaml b/infrastructure/nfs/nfs_values.yaml new file mode 100644 index 0000000..95dfd9c --- /dev/null +++ b/infrastructure/nfs/nfs_values.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nfs-client-provisioner + name: nfs-client-provisioner +spec: + template: + spec: + containers: + - name: nfs-client-provisioner + env: + - name: NFS_SERVER + value: 192.168.1.157 + - name: NFS_PATH + value: /export/kluster/ + volumes: + - name: nfs-client-root + nfs: + server: 192.168.1.157 + path: /export/kluster/ \ No newline at end of file diff --git a/infrastructure/postgres/adding.md b/infrastructure/postgres/adding.md new file mode 100644 index 0000000..0f50e05 --- /dev/null +++ b/infrastructure/postgres/adding.md @@ -0,0 +1,13 @@ +Create a new role by executing the createuser command. 
With the options below, the new role will not be a superuser and will not have privileges for creating new databases or new roles (this is usually the default for the createuser command). + +k exec -it -n postgres postgres-postgresql-0 -- bash + +``` +createuser -U postgres USER_NAME -S -D -R -P +``` +You will be prompted to enter first the password for the new role and to reenter it, and then to enter the postgres role password. + +Create a new database with the new role as the owner: +``` +createdb -U postgres DATABASE_NAME -O USER_NAME +``` \ No newline at end of file diff --git a/infrastructure/postgres/config.values.yaml b/infrastructure/postgres/config.values.yaml new file mode 100644 index 0000000..cc47103 --- /dev/null +++ b/infrastructure/postgres/config.values.yaml @@ -0,0 +1,2 @@ +name: postgres +chart: bitnami/postgresql \ No newline at end of file diff --git a/infrastructure/postgres/postgres-password.sealedsecret.yaml b/infrastructure/postgres/postgres-password.sealedsecret.yaml new file mode 100644 index 0000000..a5f6fc8 --- /dev/null +++ b/infrastructure/postgres/postgres-password.sealedsecret.yaml @@ -0,0 +1,21 @@ +{ + "kind": "SealedSecret", + "apiVersion": "bitnami.com/v1alpha1", + "metadata": { + "name": "postgres-password", + "namespace": "postgres", + "creationTimestamp": null + }, + "spec": { + "template": { + "metadata": { + "name": "postgres-password", + "namespace": "postgres", + "creationTimestamp": null + } + }, + "encryptedData": { + "password": "AgCVytxZbe1yjT7OQuA7LocPTgn6Ikx9pDJAA49Ktboy86dJWlxnBke23O0qn3ELFTGUTDaMhBcJB0neqA0RjTTW3o7PsvbxEBvrP5F1EK4jN2vHti8Jgt/CUbOlJVfFuGPaL2DG9M7vafUnL3AQvZv/YkL79Q32Wcg9nPq+4iT7fTGQzUu22G6bmKJv/SnByAnBIzZRsL3R3pP4J7suG+5+K6PDlNRbIb0mIoy1vjBz5PKQAR2Hrh1+kLFIJEIwDuinSDHRDUoa9fChC52x/Oc4PavFw8RWTXjot5cnEOkUK3umSx0jnD247nPc8sRW87hmHE3O/T+doDqEetQxtarSNPxCZXwkVJCIAxg48M29mdkPiOUu2Rr9W9w+HnN8j7mA2rHYAxxi3KPeDBL7kaFH+Xtyv+MT6upRr9BHfSbA/gMPjT37dJmbEYJAvEEyZZJK6TpXUkLh3jnhg1P180t8AnJVX4KQhjUm+UmgUCytxEjp082vxoKEHop6I7f4qzUYfudaG825i0zL11yjSvUbQbdoe8j3C5pNs5OgNBboGqYGfreCcp76zKdNrNI6GYhtj04AuOQZP5SD9/bqsP4JW4yFYsWsq3XuqIxE/2ExCRvDOFu2H1rnPnkcvUYr30doYPIugP40l7AY18YucUsbH19ww7jM1TOejo5QS5wb39uygwf4j0+XjbD3iV12AQzaEnk/pfo=" + } + } +} diff --git a/infrastructure/postgres/pvc.yaml b/infrastructure/postgres/pvc.yaml new file mode 100644 index 0000000..b04cd07 --- /dev/null +++ b/infrastructure/postgres/pvc.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: postgres + name: postgres-nfs + labels: + directory: postgres +spec: + storageClassName: fast + capacity: + storage: "50Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteMany + nfs: + path: /export/kluster/postgres + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: postgres + name: postgres-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteMany + resources: + requests: + storage: "50Gi" + selector: + matchLabels: + directory: postgres + + + diff --git a/infrastructure/postgres/values.yaml b/infrastructure/postgres/values.yaml new file mode 100644 index 0000000..db74b98 --- /dev/null +++ b/infrastructure/postgres/values.yaml @@ -0,0 +1,1037 @@ +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value +## +global: + storageClass: "" + postgresql: + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param 
global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). + ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## + auth: + postgresPassword: "" + username: "" + password: "" + database: "" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section PostgreSQL common parameters +## + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry PostgreSQL image registry +## @param image.repository PostgreSQL image repository +## @param image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 15.2.0-debian-11-r16 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + debug: false +## Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run +## +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided + ## + postgresPassword: "password" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. + ## + existingSecret: "" + ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## + secretKeys: + adminPasswordKey: postgres-password + userPasswordKey: password + replicationPasswordKey: replication-password + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: false +## @param architecture PostgreSQL architecture (`standalone` or `replication`) +## +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` +## +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. 
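+  ## A hypothetical illustration (not a chart default): with `architecture: replication`
+  ## and `readReplicas.replicaCount: 2`, the settings
+  ##   synchronousCommit: "remote_write"
+  ##   numSynchronousReplicas: 1
+  ## make the primary wait for one replica to acknowledge each commit.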
+ ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port +## +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-outs operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps +## +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## DEPRECATED ldap.url It will removed in a future, please use 'ldap.uri' instead +## @param ldap.server IP address or name of the LDAP server. +## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead +## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead +## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead +## @param ldap.basedn Root DN to begin the search for the user in +## @param ldap.binddn DN of user to bind to LDAP +## @param ldap.bindpw Password for the user to bind to LDAP +## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead +## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead +## @param ldap.searchAttribute Attribute to match against the user name in the search +## @param ldap.searchFilter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## DEPRECATED ldap.tls as string is deprecated,please use 'ldap.tls.enabled' instead +## @param ldap.tls.enabled Se to true to enable TLS encryption +## +ldap: + enabled: false + +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory. 
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: true + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: true + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" + +## @section PostgreSQL Primary parameters +## +primary: + ## @param primary.name Name of the primary database (eg primary, master, leader, ...) + ## + name: primary + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## 
#!/bin/sh + ## echo "Do something." + ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. + ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## 
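+  ## A hypothetical example: the defaults below can be replaced wholesale via
+  ## `primary.customReadinessProbe`, e.g.
+  ##   customReadinessProbe:
+  ##     exec:
+  ##       command: ["sh", "-c", "pg_isready -U postgres -h 127.0.0.1"]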
+ readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## 
If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service + ## + annotations: {} + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "postgres-nfs" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + +## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) +## +readReplicas: + ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) + ## + name: read + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 
+ successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enable container security context + ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param 
readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## @param 
readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service + ## + annotations: {} + ## PostgreSQL read only persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "postgres-nfs" + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + + +## @section NetworkPolicy parameters +## + +## Add networkpolicies +## +networkPolicy: + ## @param networkPolicy.enabled Enable network policies + ## + enabled: false + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: true + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r102 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section Other Parameters +## + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## Service account for PostgreSQL to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later +## +psp: + create: false + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a prometheus exporter + ## + enabled: false diff --git a/infrastructure/sealedsecrets/controller.yaml b/infrastructure/sealedsecrets/controller.yaml new file mode 100644 index 0000000..7b7ded0 --- /dev/null +++ b/infrastructure/sealedsecrets/controller.yaml @@ -0,0 +1,377 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: {} + labels: + name: sealed-secrets-service-proxier + name: sealed-secrets-service-proxier + namespace: kube-system +rules: +- apiGroups: + - "" + resourceNames: + - sealed-secrets-controller + resources: + - services + verbs: + - get +- apiGroups: + - "" + resourceNames: + - 'http:sealed-secrets-controller:' + - http:sealed-secrets-controller:http + - sealed-secrets-controller + resources: + - services/proxy + verbs: + - create + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: {} + labels: + name: sealed-secrets-controller + name: sealed-secrets-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: sealed-secrets-key-admin +subjects: +- kind: ServiceAccount + name: sealed-secrets-controller + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: {} + labels: + name: sealed-secrets-key-admin + name: sealed-secrets-key-admin + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + name: secrets-unsealer + name: secrets-unsealer +rules: +- apiGroups: + - bitnami.com + resources: + - sealedsecrets + verbs: + - get + - list + - watch +- apiGroups: + - bitnami.com + resources: + - sealedsecrets/status + verbs: + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - create + - update + - delete + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: {} + labels: + name: sealed-secrets-service-proxier + name: sealed-secrets-service-proxier + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: sealed-secrets-service-proxier +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: {} + labels: + name: sealed-secrets-controller + name: sealed-secrets-controller + namespace: kube-system +spec: + minReadySeconds: 30 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: sealed-secrets-controller + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + annotations: {} + labels: + name: sealed-secrets-controller + spec: + containers: + - args: [] + command: + - controller + env: [] + image: docker.io/bitnami/sealed-secrets-controller:v0.23.1 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: http + name: sealed-secrets-controller + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: http + securityContext: + 
allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + stdin: false + tty: false + volumeMounts: + - mountPath: /tmp + name: tmp + imagePullSecrets: [] + initContainers: [] + securityContext: + fsGroup: 65534 + runAsNonRoot: true + runAsUser: 1001 + seccompProfile: + type: RuntimeDefault + serviceAccountName: sealed-secrets-controller + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: tmp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sealedsecrets.bitnami.com +spec: + group: bitnami.com + names: + kind: SealedSecret + listKind: SealedSecretList + plural: sealedsecrets + singular: sealedsecret + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: SealedSecret is the K8s representation of a "sealed Secret" - + a regular k8s Secret that has been sealed (encrypted) using the controller's + key. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SealedSecretSpec is the specification of a SealedSecret + properties: + data: + description: Data is deprecated and will be removed eventually. Use + per-value EncryptedData instead. + format: byte + type: string + encryptedData: + additionalProperties: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + template: + description: Template defines the structure of the Secret that will + be created from this sealed secret. + properties: + data: + additionalProperties: + type: string + description: Keys that should be templated using decrypted data + nullable: true + type: object + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + nullable: true + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + type: + description: Used to facilitate programmatic handling of secret + data. + type: string + type: object + required: + - encryptedData + type: object + status: + description: SealedSecretStatus is the most recently observed status of + the SealedSecret. + properties: + conditions: + description: Represents the latest available observations of a sealed + secret's current state. + items: + description: SealedSecretCondition describes the state of a sealed + secret at a certain point. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + lastUpdateTime: + description: The last time this condition was updated. 
+ format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: 'Status of the condition for a sealed secret. Valid + values for "Synced": "True", "False", or "Unknown".' + type: string + type: + description: 'Type of condition for a sealed secret. Valid value: + "Synced"' + type: string + required: + - status + - type + type: object + type: array + observedGeneration: + description: ObservedGeneration reflects the generation most recently + observed by the sealed-secrets controller. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: {} + labels: + name: sealed-secrets-controller + name: sealed-secrets-controller + namespace: kube-system +spec: + ports: + - port: 8080 + targetPort: 8080 + selector: + name: sealed-secrets-controller + type: ClusterIP +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: {} + labels: + name: sealed-secrets-controller + name: sealed-secrets-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: secrets-unsealer +subjects: +- kind: ServiceAccount + name: sealed-secrets-controller + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: {} + labels: + name: sealed-secrets-controller + name: sealed-secrets-controller + namespace: kube-system diff --git a/infrastructure/traefik-system/config.values.yaml b/infrastructure/traefik-system/config.values.yaml new file mode 100644 index 0000000..bd209f7 --- /dev/null +++ b/infrastructure/traefik-system/config.values.yaml @@ -0,0 +1,2 @@ +name: traefik +chart: traefik/traefik \ No newline at end of file diff --git a/infrastructure/traefik-system/configmap.yml b/infrastructure/traefik-system/configmap.yml new file mode 100644 index 0000000..3123b0c --- /dev/null +++ b/infrastructure/traefik-system/configmap.yml @@ -0,0 +1,87 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: traefik-config + namespace: traefik-system +data: + traefik.toml: | + [ping] + + [global] + checkNewVersion = true + sendAnonymousUsage = false + + [log] + level = "INFO" + + [accessLog] + # format = "json" + # filePath = "/var/log/traefik/access.log" + [accessLog.fields] + defaultMode = "keep" + [accessLog.fields.names] + "RequestProtocol" = "drop" + "level" = "drop" + "RequestContentSize" = "drop" + "RequestScheme" = "drop" + "StartLocal" = "drop" + "StartUTC" = "drop" + # ClientUsername: drop + # DownstreamStatusLine: drop + # RequestAddr: drop + # RequestCount: drop + # RequestHost: drop + # RequestLine: drop + # UpstreamAddr: drop + # UpstreamStatusLine: drop + # duration: drop + # msg: drop + # time: drop + # upstream: drop + # user_agent: drop + [api] + dashboard = true + insecure = true + debug = false + + [providers] + [providers.kubernetesCRD] + allowCrossNamespace = true + [providers.kubernetesIngress] + allowExternalNameServices = true + ingressClass = "traefik" + + [serversTransport] + insecureSkipVerify = true + + [entryPoints] + [entryPoints.web] + address = ":8000" + [entryPoints.web.http] + [entryPoints.web.http.redirections] + [entryPoints.web.http.redirections.entryPoint] + to = ":443" # should be the same as websecure but the loadbalancer maps 443 -> 8443 + scheme = "https" + 
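+    # NB: `to` in the redirection above also accepts an entryPoint name
+    # (e.g. to = "websecure"), but that would send clients to this container's
+    # port 8443; the explicit ":443" matches the port the loadbalancer exposes.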
+ [entryPoints.websecure] + address = ":8443" + [entryPoints.metrics] + address = ":9100" + [entryPoints.traefik] + address = ":9000" + + [metrics] + [metrics.influxDB2] + address = "http://influxdb-influxdb2.monitoring:80" + token = "N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==" + org = "influxdata" + bucket = "kluster" + + [certificatesResolvers.default-tls.acme] + email = "me@moll.re" + storage = "/certs/acme.json" + [certificatesResolvers.default-tls.acme.tlsChallenge] + + [experimental.plugins.traefik-plugin-geoblock] + moduleName = "github.com/nscuro/traefik-plugin-geoblock" + version = "v0.10.0" \ No newline at end of file diff --git a/infrastructure/traefik-system/pvc.yaml b/infrastructure/traefik-system/pvc.yaml new file mode 100644 index 0000000..8a6eef5 --- /dev/null +++ b/infrastructure/traefik-system/pvc.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: traefik-system + name: traefik-certificate + labels: + directory: traefik +spec: + storageClassName: fast + capacity: + storage: "10Mi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /export/kluster/traefik/certs + server: 192.168.1.157 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: traefik-system + name: traefik-certificate +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Mi" + selector: + matchLabels: + directory: traefik diff --git a/infrastructure/traefik-system/telegraf.config.values.yaml b/infrastructure/traefik-system/telegraf.config.values.yaml new file mode 100644 index 0000000..a1eec38 --- /dev/null +++ b/infrastructure/traefik-system/telegraf.config.values.yaml @@ -0,0 +1,2 @@ +name: telegraf-traefik +chart: influxdata/telegraf \ No newline at end of file diff --git a/infrastructure/traefik-system/telegraf.values.yaml b/infrastructure/traefik-system/telegraf.values.yaml new file mode 100644 index 0000000..024f1bc --- /dev/null +++ b/infrastructure/traefik-system/telegraf.values.yaml @@ -0,0 +1,151 @@ +## Default values.yaml for Telegraf +## This is a YAML-formatted file. +## ref: https://hub.docker.com/r/library/telegraf/tags/ + +replicaCount: 1 +image: + repo: "telegraf" + tag: "1.24" + pullPolicy: IfNotPresent +podAnnotations: {} +podLabels: {} +imagePullSecrets: [] +## Configure args passed to Telegraf containers +args: [] +# The name of a secret in the same kubernetes namespace which contains values to +# be added to the environment (must be manually created) +# This can be useful for auth tokens, etc. + +# envFromSecret: "telegraf-tokens" +env: + - name: HOSTNAME + value: "telegraf-polling-service" +# An older "volumeMounts" key was previously added which will likely +# NOT WORK as you expect. Please use this newer configuration. 
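+# NB (assumption, worth verifying against the chart defaults): the docker_log
+# input at the end of this file reads unix:///var/run/docker.sock, which is not
+# among the volumes below, so a hostPath mount of that socket would be needed
+# for it to work; the traefik-logs claim is likewise assumed to already exist.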
+
+volumes:
+- name: traefik-logs
+  persistentVolumeClaim:
+    claimName: traefik-logs
+mountPoints:
+- name: traefik-logs
+  mountPath: /traefik_logs
+
+
+## Node labels for pod assignment
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: # to read the traefik logs the pod must be on the same node as traefik
+  podAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions: # matches labels: app.kubernetes.io/name=traefik
+            - key: app.kubernetes.io/name
+              operator: In
+              values:
+                - traefik
+        topologyKey: "kubernetes.io/hostname"
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+# - key: "key"
+#   operator: "Equal|Exists"
+#   value: "value"
+#   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+service:
+  enabled: false
+  type: ClusterIP
+  annotations: {}
+rbac:
+  # Specifies whether RBAC resources should be created
+  create: true
+  # Create only for the release namespace or cluster wide (Role vs ClusterRole)
+  clusterWide: false
+  # Rules for the created role
+  rules: []
+# When using the prometheus input to scrape all pods you need extra rules added to the ClusterRole to be
+# able to scan the pods for scraping labels. The following rules have been taken from:
+# https://github.com/helm/charts/blob/master/stable/prometheus/templates/server-clusterrole.yaml#L8-L46
+# - apiGroups:
+#   - ""
+#   resources:
+#     - nodes
+#     - nodes/proxy
+#     - nodes/metrics
+#     - services
+#     - endpoints
+#     - pods
+#     - ingresses
+#     - configmaps
+#   verbs:
+#     - get
+#     - list
+#     - watch
+# - apiGroups:
+#   - "extensions"
+#   resources:
+#     - ingresses/status
+#     - ingresses
+#   verbs:
+#     - get
+#     - list
+#     - watch
+# - nonResourceURLs:
+#   - "/metrics"
+#   verbs:
+#     - get
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccount to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name:
+  # Annotations for the ServiceAccount
+  annotations: {}
+## Exposed telegraf configuration
+## For full list of possible values see `/docs/all-config-values.yaml` and `/docs/all-config-values.toml`
+## ref: https://docs.influxdata.com/telegraf/v1.1/administration/configuration/
+config:
+  agent:
+    interval: "10s"
+    round_interval: true
+    metric_batch_size: 1000
+    metric_buffer_limit: 10000
+    collection_jitter: "0s"
+    flush_interval: "10s"
+    flush_jitter: "0s"
+    precision: ""
+    debug: false
+    quiet: false
+    logfile: ""
+    hostname: "$HOSTNAME"
+    omit_hostname: true
+  # processors:
+  #   - enum:
+  #       mapping:
+  #         field: "status"
+  #         dest: "status_code"
+  #         value_mappings:
+  #           healthy: 1
+  #           problem: 2
+  #           critical: 3
+  outputs:
+    - influxdb_v2:
+        urls:
+          - "http://influxdb-influxdb2.monitoring:80"
+        token: N_jNm1hZTfyhJneTJj2G357mQ7EJdNzdvebjSJX6JkbyaXNup_IAqeYowblMgV8EjLypNvauTl27ewJvI_rbqQ==
+        organization: "influxdata"
+        bucket: "kluster"
+        # retention_policy: "2w"
+  inputs:
+    - docker_log:
+        endpoint: "unix:///var/run/docker.sock"
+        from_beginning: false
+        container_name_include: ["traefik"]
\ No newline at end of file
diff --git a/infrastructure/traefik-system/values.yaml b/infrastructure/traefik-system/values.yaml
new file mode 100644
index 0000000..8e9e637
--- /dev/null
+++ b/infrastructure/traefik-system/values.yaml
@@ -0,0 +1,241 @@
+# Default values for Traefik
+image:
+  name: traefik
+  # defaults to appVersion
+  tag: ""
+  pullPolicy: IfNotPresent
+
+
+#
+# Configure the deployment
+#
+deployment:
+  enabled: true
+  # Can be either Deployment or DaemonSet
+  kind: Deployment
+  # Number of pods of the deployment (only applies when kind == Deployment)
+  replicas: 1
+  # Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)
+  # revisionHistoryLimit: 1
+  # Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
+  terminationGracePeriodSeconds: 60
+  # The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
+  minReadySeconds: 0
+  # Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
+  annotations: {}
+  # Additional deployment labels (e.g. for filtering deployment by custom labels)
+  labels: {}
+  # Additional pod annotations (e.g. for mesh injection or prometheus scraping)
+  podAnnotations: {}
+  # Additional Pod labels (e.g. for filtering Pod by custom labels)
+  podLabels: {}
+  # Additional containers (e.g. for metric offloading sidecars)
+  additionalContainers: []
+    # https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host
+    # - name: socat-proxy
+    #   image: alpine/socat:1.0.5
+    #   args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
+    #   volumeMounts:
+    #     - name: dsdsocket
+    #       mountPath: /socket
+  # Additional volumes available for use with initContainers and additionalContainers
+  additionalVolumes:
+  # - name: traefik-logs
+  #   persistentVolumeClaim:
+  #     claimName: traefik-logs
+  - name: traefik-certificate
+    persistentVolumeClaim:
+      claimName: traefik-certificate
+  - name: traefik-config
+    configMap:
+      name: traefik-config
+  # - name: dsdsocket
+  #   hostPath:
+  #     path: /var/run/statsd-exporter
+  # Additional initContainers (e.g. for setting file permission as shown below)
+  initContainers: []
+  # The "volume-permissions" init container is required if you run into permission issues.
+ # Related issue: https://github.com/traefik/traefik/issues/6972 + # - name: volume-permissions + # image: busybox:1.31.1 + # command: ["sh", "-c", "chmod -Rv 600 /data/*"] + # volumeMounts: + # - name: data + # mountPath: /data + # Use process namespace sharing + shareProcessNamespace: false + # Custom pod DNS policy. Apply if `hostNetwork: true` + # dnsPolicy: ClusterFirstWithHostNet + # Additional imagePullSecrets + imagePullSecrets: [] + # - name: myRegistryKeySecretName + + +# Use ingressClass. Ignored if Traefik version < 2.3 / kubernetes < 1.18.x +ingressClass: + # true is not unit-testable yet, pending https://github.com/rancher/helm-unittest/pull/12 + enabled: true + isDefaultClass: true + # Use to force a networking.k8s.io API Version for certain CI/CD applications. E.g. "v1beta1" + fallbackApiVersion: "" + +# Activate Pilot integration +pilot: + enabled: false + token: "" + # Toggle Pilot Dashboard + # dashboard: false + +# Enable experimental features +experimental: + http3: + enabled: false + plugins: + enabled: false + + kubernetesGateway: + enabled: false + # certificate: + # group: "core" + # kind: "Secret" + # name: "mysecret" + # By default, Gateway would be created to the Namespace you are deploying Traefik to. + # You may create that Gateway in another namespace, setting its name below: + # namespace: default + +# Create an IngressRoute for the dashboard +ingressRoute: + dashboard: + enabled: false + # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) + annotations: {} + # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) + labels: {} + + + +# +# Configure providers +# +providers: + kubernetesCRD: + enabled: true + allowCrossNamespace: false + allowExternalNameServices: true + allowEmptyServices: false + # ingressClass: traefik-internal + # labelSelector: environment=production,method=traefik + namespaces: [] + # - "default" + + kubernetesIngress: + enabled: true + allowExternalNameServices: true + allowEmptyServices: false + ingressClass: traefik + # labelSelector: environment=production,method=traefik + namespaces: [] + # - "default" + # IP used for Kubernetes Ingress endpoints + publishedService: + enabled: false + # Published Kubernetes Service to copy status from. Format: namespace/servicename + # By default this Traefik service + # pathOverride: "" + + +# Add volumes to the traefik pod. The volume name will be passed to tpl. +# This can be used to mount a cert pair or a configmap that holds a config.toml file. 
+# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg: +# additionalArguments: +# - "--providers.file.filename=/config/dynamic.toml" +# - "--ping" +# - "--ping.entrypoint=web" +volumes: [] + # - name: traefik-config + # mountPath: /config + # configMap: + # name: traefik-config + + +# - name: public-cert +# mountPath: "/certs" +# type: secret +# - name: '{{ printf "%s-configs" .Release.Name }}' +# mountPath: "/config" +# type: configMap + +# Additional volumeMounts to add to the Traefik container +additionalVolumeMounts: +# - name: traefik-logs +# mountPath: /var/log/traefik +# nfs: +# server: 192.168.1.157 +# path: /kluster/traefik +# # For instance when using a logshipper for access logs + # - name: traefik-logs + # # claimName: traefik-logs + # mountPath: /var/log/traefik + - name: traefik-certificate + # claimName: traefik-certificate + mountPath: /certs + - name: traefik-config + mountPath: /config + + +globalArguments: + - "--configfile=/config/traefik.toml" + +additionalArguments: [] + +# Environment variables to be passed to Traefik's binary +env: + - name: TZ + value: "Europe/Berlin" +# - name: SOME_VAR +# value: some-var-value +# - name: SOME_VAR_FROM_CONFIG_MAP +# valueFrom: +# configMapRef: +# name: configmap-name +# key: config-key +# - name: SOME_SECRET +# valueFrom: +# secretKeyRef: +# name: secret-name +# key: secret-key + + + + +# Configure ports +ports: {} # leave unconfigured to use the values from the toml file + + +envFrom: [] +# - configMapRef: +# name: config-map-name +# - secretRef: +# name: secret-name + + +tlsOptions: {} + +# Options for the main traefik service, where the entrypoints traffic comes +# from. +service: + enabled: true + type: LoadBalancer + # Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config) + annotations: {} + # Additional annotations for TCP service only + annotationsTCP: {} + # Additional annotations for UDP service only + annotationsUDP: {} + # Additional service labels (e.g. for filtering Service by custom labels) + labels: {} + # Additional entries here will be added to the service spec. + # Cannot contain type, selector or ports entries. + spec: + # externalTrafficPolicy: Local + loadBalancerIP: 192.168.3.1 diff --git a/kluster/flux-system/dashboard.yaml b/kluster/flux-system/dashboard.yaml new file mode 100644 index 0000000..f7b0d34 --- /dev/null +++ b/kluster/flux-system/dashboard.yaml @@ -0,0 +1,56 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + annotations: + metadata.weave.works/description: This is the source location for the Weave GitOps + Dashboard's helm chart. + labels: + app.kubernetes.io/component: ui + app.kubernetes.io/created-by: weave-gitops-cli + app.kubernetes.io/name: weave-gitops-dashboard + app.kubernetes.io/part-of: weave-gitops + name: flux-dashboard + namespace: flux-system +spec: + interval: 1h0m0s + type: oci + url: oci://ghcr.io/weaveworks/charts +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + annotations: + metadata.weave.works/description: This is the Weave GitOps Dashboard. It provides + a simple way to get insights into your GitOps workloads. 
+ name: flux-dashboard + namespace: flux-system +spec: + chart: + spec: + chart: weave-gitops + sourceRef: + kind: HelmRepository + name: flux-dashboard + interval: 1h0m0s + values: + adminUser: + create: true + passwordHash: $2a$10$k0UXfoFU9qbQQYOD/fJWY.Wlr5z9YVTyC0WrnOk50QhKuo1Y0SZoK + username: admin +# --- +# apiVersion: traefik.containo.us/v1alpha1 +# kind: IngressRoute +# metadata: +# namespace: flux-system +# name: flux-ingressroute +# spec: +# entryPoints: +# - websecure +# routes: +# - match: Host(`flux.kluster.moll.re`) +# kind: Rule +# services: +# - name: flux-dashboard-weave-gitops +# port: 9001 +# tls: +# certResolver: default-tls diff --git a/kluster/journal/deploymentreference.yaml b/kluster/journal/deploymentreference.yaml new file mode 100644 index 0000000..0dad838 --- /dev/null +++ b/kluster/journal/deploymentreference.yaml @@ -0,0 +1,75 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: journal-bot + namespace: flux-system +spec: + interval: 1m0s + ref: + branch: main + secretRef: + name: journal-bot + timeout: 60s + url: ssh://git@git.kluster.moll.re:2222/remoll/journal-bot.git +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: journal-bot-kustomize + namespace: flux-system +spec: + force: true + interval: 1m0s + path: ./deployment/overlays/main + prune: false + sourceRef: + kind: GitRepository + name: journal-bot + +--- +apiVersion: image.toolkit.fluxcd.io/v1beta2 +kind: ImageRepository +metadata: + name: journal-bot-registry + namespace: flux-system +spec: + image: registry.hub.docker.com/mollre/journal-bot + interval: 15m + +--- +apiVersion: image.toolkit.fluxcd.io/v1beta2 +kind: ImagePolicy +metadata: + name: journal-bot-imagerange + namespace: flux-system +spec: + imageRepositoryRef: + name: journal-bot-registry + policy: + semver: + range: 1.x.x +--- +apiVersion: image.toolkit.fluxcd.io/v1beta1 +kind: ImageUpdateAutomation +metadata: + name: journal-bot-automation + namespace: flux-system +spec: + interval: 30m + sourceRef: + kind: GitRepository + name: journal-bot + git: + checkout: + ref: + branch: main + commit: + author: + email: flux@moll.re + name: fluxcdbot + messageTemplate: '[CI SKIP] Bump {{range .Updated.Images}}{{println .}}{{end}}' + push: + branch: main + update: + path: ./deployment/base/deployment.yaml + strategy: Setters \ No newline at end of file diff --git a/kluster/whoami/deploymentreference.yaml b/kluster/whoami/deploymentreference.yaml new file mode 100644 index 0000000..8406bde --- /dev/null +++ b/kluster/whoami/deploymentreference.yaml @@ -0,0 +1,20 @@ +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: whoami-kustomize + namespace: flux-system +spec: + force: true + interval: 1m0s + path: ./whoami/overlays/main + prune: false + sourceRef: + kind: GitRepository + name: k3s-app-collection + + + +# apiVersion: kustomize.toolkit.fluxcd.io/v1 +# kind: HelmRelease +# ... 
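+# NB: if this stub is ever fleshed out, HelmRelease lives in the
+# helm.toolkit.fluxcd.io/v2beta1 API group (see kluster/flux-system/dashboard.yaml),
+# not kustomize.toolkit.fluxcd.io/v1.
+
+# Sketch of how the journal-bot ImageUpdateAutomation above locates fields to
+# rewrite: the Setters strategy only touches lines in
+# deployment/base/deployment.yaml that carry an image-policy marker comment.
+# Hypothetical excerpt (assuming the image field lives in that file):
+#
+#   image: registry.hub.docker.com/mollre/journal-bot:1.0.0 # {"$imagepolicy": "flux-system:journal-bot-imagerange"}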
+ diff --git a/unused/aio.deployment.yaml b/unused/aio.deployment.yaml new file mode 100644 index 0000000..ab6bcbd --- /dev/null +++ b/unused/aio.deployment.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aio + namespace: aio + labels: + app: aio +spec: + replicas: 1 + selector: + matchLabels: + app: aio + template: + metadata: + labels: + app: aio + spec: + containers: + - name: aio + image: mollre/aio:latest + tty: true + volumeMounts: + - mountPath: /keys/ + name: aio-nfs + resources: + requests: + memory: "250Mi" + cpu: 0.5 + + + volumes: + - name: aio-nfs + persistentVolumeClaim: + claimName: aio-nfs diff --git a/unused/aio.pvc.yaml b/unused/aio.pvc.yaml new file mode 100644 index 0000000..aa4f30b --- /dev/null +++ b/unused/aio.pvc.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: aio + name: "aio-nfs" + labels: + directory: "aio" +spec: + storageClassName: fast + capacity: + storage: "100Mi" + accessModes: + - ReadWriteOnce + nfs: + path: /aio + server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: aio + name: "aio-nfs" +spec: + storageClassName: "fast" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Mi" + selector: + matchLabels: + directory: "aio" diff --git a/unused/anki/deployment.yaml b/unused/anki/deployment.yaml new file mode 100644 index 0000000..b2bed6e --- /dev/null +++ b/unused/anki/deployment.yaml @@ -0,0 +1,114 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: anki + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: anki + namespace: anki + labels: + app: anki +spec: + replicas: 1 + selector: + matchLabels: + app: anki + template: + metadata: + labels: + app: anki + spec: + containers: + - name: anki-server + image: ankicommunity/anki-sync-server:20220516 + tty: true + volumeMounts: + - mountPath: /app/data + name: anki-data-nfs + resources: + requests: + memory: "250Mi" + cpu: 0.5 + nodeSelector: + kubernetes.io/arch: amd64 + + + volumes: + - name: anki-data-nfs + persistentVolumeClaim: + claimName: anki-data-nfs + +--- +apiVersion: v1 +kind: Service +metadata: + name: anki-http + namespace: anki +spec: + selector: + app: anki + ports: + - protocol: TCP + port: 27701 + targetPort: 27701 + type: ClusterIP + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: anki + name: "anki-data-nfs" + labels: + directory: "anki" +spec: + storageClassName: fast + capacity: + storage: "100Mi" + accessModes: + - ReadWriteOnce + nfs: + path: /anki + server: 10.43.239.43 # assigned to nfs-server service. 
Won't change as long as service is not redeployed + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: anki + name: "anki-data-nfs" +spec: + storageClassName: "fast" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Mi" + selector: + matchLabels: + directory: "anki" + + +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + name: anki-ingress + namespace: anki +spec: + entryPoints: + - websecure + routes: + - match: Host(`anki.kluster.moll.re`) + kind: Rule + services: + - name: anki-http + port: 27701 + tls: + certResolver: default-tls + + diff --git a/unused/anonaddy.values.yaml b/unused/anonaddy.values.yaml new file mode 100644 index 0000000..50473bf --- /dev/null +++ b/unused/anonaddy.values.yaml @@ -0,0 +1,92 @@ +# +# IMPORTANT NOTE +# +# This chart inherits from our common library chart. You can check the default values/options here: +# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml +# + +image: + # -- image repository + repository: anonaddy/anonaddy + # -- image tag + tag: 0.11.2 + # -- image pull policy + pullPolicy: IfNotPresent + +strategy: + type: Recreate + +# -- environment variables. See more environment variables in the [anonaddy documentation](https://github.com/anonaddy/docker#environment-variables). +# @default -- See below +env: + TZ: "Europe/Berlin" + # -- Application key for encrypter service + # You can generate one through `anonaddy key:generate --show` or `echo "base64:$(openssl rand -base64 32)"` + APP_KEY: + # -- Root domain to receive email from + ANONADDY_DOMAIN: anonaddy.kluster.moll.re + # -- Long random string used when hashing data for the anonymous replies + ANONADDY_SECRET: + +# -- Configures service settings for the chart. +# @default -- See values.yaml +service: + main: + ports: + http: + port: 8000 + smtp: + enabled: true + port: 25 + type: LoadBalancer + +ingress: + # -- Enable and configure ingress settings for the chart under this key. + # @default -- See values.yaml + main: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod + hosts: + - host: anonaddy.kluster.moll.re + paths: + - path: / + pathType: Prefix + service: + port: 8000 + tls: + - hosts: + - anonaddy.kluster.moll.re + secretName: cloudflare-letsencrypt-issuer-account-key + +# -- Configure persistence settings for the chart under this key. +# @default -- See values.yaml +persistence: + config: + enabled: false + emptydir: + enabled: false + +# https://github.com/bitnami/charts/tree/master/bitnami/mariadb/#installing-the-chart +mariadb: + enabled: true + image: + name: arm64v8/mariadb:latest + pullSecrets: [] + # primary: + # persistence: + # enabled: true + # auth: + # username: "username" + # password: "password" + # database: database + +# -- Enable and configure redis subchart under this key. 
+# For more options see [redis chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/redis) +# @default -- See values.yaml +redis: + enabled: false + # auth: + # enabled: false + diff --git a/unused/archive.deployment.yaml b/unused/archive.deployment.yaml new file mode 100644 index 0000000..c108d49 --- /dev/null +++ b/unused/archive.deployment.yaml @@ -0,0 +1,119 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: archive + labels: + app: archive + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: archive + name: archive-data-nfs + labels: + directory: archive +spec: + storageClassName: fast + capacity: + storage: "100Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /helbing_archive + server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: archive + name: archive-data-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "100Gi" + selector: + matchLabels: + directory: archive + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: archive + namespace: archive + labels: + app: archive +spec: + replicas: 1 + selector: + matchLabels: + app: archive + template: + metadata: + labels: + app: archive + spec: + containers: + - name: archive + image: archivebox/archivebox + tty: true + ports: + - containerPort: 8000 + volumeMounts: + - mountPath: /data + name: archive-data + + + volumes: + - name: archive-data + persistentVolumeClaim: + claimName: archive-data-nfs + +--- +apiVersion: v1 +kind: Service +metadata: + name: archive + namespace: archive + +spec: + type: ClusterIP + ports: + - name: http + port: 8000 + selector: + app: archive + +--- + +kind: Ingress +apiVersion: networking.k8s.io/v1 +metadata: + namespace: archive + name: archive-ingress + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod +spec: + tls: + - hosts: + - archive.kluster.moll.re + secretName: cloudflare-letsencrypt-issuer-account-key + rules: + - host: archive.kluster.moll.re + http: + paths: + - backend: + service: + name: archive + port: + number: 8000 + path: / + pathType: Prefix \ No newline at end of file diff --git a/unused/authelia/pvc.yaml b/unused/authelia/pvc.yaml new file mode 100644 index 0000000..5a6ba53 --- /dev/null +++ b/unused/authelia/pvc.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: authelia + name: authelia-config-nfs + labels: + directory: authelia +spec: + storageClassName: fast + capacity: + storage: "1Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /authelia + server: 10.43.239.43 # assigned to nfs-server service. 
Won't change as long as service is not redeployed +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: authelia + name: authelia-config-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + selector: + matchLabels: + directory: authelia \ No newline at end of file diff --git a/unused/authelia/values.yaml b/unused/authelia/values.yaml new file mode 100644 index 0000000..2546247 --- /dev/null +++ b/unused/authelia/values.yaml @@ -0,0 +1,1235 @@ +--- +## @formatter:off +## values.yaml +## +## Repository: authelia https://charts.authelia.com +## Chart: authelia +## +## This values file is designed for full deployment, eventually for in production once the chart makes it to 1.0.0. +## It uses the following providers: +## - authentication: LDAP +## - storage: MySQL +## - session: redis + +## Version Override allows changing some chart characteristics that render only on specific versions. +## This does NOT affect the image used, please see the below image section instead for this. +## If this value is not specified, it's assumed the appVersion of the chart is the version. +## The format of this value is x.x.x, for example 4.100.0. +## +## Important Points: +## - No guarantees of support for prior versions is given. The chart is intended to be used with the AppVersion. +## - Does not and will not support any version prior to 4.30.0 due to a significant refactor of the configuration +## system. +versionOverride: "" + +## Image Parameters +## ref: https://hub.docker.com/r/authelia/authelia/tags/ +## +image: + # registry: docker.io + registry: ghcr.io + repository: authelia/authelia + tag: "" + pullPolicy: IfNotPresent + pullSecrets: [] + # pullSecrets: + # - myPullSecretName + +# nameOverride: authelia-deployment-name +# appNameOverride: authelia + +## +## extra labels/annotations applied to all resources +## +annotations: {} +# annotations: +# myAnnotation: myValue + +labels: {} +# labels: +# myLabel: myValue + +## +## RBAC Configuration. +## +rbac: + + ## Enable RBAC. Turning this on associates Authelia with a service account. + ## If the vault injector is enabled, then RBAC must be enabled. + enabled: false + + annotations: {} + labels: {} + + serviceAccountName: authelia + + +## Authelia Domain +## Should be the root domain you want to protect. +## For example if you have apps app1.example.com and app2.example.com it should be example.com +## This affects the ingress (partially sets the domain used) and configMap. +## Authelia must be served from the domain or a subdomain under it. +domain: kluster.moll.re + +service: + annotations: {} + # annotations: + # myAnnotation: myValue + + labels: {} + # labels: + # myLabel: myValue + + port: 80 + + # clusterIP: + + +ingress: + enabled: true + + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod + + labels: {} + # labels: + # myLabel: myValue + + certManager: false + rewriteTarget: true + + ## The Ingress Class Name. + # className: ingress-nginx + + ## Subdomain is the only thing required since we specify the domain as part of the root values of the chart. + ## Example: To get Authelia to listen on https://auth.example.com specify 'auth' for ingress.subdomain, + ## and specify example.com for the domain. 
+ subdomain: auth + + tls: + enabled: true + secretName: cloudflare-letsencrypt-issuer-account-key + hosts: + - auth.kluster.moll.re + + # hostNameOverride: + + traefikCRD: + enabled: false + + ## Use a standard Ingress object, not an IngressRoute. + disableIngressRoute: false + + # matchOverride: Host(`auth.example.com`) && PathPrefix(`/`) + + entryPoints: [] + # entryPoints: + # - http + + # priority: 10 + + # weight: 10 + + sticky: false + + # stickyCookieNameOverride: authelia_traefik_lb + + # strategy: RoundRobin + + # responseForwardingFlushInterval: 100ms + + middlewares: + auth: + # nameOverride: authelia-auth + authResponseHeaders: + - Remote-User + - Remote-Name + - Remote-Email + - Remote-Groups + + chains: + auth: + # nameOverride: authelia-auth-chain + + # List of Middlewares to apply before the forwardAuth Middleware in the authentication chain. + before: [] + # before: + # - name: extra-middleware-name + # namespace: default + + # List of Middlewares to apply after the forwardAuth Middleware in the authentication chain. + after: [] + # after: + # - name: extra-middleware-name + # namespace: default + + ingressRoute: + + # List of Middlewares to apply before the middleware in the IngressRoute chain. + before: [] + # before: + # - name: extra-middleware-name + # namespace: default + + # List of Middlewares to apply after the middleware in the IngressRoute chain. + after: [] + # after: + # - name: extra-middleware-name + # namespace: default + + # Specific options for the TraefikCRD TLS configuration. The above TLS section is still used. + tls: + ## Disables inclusion of the IngressRoute TLSOptions. + disableTLSOptions: false + # existingOptions: + # name: default-traefik-options + # namespace: default + # certResolver: default + # sans: + # - *.example.com + # + options: + # nameOverride: authelia-tls-options + nameOverride: "" + + minVersion: VersionTLS12 + maxVersion: VersionTLS13 + sniStrict: false + cipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_RSA_WITH_AES_256_GCM_SHA384 + curvePreferences: [] + # curvePreferences: + # - CurveP521 + # - CurveP384 + +pod: + # Must be Deployment, DaemonSet, or StatefulSet. 
+ kind: DaemonSet + + annotations: {} + # annotations: + # myAnnotation: myValue + + labels: {} + # labels: + # myLabel: myValue + + replicas: 1 + revisionHistoryLimit: 5 + + strategy: + type: RollingUpdate + # rollingUpdate: + # partition: 1 + # maxSurge: 25% + # maxUnavailable: 25% + + securityContext: + container: {} + # container: + # runAsUser: 2000 + # runAsGroup: 2000 + # fsGroup: 2000 + pod: {} + # pod: + # readOnlyRootFilesystem: true + # allowPrivilegeEscalation: false + # privileged: false + + tolerations: [] + # tolerations: + # - key: key1 + # operator: Equal + # value: value1 + # effect: NoSchedule + # tolerationSeconds: 3600 + + selectors: + # nodeName: worker-1 + + nodeSelector: {} + # nodeSelector: + # disktype: ssd + # kubernetes.io/hostname: worker-1 + + affinity: + nodeAffinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker-1 + # - worker-2 + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 1 + # preference: + # matchExpressions: + # - key: node-label-key + # operator: NotIn + # values: + # - not-this + podAffinity: {} + # podAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: security + # operator: In + # values: + # - S1 + # topologyKey: topology.kubernetes.io/zone + podAntiAffinity: {} + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: security + # operator: In + # values: + # - S2 + # topologyKey: topology.kubernetes.io/zone + + env: [] + # env: + # - name: TZ + # value: Australia/Melbourne + + resources: + limits: {} + # limits: + # cpu: "4.00" + # memory: 125Mi + requests: {} + # requests: + # cpu: "0.25" + # memory: 50Mi + + probes: + method: + httpGet: + path: /api/health + port: http + scheme: HTTP + + liveness: + initialDelaySeconds: 0 + periodSeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + + readiness: + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + + ## Note: Startup Probes are a Kubernetes feature gate which must be manually enabled pre-1.18. + ## Ref: https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ + startup: + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + extraVolumeMounts: [] + extraVolumes: [] + + +## +## Authelia Config Map Generator +## +configMap: + + # Enable the configMap source for the Authelia config. + # If this is false you need to provide a volumeMount via PV/PVC or other means that mounts to /config. + enabled: true + + annotations: {} + # annotations: + # myAnnotation: myValue + + labels: {} + # labels: + # myLabel: myValue + + key: configuration.yaml + + existingConfigMap: "" + + ## + ## Server Configuration + ## + server: + ## + ## Port sets the configured port for the daemon, service, and the probes. + ## Default is 9091 and should not need to be changed. + ## + port: 9091 + + ## Set the single level path Authelia listens on. + ## Must be alphanumeric chars and should not contain any slashes. + path: "" + + ## Set the path on disk to Authelia assets. + ## Useful to allow overriding of specific static assets. + # asset_path: /config/assets/ + asset_path: "" + + ## Customize Authelia headers. 
+    headers:
+      ## Read the Authelia docs before setting this advanced option.
+      ## https://www.authelia.com/configuration/miscellaneous/server/#csp_template.
+      csp_template: ""
+
+    ## Buffers usually should be configured to be the same value.
+    ## Explanation at https://www.authelia.com/configuration/miscellaneous/server/
+    ## Read buffer size adjusts the server's max incoming request size in bytes.
+    ## Write buffer size does the same for outgoing responses.
+    read_buffer_size: 4096
+    write_buffer_size: 4096
+
+  log:
+    ## Level of verbosity for logs: info, debug, trace.
+    level: info
+
+    ## Format the logs are written as: json, text.
+    format: text
+
+    ## TODO: Statefulness check should check if this is set, and the configMap should enable it.
+    ## File path where the logs will be written. If not set logs are written to stdout.
+    # file_path: /config/authelia.log
+    file_path: ""
+
+  ##
+  ## Telemetry Configuration
+  ##
+  telemetry:
+
+    ##
+    ## Metrics Configuration
+    ##
+    metrics:
+      ## Enable Metrics.
+      enabled: false
+
+      ## The port to listen on for metrics. This should be on a different port to the main server.port value.
+      port: 9959
+
+      serviceMonitor:
+        enabled: false
+        annotations: {}
+        labels: {}
+
+  ## Default redirection URL
+  ##
+  ## If user tries to authenticate without any referer, Authelia does not know where to redirect the user to at the end
+  ## of the authentication process. This parameter allows you to specify the default redirection URL Authelia will use
+  ## in such a case.
+  ##
+  ## Note: this parameter is optional. If not provided, user won't be redirected upon successful authentication.
+  ## Default is https://www.<domain> (value at the top of the values.yaml).
+  default_redirection_url: ""
+  # default_redirection_url: https://example.com
+
+  ## Set the default 2FA method for new users and for when a user has a preferred method configured that has been
+  ## disabled. This setting must be a method that is enabled.
+  ## Options are totp, webauthn, mobile_push.
+  default_2fa_method: "totp"
+
+  theme: light
+
+  ##
+  ## TOTP Configuration
+  ##
+  ## Parameters used for TOTP generation.
+  totp:
+    ## Disable TOTP.
+    disable: false
+
+    ## The issuer name displayed in the Authenticator application of your choice.
+    ## Defaults to <domain>.
+    issuer: ""
+
+    ## The TOTP algorithm to use.
+    ## It is CRITICAL you read the documentation before changing this option:
+    ## https://www.authelia.com/configuration/second-factor/time-based-one-time-password/#algorithm
+    algorithm: sha1
+
+    ## The number of digits a user has to input. Must either be 6 or 8.
+    ## Changing this option only affects newly generated TOTP configurations.
+    ## It is CRITICAL you read the documentation before changing this option:
+    ## https://www.authelia.com/configuration/second-factor/time-based-one-time-password/#digits
+    digits: 6
+
+    ## The period in seconds a one-time password is valid for.
+    ## Changing this option only affects newly generated TOTP configurations.
+    period: 30
+
+    ## The skew controls the number of one-time passwords either side of the current one that are valid.
+    ## Warning: before changing skew read the docs link below.
+    ## See: https://www.authelia.com/configuration/second-factor/time-based-one-time-password/#input-validation to read the documentation.
+    skew: 1
+
+    ## The size of the generated shared secrets. Default is 32 and is sufficient in most use cases, minimum is 20.
+    secret_size: 32
+
+  ##
+  ## WebAuthn Configuration
+  ##
+  ## Parameters used for WebAuthn.
+  webauthn:
+    ## Disable Webauthn.
+    disable: false
+
+    ## Adjust the interaction timeout for Webauthn dialogues.
+    timeout: 60s
+
+    ## The display name the browser should show the user when using Webauthn to login/register.
+    display_name: Authelia
+
+    ## Conveyance preference controls if we collect the attestation statement including the AAGUID from the device.
+    ## Options are none, indirect, direct.
+    attestation_conveyance_preference: indirect
+
+    ## User verification controls if the user must make a gesture or action to confirm they are present.
+    ## Options are required, preferred, discouraged.
+    user_verification: preferred
+
+
+
+  ##
+  ## Authentication Backend Provider Configuration
+  ##
+  ## Used for verifying user passwords and retrieving information such as email address and groups users belong to.
+  ##
+  ## The available providers are: `file`, `ldap`. You must use one and only one of these providers.
+  authentication_backend:
+
+    ## Password Reset Options.
+    password_reset:
+
+      ## Disable both the HTML element and the API for reset password functionality
+      disable: false
+
+      ## External reset password URL that redirects the user to an external reset portal. This disables the internal reset
+      ## functionality.
+      custom_url: ""
+
+    ## The amount of time to wait before we refresh data from the authentication backend. Uses duration notation.
+    ## To disable this feature set it to 'disable', this will slightly reduce security because for Authelia, users will
+    ## always belong to groups they belonged to at the time of login even if they have been removed from them in LDAP.
+    ## To force update on every request you can set this to '0' or 'always', this will increase processor demand.
+    ## See the below documentation for more information.
+    ## Duration Notation docs: https://www.authelia.com/configuration/prologue/common/#duration-notation-format
+    ## Refresh Interval docs: https://www.authelia.com/configuration/first-factor/ldap/#refresh-interval
+    refresh_interval: 5m
+
+    ## LDAP backend configuration.
+    ##
+    ## This backend allows Authelia to be scaled to more
+    ## than one instance and therefore is recommended for
+    ## production.
+    ldap:
+      enabled: false
+
+    ## File (Authentication Provider)
+    ##
+    ## With this backend, the users database is stored in a file which is updated when users reset their passwords.
+    ## Therefore, this backend is meant to be used in a dev environment and not in production, since it prevents Authelia
+    ## from being scaled to more than one instance. The options under 'password' have sane defaults, and as it has security
+    ## implications it is highly recommended you leave the default values. Before considering changing these settings
+    ## please read the docs page: https://www.authelia.com/reference/guides/passwords/#tuning
+    ##
+    ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/
+    ##
+    file:
+      enabled: true
+      path: /config/users_database.yml
+      password:
+        algorithm: argon2id
+        iterations: 1
+        key_length: 32
+        salt_length: 16
+        memory: 1024
+        parallelism: 8
+
+  ##
+  ## Password Policy Configuration.
+  ##
+  password_policy:
+
+    ## The standard policy allows you to tune individual settings manually.
+    standard:
+      enabled: false
+
+      ## Require a minimum length for passwords.
+      min_length: 8
+
+      ## Require a maximum length for passwords.
+      max_length: 0
+
+      ## Require uppercase characters.
+      require_uppercase: true
+
+      ## Require lowercase characters.
+      require_lowercase: true
+
+      ## Require numeric characters.
+      require_number: true
+
+      ## Require special characters.
+      require_special: true
+
+    ## zxcvbn is a well-known and widely used password strength algorithm. It does not have tunable settings.
+    zxcvbn:
+      enabled: false
+
+      ## Configures the minimum score allowed.
+      min_score: 0
+
+  ##
+  ## Access Control Configuration
+  ##
+  ## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
+  ##
+  ## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
+  ## to anyone. Otherwise restrictions follow the rules defined.
+  ##
+  ## Note: One can use the wildcard * to match any subdomain.
+  ## It must stand at the beginning of the pattern. (example: *.mydomain.com)
+  ##
+  ## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
+  ##
+  ## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
+  ##
+  ## - 'domain' defines which domain or set of domains the rule applies to.
+  ##
+  ## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matches any user if not
+  ##   provided. If provided, the parameter represents either a user or a group. It should be of the form
+  ##   'user:<username>' or 'group:<groupname>'.
+  ##
+  ## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
+  ##
+  ## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
+  ##   is optional and matches any resource if not provided.
+  ##
+  ## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
+  access_control:
+
+    ## Configure the ACL as a Secret instead of part of the ConfigMap.
+    secret:
+
+      ## Enables the ACL section being generated as a secret.
+      enabled: false
+
+      ## The key in the secret which contains the file to mount.
+      key: configuration.acl.yaml
+
+      ## An existingSecret name, if configured this will force the secret to be mounted using the key above.
+      existingSecret: ""
+
+    ## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
+    ## resource if there is no policy to be applied to the user.
+ default_policy: deny + + networks: [] + # networks: + # - name: private + # networks: + # - 10.0.0.0/8 + # - 172.16.0.0/12 + # - 192.168.0.0/16 + # - name: vpn + # networks: + # - 10.9.0.0/16 + + rules: [] + # rules: + # - domain_regex: '^.*\.example.com$' + # policy: bypass + # - domain: public.example.com + # policy: bypass + # - domain: "*.example.com" + # policy: bypass + # methods: + # - OPTIONS + # - domain: secure.example.com + # policy: one_factor + # networks: + # - private + # - vpn + # - 192.168.1.0/24 + # - 10.0.0.1 + # - domain: + # - secure.example.com + # - private.example.com + # policy: two_factor + # - domain: singlefactor.example.com + # policy: one_factor + # - domain: "mx2.mail.example.com" + # subject: "group:admins" + # policy: deny + # - domain: "*.example.com" + # subject: + # - "group:admins" + # - "group:moderators" + # policy: two_factor + # - domain: dev.example.com + # resources: + # - "^/groups/dev/.*$" + # subject: "group:dev" + # policy: two_factor + # - domain: dev.example.com + # resources: + # - "^/users/john/.*$" + # subject: + # - ["group:dev", "user:john"] + # - "group:admins" + # policy: two_factor + # - domain: "{user}.example.com" + # policy: bypass + + ## + ## Session Provider Configuration + ## + ## The session cookies identify the user once logged in. + ## The available providers are: `memory`, `redis`. Memory is the provider unless redis is defined. + session: + ## The name of the session cookie. (default: authelia_session). + name: authelia_session + + ## Sets the Cookie SameSite value. Possible options are none, lax, or strict. + ## Please read https://www.authelia.com/configuration/session/introduction/#same_site + same_site: lax + + ## The time in seconds before the cookie expires and session is reset. + expiration: 1h + + ## The inactivity time in seconds before the session is reset. + inactivity: 5m + + ## The remember me duration. + ## Value is in seconds, or duration notation. Value of 0 disables remember me. + ## See: https://www.authelia.com/configuration/prologue/common/#duration-notation-format + ## Longer periods are considered less secure because a stolen cookie will last longer giving attackers more time to + ## spy or attack. Currently the default is 1M or 1 month. + remember_me_duration: 1M + + ## + ## Redis Provider + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/ + ## + ## The redis connection details + redis: + enabled: false + + + ## + ## Regulation Configuration + ## + ## This mechanism prevents attackers from brute forcing the first factor. It bans the user if too many attempts are done + ## in a short period of time. + regulation: + ## The number of failed login attempts before user is banned. Set it to 0 to disable regulation. + max_retries: 3 + + ## The time range during which the user can attempt login before being banned. The user is banned if the + ## authentication failed 'max_retries' times in a 'find_time' seconds window. Find Time accepts duration notation. + ## See: https://www.authelia.com/configuration/prologue/common/#duration-notation-format + find_time: 2m + + ## The length of time before a banned user can login again. Ban Time accepts duration notation. + ## See: https://www.authelia.com/configuration/prologue/common/#duration-notation-format + ban_time: 5m + + + ## + ## Storage Provider Configuration + ## + ## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers. 
+ storage: + ## + ## Local (Storage Provider) + ## + ## This stores the data in a SQLite3 Database. + ## This is only recommended for lightweight non-stateful installations. + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/ + ## + local: + enabled: true + path: /config/db.sqlite3 + + ## + ## MySQL (Storage Provider) + ## + ## Also supports MariaDB + ## + mysql: + enabled: false + ## + ## PostgreSQL (Storage Provider) + ## + postgres: + enabled: false + + ## + ## Notification Provider + ## + ## + ## Notifications are sent to users when they require a password reset, a u2f registration or a TOTP registration. + ## The available providers are: filesystem, smtp. You must use one and only one of these providers. + notifier: + ## You can disable the notifier startup check by setting this to true. + disable_startup_check: false + + ## + ## File System (Notification Provider) + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/overview/authorization/statelessness/ + ## + filesystem: + enabled: true + filename: /config/notification.txt + + ## + ## SMTP (Notification Provider) + ## + ## Use a SMTP server for sending notifications. Authelia uses the PLAIN or LOGIN methods to authenticate. + ## [Security] By default Authelia will: + ## - force all SMTP connections over TLS including unauthenticated connections + ## - use the disable_require_tls boolean value to disable this requirement + ## (only works for unauthenticated connections) + ## - validate the SMTP server x509 certificate during the TLS handshake against the hosts trusted certificates + ## (configure in tls section) + smtp: + enabled: false + + identity_providers: + oidc: + ## Enables this in the config map. Currently in beta stage. + ## See https://www.authelia.com/r/openid-connect/ + enabled: false + + access_token_lifespan: 1h + authorize_code_lifespan: 1m + id_token_lifespan: 1h + refresh_token_lifespan: 90m + + ## Adjusts the PKCE enforcement. Options are always, public_clients_only, never. + ## For security reasons it's recommended this option is public_clients_only or always, however always is not + ## compatible with all clients. + enforce_pkce: public_clients_only + + ## Enables the plain PKCE challenge which is not recommended for security reasons but may be necessary for some clients. + enable_pkce_plain_challenge: false + + ## SECURITY NOTICE: It's not recommended changing this option, and highly discouraged to have it below 8 for + ## security reasons. + minimum_parameter_entropy: 8 + + ## Enables additional debug messages. + enable_client_debug_messages: false + + ## Cross-Origin Resource Sharing (CORS) settings. + cors: + ## List of endpoints in addition to the metadata endpoints to permit cross-origin requests on. + # endpoints: + # - authorization + # - token + # - revocation + # - introspection + # - userinfo + endpoints: [] + + ## List of allowed origins. + ## Any origin with https is permitted unless this option is configured or the + ## allowed_origins_from_client_redirect_uris option is enabled. + # allowed_origins: + # - https://example.com + allowed_origins: [] + + ## Automatically adds the origin portion of all redirect URI's on all clients to the list of allowed_origins, + ## provided they have the scheme http or https and do not have the hostname of localhost. 
+ allowed_origins_from_client_redirect_uris: true + + clients: [] + # clients: + # - + ## The ID is the OpenID Connect ClientID which is used to link an application to a configuration. + # id: myapp + + ## The description to show to users when they end up on the consent screen. Defaults to the ID above. + # description: My Application + + ## The client secret is a shared secret between Authelia and the consumer of this client. + # secret: apple123 + + ## Sector Identifiers are occasionally used to generate pairwise subject identifiers. In most cases this is not + ## necessary. Read the documentation for more information. + ## The subject identifier must be the host component of a URL, which is a domain name with an optional port. + # sector_identifier: example.com + + ## Sets the client to public. This should typically not be set, please see the documentation for usage. + # public: false + + ## The policy to require for this client; one_factor or two_factor. + # authorization_policy: two_factor + + ## By default users cannot remember pre-configured consents. Setting this value to a period of time using a + ## duration notation will enable users to remember consent for this client. The time configured is the amount + ## of time the pre-configured consent is valid for granting new authorizations to the user. + # pre_configured_consent_duration: 30d + + ## Audience this client is allowed to request. + # audience: [] + + ## Scopes this client is allowed to request. + # scopes: + # - openid + # - profile + # - email + # - groups + + ## Redirect URI's specifies a list of valid case-sensitive callbacks for this client. + # redirect_uris: + # - https://oidc.example.com/oauth2/callback + + ## Grant Types configures which grants this client can obtain. + ## It's not recommended to configure this unless you know what you're doing. + # grant_types: + # - refresh_token + # - authorization_code + + ## Response Types configures which responses this client can be sent. + ## It's not recommended to configure this unless you know what you're doing. + # response_types: + # - code + + ## Response Modes configures which response modes this client supports. + ## It's not recommended to configure this unless you know what you're doing. + # response_modes: + # - form_post + # - query + # - fragment + + ## The algorithm used to sign userinfo endpoint responses for this client, either none or RS256. + # userinfo_signing_algorithm: none + +## +## Authelia Secret Generator. +## +## If both the values and existingSecret are not defined, this chart randomly generates a new secret on each +## install. It is recommended that you use something like sealed-secrets (https://github.com/bitnami-labs/sealed-secrets) +## and use the existingSecrets. All secrets can be stored in a single k8s secret if desired using the key option. +## +secret: + existingSecret: "" + # existingSecret: authelia + + annotations: {} + # annotations: + # myAnnotation: myValue + + labels: {} + # labels: + # myLabel: myValue + + mountPath: /secrets + + excludeVolumeAndMounts: false + + ## Secrets. 
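+  ## (a sketch of the sealed-secrets flow recommended above, assuming the
+  ##  controller deployed in kube-system by this repo and a hypothetical secret
+  ##  named "authelia":
+  ##    kubectl create secret generic authelia \
+  ##      --from-literal=JWT_TOKEN=... --dry-run=client -o yaml \
+  ##      | kubeseal --controller-namespace kube-system -o yaml > sealed.yaml
+  ##  then reference it via existingSecret: "authelia" above)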
+  jwt:
+    key: JWT_TOKEN
+    value: ""
+    filename: JWT_TOKEN
+  ldap:
+    key: LDAP_PASSWORD
+    value: ""
+    filename: LDAP_PASSWORD
+  storage:
+    key: STORAGE_PASSWORD
+    value: ""
+    filename: STORAGE_PASSWORD
+  storageEncryptionKey:
+    key: STORAGE_ENCRYPTION_KEY
+    value: ""
+    filename: STORAGE_ENCRYPTION_KEY
+  session:
+    key: SESSION_ENCRYPTION_KEY
+    value: ""
+    filename: SESSION_ENCRYPTION_KEY
+  duo:
+    key: DUO_API_KEY
+    value: ""
+    filename: DUO_API_KEY
+  redis:
+    key: REDIS_PASSWORD
+    value: ""
+    filename: REDIS_PASSWORD
+  redisSentinel:
+    key: REDIS_SENTINEL_PASSWORD
+    value: ""
+    filename: REDIS_SENTINEL_PASSWORD
+  smtp:
+    key: SMTP_PASSWORD
+    value: ""
+    filename: SMTP_PASSWORD
+  oidcPrivateKey:
+    key: OIDC_PRIVATE_KEY
+    value: ""
+    filename: OIDC_PRIVATE_KEY
+  oidcHMACSecret:
+    key: OIDC_HMAC_SECRET
+    value: ""
+    filename: OIDC_HMAC_SECRET
+
+  ## HashiCorp Vault Injector configuration.
+  vaultInjector:
+
+    ## Enable the vault injector annotations. This will disable secret injection via other means.
+    ## To see the annotations and what they do, see: https://www.vaultproject.io/docs/platform/k8s/injector/annotations
+    ## Annotations with a blank string do not get configured at all.
+    ## Additional annotations can be configured via the secret.annotations: {} above.
+    ## Secrets are by default rendered in the /secrets directory. Changing this can be done via editing the
+    ## secret.mountPath value. You can alter the filenames with the secret.<secret>.filename values.
+    ## Secrets are loaded from the vault path specified below with the secrets.<secret>.path values. Its format should be
+    ## <path>:<key>.
+    ## Secrets are by default rendered by a template suitable for the vault KV v1 or database secrets engines. If another is
+    ## used, it can be overridden per secret by specifying secrets.<secret>.templateValue. For example, for the KV v2
+    ## secrets engine it would be '{{ with secret "<path>" }}{{ .Data.data.<key> }}{{ end }}'.
+    enabled: false
+
+    ## The vault role to assign via annotations.
+    ## Annotation: vault.hashicorp.com/role
+    role: authelia
+
+    agent:
+      ## Annotation: vault.hashicorp.com/agent-inject-status
+      status: update
+
+      ## Annotation: vault.hashicorp.com/agent-configmap
+      configMap: ""
+
+      ## Annotation: vault.hashicorp.com/agent-image
+      image: ""
+
+      ## Annotation: vault.hashicorp.com/agent-init-first
+      initFirst: "false"
+
+      ## Annotation: vault.hashicorp.com/agent-inject-command
+      command: "sh -c 'kill -HUP $(pidof authelia)'"
+
+      ## Annotation: vault.hashicorp.com/agent-run-as-same-user
+      runAsSameUser: "true"
+
+    secrets:
+      jwt:
+        ## Vault Path to the Authelia JWT secret.
+        ## Annotation: vault.hashicorp.com/agent-inject-secret-jwt
+        path: secrets/authelia/jwt:token
+
+        ## Vault template specific to JWT.
+        ## Annotation: vault.hashicorp.com/agent-inject-template-jwt
+        templateValue: ""
+
+        ## Vault after render command specific to JWT.
+        ## Annotation: vault.hashicorp.com/agent-inject-command-jwt
+        command: ""
+      ldap:
+        ## Vault Path to the Authelia LDAP secret.
+        ## Annotation: vault.hashicorp.com/agent-inject-secret-ldap
+        path: secrets/authelia/ldap:password
+
+        ## Vault template specific to LDAP.
+        ## Annotation: vault.hashicorp.com/agent-inject-template-ldap
+        templateValue: ""
+
+        ## Vault after render command specific to LDAP.
+        ## Annotation: vault.hashicorp.com/agent-inject-command-ldap
+        command: ""
+      storage:
+        ## Vault Path to the Authelia storage password secret.
+        ## Annotation: vault.hashicorp.com/agent-inject-secret-storage
+        path: secrets/authelia/storage:password
+
+        ## Vault template specific to the storage password.
+ ## Annotation: vault.hashicorp.com/agent-inject-template-storage + templateValue: "" + + ## Vault after render command specific to the storage password. + ## Annotation: vault.hashicorp.com/agent-inject-command-storage + command: "" + storageEncryptionKey: + ## Vault Path to the Authelia storage encryption key secret. + ## Annotation: vault.hashicorp.com/agent-inject-secret-storage-encryption-key + path: secrets/authelia/storage:encryption_key + + ## Vault template specific to the storage encryption key. + ## Annotation: vault.hashicorp.com/agent-inject-template-storage-encryption-key + templateValue: "" + + ## Vault after render command specific to the storage encryption key. + ## Annotation: vault.hashicorp.com/agent-inject-command-storage-encryption-key + command: "" + session: + ## Vault Path to the Authelia session secret. + ## Annotation: vault.hashicorp.com/agent-inject-secret-session + path: secrets/authelia/session:encryption_key + + ## Vault template specific to session. + ## Annotation: vault.hashicorp.com/agent-inject-template-session + templateValue: "" + + ## Vault after render command specific to session. + ## Annotation: vault.hashicorp.com/agent-inject-command-session + command: "" + duo: + ## Vault Path to the Authelia duo secret. + ## Annotation: vault.hashicorp.com/agent-inject-secret-duo + path: secrets/authelia/duo:api_key + + ## Vault template specific to duo. + ## Annotation: vault.hashicorp.com/agent-inject-template-duo + templateValue: "" + + ## Vault after render command specific to duo. + ## Annotation: vault.hashicorp.com/agent-inject-command-duo + command: "" + redis: + ## Vault Path to the Authelia redis secret. + ## Annotation: vault.hashicorp.com/agent-inject-secret-redis + path: secrets/authelia/redis:password + + ## Vault template specific to redis. + ## Annotation: vault.hashicorp.com/agent-inject-template-redis + templateValue: "" + + ## Vault after render command specific to redis. + ## Annotation: vault.hashicorp.com/agent-inject-command-redis + command: "" + redisSentinel: + ## Vault Path to the Authelia redis sentinel secret. + ## Annotation: vault.hashicorp.com/agent-inject-secret-redis-sentinel + path: secrets/authelia/redis_sentinel:password + + ## Vault template specific to redis sentinel. + ## Annotation: vault.hashicorp.com/agent-inject-template-redis-sentinel + templateValue: "" + + ## Vault after render command specific to redis sentinel. + ## Annotation: vault.hashicorp.com/agent-inject-command-redis-sentinel + command: "" + smtp: + ## Vault Path to the Authelia SMTP secret. + ## Annotation: vault.hashicorp.com/agent-inject-secret-smtp + path: secrets/authelia/smtp:password + + ## Vault template specific to SMTP. + ## Annotation: vault.hashicorp.com/agent-inject-template-smtp + templateValue: "" + + ## Vault after render command specific to SMTP. + ## Annotation: vault.hashicorp.com/agent-inject-command-smtp + command: "" + oidcPrivateKey: + ## Vault Path to the Authelia OIDC private key secret. + ## Annotation: vault.hashicorp.com/agent-inject-secret-oidc-private-key + path: secrets/authelia/oidc:private_key + + ## Vault template specific to OIDC private key. + ## Annotation: vault.hashicorp.com/agent-inject-template-oidc-private-key + templateValue: "" + + ## Vault after render command specific to OIDC private key. + ## Annotation: vault.hashicorp.com/agent-inject-command-oidc-private-key + command: "" + oidcHMACSecret: + ## Vault Path to the Authelia OIDC HMAC secret. 
+ ## Annotation: vault.hashicorp.com/agent-inject-secret-oidc-hmac-secret + path: secrets/authelia/oidc:hmac_secret + + ## Vault template specific to OIDC HMAC secret. + ## Annotation: vault.hashicorp.com/agent-inject-template-oidc-hmac-secret + templateValue: "" + + ## Vault after render command specific to OIDC HMAC secret. + ## Annotation: vault.hashicorp.com/agent-inject-command-oidc-hmac-secret + command: "" + +certificates: + existingSecret: "" + # existingSecret: authelia + + annotations: {} + # annotations: + # myAnnotation: myValue + + labels: {} + # labels: + # myLabel: myValue + + values: [] + # values: + # - name: Example_Com_Root_Certificate_Authority_B64.pem + # secretValue: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURYekNDQWtlZ0F3SUJBZ0lMQkFBQUFBQUJJVmhUQ0tJd0RRWUpLb1pJaHZjTkFRRUxCUUF3VERFZ01CNEcKQTFVRUN4TVhSMnh2WW1Gc1UybG5iaUJTYjI5MElFTkJJQzBnVWpNeEV6QVJCZ05WQkFvVENrZHNiMkpoYkZOcApaMjR4RXpBUkJnTlZCQU1UQ2tkc2IySmhiRk5wWjI0d0hoY05NRGt3TXpFNE1UQXdNREF3V2hjTk1qa3dNekU0Ck1UQXdNREF3V2pCTU1TQXdIZ1lEVlFRTEV4ZEhiRzlpWVd4VGFXZHVJRkp2YjNRZ1EwRWdMU0JTTXpFVE1CRUcKQTFVRUNoTUtSMnh2WW1Gc1UybG5iakVUTUJFR0ExVUVBeE1LUjJ4dlltRnNVMmxuYmpDQ0FTSXdEUVlKS29aSQpodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU13bGRwQjVCbmdpRnZYQWc3YUV5aWllL1FWMkVjV3RpSEw4ClJnSkR4N0tLblFSZkpNc3VTK0ZnZ2tiaFVxc01nVWR3Yk4xazBldjFMS01QZ2owTUs2NlgxN1lVaGhCNXV6c1QKZ0hlTUNPRkowbXBpTHg5ZStwWm8zNGtubFRpZkJ0Yyt5Y3NtV1ExejNyREk2U1lPZ3hYRzcxdUwwZ1JneWttbQpLUFpwTy9iTHlDaVI1WjJLWVZjM3JIUVUzSFRnT3U1eUx5NmMrOUM3di9VOUFPRUdNK2lDSzY1VHBqb1djNHpkClFRNGdPc0MwcDZIcHNrK1FMakpnNlZmTHVRU1NhR2psT0NaZ2RiS2ZkLytSRk8rdUlFbjhyVUFWU05FQ01XRVoKWHJpWDc2MTN0MlNhZXI5ZndSUHZtMkw3RFd6Z1ZHa1dxUVBhYnVtRGszRjJ4bW1GZ2hjQ0F3RUFBYU5DTUVBdwpEZ1lEVlIwUEFRSC9CQVFEQWdFR01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZJL3dTMytvCkxrVWtyazFRK21PYWk5N2kzUnU4TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCTFFOdkFVS3IreUF6djk1WlUKUlVtN2xnQUpRYXl6RTRhR0tBY3p5bXZtZExtNkFDMnVwQXJUOWZIeEQ0cS9jMmRLZzhkRWUzamdyMjVzYndNcApqak01UmNPTzVMbFhiS3I4RXBic1U4WXQ1Q1JzdVpSais5eFRhR2RXUG9PNHp6VWh3OGxvL3M3YXdsT3F6SkNLCjZmQmRSb3lWM1hwWUtCb3ZIZDdOQURkQmorMUViZGRUS0pkKzgyY0VIaFhYaXBhMDA5NU1KNlJNRzNOemR2UVgKbWNJZmVnN2pMUWl0Q2h3cy96eXJWUTRQa1g0MjY4TlhTYjdoTGkxOFlJdkRRVkVUSTUzTzl6SnJsQUdvbWVjcwpNeDg2T3lYU2hrRE9PeXlHZU1saEx4UzY3dHRWYjkrRTdnVUpUYjBvMkhMTzAySlFaUjdya3BlRE1kbXp0Y3BICldEOWYKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + # - name: Example_Com_Root_Certificate_Authority.pem + # value: | + # -----BEGIN CERTIFICATE----- + # MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G + # A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp + # Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 + # MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG + # A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI + # hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 + # RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT + # gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm + # KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd + # QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ + # XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw + # DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o + # LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU + # RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp + # jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK + # 6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX + # 
mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+ # Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+ # WD9f
+ # -----END CERTIFICATE-----
+
+##
+## Authelia Persistence Configuration.
+##
+## Useful in scenarios where you need persistent storage.
+## Auth Provider Use Case: file; we recommend you use the ldap provider instead.
+## Storage Provider Use Case: local; we recommend you use the mysql/mariadb or postgres provider instead.
+## Configuration Use Case: when you want to manage the configuration entirely yourself (set configMap.enabled = false).
+##
+persistence:
+ enabled: true
+
+ annotations: {}
+ # annotations:
+ readOnly: false
+
+ existingClaim: "authelia-config-nfs"
+ # existingClaim: my-claim-name
+
+ storageClass: ""
+ # storageClass: "my-storage-class"
+
+ accessModes:
+ - ReadWriteOnce
+
diff --git a/unused/authentik/ingress.yml b/unused/authentik/ingress.yml new file mode 100644 index 0000000..decab01 --- /dev/null +++ b/unused/authentik/ingress.yml @@ -0,0 +1,34 @@
+
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+ name: authentik-ingress
+ namespace: authentik
+spec:
+ entryPoints:
+ - websecure
+ routes:
+ - match: Host(`authentik.kluster.moll.re`)
+ kind: Rule
+ middlewares:
+ - name: authentik-websocket
+ services:
+ - name: authentik
+ port: 80
+ tls:
+ certResolver: default-tls
+
+
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: Middleware
+metadata:
+ name: authentik-websocket
+ namespace: authentik
+spec:
+ headers:
+ customRequestHeaders:
+ X-Forwarded-Proto: "https"
+ Upgrade: "websocket"
+
+
diff --git a/unused/authentik/pvc.yaml b/unused/authentik/pvc.yaml new file mode 100644 index 0000000..e867991 --- /dev/null +++ b/unused/authentik/pvc.yaml @@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ namespace: authentik
+ name: authentik-postgres-nfs
+ labels:
+ directory: authentik
+spec:
+ storageClassName: slow
+ capacity:
+ storage: "5Gi"
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /export/kluster/authentik
+ server: 192.168.1.157
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ namespace: authentik
+ name: authentik-postgres-nfs
+spec:
+ storageClassName: slow
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "5Gi"
+ selector:
+ matchLabels:
+ directory: authentik
+
+
+
diff --git a/unused/authentik/values.yaml b/unused/authentik/values.yaml new file mode 100644 index 0000000..90cf0f8 --- /dev/null +++ b/unused/authentik/values.yaml @@ -0,0 +1,172 @@
+# -- Server replicas
+replicas: 1
+# -- Custom priority class for different treatment by the scheduler
+priorityClassName:
+# -- server securityContext
+securityContext: {}
+
+worker:
+ # -- worker replicas
+ replicas: 1
+ # -- Custom priority class for different treatment by the scheduler
+ priorityClassName:
+ # -- worker securityContext
+ securityContext: {}
+
+image:
+ repository: ghcr.io/goauthentik/server
+ tag: 2023.4.1
+ pullPolicy: IfNotPresent
+ pullSecrets: []
+
+# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
+initContainers: {}
+
+# -- See https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common#values
+additionalContainers: {}
+
+authentik:
+ # -- Log level for server and worker
+ log_level: info
+ # -- Secret key used for cookie signing and unique user IDs,
+ # don't change this after the first install
+ secret_key: "K9F5uNx1gzsk3q5tnjwFabBYgjBJcAv0qM135QRgzL81hRg4"
+ # -- Path for
the geoip database. If the file doesn't exist, GeoIP features are disabled. + geoip: /geoip/GeoLite2-City.mmdb + # -- Mode for the avatars. Defaults to gravatar. Possible options 'gravatar' and 'none' + avatars: gravatar + + outposts: + # -- Template used for managed outposts. The following placeholders can be used + # %(type)s - the type of the outpost + # %(version)s - version of your authentik install + # %(build_hash)s - only for beta versions, the build hash of the image + container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s + error_reporting: + # -- This sends anonymous usage-data, stack traces on errors and + # performance data to sentry.beryju.org, and is fully opt-in + enabled: false + # -- This is a string that is sent to sentry with your error reports + environment: "k8s" + # -- Send PII (Personally identifiable information) data to sentry + send_pii: false + + + postgresql: + # -- set the postgresql hostname to talk to + # if unset and .Values.postgresql.enabled == true, will generate the default + # @default -- `{{ .Release.Name }}-postgresql` + host: 'postgres-postgresql.postgres' + # -- postgresql Database name + # @default -- `authentik` + name: "authentik" + # -- postgresql Username + # @default -- `authentik` + user: "authentik" + password: "authentik" + port: 5432 + + + redis: + # -- set the redis hostname to talk to + # @default -- `{{ .Release.Name }}-redis-master` + host: '{{ .Release.Name }}-redis-master' + password: "" + +# -- see configuration options at https://goauthentik.io/docs/installation/configuration/ +env: {} +# AUTHENTIK_VAR_NAME: VALUE + +envFrom: [] +# - configMapRef: +# name: special-config + +envValueFrom: {} +# AUTHENTIK_VAR_NAME: +# secretKeyRef: +# key: password +# name: my-secret + +service: + # -- Service that is created to access authentik + enabled: true + type: ClusterIP + port: 80 + name: http + protocol: TCP + labels: {} + annotations: {} + +volumes: [] + +volumeMounts: [] + +# -- affinity applied to the deployments +affinity: {} + +# -- nodeSelector applied to the deployments + +resources: + server: {} + worker: {} + +# WARNING! When initially deploying, authentik has to do a few DB migrations. This may cause it to die from probe +# failure, but will continue on reboot. 
You can disable this during deployment if this is not desired.
+livenessProbe:
+ # -- enables or disables the livenessProbe
+ enabled: true
+ httpGet:
+ # -- liveness probe url path
+ path: /-/health/live/
+ port: http
+ initialDelaySeconds: 50
+ periodSeconds: 10
+
+readinessProbe:
+ enabled: true
+ httpGet:
+ path: /-/health/ready/
+ port: http
+ initialDelaySeconds: 50
+ periodSeconds: 10
+
+serviceAccount:
+ # -- Service account is needed for managed outposts
+ create: true
+
+prometheus:
+ serviceMonitor:
+ create: false
+ interval: 30s
+ scrapeTimeout: 3s
+ rules:
+ create: false
+
+geoip:
+ # -- optional GeoIP, deploys a cronjob to download the maxmind database
+ enabled: false
+ # -- sign up under https://www.maxmind.com/en/geolite2/signup
+ accountId: ""
+ # -- sign up under https://www.maxmind.com/en/geolite2/signup
+ licenseKey: ""
+ editionIds: "GeoLite2-City"
+ image: maxmindinc/geoipupdate:v4.8
+ # -- number of hours between update runs
+ updateInterval: 8
+
+postgresql:
+ # -- enable the bundled bitnami postgresql chart
+ enabled: false
+ postgresqlUsername: "authentik"
+ postgresqlPassword: "authentik"
+ postgresqlDatabase: "authentik"
+ # persistence:
+ # enabled: true
+ # existingClaim: authentik-postgres-nfs
+redis:
+ # -- enable the bundled bitnami redis chart
+ enabled: true
+ architecture: standalone
+ auth:
+ enabled: false
+
diff --git a/unused/backup/backup.pvc.yaml b/unused/backup/backup.pvc.yaml new file mode 100644 index 0000000..d0f70b4 --- /dev/null +++ b/unused/backup/backup.pvc.yaml @@ -0,0 +1,34 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ namespace: backup
+ name: backup-nfs-access
+ labels:
+ directory: backup
+spec:
+ storageClassName: fast
+ volumeMode: Filesystem
+ accessModes:
+ - ReadOnlyMany
+ capacity:
+ storage: "5M"
+
+ nfs:
+ path: /export/kluster
+ server: 192.168.1.157
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ namespace: backup
+ name: backup-nfs-access
+spec:
+ resources:
+ requests:
+ storage: "5M"
+ storageClassName: fast
+ accessModes:
+ - ReadOnlyMany
+ selector:
+ matchLabels:
+ directory: backup
diff --git a/unused/backup/base/cronjob.yaml b/unused/backup/base/cronjob.yaml new file mode 100644 index 0000000..998f246 --- /dev/null +++ b/unused/backup/base/cronjob.yaml @@ -0,0 +1,64 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: restic-backblaze
+
+spec:
+ schedule: "0 2 * * *"
+ # at 2:00 every day
+ successfulJobsHistoryLimit: 2
+ failedJobsHistoryLimit: 2
+
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ # nodeSelector:
+ # kubernetes.io/arch: arm64
+ # TODO no arm64 nodes anymore
+ restartPolicy: Never
+ hostname: restic-k3s-pod
+ # used by restic to identify the host
+ containers:
+ - name: restic-base-container
+ image: restic/restic:latest
+ command:
+ - /bin/sh
+ - -c
+ # >- strips newlines
+ # RESTIC_ARGS can be for instance: --verbose --dry-run
+ args: []
+
+ volumeMounts:
+ - mountPath: /data
+ name: backup-nfs-access
+ - mountPath: /credentials
+ name: restic-credentials
+
+ env:
+ - name: RESTIC_REPOSITORY
+ valueFrom:
+ secretKeyRef:
+ name: restic-credentials
+ key: RESTIC_REPOSITORY
+ - name: B2_ACCOUNT_ID
+ valueFrom:
+ secretKeyRef:
+ name: restic-credentials
+ key: B2_ACCOUNT_ID
+ - name: B2_ACCOUNT_KEY
+ valueFrom:
+ secretKeyRef:
+ name: restic-credentials
+ key: B2_ACCOUNT_KEY
+ - name: RESTIC_PASSWORD_FILE
+ value: /credentials/restic-password
+
+ volumes:
+ - name: backup-nfs-access
+ persistentVolumeClaim:
+ claimName: backup-nfs-access
+ - name: restic-credentials
+ secret:
+ secretName: restic-credentials
+ optional: false
\ No newline at end of file
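The base kustomization that follows references a restic-credentials.secret.yaml which is not included in this diff. For orientation, here is a sketch of the shape the CronJob above expects; the secret and key names are taken from the env and volume wiring above, and every value is a placeholder:

```yaml
# Sketch, not the real credentials: the CronJob reads the first three keys via
# secretKeyRef and mounts the whole secret at /credentials, so the
# restic-password key becomes the file /credentials/restic-password.
apiVersion: v1
kind: Secret
metadata:
  name: restic-credentials
  namespace: backup
stringData:
  RESTIC_REPOSITORY: "b2:some-bucket:some-path"   # placeholder
  B2_ACCOUNT_ID: "000000000000"                   # placeholder
  B2_ACCOUNT_KEY: "application-key"               # placeholder
  restic-password: "long-random-password"         # placeholder
```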
diff --git a/unused/backup/base/kustomization.yaml b/unused/backup/base/kustomization.yaml new file mode 100644 index 0000000..b7c59b8 --- /dev/null +++ b/unused/backup/base/kustomization.yaml @@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ./cronjob.yaml
+- ./restic-credentials.secret.yaml
\ No newline at end of file
diff --git a/unused/backup/overlays/applying.md b/unused/backup/overlays/applying.md new file mode 100644 index 0000000..e5c20c3 --- /dev/null +++ b/unused/backup/overlays/applying.md @@ -0,0 +1,8 @@
+```
+k kustomize backup/overlays/backup | k apply -f -
+> secret/restic-credentials-backup created
+> cronjob.batch/restic-backblaze-backup created
+k kustomize backup/overlays/prune | k apply -f -
+> secret/restic-credentials-prune created
+> cronjob.batch/restic-backblaze-prune created
+```
\ No newline at end of file
diff --git a/unused/backup/overlays/backup/kustomization.yaml b/unused/backup/overlays/backup/kustomization.yaml new file mode 100644 index 0000000..903a955 --- /dev/null +++ b/unused/backup/overlays/backup/kustomization.yaml @@ -0,0 +1,16 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namespace: backup
+nameSuffix: -backup
+resources:
+ - ../../base
+ # - ./restic-commands.yaml
+
+
+# patch the cronjob args field:
+patches:
+ - path: ./restic-commands.yaml
+ target:
+ kind: CronJob
+
\ No newline at end of file
diff --git a/unused/backup/overlays/backup/restic-commands.yaml b/unused/backup/overlays/backup/restic-commands.yaml new file mode 100644 index 0000000..6b895ed --- /dev/null +++ b/unused/backup/overlays/backup/restic-commands.yaml @@ -0,0 +1,26 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: backup-patch
+spec:
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: restic-base-container
+ args:
+ # >- strips newlines
+ # RESTIC_ARGS can be for instance: --verbose --dry-run
+ # RESTIC_REPOSITORY is set in the secret
+ - >-
+ restic backup
+ -r $(RESTIC_REPOSITORY)
+ --verbose=2
+ /data
+ --exclude=s3/
+ # &&
+ # restic
+ # -r $(RESTIC_REPOSITORY)
+ # list snapshots
+ # Add a command to copy existing backups here!
\ No newline at end of file
diff --git a/unused/backup/overlays/prune/kustomization.yaml b/unused/backup/overlays/prune/kustomization.yaml new file mode 100644 index 0000000..1713faf --- /dev/null +++ b/unused/backup/overlays/prune/kustomization.yaml @@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namespace: backup
+nameSuffix: -prune
+resources:
+ - ../../base
+ # - ./restic-commands.yaml
+
+
+# patch the cronjob args field:
+patches:
+ - path: ./restic-commands.yaml
+ target:
+ kind: CronJob
diff --git a/unused/backup/overlays/prune/restic-commands.yaml b/unused/backup/overlays/prune/restic-commands.yaml new file mode 100644 index 0000000..8a085bd --- /dev/null +++ b/unused/backup/overlays/prune/restic-commands.yaml @@ -0,0 +1,23 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: prune-patch
+spec:
+ schedule: "0 0 1/15 * *"
+ # at midnight on days 1, 16 and 31 of every month
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: restic-base-container
+ args:
+ # >- strips newlines
+ # RESTIC_ARGS can be for instance: --verbose --dry-run
+ # RESTIC_REPOSITORY is set in the secret
+ - >-
+ restic forget
+ -r $(RESTIC_REPOSITORY)
+ --verbose=2
+ --keep-daily 7 --keep-weekly 5
+ --prune
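The backup and prune overlays patch only the args field of the shared base CronJob, so further maintenance tasks slot into the same mechanism. A hypothetical sketch (no such overlay exists in this repo) using restic's `check --read-data-subset` to spot-check the repository:

```yaml
# Hypothetical overlays/check/restic-commands.yaml, mirroring the two patches above
apiVersion: batch/v1
kind: CronJob
metadata:
  name: check-patch
spec:
  schedule: "0 4 * * 0"   # weekly, Sunday at 4:00
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: restic-base-container
              args:
                # verify the repository structure and read 5% of the pack data
                - >-
                  restic check
                  -r $(RESTIC_REPOSITORY)
                  --read-data-subset=5%
```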
diff --git a/unused/cert-manager/cluster-issuer.yaml b/unused/cert-manager/cluster-issuer.yaml new file mode 100644 index 0000000..e472462 --- /dev/null +++ b/unused/cert-manager/cluster-issuer.yaml @@ -0,0 +1,54 @@
+# apiVersion: v1
+# kind: Secret
+# metadata:
+# name: cloudflare-api-token-secret
+# namespace: cert-manager
+# type: Opaque
+# stringData:
+# api-token:
+
+
+# ---
+
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: cloudflare-letsencrypt-staging
+spec:
+ acme:
+ email: me@moll.re
+ server: https://acme-staging-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ # Secret resource that will be used to store the account's private key.
+ name: cloudflare-letsencrypt-issuer-account-key
+ solvers:
+ - dns01:
+ cloudflare:
+ email: mollator2@gmail.com
+ apiTokenSecretRef:
+ # Name of the secret created in the other resource
+ name: cloudflare-api-token-secret
+ key: api-token
+
+---
+
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: cloudflare-letsencrypt-prod
+spec:
+ acme:
+ email: me@moll.re
+ server: https://acme-v02.api.letsencrypt.org/directory
+ privateKeySecretRef:
+ # Secret resource that will be used to store the account's private key.
+ name: cloudflare-letsencrypt-issuer-account-key
+ solvers:
+ - dns01:
+ cloudflare:
+ email: mollator2@gmail.com
+ apiTokenSecretRef:
+ # Name of the secret created in the other resource
+ name: cloudflare-api-token-secret
+ key: api-token
+
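Once these issuers exist, any workload can request a signed certificate by referencing them from a Certificate resource; cert-manager solves the dns01 challenge via Cloudflare and writes the keypair into the named secret. A minimal sketch (name, namespace and host are illustrative):

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-tls            # illustrative
  namespace: default
spec:
  secretName: example-tls      # secret that will hold the signed keypair
  issuerRef:
    name: cloudflare-letsencrypt-prod
    kind: ClusterIssuer
  dnsNames:
    - example.kluster.moll.re  # illustrative host
```

For a first attempt, pointing issuerRef at cloudflare-letsencrypt-staging avoids Let's Encrypt's production rate limits.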
diff --git a/unused/cert-manager/values.yaml b/unused/cert-manager/values.yaml new file mode 100644 index 0000000..06e2425 --- /dev/null +++ b/unused/cert-manager/values.yaml @@ -0,0 +1,494 @@
+# Default values for cert-manager.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+global:
+ ## Reference to one or more secrets to be used when pulling images
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ imagePullSecrets: []
+ # - name: "image-pull-secret"
+
+ # Optional priority class to be used for the cert-manager pods
+ priorityClassName: ""
+ rbac:
+ create: true
+
+ podSecurityPolicy:
+ enabled: false
+ useAppArmor: true
+
+ # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
+ logLevel: 2
+
+ leaderElection:
+ # Override the namespace used to store the ConfigMap for leader election
+ namespace: "kube-system"
+
+ # The duration that non-leader candidates will wait after observing a
+ # leadership renewal until attempting to acquire leadership of a led but
+ # unrenewed leader slot. This is effectively the maximum duration that a
+ # leader can be stopped before it is replaced by another candidate.
+ # leaseDuration: 60s
+
+ # The interval between attempts by the acting master to renew a leadership
+ # slot before it stops leading. This must be less than or equal to the
+ # lease duration.
+ # renewDeadline: 40s
+
+ # The duration the clients should wait between attempting acquisition and
+ # renewal of a leadership.
+ # retryPeriod: 15s
+
+installCRDs: false
+
+replicaCount: 1
+
+strategy: {}
+ # type: RollingUpdate
+ # rollingUpdate:
+ # maxSurge: 0
+ # maxUnavailable: 1
+
+# Comma separated list of feature gates that should be enabled on the
+# controller pod.
+featureGates: ""
+
+image:
+ repository: quay.io/jetstack/cert-manager-controller
+ # You can manage a registry with
+ # registry: quay.io
+ # repository: jetstack/cert-manager-controller
+
+ # Override the image tag to deploy by setting this variable.
+ # If no value is set, the chart's appVersion will be used.
+ # tag: canary
+
+ # Setting a digest will override any tag
+ # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
+ pullPolicy: IfNotPresent
+
+# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
+# resources. By default, the same namespace as cert-manager is deployed within is
+# used. This namespace will not be automatically created by the Helm chart.
+clusterResourceNamespace: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ # name: ""
+ # Optional additional annotations to add to the controller's ServiceAccount
+ # annotations: {}
+ # Automount API credentials for a Service Account.
+ automountServiceAccountToken: true
+
+# Optional additional arguments
+extraArgs: []
+ # Use this flag to set a namespace that cert-manager will use to store
+ # supporting resources required for each ClusterIssuer (default is kube-system)
+ # - --cluster-resource-namespace=kube-system
+ # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
+ # - --enable-certificate-owner-ref=true
+ # Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver
+ # - --controllers=*,-certificaterequests-approver
+
+extraEnv: []
+# - name: SOME_VAR
+# value: 'some value'
+
+resources: {}
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+# Pod Security Context
+# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+securityContext:
+ runAsNonRoot: true
+# legacy securityContext parameter format: if enabled is set to true, only fsGroup and runAsUser are supported
+# securityContext:
+# enabled: false
+# fsGroup: 1001
+# runAsUser: 1001
+# to support additional securityContext parameters, omit the `enabled` parameter and simply specify the parameters
+# you want to set, e.g.
+# securityContext:
+# fsGroup: 1000
+# runAsUser: 1000
+# runAsNonRoot: true
+
+# Container Security Context to be set on the controller component container
+# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+containerSecurityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+
+
+volumes: []
+
+volumeMounts: []
+
+# Optional additional annotations to add to the controller Deployment
+# deploymentAnnotations: {}
+
+# Optional additional annotations to add to the controller Pods
+# podAnnotations: {}
+
+podLabels: {}
+
+# Optional additional labels to add to the controller Service
+# serviceLabels: {}
+
+# Optional additional annotations to add to the controller service
+# serviceAnnotations: {}
+
+# Optional DNS settings, useful if you have a public and private DNS zone for
+# the same domain on Route 53.
What follows is an example of ensuring +# cert-manager can access an ingress or DNS TXT records at all times. +# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for +# the cluster to work. +# podDnsPolicy: "None" +# podDnsConfig: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +nodeSelector: {} + +ingressShim: {} + # defaultIssuerName: "" + # defaultIssuerKind: "" + # defaultIssuerGroup: "" + +prometheus: + enabled: true + servicemonitor: + enabled: false + prometheusInstance: default + targetPort: 9402 + path: /metrics + interval: 60s + scrapeTimeout: 30s + labels: {} + +# Use these variables to configure the HTTP_PROXY environment variables +# http_proxy: "http://proxy:8080" +# https_proxy: "https://proxy:8080" +# no_proxy: 127.0.0.1,localhost + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +webhook: + replicaCount: 1 + timeoutSeconds: 10 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the webhook component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + + # Container Security Context to be set on the webhook component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + # Optional additional annotations to add to the webhook Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the webhook Pods + # podAnnotations: {} + + # Optional additional annotations to add to the webhook MutatingWebhookConfiguration + # mutatingWebhookConfigurationAnnotations: {} + + # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration + # validatingWebhookConfigurationAnnotations: {} + + # Optional additional annotations to add to the webhook service + # serviceAnnotations: {} + + # Optional additional arguments for webhook + extraArgs: [] + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + + nodeSelector: {} + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the Webhook Pods + podLabels: {} + + # Optional additional labels to add to the Webhook Service + serviceLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-webhook + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-webhook + + # Override the image tag to deploy by setting 
this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + + # The port that the webhook should listen on for requests. + # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000 + securePort: 10250 + + # Specifies if the webhook should be started in hostNetwork mode. + # + # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom + # CNI (such as calico), because control-plane managed by AWS cannot communicate + # with pods' IP CIDR and admission webhooks are not working + # + # Since the default port for the webhook conflicts with kubelet on the host + # network, `webhook.securePort` should be changed to an available port if + # running in hostNetwork mode. + hostNetwork: false + + # Specifies how the service should be handled. Useful if you want to expose the + # webhook to outside of the cluster. In some cases, the control plane cannot + # reach internal services. + serviceType: ClusterIP + # loadBalancerIP: + + # Overrides the mutating webhook and validating webhook so they reach the webhook + # service using the `url` field instead of a service. + url: {} + # host: + +cainjector: + enabled: true + replicaCount: 1 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the cainjector component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + + # Container Security Context to be set on the cainjector component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + + # Optional additional annotations to add to the cainjector Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the cainjector Pods + # podAnnotations: {} + + # Optional additional arguments for cainjector + extraArgs: [] + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: {} + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the CA Injector Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-cainjector + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-cainjector + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. 
+ # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + +# This startupapicheck is a Helm post-install hook that waits for the webhook +# endpoints to become available. +# The check is implemented using a Kubernetes Job- if you are injecting mesh +# sidecar proxies into cert-manager pods, you probably want to ensure that they +# are not injected into this Job's pod. Otherwise the installation may time out +# due to the Job never being completed because the sidecar proxy does not exit. +# See https://github.com/jetstack/cert-manager/pull/4414 for context. +startupapicheck: + enabled: true + + # Pod Security Context to be set on the startupapicheck component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + + # Timeout for 'kubectl check api' command + timeout: 1m + + # Job backoffLimit + backoffLimit: 4 + + # Optional additional annotations to add to the startupapicheck Job + jobAnnotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "1" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Optional additional annotations to add to the startupapicheck Pods + # podAnnotations: {} + + # Optional additional arguments for startupapicheck + extraArgs: [] + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: {} + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the startupapicheck Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-ctl + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-ctl + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + rbac: + # annotations for the startup API Check job RBAC and PSP resources + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + serviceAccount: + # Specifies whether a service account should be created + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + + # Optional additional annotations to add to the Job's ServiceAccount + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automount API credentials for a Service Account. 
+ automountServiceAccountToken: true
+
diff --git a/unused/crowdsec.ingress.yaml b/unused/crowdsec.ingress.yaml new file mode 100644 index 0000000..099d312 --- /dev/null +++ b/unused/crowdsec.ingress.yaml @@ -0,0 +1,26 @@
+kind: Ingress
+apiVersion: networking.k8s.io/v1
+metadata:
+ namespace: crowdsec
+ name: crowdsec-ingress
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
+
+
+spec:
+ tls:
+ - hosts:
+ - crowdsec.kluster.moll.re
+ secretName: cloudflare-letsencrypt-issuer-account-key
+ rules:
+ - host: crowdsec.kluster.moll.re
+ http:
+ paths:
+ - backend:
+ service:
+ name: crowdsec-service
+ port:
+ number: 3000
+ path: /
+ pathType: Prefix
diff --git a/unused/crowdsec.nginx-bouncer.yaml b/unused/crowdsec.nginx-bouncer.yaml new file mode 100644 index 0000000..14c6aab --- /dev/null +++ b/unused/crowdsec.nginx-bouncer.yaml @@ -0,0 +1,30 @@
+controller:
+ extraVolumes:
+ - name: crowdsec-bouncer-plugin
+ emptyDir: {}
+ extraInitContainers:
+ - name: init-clone-crowdsec-bouncer
+ image: crowdsecurity/lua-bouncer-plugin
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: API_URL
+ value: "http://crowdsec-service.crowdsec.svc.cluster.local:8080" # crowdsec lapi service-name
+ - name: API_KEY
+ value: "6cc4c975f123f4f24174e2d544e81282" # generated with `cscli bouncers add -n <bouncer-name>`
+ - name: BOUNCER_CONFIG
+ value: "/crowdsec/crowdsec-bouncer.conf"
+ - name: BAN_TEMPLATE_PATH
+ value: /etc/nginx/lua/plugins/crowdsec/templates/ban.html
+ - name: CAPTCHA_TEMPLATE_PATH
+ value: /etc/nginx/lua/plugins/crowdsec/templates/captcha.html
+ command: ['sh', '-c', "sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/"]
+ volumeMounts:
+ - name: crowdsec-bouncer-plugin
+ mountPath: /lua_plugins
+ extraVolumeMounts:
+ - name: crowdsec-bouncer-plugin
+ mountPath: /etc/nginx/lua/plugins/crowdsec
+ subPath: crowdsec
+ config:
+ plugins: "crowdsec"
+ lua-shared-dicts: "crowdsec_cache: 50m"
diff --git a/unused/crowdsec.values.yaml b/unused/crowdsec.values.yaml new file mode 100644 index 0000000..f619088 --- /dev/null +++ b/unused/crowdsec.values.yaml @@ -0,0 +1,178 @@
+# Default values for crowdsec-chart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# -- for raw logs format: json or cri (docker|containerd)
+container_runtime: containerd
+
+image:
+ # -- docker image repository name
+ repository: crowdsecurity/crowdsec
+ # -- pullPolicy
+ pullPolicy: IfNotPresent
+ # -- docker image tag
+ tag: ""
+
+# If you want to specify secrets that will be used for all your crowdsec-agents
+# secrets can be provided via env variables
+secrets:
+ # -- agent username (default is generated randomly)
+ username: ""
+ # -- agent password (default is generated randomly)
+ password: ""
+
+# lapi will deploy pod with crowdsec lapi and dashboard as deployment
+lapi:
+ # -- environment variables from crowdsecurity/crowdsec docker image
+ env: []
+ # by default, disable the agent because this pod runs only the local API.
+ #- name: DISABLE_AGENT + # value: "true" + dashboard: + # -- Enable Metabase Dashboard (by default disabled) + enabled: true + image: + # -- docker image repository name + repository: loancrate/metabase + # -- pullPolicy + pullPolicy: IfNotPresent + # -- docker image tag + tag: "latest" + # -- Metabase SQLite static DB containing Dashboards + assetURL: https://crowdsec-statics-assets.s3-eu-west-1.amazonaws.com/metabase_sqlite.zip + + # -- Enable ingress object + ingress: + enabled: false + annotations: + # metabase only supports http so we need this annotation + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + # labels: {} + ingressClassName: "nginx" + host: "" # metabase.example.com + # tls: {} + + resources: + limits: + memory: 100Mi + requests: + cpu: 150m + memory: 100Mi + # -- Enable persistent volumes + persistentVolume: + # -- Persistent volume for data folder. Stores e.g. registered bouncer api keys + data: + enabled: true + accessModes: + - ReadWriteOnce + storageClassName: "" + size: 1Gi + # -- Persistent volume for config folder. Stores e.g. online api credentials + config: + enabled: true + accessModes: + - ReadWriteOnce + storageClassName: "" + size: 100Mi + + # -- nodeSelector for lapi + nodeSelector: {} + # -- tolerations for lapi + tolerations: {} + + # -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus) + metrics: + enabled: false + # -- Creates a ServiceMonitor so Prometheus will monitor this service + # -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors + # -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape + # -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774 + serviceMonitor: + enabled: false + + strategy: + type: RollingUpdate + +# agent will deploy pod on every node as daemonSet to read wanted pods logs +agent: + acquisition: + # -- Specify each pod you want to process it logs (namespace, podName and program) + - namespace: kube-system + # -- to select pod logs to process + podName: nginx-nginx-ingress-* + # -- program name related to specific parser you will use (see https://hub.crowdsec.net/author/crowdsecurity/configurations/docker-logs) + program: nginx + resources: + limits: + memory: 100Mi + requests: + cpu: 150m + memory: 100Mi + # -- Enable persistent volumes + persistentVolume: + # -- Persistent volume for config folder. Stores local config (parsers, scenarios etc.) 
+ config:
+ enabled: true
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: ""
+ size: 100Mi
+ # -- environment variables from crowdsecurity/crowdsec docker image
+ env: []
+ # by default we use the docker-logs parser to be able to parse docker logs in k8s
+ # by default we disable the local API on the agent pod
+ # - name: SCENARIOS
+ # value: "scenario/name otherScenario/name"
+ # - name: PARSERS
+ # value: "parser/name otherParser/name"
+ # - name: POSTOVERFLOWS
+ # value: "postoverflow/name otherPostoverflow/name"
+ # - name: CONFIG_FILE
+ # value: "/etc/crowdsec/config.yaml"
+ # - name: DSN
+ # value: "file:///var/log/toto.log"
+ # - name: TYPE
+ # value: "Labels.type_for_time-machine_mode"
+ # - name: TEST_MODE
+ # value: "false"
+ # - name: TZ
+ # value: ""
+ # - name: DISABLE_AGENT
+ # value: "false"
+ # - name: DISABLE_ONLINE_API
+ # value: "false"
+ # - name: LEVEL_TRACE
+ # value: "false"
+ # - name: LEVEL_DEBUG
+ # value: "false"
+ # - name: LEVEL_INFO
+ # value: "false"
+
+ # -- nodeSelector for agent
+ nodeSelector: {}
+ # -- tolerations for agent
+ tolerations: {}
+
+ # -- Enable service monitoring (exposes "metrics" port "6060" for Prometheus)
+ metrics:
+ enabled: false
+ # -- Creates a ServiceMonitor so Prometheus will monitor this service
+ # -- Prometheus needs to be configured to watch on all namespaces for ServiceMonitors
+ # -- See the documentation: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
+ # -- See also: https://github.com/prometheus-community/helm-charts/issues/106#issuecomment-700847774
+ serviceMonitor:
+ enabled: false
+
+ # -- wait-for-lapi init container
+ wait_for_lapi:
+ image:
+ # -- docker image repository name
+ repository: busybox
+ # -- pullPolicy
+ pullPolicy: IfNotPresent
+ # -- docker image tag
+ tag: "1.28"
+
+#service: {}
+
+
diff --git a/unused/ddns/deployment.yaml b/unused/ddns/deployment.yaml new file mode 100644 index 0000000..81187c0 --- /dev/null +++ b/unused/ddns/deployment.yaml @@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ namespace: ddns
+ name: cloudflare-ddns
+spec:
+ selector:
+ matchLabels:
+ app: cloudflare-ddns
+
+ template:
+ metadata:
+ labels:
+ app: cloudflare-ddns
+
+ spec:
+ containers:
+ - name: cloudflare-ddns
+ image: timothyjmiller/cloudflare-ddns:latest
+ resources:
+ limits:
+ memory: "32Mi"
+ cpu: "50m"
+ env:
+ - name: CONFIG_PATH
+ value: "/etc/cloudflare-ddns/"
+ volumeMounts:
+ - mountPath: "/etc/cloudflare-ddns"
+ name: config-cloudflare-ddns
+ readOnly: true
+ volumes:
+ - name: config-cloudflare-ddns
+ secret:
+ secretName: config-cloudflare-ddns
diff --git a/unused/focalboard/ingress.yaml b/unused/focalboard/ingress.yaml new file mode 100644 index 0000000..c468165 --- /dev/null +++ b/unused/focalboard/ingress.yaml @@ -0,0 +1,32 @@
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+ name: focalboard-ingress
+ namespace: focalboard
+spec:
+ entryPoints:
+ - websecure
+ routes:
+ - match: Host(`focalboard.kluster.moll.re`)
+ middlewares:
+ - name: focalboard-websocket
+ kind: Rule
+ services:
+ - name: focalboard
+ port: 8000
+ tls:
+ certResolver: default-tls
+
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: Middleware
+metadata:
+ name: focalboard-websocket
+ namespace: focalboard
+spec:
+ headers:
+ customRequestHeaders:
+ X-Forwarded-Proto: "https"
+ Upgrade: "websocket"
+
+
diff --git a/unused/focalboard/pvc.yaml b/unused/focalboard/pvc.yaml new file mode 100644 index 0000000..64b0d62 --- 
/dev/null +++ b/unused/focalboard/pvc.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: focalboard + name: focalboard-nfs + labels: + directory: focalboard +spec: + storageClassName: fast + capacity: + storage: "5Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /focalboard + server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: focalboard + name: focalboard-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "5Gi" + selector: + matchLabels: + directory: focalboard + + + diff --git a/unused/focalboard/values.yaml b/unused/focalboard/values.yaml new file mode 100644 index 0000000..9a9727d --- /dev/null +++ b/unused/focalboard/values.yaml @@ -0,0 +1,63 @@ +# +# IMPORTANT NOTE +# +# This chart inherits from our common library chart. You can check the default values/options here: +# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml +# + +image: + # -- image repository + repository: flyskype2021/focalboard + # -- image pull policy + pullPolicy: IfNotPresent + # -- image tag + tag: latest + + +enableServiceLinks: false + +# -- environment variables. +# @default -- See below +env: {} + +# See the Administrator's Guide for config reference: https://www.focalboard.com/guide/admin/ +config: | + { + "serverRoot": "https://focalboard.kluster.moll.re", + "port": 8000, + "dbtype": "sqlite3", + "dbconfig": "/data/focalboard.db", + "postgres_dbconfig": "dbname=focalboard sslmode=disable", + "useSSL": false, + "webpath": "./pack", + "filespath": "/data/files", + "telemetry": false, + "session_expire_time": 2592000, + "session_refresh_time": 18000, + "localOnly": false, + "enableLocalMode": true, + "localModeSocketLocation": "/var/tmp/focalboard_local.socket" + } + + +# -- Configures service settings for the chart. +# @default -- See values.yaml +service: + main: + ports: + http: + port: 8000 + +ingress: + # -- Enable and configure ingress settings for the chart under this key. + # @default -- See values.yaml + main: + enabled: false + +# -- Configure persistence settings for the chart under this key. +# @default -- See values.yaml +persistence: + data: + enabled: true + existingClaim: focalboard-nfs + diff --git a/unused/freshrss/freshrss.values.yaml b/unused/freshrss/freshrss.values.yaml new file mode 100644 index 0000000..70fc9a7 --- /dev/null +++ b/unused/freshrss/freshrss.values.yaml @@ -0,0 +1,47 @@ +# +# IMPORTANT NOTE +# +# This chart inherits from our common library chart. You can check the default values/options here: +# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml +# + +image: + # -- image repository + repository: linuxserver/freshrss + # -- image pull policy + pullPolicy: IfNotPresent + # -- image tag + tag: version-1.18.1 + +# -- environment variables. See more environment variables in the [freshrss documentation](https://github.com/linuxserver/docker-freshrss#parameters). +# @default -- See below +env: + # -- Set the container timezone + TZ: "Europe/Berlin" + # -- Set the container user id + PUID: "1001" + # -- Set the container group id + PGID: "1001" + +# -- Configures service settings for the chart. +# @default -- See values.yaml +service: + main: + ports: + http: + port: 80 + +ingress: + # -- Enable and configure ingress settings for the chart under this key. 
+ # @default -- See values.yaml
+ main:
+ enabled: false
+
+# -- Configure persistence settings for the chart under this key.
+# @default -- See values.yaml
+persistence:
+ config:
+ enabled: true
+ useExisting: true
+ name: freshrss-nfs
+
diff --git a/unused/freshrss/ingress.yaml b/unused/freshrss/ingress.yaml new file mode 100644 index 0000000..88f3394 --- /dev/null +++ b/unused/freshrss/ingress.yaml @@ -0,0 +1,24 @@
+kind: Ingress
+apiVersion: networking.k8s.io/v1
+metadata:
+ namespace: freshrss
+ name: freshrss-ingress
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
+spec:
+ tls:
+ - hosts:
+ - rss.kluster.moll.re
+ secretName: cloudflare-letsencrypt-issuer-account-key
+ rules:
+ - host: rss.kluster.moll.re
+ http:
+ paths:
+ - backend:
+ service:
+ name: freshrss
+ port:
+ number: 80
+ path: /
+ pathType: Prefix
\ No newline at end of file
diff --git a/unused/freshrss/pvc.yaml b/unused/freshrss/pvc.yaml new file mode 100644 index 0000000..ed00d8f --- /dev/null +++ b/unused/freshrss/pvc.yaml @@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ namespace: freshrss
+ name: freshrss-nfs
+ labels:
+ directory: freshrss
+spec:
+ storageClassName: slow
+ capacity:
+ storage: "1Gi"
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /freshrss
+ server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ namespace: freshrss
+ name: freshrss-nfs
+spec:
+ storageClassName: slow
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "1Gi"
+ selector:
+ matchLabels:
+ directory: freshrss
+
+
+
diff --git a/unused/grsync.cronjob.yaml b/unused/grsync.cronjob.yaml new file mode 100644 index 0000000..7ca482f --- /dev/null +++ b/unused/grsync.cronjob.yaml @@ -0,0 +1,54 @@
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: gcloud-backup
+ namespace: backup
+
+spec:
+ schedule: "0 2 15 * *"
+ # at 2:00 on the 15th of every month
+ successfulJobsHistoryLimit: 2
+ failedJobsHistoryLimit: 2
+
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: gcloud-backup
+ image: shirakiya/gcloud-sdk:latest
+ command: ["/bin/bash", "-c", "--"]
+ args:
+ - |
+ ln -s /config/.boto /root/.boto &&
+ gsutil -m rsync -x "^(jellyfin|config|webtop|other root folder)/.*$" -U -r -e -d /data gs://kluster-backup
+ # command:
+ # -m multithreaded
+ # -U skip unsupported objects
+ # -e don't follow symlinks
+ # -r recursively follow folder structure
+ # -d deletes files from dst if they are not in src anymore
+ # -n dry runs
+ # This command relies on the gs-bucket having object versioning enabled. Rsync therefore serves as an incremental backup whose individual stages can be recovered
+ volumeMounts:
+ - mountPath: /data
+ name: backup-nfs-access
+ - mountPath: /config
+ name: gcloud-credentials
+ # entry .boto in the secret is mounted as /root/.boto
+
+ volumes:
+ - name: backup-nfs-access
+ persistentVolumeClaim:
+ claimName: backup-nfs-access
+ - name: gcloud-credentials
+ secret:
+ secretName: gcloud-credentials
+
+
+
+
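Because the `-d` flag deletes remote objects that no longer exist locally, it is worth exercising the `-n` (dry run) flag documented in the comments above before changing the exclude pattern. A sketch of a one-off Job reusing the same image, credentials and mounts:

```yaml
# Hypothetical one-off dry run; nothing is copied or deleted thanks to -n
apiVersion: batch/v1
kind: Job
metadata:
  name: gcloud-backup-dryrun
  namespace: backup
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: gcloud-backup
          image: shirakiya/gcloud-sdk:latest
          command: ["/bin/bash", "-c", "--"]
          args:
            - |
              ln -s /config/.boto /root/.boto &&
              gsutil -m rsync -n -U -r -e -d /data gs://kluster-backup
          volumeMounts:
            - mountPath: /data
              name: backup-nfs-access
            - mountPath: /config
              name: gcloud-credentials
      volumes:
        - name: backup-nfs-access
          persistentVolumeClaim:
            claimName: backup-nfs-access
        - name: gcloud-credentials
          secret:
            secretName: gcloud-credentials
```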
diff --git a/unused/ingress-nginx/values.yaml b/unused/ingress-nginx/values.yaml new file mode 100644 index 0000000..de70dbb --- /dev/null +++ b/unused/ingress-nginx/values.yaml @@ -0,0 +1,749 @@
+## nginx configuration
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md
+##
+
+## Overrides for generated resource names
+# See templates/_helpers.tpl
+# nameOverride:
+# fullnameOverride:
+
+## Labels to apply to all resources
+##
+commonLabels: {}
+# scmhash: abc123
+# myLabel: aakkmd
+
+controller:
+ name: controller
+ image:
+ ## Keep false as default for now!
+ chroot: false
+ registry: registry.k8s.io
+ image: ingress-nginx/controller
+ ## for backwards compatibility consider setting the full image url via the repository value below
+ ## use *either* the current default registry/image pair *or* the repository format; providing both when installing the chart will fail
+ ## repository:
+ tag: "v1.3.0"
+ digest: sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5
+ digestChroot: sha256:0fcb91216a22aae43b374fc2e6a03b8afe9e8c78cbf07a09d75636dc4ea3c191
+ pullPolicy: IfNotPresent
+ # www-data -> uid 101
+ runAsUser: 101
+ allowPrivilegeEscalation: true
+
+ # -- Use an existing PSP instead of creating one
+ existingPsp: ""
+
+ # -- Configures the controller container name
+ containerName: controller
+
+ # -- Configures the ports that the nginx-controller listens on
+ containerPort:
+ http: 80
+ https: 443
+
+ # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
+ config: {}
+
+ # -- Annotations to be added to the controller config configuration configmap.
+ configAnnotations: {}
+
+ # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers
+ proxySetHeaders: {}
+
+ # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
+ addHeaders: {}
+
+ # -- Optionally customize the pod dnsConfig.
+ dnsConfig: {}
+
+ # -- Optionally customize the pod hostname.
+ hostname: {}
+
+ # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
+ # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
+ # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
+ dnsPolicy: ClusterFirst
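As the comment above explains, dnsPolicy only needs to change when the controller joins the host network. A values override pairing the two settings might look like this (sketch only; this chart keeps both at their defaults):

```yaml
# Bare-metal style override: the controller binds host ports directly,
# so in-cluster name resolution needs ClusterFirstWithHostNet
controller:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
```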
+
+ # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
+ # In a host-network configuration there is no Service exposing the NGINX Ingress controller, so the ingress status field
+ # would stay blank; the default --publish-service flag used in standard cloud setups does not apply
+ reportNodeInternalIp: false
+
+ # -- Process Ingress objects without ingressClass annotation/ingressClassName field
+ # Overrides value for --watch-ingress-without-class flag of the controller binary
+ # Defaults to false
+ watchIngressWithoutClass: false
+
+ # -- Process IngressClass per name (additionally as per spec.controller).
+ ingressClassByName: false
+
+ # -- This configuration defines if Ingress Controller should allow users to set
+ # their own *-snippet annotations, otherwise this is forbidden / dropped
+ # when users add those annotations.
+ # Global snippets in ConfigMap are still respected
+ allowSnippetAnnotations: true
+
+ # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
+ # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
+ # is merged
+ hostNetwork: false
+
+ ## Use host ports 80 and 443
+ ## Disabled by default
+ hostPort:
+ # -- Enable 'hostPort' or not
+ enabled: false
+ ports:
+ # -- 'hostPort' http port
+ http: 80
+ # -- 'hostPort' https port
+ https: 443
+
+ # -- Election ID to use for status update
+ electionID: ingress-controller-leader
+
+ ## This section refers to the creation of the IngressClass resource
+ ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
+ ingressClassResource:
+ # -- Name of the ingressClass
+ name: nginx-new
+ # -- Is this ingressClass enabled or not
+ enabled: true
+ # -- Is this the default ingressClass for the cluster
+ default: false
+ # -- Controller-value of the controller that is processing this ingressClass
+ controllerValue: "k8s.io/ingress-nginx"
+
+ # -- Parameters is a link to a custom resource containing additional
+ # configuration for the controller. This is optional if the controller
+ # does not require extra parameters.
+ parameters: {}
+
+ # -- For backwards compatibility with the ingress.class annotation, use ingressClass.
+ # The algorithm is as follows: first ingressClassName is considered; if not present, the controller looks for the ingress.class annotation
+ ingressClass: nginx
+
+ # -- Labels to add to the pod container metadata
+ podLabels: {}
+ # key: value
+
+ # -- Security Context policies for controller pods
+ podSecurityContext: {}
+
+ # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls
+ sysctls: {}
+ # sysctls:
+ # "net.core.somaxconn": "8192"
+
+ # -- Allows customization of the source of the IP address or FQDN to report
+ # in the ingress status field. By default, it reads the information provided
+ # by the service. If disabled, the status field reports the IP address of the
+ # node or nodes where an ingress controller pod is running.
+ publishService:
+ # -- Enable 'publishService' or not
+ enabled: true
+ # -- Allows overriding of the publish service to bind to
+ # Must be <namespace>/<service_name>
+ pathOverride: ""
+
+ # Limit the scope of the controller to a specific namespace
+ scope:
+ # -- Enable 'scope' or not
+ enabled: false
+ # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE)
+ namespace: ""
+ # -- When scope.enabled == false, instead of watching all namespaces, we watch only namespaces whose labels
+ # match the namespaceSelector. Format like foo=bar. Defaults to empty, meaning all namespaces are watched.
+ namespaceSelector: ""
+
+ # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)
+ configMapNamespace: ""
+
+ tcp:
+ # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)
+ configMapNamespace: ""
+ # -- Annotations to be added to the tcp config configmap
+ annotations: {}
+
+ udp:
+ # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)
+ configMapNamespace: ""
+ # -- Annotations to be added to the udp config configmap
+ annotations: {}
+
+ # -- Maxmind license key to download GeoLite2 Databases.
+ ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
+ maxmindLicenseKey: ""
+
+ # -- Additional command line arguments to pass to nginx-ingress-controller
+ # E.g. to specify the default SSL certificate you can use
+ extraArgs: {}
+ ## extraArgs:
+ ## default-ssl-certificate: "<namespace>/<secret_name>"
+
+ # -- Additional environment variables to set
+ extraEnvs: []
+ # extraEnvs:
+ # - name: FOO
+ # valueFrom:
+ # secretKeyRef:
+ # key: FOO
+ # name: secret-resource
+
+ # -- Use a `DaemonSet` or `Deployment`
+ kind: Deployment
+
+ # -- Annotations to be added to the controller Deployment or DaemonSet
+ ##
+ annotations: {}
+ # keel.sh/pollSchedule: "@every 60m"
+
+ # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels
+ ##
+ labels: {}
+ # keel.sh/policy: patch
+ # keel.sh/trigger: poll
+
+
+ # -- The update strategy to apply to the Deployment or DaemonSet
+ ##
+ updateStrategy: {}
+ # rollingUpdate:
+ # maxUnavailable: 1
+ # type: RollingUpdate
+
+ # -- `minReadySeconds` to avoid killing pods before we are ready
+ ##
+ minReadySeconds: 0
+
+
+ # -- Node tolerations for server scheduling to nodes with taints
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Affinity and anti-affinity rules for server scheduling to nodes
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ##
+ affinity: {}
+ # # An example of preferred pod anti-affinity, weight is in the range 1-100
+ # podAntiAffinity:
+ # preferredDuringSchedulingIgnoredDuringExecution:
+ # - weight: 100
+ # podAffinityTerm:
+ # labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/instance
+ # operator: In
+ # values:
+ # - ingress-nginx
+ # - key: app.kubernetes.io/component
+ # operator: In
+ # values:
+ # - controller
+ # topologyKey: kubernetes.io/hostname
+
+ # # An example of required pod anti-affinity
+ # podAntiAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # - labelSelector:
+ # matchExpressions:
+ # - 
key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + # -- If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces the annotations that were + # used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # It allows choosing the protocol for each backend specified in the Kubernetes service. + # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 + # Will be ignored for Kubernetes versions older than 1.20 + ## + appProtocol: true + + annotations: {} + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # -- The IP ranges (CIDR) that are allowed to access the `LoadBalancer`, for cloud providers that support it; see https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service.
This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + loadBalancerIP: "192.168.1.4" + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220801-g00ee51f09@sha256:482562feba02ad178411efc284f8eb803a185e3ea5588b6111ccbc20b816b427 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. 
+ enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + networkPolicyEnabled: false + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* the current default registry/image pair or the repository format; mixing them when installing the chart will fail + ## repository: + tag: v1.3.0 + digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + securityContext: + runAsNonRoot: true + runAsUser: 2000 + fsGroup: 2000 + + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from.
+ ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just example rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less than a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + # -- Improve connection draining when the ingress controller pod is deleted, using a lifecycle hook: + # With this hook, the default terminationGracePeriodSeconds is increased from 30 seconds + # to 300, allowing connections to drain for up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To take full advantage of this feature, the ConfigMap option + # worker-shutdown-timeout is raised from its default of 10s to 240s.
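+ # As a minimal sketch, that override can be expressed through this chart's values (assuming the standard controller.config passthrough into the controller ConfigMap; the 240s figure comes from the note above): + # config: + # worker-shutdown-timeout: "240s"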
+ ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP port names in ingress controller service +## Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A base64-encoded Diffie-Hellman parameter. +# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: + diff --git a/unused/jenkins.pvc.yaml b/unused/jenkins.pvc.yaml new file mode 100644 index 0000000..083da3b --- /dev/null +++ b/unused/jenkins.pvc.yaml @@ -0,0 +1,34 @@ +--- + apiVersion: v1 + kind: PersistentVolume + metadata: + namespace: gitea + name: jenkins-data-nfs + labels: + directory: jenkins + spec: + storageClassName: fast + capacity: + storage: "10Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /jenkins + server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: gitea + name: jenkins-data-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Gi" + selector: + matchLabels: + directory: jenkins \ No newline at end of file diff --git a/unused/jenkins.values.yaml b/unused/jenkins.values.yaml new file mode 100644 index 0000000..d89f190 --- /dev/null +++ b/unused/jenkins.values.yaml @@ -0,0 +1,669 @@ +# Default values for jenkins. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: +# namespaceOverride: + +# For FQDN resolving of the controller service. Change this value to match your existing configuration.
+# ref: https://github.com/kubernetes/dns/blob/master/docs/specification.md +clusterZone: "cluster.local" + +renderHelmLabels: true + +controller: + # Used for label app.kubernetes.io/component + componentName: "jenkins-controller" + image: "jenkins/jenkins" + # tag: "2.346.1-jdk11" + tagLabel: jdk11 + imagePullPolicy: "Always" + imagePullSecretName: + # Optionally configure lifetime for controller-container + lifecycle: + # postStart: + # exec: + # command: + # - "uname" + # - "-a" + disableRememberMe: false + numExecutors: 0 + # configures the executor mode of the Jenkins node. Possible values are: NORMAL or EXCLUSIVE + executorMode: "NORMAL" + # This is ignored if enableRawHtmlMarkupFormatter is true + markupFormatter: plainText + customJenkinsLabels: [] + # The default configuration uses this secret to configure an admin user + # If you don't need that user or use a different security realm then you can disable it + adminSecret: true + + hostNetworking: false + # When enabling LDAP or another non-Jenkins identity source, the built-in admin account will no longer exist. + # If you disable the non-Jenkins identity store and instead use the Jenkins internal one, + # you should revert controller.adminUser to your preferred admin user: + adminUser: "admin" + # adminPassword: + admin: + existingSecret: "" + userKey: jenkins-admin-user + passwordKey: jenkins-admin-password + # This value should not be changed unless you use a custom Jenkins image or one derived from it. If you want to use + # the CloudBees Jenkins Distribution docker image, you should set jenkinsHome: "/var/cloudbees-jenkins-distribution" + jenkinsHome: "/var/jenkins_home" + # This value should not be changed unless you use a custom Jenkins image or one derived from it. If you want to use + # the CloudBees Jenkins Distribution docker image, you should set jenkinsRef: "/usr/share/cloudbees-jenkins-distribution/ref" + jenkinsRef: "/usr/share/jenkins/ref" + # Path to the jenkins war file which is used by jenkins-plugin-cli. + jenkinsWar: "/usr/share/jenkins/jenkins.war" + # Overrides the default arguments passed to the war + # overrideArgs: + # - --httpPort=8080 + resources: + requests: + cpu: "50m" + memory: "256Mi" + limits: + cpu: "2000m" + memory: "4096Mi" + # Overrides the init container default values + # initContainerResources: + # requests: + # cpu: "50m" + # memory: "256Mi" + # limits: + # cpu: "2000m" + # memory: "4096Mi" + # Environment variables that get added to the init container (useful for e.g. http_proxy) + # initContainerEnv: + # - name: http_proxy + # value: "http://192.168.64.1:3128" + # containerEnv: + # - name: http_proxy + # value: "http://192.168.64.1:3128" + # Set min/max heap here if needed with: + # javaOpts: "-Xms512m -Xmx512m" + # jenkinsOpts: "" + # If you are using the ingress definitions provided by this chart via the `controller.ingress` block the configured hostname will be the ingress hostname starting with `https://` or `http://` depending on the `tls` configuration. + # The Protocol can be overwritten by specifying `controller.jenkinsUrlProtocol`. + # jenkinsUrlProtocol: "https" + # If you are not using the provided ingress you can specify `controller.jenkinsUrl` to change the url definition.
+ # jenkinsUrl: "" + # If you set this prefix and use an ingress controller, then you might want to set the ingress path below + # jenkinsUriPrefix: "/jenkins" + # Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set) + usePodSecurityContext: true + # Note that `runAsUser`, `fsGroup`, and `securityContextCapabilities` are + # being deprecated and replaced by `podSecurityContextOverride`. + # Set runAsUser to 1000 to let Jenkins run as non-root user 'jenkins' which exists in 'jenkins/jenkins' docker image. + # When setting runAsUser to a different value than 0 also set fsGroup to the same value: + runAsUser: 1000 + fsGroup: 1000 + # If you have PodSecurityPolicies that require dropping of capabilities as suggested by CIS K8s benchmark, put them here + securityContextCapabilities: {} + # drop: + # - NET_RAW + # Completely overwrites the contents of the `securityContext`, ignoring the + # values provided for the deprecated fields: `runAsUser`, `fsGroup`, and + # `securityContextCapabilities`. In the case of mounting an ext4 filesystem, + # it might be desirable to use `supplementalGroups` instead of `fsGroup` in + # the `securityContext` block: https://github.com/kubernetes/kubernetes/issues/67014#issuecomment-589915496 + # podSecurityContextOverride: + # runAsUser: 1000 + # runAsNonRoot: true + # supplementalGroups: [1000] + # # capabilities: {} + # Container securityContext + containerSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + servicePort: 8080 + targetPort: 8080 + # For minikube, set this to NodePort, elsewhere use LoadBalancer + # Use ClusterIP if your setup includes ingress controller + serviceType: ClusterIP + # Use Local to preserve the client source IP and avoid a second hop for LoadBalancer and Nodeport type services, + # but risks potentially imbalanced traffic spreading. + serviceExternalTrafficPolicy: + # Jenkins controller service annotations + serviceAnnotations: {} + # Jenkins controller custom labels + statefulSetLabels: {} + # foo: bar + # bar: foo + # Jenkins controller service labels + serviceLabels: {} + # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https + # Put labels on Jenkins controller pod + podLabels: {} + # Used to create Ingress record (should be used with ServiceType: ClusterIP) + # nodePort: + # -Dcom.sun.management.jmxremote.port=4000 + # -Dcom.sun.management.jmxremote.authenticate=false + # -Dcom.sun.management.jmxremote.ssl=false + # jmxPort: 4000 + # Optionally configure other ports to expose in the controller container + extraPorts: [] + # - name: BuildInfoProxy + # port: 9000 + + # List of plugins to be installed during Jenkins controller start + installPlugins: + - kubernetes:3600.v144b_cd192ca_a_ + - workflow-aggregator:581.v0c46fa_697ffd + - git:4.11.3 + - gitea:1.4.3 + - configuration-as-code:1429.v09b_044a_c93de + + # Set to false to download the minimum required version of all dependencies. + installLatestPlugins: true + + # Set to true to download latest dependencies of any plugin that is requested to have the latest version. + installLatestSpecifiedPlugins: false + + # List of plugins to install in addition to those listed in controller.installPlugins + additionalPlugins: [] + + # Enable to initialize the Jenkins controller only once on initial installation. + # Without this, whenever the controller gets restarted (Evicted, etc.) it will fetch plugin updates which has the potential to cause breakage.
+ # Note that for this to work, `persistence.enabled` needs to be set to `true` + initializeOnce: false + + # Enable to always override the installed plugins with the values of 'controller.installPlugins' on upgrade or redeployment. + # overwritePlugins: true + + # Configures if plugins bundled with `controller.image` should be overwritten with the values of 'controller.installPlugins' on upgrade or redeployment. + overwritePluginsFromImage: true + + # Enable HTML parsing using OWASP Markup Formatter Plugin (antisamy-markup-formatter), useful with ghprb plugin. + # The plugin is not installed by default, please update controller.installPlugins. + enableRawHtmlMarkupFormatter: false + # Used to approve a list of groovy functions in pipelines that use the script-security plugin. Can be viewed under /scriptApproval + scriptApproval: [] + # - "method groovy.json.JsonSlurperClassic parseText java.lang.String" + # - "new groovy.json.JsonSlurperClassic" + # List of groovy init scripts to be executed during Jenkins controller start + initScripts: [] + # - | + # print 'adding global pipeline libraries, register properties, bootstrap jobs...' + + # 'name' is a name of an existing secret in same namespace as jenkins, + # 'keyName' is the name of one of the keys inside current secret. + # the 'name' and 'keyName' are concatenated with a '-' in between, so for example: + # an existing secret "secret-credentials" and a key inside it named "github-password" should be used in Jcasc as ${secret-credentials-github-password} + # 'name' and 'keyName' must be lowercase RFC 1123 labels: they may consist only of lower case alphanumeric characters or '-', + # and must start and end with an alphanumeric character (e.g. 'my-name' or '123-abc') + additionalExistingSecrets: [] + # - name: secret-name-1 + # keyName: username + # - name: secret-name-1 + # keyName: password + + additionalSecrets: [] + # - name: nameOfSecret + # value: secretText + + # Generate SecretClaim resources in order to create Kubernetes secrets from HashiCorp Vault using kube-vault-controller. + # 'name' is name of the secret that will be created in Kubernetes. The Jenkins fullname is prepended to this value. + # 'path' is the fully qualified path to the secret in Vault + # 'type' is an optional Kubernetes secret type. Defaults to 'Opaque' + # 'renew' is an optional secret renewal time in seconds + secretClaims: [] + # - name: secretName # required + # path: testPath # required + # type: kubernetes.io/tls # optional + # renew: 60 # optional + + # Name of default cloud configuration. + cloudName: "kubernetes" + + # Below is the implementation of Jenkins Configuration as Code. Add a key under configScripts for each configuration area, + # where each corresponds to a plugin or section of the UI. Each key (prior to | character) is just a label, and can be any value. + # Keys are only used to give the section a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label + # characters: lowercase letters, numbers, and hyphens. The keys become the name of a configuration yaml file on the controller in + # /var/jenkins_home/casc_configs (by default) and will be processed by the Configuration as Code Plugin. The lines after each | + # become the content of the configuration yaml file. The first line after this is a JCasC root element, eg jenkins, credentials, + # etc. Best reference is https://<jenkins_url>/configuration-as-code/reference.
The example below creates a welcome message: + JCasC: + defaultConfig: true + configScripts: {} + # welcome-message: | + # jenkins: + # systemMessage: Welcome to our CI\CD server. This Jenkins is configured and managed 'as code'. + # Ignored if securityRealm is defined in controller.JCasC.configScripts + securityRealm: |- + local: + allowsSignup: false + enableCaptcha: false + users: + - id: "${chart-admin-username}" + name: "Jenkins Admin" + password: "${chart-admin-password}" + # Ignored if authorizationStrategy is defined in controller.JCasC.configScripts + authorizationStrategy: |- + loggedInUsersCanDoAnything: + allowAnonymousRead: false + # Optionally specify additional init-containers + customInitContainers: [] + # - name: custom-init + # image: "alpine:3.7" + # imagePullPolicy: Always + # command: [ "uname", "-a" ] + + sidecars: + configAutoReload: + # If enabled: true, Jenkins Configuration as Code will be reloaded on-the-fly without a reboot. If false or not-specified, + # jcasc changes will cause a reboot and will only be applied at the subsequent start-up. Auto-reload uses the + # http://<jenkins_url>/reload-configuration-as-code endpoint to reapply config when changes to the configScripts are detected. + enabled: true + image: kiwigrid/k8s-sidecar:1.15.0 + imagePullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 50m + # memory: 50Mi + # How many connection-related errors to retry on + reqRetryConnect: 10 + # env: + # - name: REQ_TIMEOUT + # value: "30" + # SSH port value can be set to any unused TCP port. The default, 1044, is a non-standard SSH port that has been chosen at random. + # Is only used to reload jcasc config from the sidecar container running in the Jenkins controller pod. + # This TCP port will not be open in the pod (unless you specifically configure this), so Jenkins will not be + # accessible via SSH from outside of the pod. Note if you use non-root pod privileges (runAsUser & fsGroup), + # this must be > 1024: + sshTcpPort: 1044 + # folder in the pod that should hold the collected JCasC configs: + folder: "/var/jenkins_home/casc_configs" + # If specified, the sidecar will search for JCasC config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces: + # searchNamespace: + containerSecurityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # Allows you to inject additional/other sidecars + other: [] + ## The example below runs the client for https://smee.io as a sidecar container next to Jenkins, + ## which allows triggering builds from behind a secure firewall. + ## https://jenkins.io/blog/2019/01/07/webhook-firewalls/#triggering-builds-with-webhooks-behind-a-secure-firewall + ## + ## Note: To use it you should go to https://smee.io/new and update the url to the generated one.
+ # - name: smee + # image: docker.io/twalter/smee-client:1.0.2 + # args: ["--port", "{{ .Values.controller.servicePort }}", "--path", "/github-webhook/", "--url", "https://smee.io/new"] + # resources: + # limits: + # cpu: 50m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 32Mi + # Name of the Kubernetes scheduler to use + schedulerName: "" + # Node labels and tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + nodeSelector: {} + + terminationGracePeriodSeconds: + + terminationMessagePath: + terminationMessagePolicy: + + tolerations: [] + + affinity: {} + # Leverage a priorityClass to ensure your pods survive resource shortages + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: + + podAnnotations: {} + # Add StatefulSet annotations + statefulSetAnnotations: {} + + # StatefulSet updateStrategy + # ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategy: {} + + ingress: + enabled: true + # Override for the default paths that map requests to the backend + paths: [] + # - backend: + # serviceName: >- + # {{ template "jenkins.fullname" . }} + # # Don't use string here, use only integer value! + # servicePort: 8080 + # For Kubernetes v1.19+, use 'networking.k8s.io/v1' + apiVersion: "networking.k8s.io/v1" + labels: {} + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod + + hostName: jenkins.kluster.moll.re + tls: + - secretName: cloudflare-letsencrypt-issuer-account-key + hosts: + - jenkins.kluster.moll.re + + # often you want to have your controller all locked down and private + # but you still want to get webhooks from your SCM + # A secondary ingress will let you expose different urls + # with a different configuration + secondaryingress: + enabled: false + # paths you want forwarded to the backend + # ex /github-webhook + paths: [] + # For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1' + # For Kubernetes v1.19+, use 'networking.k8s.io/v1' + apiVersion: "extensions/v1beta1" + labels: {} + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # configures the hostname e.g.
jenkins-external.example.com + hostName: + tls: + # - secretName: jenkins-external.example.com + # hosts: + # - jenkins-external.example.com + + +agent: + enabled: true + defaultsProviderTemplate: "" + # URL for connecting to the Jenkins controller + jenkinsUrl: + # connect to the specified host and port, instead of connecting directly to the Jenkins controller + jenkinsTunnel: + kubernetesConnectTimeout: 5 + kubernetesReadTimeout: 15 + maxRequestsPerHostStr: "32" + namespace: + image: "jenkins/inbound-agent" + tag: "4.11.2-4" + workingDir: "/home/jenkins/agent" + nodeUsageMode: "NORMAL" + customJenkinsLabels: [] + # name of the secret to be used for image pulling + imagePullSecretName: + componentName: "jenkins-agent" + websocket: false + privileged: false + runAsUser: + runAsGroup: + resources: + requests: + cpu: "512m" + memory: "512Mi" + limits: + cpu: "512m" + memory: "512Mi" + # You may want to change this to true while testing a new image + alwaysPullImage: false + # Controls how agent pods are retained after the Jenkins build completes + # Possible values: Always, Never, OnFailure + podRetention: "Never" + # Disable if you do not want the YAML of the agent pod template to show up + # in the job Console Output. This can be helpful for either security reasons + # or simply to clean up the output to make it easier to read. + showRawYaml: true + # You can define the volumes that you want to mount for this container + # Allowed types are: ConfigMap, EmptyDir, HostPath, Nfs, PVC, Secret + # Configure the attributes as they appear in the corresponding Java class for that type + # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes + volumes: [] + # - type: ConfigMap + # configMapName: myconfigmap + # mountPath: /var/myapp/myconfigmap + # - type: EmptyDir + # mountPath: /var/myapp/myemptydir + # memory: false + # - type: HostPath + # hostPath: /var/lib/containers + # mountPath: /var/myapp/myhostpath + # - type: Nfs + # mountPath: /var/myapp/mynfs + # readOnly: false + # serverAddress: "192.0.2.0" + # serverPath: /var/lib/containers + # - type: PVC + # claimName: mypvc + # mountPath: /var/myapp/mypvc + # readOnly: false + # - type: Secret + # defaultMode: "600" + # mountPath: /var/myapp/mysecret + # secretName: mysecret + + # You can define the workspaceVolume that you want to mount for this container + # Allowed types are: DynamicPVC, EmptyDir, HostPath, Nfs, PVC + # Configure the attributes as they appear in the corresponding Java class for that type + # https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes/workspace + workspaceVolume: {} + ## DynamicPVC example + # type: DynamicPVC + # configMapName: myconfigmap + ## EmptyDir example + # type: EmptyDir + # memory: false + ## HostPath example + # type: HostPath + # hostPath: /var/lib/containers + ## NFS example + # type: Nfs + # readOnly: false + # serverAddress: "192.0.2.0" + # serverPath: /var/lib/containers + ## PVC example + # type: PVC + # claimName: mypvc + # readOnly: false + # + # Pod-wide environment, these vars are visible to any container in the agent pod + envVars: [] + # - name: PATH + # value: /usr/local/bin + nodeSelector: {} + # Key Value selectors.
Ex: + # jenkins-agent: v1 + + # Command executed when the side container starts + command: + args: "${computer.jnlpmac} ${computer.name}" + # Side container name + sideContainerName: "jnlp" + # Doesn't allocate pseudo TTY by default + TTYEnabled: false + # Max number of spawned agents + containerCap: 10 + # Pod name + podName: "default" + # Allows the Pod to remain active for reuse until the configured number of + # minutes has passed since the last step was executed on it. + idleMinutes: 0 + # Raw yaml template for the Pod. For example this allows usage of tolerations for agent pods. + # https://github.com/jenkinsci/kubernetes-plugin#using-yaml-to-define-pod-templates + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + yamlTemplate: "" + # yamlTemplate: |- + # apiVersion: v1 + # kind: Pod + # spec: + # tolerations: + # - key: "key" + # operator: "Equal" + # value: "value" + # Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates: merge or override + yamlMergeStrategy: "override" + # Timeout in seconds for an agent to be online + connectTimeout: 100 + # Annotations to apply to the pod. + annotations: {} + + # Disable the default Jenkins Agent configuration. + # Useful when configuring agents only with the podTemplates value, since the default podTemplate populated by values mentioned above will be excluded in the rendered template. + disableDefaultAgent: false + + # Below is the implementation of custom pod templates for the default configured kubernetes cloud. + # Add a key under podTemplates for each pod template. Each key (prior to | character) is just a label, and can be any value. + # Keys are only used to give the pod template a meaningful name. The only restriction is they may only contain RFC 1123 \ DNS label + # characters: lowercase letters, numbers, and hyphens. Each pod template can contain multiple containers. + # For this pod templates configuration to be loaded the following values must be set: + # controller.JCasC.defaultConfig: true + # Best reference is https://<jenkins_url>/configuration-as-code/reference#Cloud-kubernetes. The example below creates a python pod template.
+ podTemplates: {} + # python: | + # - name: python + # label: jenkins-python + # serviceAccount: jenkins + # containers: + # - name: python + # image: python:3 + # command: "/bin/sh -c" + # args: "cat" + # ttyEnabled: true + # privileged: true + # resourceRequestCpu: "400m" + # resourceRequestMemory: "512Mi" + # resourceLimitCpu: "1" + # resourceLimitMemory: "1024Mi" + +# Here you can add additional agents +# They inherit all values from `agent` so you only need to specify values which differ +additionalAgents: {} +# maven: +# podName: maven +# customJenkinsLabels: maven +# # An example of overriding the jnlp container +# # sideContainerName: jnlp +# image: jenkins/jnlp-agent-maven +# tag: latest +# python: +# podName: python +# customJenkinsLabels: python +# sideContainerName: python +# image: python +# tag: "3" +# command: "/bin/sh -c" +# args: "cat" +# TTYEnabled: true + +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: jenkins-data-nfs + +## Install Default RBAC roles and bindings +rbac: + create: true + readSecrets: false + +serviceAccount: + create: true + # The name of the service account is autogenerated by default + name: + annotations: {} + imagePullSecretName: diff --git a/unused/mathieu_ghost/mathieu.pvc.yaml b/unused/mathieu_ghost/mathieu.pvc.yaml new file mode 100644 index 0000000..e72dc29 --- /dev/null +++ b/unused/mathieu_ghost/mathieu.pvc.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: mathieu + name: mathieu-nfs + labels: + directory: mathieu +spec: + storageClassName: fast + capacity: + storage: "10Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /mathieu + server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: mathieu + name: mathieu-nfs +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Gi" + selector: + matchLabels: + directory: mathieu \ No newline at end of file diff --git a/unused/mathieu_ghost/mathieu.values.yaml b/unused/mathieu_ghost/mathieu.values.yaml new file mode 100644 index 0000000..a1283ba --- /dev/null +++ b/unused/mathieu_ghost/mathieu.values.yaml @@ -0,0 +1,72 @@ +# +# IMPORTANT NOTE +# +# This chart inherits from our common library chart. You can check the default values/options here: +# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml +# + +image: + # -- image repository + repository: ghost + # -- image tag + # @default -- chart.appVersion + tag: + # -- image pull policy + pullPolicy: IfNotPresent + +# See https://ghost.org/docs/config/#running-ghost-with-config-env-variables +env: + url: "https://cinema.kluster.moll.re" + database__client: sqlite3 + database__connection__filename: "content/data/ghost-data.db" + database__useNullAsDefault: true + database__debug: false + NODE_ENV: production + +# -- Configures service settings for the chart. +# @default -- See values.yaml +service: + main: + ports: + http: + port: 2368 + + + +ingress: + # -- Enable and configure ingress settings for the chart under this key.
+ # @default -- See values.yaml + main: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod + hosts: + - host: cinema.kluster.moll.re + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - cinema.kluster.moll.re + secretName: cloudflare-letsencrypt-issuer-account-key + +# -- Configure persistence settings for the chart under this key. +# @default -- See values.yaml +persistence: + content: + enabled: true + existingClaim: mathieu-nfs + +mariadb: + enabled: false + architecture: standalone + auth: + database: ghost + username: ghost + password: ghost + rootPassword: ghost-rootpass + primary: + persistence: + enabled: false + diff --git a/unused/mc-forwarding.deployment.yaml b/unused/mc-forwarding.deployment.yaml new file mode 100644 index 0000000..28edb40 --- /dev/null +++ b/unused/mc-forwarding.deployment.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: minecraft + labels: + app: minecraft + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: forwarding + namespace: minecraft + labels: + app: forwarding +spec: + replicas: 1 + selector: + matchLabels: + app: forwarding + template: + metadata: + labels: + app: forwarding + spec: + containers: + - name: forwarding + image: simonrupf/socat + tty: true + ports: + - containerPort: 25565 + args: ["TCP4-LISTEN:25565,fork", "TCP6:mc.game.moll.re:25565"] + hostNetwork: true + nodeSelector: + hdd: enabled + # ensures we are running on 192.168.1.122, i.e. pi node 0 +--- +apiVersion: v1 +kind: Service +metadata: + name: forwarding + namespace: minecraft + +spec: + type: NodePort + ipFamilyPolicy: PreferDualStack + ports: + - name: mc + port: 25565 + selector: + app: forwarding + diff --git a/unused/nginx.values.yaml b/unused/nginx.values.yaml new file mode 100644 index 0000000..73b9326 --- /dev/null +++ b/unused/nginx.values.yaml @@ -0,0 +1,351 @@ +controller: + ## The name of the Ingress Controller daemonset or deployment. + ## Autogenerated if not set or set to "". + # name: nginx-ingress + + ## The kind of the Ingress Controller installation - deployment or daemonset. + kind: deployment + + ## Deploys the Ingress Controller for NGINX Plus. + nginxplus: false + + # Timeout in milliseconds which the Ingress Controller will wait for a successful NGINX reload after a change or at the initial start. + nginxReloadTimeout: 60000 + + ## Support for App Protect + appprotect: + ## Enable the App Protect module in the Ingress Controller. + enable: false + ## Sets log level for App Protect. Allowed values: fatal, error, warn, info, debug, trace + # logLevel: fatal + + ## Support for App Protect Dos + appprotectdos: + ## Enable the App Protect Dos module in the Ingress Controller. + enable: false + ## Enable debugging for App Protect Dos. + debug: false + ## Max number of nginx processes to support. + maxWorkers: 0 + ## Max number of ADMD instances. + maxDaemons: 0 + ## RAM memory size to consume in MB. + memory: 0 + + ## Enables the Ingress Controller pods to use the host's network namespace. + hostNetwork: false + + ## Enables debugging for NGINX. Uses the nginx-debug binary. Requires error-log-level: debug in the ConfigMap via `controller.config.entries`. + nginxDebug: false + + ## The log level of the Ingress Controller. + logLevel: 1 + + ## A list of custom ports to expose on the NGINX ingress controller pod. Follows the conventional Kubernetes yaml syntax for container ports.
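+ ## For example, a minimal sketch (the DNS port is illustrative, mirroring the AdGuard services defined elsewhere in this repo): + ## customPorts: + ## - name: dns-tcp + ## containerPort: 53 + ## protocol: TCP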
+ customPorts: [] + + image: + ## The image repository of the Ingress Controller. + repository: nginx/nginx-ingress + + ## The tag of the Ingress Controller image. + tag: "2.2.0" + + ## The pull policy for the Ingress Controller image. + pullPolicy: IfNotPresent + + config: + ## The name of the ConfigMap used by the Ingress Controller. + ## Autogenerated if not set or set to "". + # name: nginx-config + + ## The annotations of the Ingress Controller configmap. + annotations: {} + + ## The entries of the ConfigMap for customizing NGINX configuration. + entries: {} + + ## It is recommended to use your own TLS certificates and keys + defaultTLS: + ## The base64-encoded TLS certificate for the default HTTPS server. If not specified, a pre-generated self-signed certificate is used. + ## Note: It is recommended that you specify your own certificate. + cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN2akNDQWFZQ0NRREFPRjl0THNhWFhEQU5CZ2txaGtpRzl3MEJBUXNGQURBaE1SOHdIUVlEVlFRRERCWk8KUjBsT1dFbHVaM0psYzNORGIyNTBjbTlzYkdWeU1CNFhEVEU0TURreE1qRTRNRE16TlZvWERUSXpNRGt4TVRFNApNRE16TlZvd0lURWZNQjBHQTFVRUF3d1dUa2RKVGxoSmJtZHlaWE56UTI5dWRISnZiR3hsY2pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUwvN2hIUEtFWGRMdjNyaUM3QlBrMTNpWkt5eTlyQ08KR2xZUXYyK2EzUDF0azIrS3YwVGF5aGRCbDRrcnNUcTZzZm8vWUk1Y2Vhbkw4WGM3U1pyQkVRYm9EN2REbWs1Qgo4eDZLS2xHWU5IWlg0Rm5UZ0VPaStlM2ptTFFxRlBSY1kzVnNPazFFeUZBL0JnWlJVbkNHZUtGeERSN0tQdGhyCmtqSXVuektURXUyaDU4Tlp0S21ScUJHdDEwcTNRYzhZT3ExM2FnbmovUWRjc0ZYYTJnMjB1K1lYZDdoZ3krZksKWk4vVUkxQUQ0YzZyM1lma1ZWUmVHd1lxQVp1WXN2V0RKbW1GNWRwdEMzN011cDBPRUxVTExSakZJOTZXNXIwSAo1TmdPc25NWFJNV1hYVlpiNWRxT3R0SmRtS3FhZ25TZ1JQQVpQN2MwQjFQU2FqYzZjNGZRVXpNQ0F3RUFBVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQWpLb2tRdGRPcEsrTzhibWVPc3lySmdJSXJycVFVY2ZOUitjb0hZVUoKdGhrYnhITFMzR3VBTWI5dm15VExPY2xxeC9aYzJPblEwMEJCLzlTb0swcitFZ1U2UlVrRWtWcitTTFA3NTdUWgozZWI4dmdPdEduMS9ienM3bzNBaS9kclkrcUI5Q2k1S3lPc3FHTG1US2xFaUtOYkcyR1ZyTWxjS0ZYQU80YTY3Cklnc1hzYktNbTQwV1U3cG9mcGltU1ZmaXFSdkV5YmN3N0NYODF6cFErUyt1eHRYK2VBZ3V0NHh3VlI5d2IyVXYKelhuZk9HbWhWNThDd1dIQnNKa0kxNXhaa2VUWXdSN0diaEFMSkZUUkk3dkhvQXprTWIzbjAxQjQyWjNrN3RXNQpJUDFmTlpIOFUvOWxiUHNoT21FRFZkdjF5ZytVRVJxbStGSis2R0oxeFJGcGZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + + ## The base64-encoded TLS key for the default HTTPS server. Note: If not specified, a pre-generated key is used. + ## Note: It is recommended that you specify your own key. 
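+ ## As a sketch, a self-signed pair can be generated and base64-encoded like this (filenames and CN are illustrative): + ## openssl req -x509 -nodes -newkey rsa:2048 -keyout default.key -out default.crt -subj "/CN=nginx-ingress-default" + ## base64 -w0 default.crt # value for controller.defaultTLS.cert + ## base64 -w0 default.key # value for controller.defaultTLS.key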
+ key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdi91RWM4b1JkMHUvZXVJTHNFK1RYZUprckxMMnNJNGFWaEMvYjVyYy9XMlRiNHEvClJOcktGMEdYaVN1eE9ycXgrajlnamx4NXFjdnhkenRKbXNFUkJ1Z1B0ME9hVGtIekhvb3FVWmcwZGxmZ1dkT0EKUTZMNTdlT1l0Q29VOUZ4amRXdzZUVVRJVUQ4R0JsRlNjSVo0b1hFTkhzbysyR3VTTWk2Zk1wTVM3YUhudzFtMApxWkdvRWEzWFNyZEJ6eGc2clhkcUNlUDlCMXl3VmRyYURiUzc1aGQzdUdETDU4cGszOVFqVUFQaHpxdmRoK1JWClZGNGJCaW9CbTVpeTlZTW1hWVhsMm0wTGZzeTZuUTRRdFFzdEdNVWozcGJtdlFmazJBNnljeGRFeFpkZFZsdmwKMm82MjBsMllxcHFDZEtCRThCay90elFIVTlKcU56cHpoOUJUTXdJREFRQUJBb0lCQVFDZklHbXowOHhRVmorNwpLZnZJUXQwQ0YzR2MxNld6eDhVNml4MHg4Mm15d1kxUUNlL3BzWE9LZlRxT1h1SENyUlp5TnUvZ2IvUUQ4bUFOCmxOMjRZTWl0TWRJODg5TEZoTkp3QU5OODJDeTczckM5bzVvUDlkazAvYzRIbjAzSkVYNzZ5QjgzQm9rR1FvYksKMjhMNk0rdHUzUmFqNjd6Vmc2d2szaEhrU0pXSzBwV1YrSjdrUkRWYmhDYUZhNk5nMUZNRWxhTlozVDhhUUtyQgpDUDNDeEFTdjYxWTk5TEI4KzNXWVFIK3NYaTVGM01pYVNBZ1BkQUk3WEh1dXFET1lvMU5PL0JoSGt1aVg2QnRtCnorNTZud2pZMy8yUytSRmNBc3JMTnIwMDJZZi9oY0IraVlDNzVWYmcydVd6WTY3TWdOTGQ5VW9RU3BDRkYrVm4KM0cyUnhybnhBb0dCQU40U3M0ZVlPU2huMVpQQjdhTUZsY0k2RHR2S2ErTGZTTXFyY2pOZjJlSEpZNnhubmxKdgpGenpGL2RiVWVTbWxSekR0WkdlcXZXaHFISy9iTjIyeWJhOU1WMDlRQ0JFTk5jNmtWajJTVHpUWkJVbEx4QzYrCk93Z0wyZHhKendWelU0VC84ajdHalRUN05BZVpFS2FvRHFyRG5BYWkyaW5oZU1JVWZHRXFGKzJyQW9HQkFOMVAKK0tZL0lsS3RWRzRKSklQNzBjUis3RmpyeXJpY05iWCtQVzUvOXFHaWxnY2grZ3l4b25BWlBpd2NpeDN3QVpGdwpaZC96ZFB2aTBkWEppc1BSZjRMazg5b2pCUmpiRmRmc2l5UmJYbyt3TFU4NUhRU2NGMnN5aUFPaTVBRHdVU0FkCm45YWFweUNweEFkREtERHdObit3ZFhtaTZ0OHRpSFRkK3RoVDhkaVpBb0dCQUt6Wis1bG9OOTBtYlF4VVh5YUwKMjFSUm9tMGJjcndsTmVCaWNFSmlzaEhYa2xpSVVxZ3hSZklNM2hhUVRUcklKZENFaHFsV01aV0xPb2I2NTNyZgo3aFlMSXM1ZUtka3o0aFRVdnpldm9TMHVXcm9CV2xOVHlGanIrSWhKZnZUc0hpOGdsU3FkbXgySkJhZUFVWUNXCndNdlQ4NmNLclNyNkQrZG8wS05FZzFsL0FvR0FlMkFVdHVFbFNqLzBmRzgrV3hHc1RFV1JqclRNUzRSUjhRWXQKeXdjdFA4aDZxTGxKUTRCWGxQU05rMXZLTmtOUkxIb2pZT2pCQTViYjhibXNVU1BlV09NNENoaFJ4QnlHbmR2eAphYkJDRkFwY0IvbEg4d1R0alVZYlN5T294ZGt5OEp0ek90ajJhS0FiZHd6NlArWDZDODhjZmxYVFo5MWpYL3RMCjF3TmRKS2tDZ1lCbyt0UzB5TzJ2SWFmK2UwSkN5TGhzVDQ5cTN3Zis2QWVqWGx2WDJ1VnRYejN5QTZnbXo5aCsKcDNlK2JMRUxwb3B0WFhNdUFRR0xhUkcrYlNNcjR5dERYbE5ZSndUeThXczNKY3dlSTdqZVp2b0ZpbmNvVlVIMwphdmxoTUVCRGYxSjltSDB5cDBwWUNaS2ROdHNvZEZtQktzVEtQMjJhTmtsVVhCS3gyZzR6cFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + + ## The secret with a TLS certificate and key for the default HTTPS server. + ## The value must follow the following format: `/`. + ## Used as an alternative to specifying a certificate and key using `controller.defaultTLS.cert` and `controller.defaultTLS.key` parameters. + ## Format: / + secret: + + wildcardTLS: + ## The base64-encoded TLS certificate for every Ingress/VirtualServer host that has TLS enabled but no secret specified. + ## If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. + cert: "" + + ## The base64-encoded TLS key for every Ingress/VirtualServer host that has TLS enabled but no secret specified. + ## If the parameter is not set, for such Ingress/VirtualServer hosts NGINX will break any attempt to establish a TLS connection. + key: "" + + ## The secret with a TLS certificate and key for every Ingress/VirtualServer host that has TLS enabled but no secret specified. + ## The value must follow the following format: `/`. + ## Used as an alternative to specifying a certificate and key using `controller.wildcardTLS.cert` and `controller.wildcardTLS.key` parameters. + ## Format: / + secret: + + ## The node selector for pod assignment for the Ingress Controller pods. 
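+ ## For example (a sketch; the arch label assumes the Raspberry Pi nodes mentioned elsewhere in this repo): + ## nodeSelector: + ## kubernetes.io/os: linux + ## kubernetes.io/arch: arm64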
+ nodeSelector: {} + + ## The termination grace period of the Ingress Controller pod. + terminationGracePeriodSeconds: 30 + + ## The resources of the Ingress Controller pods. + resources: {} + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 100m + # memory: 64Mi + + ## The tolerations of the Ingress Controller pods. + tolerations: [] + + ## The affinity of the Ingress Controller pods. + affinity: {} + + ## The volumes of the Ingress Controller pods. + volumes: [] + # - name: extra-conf + # configMap: + # name: extra-conf + + ## The volumeMounts of the Ingress Controller pods. + volumeMounts: [] + # - name: extra-conf + # mountPath: /etc/nginx/conf.d/extra.conf + # subPath: extra.conf + + ## InitContainers for the Ingress Controller pods. + initContainers: [] + # - name: init-container + # image: busybox:1.34 + # command: ['sh', '-c', 'echo this is initial setup!'] + + ## Extra containers for the Ingress Controller pods. + extraContainers: [] + # - name: container + # image: busybox:1.34 + # command: ['sh', '-c', 'echo this is a sidecar!'] + + ## The number of replicas of the Ingress Controller deployment. + replicaCount: 1 + + ## A class of the Ingress Controller. + + ## IngressClass resource with the name equal to the class must be deployed. Otherwise, + ## the Ingress Controller will fail to start. + ## The Ingress Controller only processes resources that belong to its class - i.e. have the "ingressClassName" field equal to the class. + + ## For all versions of Kubernetes, the Ingress Controller also processes all the resources that do not have the "ingressClassName" field. + ingressClass: nginx + + ## New Ingresses without an ingressClassName field specified will be assigned the class specified in `controller.ingressClass`. + setAsDefaultIngress: false + + ## Namespace to watch for Ingress resources. By default the Ingress Controller watches all namespaces. + watchNamespace: "" + + ## Enable the custom resources. + enableCustomResources: true + + ## Enable preview policies. This parameter is deprecated. To enable OIDC Policies please use controller.enableOIDC instead. + enablePreviewPolicies: false + + ## Enable OIDC policies. + enableOIDC: false + + ## Enable TLS Passthrough on port 443. Requires controller.enableCustomResources. + enableTLSPassthrough: false + + ## Enable cert manager for Virtual Server resources. Requires controller.enableCustomResources. + enableCertManager: false + + globalConfiguration: + ## Creates the GlobalConfiguration custom resource. Requires controller.enableCustomResources. + create: false + + ## The spec of the GlobalConfiguration for defining the global configuration parameters of the Ingress Controller. + spec: {} + # listeners: + # - name: dns-udp + # port: 5353 + # protocol: UDP + # - name: dns-tcp + # port: 5353 + # protocol: TCP + + ## Enable custom NGINX configuration snippets in Ingress, VirtualServer, VirtualServerRoute and TransportServer resources. + enableSnippets: false + + ## Add a location based on the value of health-status-uri to the default server. The location responds with the 200 status code for any request. + ## Useful for external health-checking of the Ingress Controller. + healthStatus: false + + ## Sets the URI of health status location in the default server. Requires controller.healthStatus. + healthStatusURI: "/nginx-health" + + nginxStatus: + ## Enable the NGINX stub_status, or the NGINX Plus API. + enable: true + + ## Set the port where the NGINX stub_status or the NGINX Plus API is exposed.
+ port: 8080 + + ## Add IPv4 IP/CIDR blocks to the allow list for NGINX stub_status or the NGINX Plus API. Separate multiple IP/CIDR by commas. + allowCidrs: "127.0.0.1" + + service: + ## Creates a service to expose the Ingress Controller pods. + create: true + + ## The type of service to create for the Ingress Controller. + type: LoadBalancer + + ## The externalTrafficPolicy of the service. The value Local preserves the client source IP. + externalTrafficPolicy: Local + + ## The annotations of the Ingress Controller service. + annotations: {} + + ## The extra labels of the service. + extraLabels: {} + + ## The static IP address for the load balancer. Requires controller.service.type set to LoadBalancer. The cloud provider must support this feature. + loadBalancerIP: "" + + ## The list of external IPs for the Ingress Controller service. + externalIPs: [] + + ## The IP ranges (CIDR) that are allowed to access the load balancer. Requires controller.service.type set to LoadBalancer. The cloud provider must support this feature. + loadBalancerSourceRanges: [] + + ## The name of the service + ## Autogenerated if not set or set to "". + # name: nginx-ingress + + httpPort: + ## Enables the HTTP port for the Ingress Controller service. + enable: true + + ## The HTTP port of the Ingress Controller service. + port: 80 + + ## The custom NodePort for the HTTP port. Requires controller.service.type set to NodePort. + nodePort: "" + + ## The HTTP port on the POD where the Ingress Controller service is running. + targetPort: 80 + + httpsPort: + ## Enables the HTTPS port for the Ingress Controller service. + enable: true + + ## The HTTPS port of the Ingress Controller service. + port: 443 + + ## The custom NodePort for the HTTPS port. Requires controller.service.type set to NodePort. + nodePort: "" + + ## The HTTPS port on the POD where the Ingress Controller service is running. + targetPort: 443 + + ## A list of custom ports to expose through the Ingress Controller service. Follows the conventional Kubernetes yaml syntax for service ports. + customPorts: [] + + serviceAccount: + ## The name of the service account of the Ingress Controller pods. Used for RBAC. + ## Autogenerated if not set or set to "". + # name: nginx-ingress + + ## The name of the secret containing docker registry credentials. + ## Secret must exist in the same namespace as the helm release. + imagePullSecretName: "" + + reportIngressStatus: + ## Updates the address field in the status of Ingress resources with an external address of the Ingress Controller. + ## You must also specify the source of the external address either through an external service via controller.reportIngressStatus.externalService, + ## controller.reportIngressStatus.ingressLink or the external-status-address entry in the ConfigMap via controller.config.entries. + ## Note: controller.config.entries.external-status-address takes precedence over the others. + enable: true + + ## Specifies the name of the service with the type LoadBalancer through which the Ingress Controller is exposed externally. + ## The external address of the service is used when reporting the status of Ingress, VirtualServer and VirtualServerRoute resources. + ## controller.reportIngressStatus.enable must be set to true. + ## The default is autogenerated and matches the created service (see controller.service.create). + # externalService: nginx-ingress + + ## Specifies the name of the IngressLink resource, which exposes the Ingress Controller pods via a BIG-IP system. 
+    ## The IP of the BIG-IP system is used when reporting the status of Ingress, VirtualServer and VirtualServerRoute resources.
+    ## controller.reportIngressStatus.enable must be set to true.
+    ingressLink: ""
+
+    ## Enable Leader election to avoid multiple replicas of the controller reporting the status of Ingress resources. controller.reportIngressStatus.enable must be set to true.
+    enableLeaderElection: true
+
+    ## Specifies the name of the ConfigMap, within the same namespace as the controller, used as the lock for leader election. controller.reportIngressStatus.enableLeaderElection must be set to true.
+    ## Autogenerated if not set or set to "".
+    # leaderElectionLockName: "nginx-ingress-leader-election"
+
+    ## The annotations of the leader election configmap.
+    annotations: {}
+
+  pod:
+    ## The annotations of the Ingress Controller pod.
+    annotations: {}
+
+    ## The extra labels of the Ingress Controller pod.
+    extraLabels: {}
+
+  ## The PriorityClass of the Ingress Controller pods.
+  priorityClassName:
+
+  readyStatus:
+    ## Enables the readiness endpoint "/nginx-ready". The endpoint returns a success code when NGINX has loaded all the config after startup.
+    enable: true
+
+    ## Set the port where the readiness endpoint is exposed.
+    port: 8081
+
+  ## Enable collection of latency metrics for upstreams. Requires prometheus.create.
+  enableLatencyMetrics: false
+
+rbac:
+  ## Configures RBAC.
+  create: true
+
+prometheus:
+  ## Expose NGINX or NGINX Plus metrics in the Prometheus format.
+  create: true
+
+  ## Configures the port to scrape the metrics.
+  port: 9113
+
+  ## Specifies the namespace/name of a Kubernetes TLS Secret which will be used to protect the Prometheus endpoint.
+  secret: ""
+
+  ## Configures the HTTP scheme used.
+  scheme: http
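+
+## Editor's note (a sketch, not chart output): with prometheus.create=true the
+## controller pods expose metrics on port 9113; a scrape can be spot-checked with
+## something like (namespace and deployment names are placeholders):
+#   kubectl -n nginx-ingress port-forward deploy/nginx-ingress 9113
+#   curl -s localhost:9113/metrics | grep nginx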
+
+nginxServiceMesh:
+  ## Enables integration with NGINX Service Mesh.
+  ## Requires controller.nginxplus.
+  enable: false
+
+  ## Enables NGINX Service Mesh workloads to route egress traffic through the Ingress Controller.
+  ## Requires nginxServiceMesh.enable.
+  enableEgress: false
+
diff --git a/unused/nocodb.deployment.yaml b/unused/nocodb.deployment.yaml
new file mode 100644
index 0000000..5a33f39
--- /dev/null
+++ b/unused/nocodb.deployment.yaml
@@ -0,0 +1,75 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: nocodb
+  labels:
+    app: nocodb
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nocodb
+  namespace: nocodb
+  labels:
+    app: nocodb
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nocodb
+  template:
+    metadata:
+      labels:
+        app: nocodb
+    spec:
+      containers:
+        - name: nocodb
+          image: nocodb/nocodb
+          tty: true
+          ports:
+            - containerPort: 8080
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: nocodb
+  namespace: nocodb
+spec:
+  type: ClusterIP
+  ports:
+    - name: http
+      port: 8080
+  selector:
+    app: nocodb
+
+---
+kind: Ingress
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: nocodb
+  name: nocodb-ingress
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
+spec:
+  tls:
+    - hosts:
+        - nocodb.kluster.moll.re
+      secretName: cloudflare-letsencrypt-issuer-account-key
+  rules:
+    - host: nocodb.kluster.moll.re
+      http:
+        paths:
+          - backend:
+              service:
+                name: nocodb
+                port:
+                  number: 8080
+            path: /
+            pathType: Prefix
\ No newline at end of file
diff --git a/unused/pihole.ingress.yaml b/unused/pihole.ingress.yaml
new file mode 100644
index 0000000..4badd26
--- /dev/null
+++ b/unused/pihole.ingress.yaml
@@ -0,0 +1,26 @@
+kind: Ingress
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: pihole
+  name: pihole-ingress
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    cert-manager.io/cluster-issuer: cloudflare-letsencrypt-prod
+
+spec:
+  tls:
+    - hosts:
+        - pihole.kluster.moll.re
+      secretName: cloudflare-letsencrypt-issuer-account-key
+  rules:
+    - host: pihole.kluster.moll.re
+      http:
+        paths:
+          - backend:
+              service:
+                name: pihole-web
+                port:
+                  number: 80
+            path: /
+            pathType: Prefix
diff --git a/unused/pihole.persistentvolume.yml b/unused/pihole.persistentvolume.yml
new file mode 100644
index 0000000..23e9d19
--- /dev/null
+++ b/unused/pihole.persistentvolume.yml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  namespace: pihole
+  name: pihole-nfs
+  labels:
+    directory: pihole
+spec:
+  storageClassName: slow
+  capacity:
+    storage: "500Mi"
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteOnce
+  nfs:
+    path: /pihole
+    server: 10.43.239.43 # assigned to the nfs-server service. Won't change as long as the service is not redeployed
+---
+## pihole.persistentvolumeclaim.yml
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  namespace: pihole
+  name: pihole-nfs
+spec:
+  storageClassName: slow
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: "500Mi"
+  selector:
+    matchLabels:
+      directory: pihole
+---
\ No newline at end of file
diff --git a/unused/pihole.values.yml b/unused/pihole.values.yml
new file mode 100644
index 0000000..9a4f098
--- /dev/null
+++ b/unused/pihole.values.yml
@@ -0,0 +1,397 @@
+# Default values for pihole.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# -- The number of replicas
+replicaCount: 1
+
+# -- The `spec.strategyType` for updates
+strategyType: RollingUpdate
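+
+# Editor's note (sketch): the three update-strategy values here and just below render
+# into the Deployment roughly as:
+#   spec:
+#     strategy:
+#       type: RollingUpdate
+#       rollingUpdate:
+#         maxSurge: 1
+#         maxUnavailable: 1
+# With a single replica backed by a ReadWriteOnce volume, `Recreate` can avoid two
+# pods contending for the same PVC during an update.
+
+# -- The maximum number of Pods that can be created above the desired number of replicas during an update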
+maxSurge: 1
+
+# -- The maximum number of Pods that can be unavailable during an update
+maxUnavailable: 1
+
+image:
+  # -- the repository to pull the image from
+  repository: "pihole/pihole"
+  # -- the docker tag, if left empty it will get it from the chart's appVersion
+  tag: ""
+  # -- the pull policy
+  pullPolicy: IfNotPresent
+
+dualStack:
+  # -- set this to true to enable creation of DualStack services or creation of separate IPv6 services if `serviceDns.type` is set to `"LoadBalancer"`
+  enabled: false
+
+dnsHostPort:
+  # -- set this to true to enable dnsHostPort
+  enabled: false
+  # -- default port for this pod
+  port: 53
+
+# -- Configuration for the DNS service on port 53
+serviceDns:
+
+  # -- deploys a mixed (TCP + UDP) Service instead of separate ones
+  mixedService: false
+
+  # -- `spec.type` for the DNS Service
+  type: LoadBalancer
+
+  # -- The port of the DNS service
+  port: 53
+
+  # -- Optional node port for the DNS service
+  nodePort: ""
+
+  # -- `spec.externalTrafficPolicy` for the DNS Service
+  externalTrafficPolicy: Local
+
+  # -- A fixed `spec.loadBalancerIP` for the DNS Service
+  loadBalancerIP: 192.168.1.3
+  # -- A fixed `spec.loadBalancerIP` for the IPv6 DNS Service
+  loadBalancerIPv6: ""
+
+  # -- Annotations for the DNS service
+  annotations:
+    # metallb.universe.tf/address-pool: network-services
+    metallb.universe.tf/allow-shared-ip: pihole-svc
+
+# -- Configuration for the DHCP service on port 67
+serviceDhcp:
+
+  # -- Generate a Service resource for DHCP traffic
+  enabled: false
+
+  # -- `spec.type` for the DHCP Service
+  type: NodePort
+
+  # -- `spec.externalTrafficPolicy` for the DHCP Service
+  externalTrafficPolicy: Local
+
+  # -- A fixed `spec.loadBalancerIP` for the DHCP Service
+  loadBalancerIP: ""
+  # -- A fixed `spec.loadBalancerIP` for the IPv6 DHCP Service
+  loadBalancerIPv6: ""
+
+  # -- Annotations for the DHCP service
+  annotations: {}
+  # metallb.universe.tf/address-pool: network-services
+  # metallb.universe.tf/allow-shared-ip: pihole-svc
+
+# -- Configuration for the web interface service
+serviceWeb:
+  # -- Configuration for the HTTP web interface listener
+  http:
+
+    # -- Generate a service for HTTP traffic
+    enabled: true
+
+    # -- The port of the web HTTP service
+    port: 80
+
+  # -- Configuration for the HTTPS web interface listener
+  https:
+    # -- Generate a service for HTTPS traffic
+    enabled: true
+
+    # -- The port of the web HTTPS service
+    port: 443
+
+  # -- `spec.type` for the web interface Service
+  type: ClusterIP
+
+  # -- `spec.externalTrafficPolicy` for the web interface Service
+  externalTrafficPolicy: Local
+
+  # -- A fixed `spec.loadBalancerIP` for the web interface Service
+  loadBalancerIP: ""
+  # -- A fixed `spec.loadBalancerIP` for the IPv6 web interface Service
+  loadBalancerIPv6: ""
+
+  # -- Annotations for the web interface service
+  annotations: {}
+  # metallb.universe.tf/address-pool: network-services
+  # metallb.universe.tf/allow-shared-ip: pihole-svc
+
+virtualHost: pi.hole
+
+# -- Configuration for the Ingress
+ingress:
+  # -- Generate an Ingress resource
+  enabled: false # DONE EXTERNALLY
+
+  # -- Specify an ingressClassName
+  # ingressClassName: nginx
+
+  # -- Annotations for the ingress
+  annotations: {}
+  # kubernetes.io/ingress.class: nginx
+  # kubernetes.io/tls-acme: "true"
+  path: /
+  hosts:
+    # virtualHost (default value is pi.hole) will be appended to the hosts
+    - chart-example.local
+  tls: []
+  # - secretName: chart-example-tls
+  #   hosts:
+  #     #- virtualHost (default value is pi.hole) will be appended to the hosts
+  #     - chart-example.local
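+
+# Editor's sketch (illustrative, not chart defaults): the allow-shared-ip annotation
+# under serviceDns above is what lets MetalLB place a TCP and a UDP DNS Service on
+# one LoadBalancer IP; both Services must carry the same annotation value and either
+# select the same pods or use externalTrafficPolicy: Cluster, e.g.:
+# metadata:
+#   annotations:
+#     metallb.universe.tf/allow-shared-ip: pihole-svc
+# spec:
+#   type: LoadBalancer
+#   loadBalancerIP: 192.168.1.3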
+
+# -- Probes configuration
+probes:
+  # -- probes.liveness -- Configure the healthcheck for the pihole container
+  liveness:
+    # -- Generate a liveness probe
+    enabled: true
+    initialDelaySeconds: 60
+    failureThreshold: 10
+    timeoutSeconds: 5
+  readiness:
+    # -- Generate a readiness probe
+    enabled: true
+    initialDelaySeconds: 60
+    failureThreshold: 3
+    timeoutSeconds: 5
+
+# -- We usually recommend not to specify default resources and to leave this as a conscious
+# -- choice for the user. This also increases the chances that charts run on environments with little
+# -- resources, such as Minikube. If you do want to specify resources, uncomment the following
+# -- lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+# -- `PersistentVolumeClaim` configuration
+persistentVolumeClaim:
+  # -- set to true to use a PVC
+  enabled: true
+
+  # -- specify an existing `PersistentVolumeClaim` to use
+  existingClaim: "pihole-nfs"
+
+  # -- Annotations for the `PersistentVolumeClaim`
+  annotations: {}
+
+  accessModes:
+    - ReadWriteOnce
+
+  size: "500Mi"
+
+  ## If defined, storageClassName:
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+
+  ## If subPath is set, mount a sub folder of a volume instead of the root of the volume.
+  ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
+
+  ## subPath: "pihole"
+
+nodeSelector: {}
+
+tolerations: []
+
+# -- Specify a priorityClassName
+# priorityClassName: ""
+
+# Reference: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+topologySpreadConstraints: []
+# - maxSkew:
+#   topologyKey:
+#   whenUnsatisfiable:
+#   labelSelector:
+
+affinity: {}
+
+# -- Administrator password when not using an existing secret (see below)
+adminPassword: "admin"
+
+# -- Use an existing secret for the admin password.
+admin:
+  # -- Specify an existing secret to use as the admin password
+  existingSecret: ""
+  # -- Specify the key inside the secret to use
+  passwordKey: ""
+
+# -- extraEnvVars is a list of extra environment variables to set for pihole to use
+extraEnvVars: {}
+  # TZ: UTC
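+
+# Editor's sketch: rather than the plain-text adminPassword above, an existing secret
+# can be referenced via admin.existingSecret / admin.passwordKey. E.g. a secret created
+# with (name and namespace are placeholders):
+#   kubectl -n pihole create secret generic pihole-admin --from-literal=password=...
+# would be wired in as:
+# admin:
+#   existingSecret: "pihole-admin"
+#   passwordKey: "password"
+
+# -- extraEnvVarsSecret is a list of secrets to load in as environment variables.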
+extraEnvVarsSecret: {}
+  # env_var:
+  #   name: secret-name
+  #   key: secret-key
+
+# -- default upstream DNS 1 server to use
+DNS1: "8.8.8.8"
+# -- default upstream DNS 2 server to use
+DNS2: "8.8.4.4"
+
+antiaff:
+  # -- set to true to enable antiaffinity (example: 2 pihole DNS in the same cluster)
+  enabled: false
+  # -- Here you can set the pihole release (you set in `helm install ...`)
+  # you want to avoid
+  avoidRelease: pihole1
+  # -- Here you can choose between preferred or required
+  strict: true
+
+doh:
+  # -- set to true to enable DNS over HTTPS via cloudflared
+  enabled: false
+  name: "cloudflared"
+  repository: "crazymax/cloudflared"
+  tag: latest
+  pullPolicy: IfNotPresent
+  # -- Here you can pass environment variables to the DoH container, for example:
+  envVars: {}
+  # TUNNEL_DNS_UPSTREAM: "https://1.1.1.2/dns-query,https://1.0.0.2/dns-query"
+
+  # -- Probes configuration
+  probes:
+    # -- Configure the healthcheck for the doh container
+    liveness:
+      # -- set to true to enable the liveness probe
+      enabled: true
+      # -- defines the initial delay for the liveness probe
+      initialDelaySeconds: 60
+      # -- defines the failure threshold for the liveness probe
+      failureThreshold: 10
+      # -- defines the timeout in seconds for the liveness probe
+      timeoutSeconds: 5
+
+dnsmasq:
+  # -- Add upstream dns servers. All lines will be added to the pihole dnsmasq configuration
+  upstreamServers: []
+  # - server=/foo.bar/192.168.178.10
+  # - server=/bar.foo/192.168.178.11
+
+  # -- Add custom dns entries to override the dns resolution. All lines will be added to the pihole dnsmasq configuration.
+  customDnsEntries: []
+  # - address=/foo.bar/192.168.178.10
+  # - address=/bar.foo/192.168.178.11
+
+  # -- Dnsmasq reads the /etc/hosts file to resolve ips. You can add additional entries if you like
+  additionalHostsEntries: []
+  # - 192.168.0.3 host4
+  # - 192.168.0.4 host5
+
+  # -- Static DHCP config
+  staticDhcpEntries: []
+  # staticDhcpEntries:
+  # - dhcp-host=MAC_ADDRESS,IP_ADDRESS,HOSTNAME
+
+  # -- Other options
+  customSettings:
+  # otherSettings:
+  # - rebind-domain-ok=/plex.direct/
+
+  # -- Here we specify custom cname entries that should point to `A` records or
+  # elements in the customDnsEntries array.
+  # The format should be:
+  # - cname=cname.foo.bar,foo.bar
+  # - cname=cname.bar.foo,bar.foo
+  # - cname=cname record,dns record
+  customCnameEntries: []
+
+# -- list of adlists to import during initial start of the container
+adlists: {}
+  # If you want to provide blocklists, add them here.
+  # - https://hosts-file.net/grm.txt
+  # - https://reddestdream.github.io/Projects/MinimalHosts/etc/MinimalHostsBlocker/minimalhosts
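+
+# Editor's note (sketch): once deployed, blocking can be spot-checked from a LAN client
+# with dig against the serviceDns.loadBalancerIP configured above; with the default
+# NULL blocking mode, a known-blocked domain typically resolves to 0.0.0.0:
+#   dig +short @192.168.1.3 doubleclick.net
+
+# -- list of whitelisted domains to import during initial start of the container
+whitelist: {}
+  # If you want to provide whitelisted domains, add them here.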
+  # - clients4.google.com
+
+# -- list of blacklisted domains to import during initial start of the container
+blacklist: {}
+  # If you want to have special domains blacklisted, add them here
+  # - *.blacklist.com
+
+# -- list of blacklisted regex expressions to import during initial start of the container
+regex: {}
+  # Add regular expression blacklist items
+  # - (^|\.)facebook\.com$
+
+# -- values that should be added to pihole-FTL.conf
+ftl: {}
+  # Add values for pihole-FTL.conf
+  # MAXDBDAYS: 14
+
+# -- port the container should use to expose HTTP traffic
+webHttp: "80"
+
+# -- port the container should use to expose HTTPS traffic
+webHttps: "443"
+
+# -- hostname of pod
+hostname: ""
+
+# -- should the container use the host network
+hostNetwork: "false"
+
+# -- should the container run in privileged mode
+privileged: "false"
+
+customVolumes:
+  # -- set this to true to enable custom volumes
+  enabled: false
+  # -- any volume type can be used here
+  config: {}
+  # hostPath:
+  #   path: "/mnt/data"
+
+# -- Additional annotations for pods
+podAnnotations: {}
+  # The example below allows Prometheus to scrape the metrics port (requires the pihole-exporter sidecar to be enabled)
+  # prometheus.io/port: '9617'
+  # prometheus.io/scrape: 'true'
+
+monitoring:
+  # -- Prefer Prometheus scrape annotations over enabling the podMonitor.
+  podMonitor:
+    # -- set this to true to enable the podMonitor
+    enabled: false
+  # -- Sidecar configuration
+  sidecar:
+    # -- set this to true to enable the metrics-exporter sidecar
+    enabled: false
+    port: 9617
+    image:
+      repository: ekofr/pihole-exporter
+      tag: 0.0.10
+      pullPolicy: IfNotPresent
+    resources:
+      limits:
+        memory: 128Mi
+      # requests:
+      #   cpu: 100m
+      #   memory: 128Mi
+
+podDnsConfig:
+  enabled: true
+  policy: "None"
+  nameservers:
+    - 127.0.0.1
+    - 8.8.8.8
+
diff --git a/unused/portainer/deployment.yaml b/unused/portainer/deployment.yaml
new file mode 100644
index 0000000..0fd8921
--- /dev/null
+++ b/unused/portainer/deployment.yaml
@@ -0,0 +1,68 @@
+# Default values for portainer.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
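+
+# Editor's note (a sketch; chart repo and release name assumed, not from this repo):
+# despite the file name, this is a Helm values file, applied with something like:
+#   helm repo add portainer https://portainer.github.io/k8s/
+#   helm upgrade --install portainer portainer/portainer \
+#     -n portainer -f unused/portainer/deployment.yaml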
+ +replicaCount: 1 + +# If enterpriseEdition is enabled, then use the values below _instead_ of those in .image +enterpriseEdition: + enabled: false + image: + repository: portainer/portainer-ee + tag: 2.12.2 + pullPolicy: Always + +image: + repository: portainer/portainer-ce + tag: latest + pullPolicy: Always + +imagePullSecrets: [] + +nodeSelector: {} + +serviceAccount: + annotations: {} + name: portainer-sa-clusteradmin + +service: + # Set the httpNodePort and edgeNodePort only if the type is NodePort + # For Ingress, set the type to be ClusterIP and set ingress.enabled to true + # For Cloud Providers, set the type to be LoadBalancer + type: ClusterIP + httpPort: 9000 + httpsPort: 9443 + httpNodePort: 30777 + httpsNodePort: 30779 + edgePort: 8000 + edgeNodePort: 30776 + annotations: {} + +tls: + # If set, Portainer will be configured to use TLS only + force: false + # If set, will mount the existing secret into the pod + existingSecret: "" + +feature: + flags: "" + +ingress: + enabled: false + ingressClassName: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # Only use below if tls.force=true + # nginx.ingress.kubernetes.io/backend-protocol: HTTPS + # Note: Hosts and paths are of type array + hosts: + - host: + paths: [] + # - path: "/" + tls: [] + +resources: {} + +persistence: + existingClaim: portainer-data + diff --git a/unused/portainer/ingress.yaml b/unused/portainer/ingress.yaml new file mode 100644 index 0000000..d7e2509 --- /dev/null +++ b/unused/portainer/ingress.yaml @@ -0,0 +1,17 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute +metadata: + namespace: portainer + name: portainer-ingressroute + +spec: + entryPoints: + - websecure + routes: + - match: Host(`portainer.kluster.moll.re`) + kind: Rule + services: + - name: portainer + port: 9000 + tls: + certResolver: default-tls \ No newline at end of file diff --git a/unused/portainer/pvc.yaml b/unused/portainer/pvc.yaml new file mode 100644 index 0000000..12e7e65 --- /dev/null +++ b/unused/portainer/pvc.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: portainer + name: portainer-data + labels: + directory: portainer +spec: + storageClassName: fast + capacity: + storage: "10Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /portainer + server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: portainer + name: portainer-data +spec: + storageClassName: fast + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "10Gi" + selector: + matchLabels: + directory: portainer + + + diff --git a/unused/prometheus.pv.yml b/unused/prometheus.pv.yml new file mode 100644 index 0000000..d0d3b49 --- /dev/null +++ b/unused/prometheus.pv.yml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + namespace: monitoring + name: prometheus-data-nfs + labels: + directory: prometheus +spec: + storageClassName: slow + capacity: + storage: "50Gi" + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + nfs: + path: /prometheus + server: 10.43.239.43 # assigned to nfs-server service. Won't change as long as service is not redeployed +--- \ No newline at end of file diff --git a/unused/prometheus.values.yaml b/unused/prometheus.values.yaml new file mode 100644 index 0000000..54fdc72 --- /dev/null +++ b/unused/prometheus.values.yaml @@ -0,0 +1,2154 @@ +# Default values for kube-prometheus-stack. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Provide a name in place of kube-prometheus-stack for `app:` labels +## +nameOverride: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 +## +kubeTargetVersionOverride: "" + +## Allow kubeVersion to be overridden while creating the ingress +## +kubeVersionOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +## Create default rules for monitoring the cluster +## +defaultRules: + create: true + rules: + alertmanager: true + etcd: true + configReloaders: true + general: true + k8s: true + kubeApiserverAvailability: true + kubeApiserverBurnrate: true + kubeApiserverHistogram: true + kubeApiserverSlos: true + kubelet: true + kubeProxy: true + kubePrometheusGeneral: true + kubePrometheusNodeRecording: true + kubernetesApps: true + kubernetesResources: true + kubernetesStorage: true + kubernetesSystem: true + kubeScheduler: true + kubeStateMetrics: true + network: true + node: true + nodeExporterAlerting: true + nodeExporterRecording: true + prometheus: true + prometheusOperator: true + + ## Reduce app namespace alert scope + appNamespacesTarget: ".*" + + ## Labels for default rules + labels: {} + ## Annotations for default rules + annotations: {} + + ## Additional labels for PrometheusRule alerts + additionalRuleLabels: {} + + ## Additional annotations for PrometheusRule alerts + additionalRuleAnnotations: {} + + ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules. + runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks" + + ## Disabled PrometheusRule alerts + disabled: {} + # KubeAPIDown: true + # NodeRAIDDegraded: true + +## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. +## +# additionalPrometheusRules: [] +# - name: my-rule-file +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## Provide custom recording or alerting rules to be deployed into the cluster. 
+## +additionalPrometheusRulesMap: {} +# rule-name: +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## +global: + rbac: + create: true + + ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles + createAggregateClusterRoles: false + pspEnabled: false + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + # or + # - "image-pull-secret" + +## Configuration for alertmanager +## ref: https://prometheus.io/docs/alerting/alertmanager/ +## + +## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml +## +grafana: + enabled: true + +## Component scraping the kube api server +## +kubeApiServer: + enabled: true + tlsConfig: + serverName: kubernetes + insecureSkipVerify: false + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + jobLabel: component + selector: + matchLabels: + component: apiserver + provider: kubernetes + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 + +## Component scraping the kubelet and kubelet-hosted cAdvisor +## +kubelet: + enabled: true + namespace: kube-system + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping the kubelet over https. 
For requirements to enable this see + ## https://github.com/prometheus-operator/prometheus-operator/issues/926 + ## + https: true + + ## Enable scraping /metrics/cadvisor from kubelet's service + ## + cAdvisor: true + + ## Enable scraping /metrics/probes from kubelet's service + ## + probes: true + + ## Enable scraping /metrics/resource from kubelet's service + ## This is disabled by default because container metrics are already exposed by cAdvisor + ## + resource: false + # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource + resourcePath: "/metrics/resource/v1alpha1" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + cAdvisorMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + cAdvisorRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + resourceRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping the kube controller manager +## +kubeControllerManager: + enabled: true + + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeControllerManager.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.22. + ## + port: null + targetPort: null + # selector: + # component: kube-controller-manager + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping kube-controller-manager over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + # Skip TLS certificate validation when scraping + insecureSkipVerify: null + + # Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping coreDns. Use either this or kubeDns +## +coreDns: + enabled: true + service: + port: 9153 + targetPort: 9153 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping kubeDns. Use either this or coreDns +## +kubeDns: + enabled: false + service: + dnsmasq: + port: 10054 + targetPort: 10054 + skydns: + port: 10055 + targetPort: 10055 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqMetricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqRelabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping etcd +## +kubeEtcd: + enabled: true + + ## If your etcd is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 2379 + targetPort: 2379 + # selector: + # component: etcd + + ## Configure secure access to the etcd cluster by loading a secret into prometheus and + ## specifying security configuration below. For example, with a secret named etcd-client-cert + ## + ## serviceMonitor: + ## scheme: https + ## insecureSkipVerify: false + ## serverName: localhost + ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client + ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + ## + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ ## + interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + scheme: http + insecureSkipVerify: false + serverName: "" + caFile: "" + certFile: "" + keyFile: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + +## Component scraping kube scheduler +## +kubeScheduler: + enabled: true + + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeScheduler.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.23. + ## + port: null + targetPort: null + # selector: + # component: kube-scheduler + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping kube-scheduler over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + ## Skip TLS certificate validation when scraping + insecureSkipVerify: null + + ## Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + +## Component scraping kube proxy +## +kubeProxy: + enabled: true + + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + service: + enabled: true + port: 10249 + targetPort: 10249 + # selector: + # k8s-app: kube-proxy + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping kube-proxy over https. 
+    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
+    ##
+    https: false
+
+    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+    ##
+    metricRelabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+    ## RelabelConfigs to apply to samples before scraping
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+    ##
+    relabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+
+## Component scraping kube-state-metrics
+##
+kubeStateMetrics:
+  enabled: true
+
+## Configuration for the kube-state-metrics subchart
+##
+kube-state-metrics:
+  namespaceOverride: ""
+  rbac:
+    create: true
+  releaseLabel: true
+  prometheus:
+    monitor:
+      enabled: true
+
+      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+      ##
+      interval: ""
+
+      ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
+      ##
+      scrapeTimeout: ""
+
+      ## proxyUrl: URL of a proxy that should be used for scraping.
+      ##
+      proxyUrl: ""
+
+      # Keep labels from scraped data, overriding server-side labels
+      ##
+      honorLabels: true
+
+      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+      ##
+      metricRelabelings: []
+      # - action: keep
+      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+      #   sourceLabels: [__name__]
+
+      ## RelabelConfigs to apply to samples before scraping
+      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+      ##
+      relabelings: []
+      # - sourceLabels: [__meta_kubernetes_pod_node_name]
+      #   separator: ;
+      #   regex: ^(.*)$
+      #   targetLabel: nodename
+      #   replacement: $1
+      #   action: replace
+
+  selfMonitor:
+    enabled: false
+
+## Deploy node-exporter as a daemonset to all nodes
+##
+nodeExporter:
+  enabled: true
+
+## Configuration for the prometheus-node-exporter subchart
+##
+prometheus-node-exporter:
+  namespaceOverride: ""
+  podLabels:
+    ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards
+    ##
+    jobLabel: node-exporter
+  extraArgs:
+    - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
+    - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
+  service:
+    portName: http-metrics
+  prometheus:
+    monitor:
+      enabled: true
+
+      jobLabel: jobLabel
+
+      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+      ##
+      interval: ""
+
+      ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
+      ##
+      scrapeTimeout: ""
+
+      ## proxyUrl: URL of a proxy that should be used for scraping.
+      ##
+      proxyUrl: ""
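+
+      ## Editor's note (sketch): with node-exporter scraped, per-node filesystem usage
+      ## can be checked in Prometheus with a query along the lines of:
+      ##   1 - node_filesystem_avail_bytes{fstype!~"tmpfs"} / node_filesystem_size_bytes
+
+      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.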
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__] + # separator: ; + # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + rbac: + ## If true, create PSPs for node-exporter + ## + pspEnabled: false + +## Manages Prometheus and Alertmanager components +## +prometheusOperator: + enabled: true + + ## Prometheus-Operator v0.39.0 and later support TLS natively. + ## + tls: + enabled: true + # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + tlsMinVersion: VersionTLS13 + # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. + internalPort: 10250 + + ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted + ## rules from making their way into prometheus and potentially preventing the container from starting + admissionWebhooks: + failurePolicy: Fail + enabled: true + ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. + ## If unspecified, system trust roots on the apiserver are used. + caBundle: "" + ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. + ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own + ## certs ahead of time if you wish. + ## + patch: + enabled: true + image: + repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen + tag: v1.1.1 + sha: "" + pullPolicy: IfNotPresent + resources: {} + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + affinity: {} + tolerations: [] + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + + # Use certmanager to generate webhook certs + certManager: + enabled: false + # self-signed root certificate + rootCert: + duration: "" # default to be 5y + admissionCert: + duration: "" # default to be 1y + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + + ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). + ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration + ## + namespaces: {} + # releaseNamespace: true + # additional: + # - kube-system + + ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). 
+ ## + denyNamespaces: [] + + ## Filter namespaces to look for prometheus-operator custom resources + ## + alertmanagerInstanceNamespaces: [] + prometheusInstanceNamespaces: [] + thanosRulerInstanceNamespaces: [] + + ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. + ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) + ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 + ## + # clusterDomain: "cluster.local" + + ## Service account for Alertmanager to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + + ## Configuration for Prometheus operator service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30080 + + nodePortTls: 30443 + + ## Additional ports to open for Prometheus service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + ## + additionalPorts: [] + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + ## + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## NodePort, ClusterIP, LoadBalancer + ## + type: ClusterIP + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Labels to add to the operator pod + ## + podLabels: {} + + ## Annotations to add to the operator pod + ## + podAnnotations: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Define Log Format + # Use logfmt (default) or json logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + ## If true, the operator will create and maintain a service for scraping kubelets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md + ## + kubeletService: + enabled: true + namespace: kube-system + ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default + name: "" + + ## Create a servicemonitor for the operator + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + selfMonitor: true + + ## Metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
+    ##
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+  ## Resource limits & requests
+  ##
+  resources: {}
+  # limits:
+  #   cpu: 200m
+  #   memory: 200Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 100Mi
+
+  # Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as calico),
+  # because the control plane managed by AWS cannot communicate with the pods' IP CIDR, so admission webhooks would not work
+  ##
+  hostNetwork: false
+
+  ## Define which Nodes the Pods are scheduled on.
+  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+
+  ## Tolerations for use with node taints
+  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  # - key: "key"
+  #   operator: "Equal"
+  #   value: "value"
+  #   effect: "NoSchedule"
+
+  ## Assign custom affinity rules to the prometheus operator
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  affinity: {}
+  # nodeAffinity:
+  #   requiredDuringSchedulingIgnoredDuringExecution:
+  #     nodeSelectorTerms:
+  #     - matchExpressions:
+  #       - key: kubernetes.io/e2e-az-name
+  #         operator: In
+  #         values:
+  #         - e2e-az1
+  #         - e2e-az2
+
+  dnsConfig: {}
+  # nameservers:
+  #   - 1.2.3.4
+  # searches:
+  #   - ns1.svc.cluster-domain.example
+  #   - my.dns.search.suffix
+  # options:
+  #   - name: ndots
+  #     value: "2"
+  #   - name: edns0
+
+  securityContext:
+    fsGroup: 65534
+    runAsGroup: 65534
+    runAsNonRoot: true
+    runAsUser: 65534
+
+  ## Container-specific security context configuration
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  containerSecurityContext:
+    allowPrivilegeEscalation: false
+    readOnlyRootFilesystem: true
+
+  ## Prometheus-operator image
+  ##
+  image:
+    repository: quay.io/prometheus-operator/prometheus-operator
+    tag: v0.57.0
+    sha: ""
+    pullPolicy: IfNotPresent
+
+  ## Prometheus image to use for prometheuses managed by the operator
+  ##
+  # prometheusDefaultBaseImage: quay.io/prometheus/prometheus
+
+  ## Alertmanager image to use for alertmanagers managed by the operator
+  ##
+  # alertmanagerDefaultBaseImage: quay.io/prometheus/alertmanager
+
+  ## Prometheus-config-reloader
+  ##
+  prometheusConfigReloader:
+    # image to use for config and rule reloading
+    image:
+      repository: quay.io/prometheus-operator/prometheus-config-reloader
+      tag: v0.57.0
+      sha: ""
+
+    # resource config for prometheusConfigReloader
+    resources:
+      requests:
+        cpu: 200m
+        memory: 50Mi
+      limits:
+        cpu: 200m
+        memory: 50Mi
+
+  ## Thanos sidecar image when configured
+  ##
+  thanosImage:
+    repository: quay.io/thanos/thanos
+    tag: v0.25.2
+    sha: ""
+
+  ## Set a Field Selector to filter watched secrets
+  ##
+  secretFieldSelector: ""
+
+## Deploy a Prometheus instance
+##
+prometheus:
+
+  enabled: true
+
+  ## Annotations for Prometheus
+  ##
+  annotations: {}
+
+  ## Service account for Prometheuses to use.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  ##
+  serviceAccount:
+    create: true
+    name: ""
+    annotations: {}
+
+  # Service for Thanos service discovery on the sidecar.
+  # Enabling this lets Thanos Query use
+  # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover the
+  # Thanos sidecar on Prometheus nodes
+  # (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}.
Not just copy and paste!) + thanosService: + enabled: false + annotations: {} + labels: {} + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## gRPC port config + portName: grpc + port: 10901 + targetPort: "grpc" + + ## HTTP port config (for metrics) + httpPortName: http + httpPort: 10902 + targetHttpPort: "http" + + ## ClusterIP to assign + # Default is to make this a headless service ("None") + clusterIP: "None" + + ## Port to expose on each node, if service type is NodePort + ## + nodePort: 30901 + httpNodePort: 30902 + + # ServiceMonitor to scrape Sidecar metrics + # Needs thanosService to be enabled as well + thanosServiceMonitor: + enabled: false + interval: "" + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## Metric relabel configs to apply to samples before ingestion. + metricRelabelings: [] + + ## relabel configs to apply to samples before ingestion. + relabelings: [] + + # Service for external access to sidecar + # Enabling this creates a service to expose thanos-sidecar outside the cluster. + thanosServiceExternal: + enabled: false + annotations: {} + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## gRPC port config + portName: grpc + port: 10901 + targetPort: "grpc" + + ## HTTP port config (for metrics) + httpPortName: http + httpPort: 10902 + targetHttpPort: "http" + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: LoadBalancer + + ## Port to expose on each node + ## + nodePort: 30901 + httpNodePort: 30902 + + ## Configuration for Prometheus service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port for Prometheus Service to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30090 + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Additional port to define in the Service + additionalPorts: [] + + ## Consider that all endpoints are considered "ready" even if the Pods themselves are not + ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + publishNotReadyAddresses: false + + sessionAffinity: "" + + ## Configuration for creating a separate Service for each statefulset Prometheus replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Prometheus Service per replica to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## Port to expose on each node + ## Only 
used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30091 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Configure pod disruption budgets for Prometheus + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## This configuration is immutable once created and will require the PDB to be deleted to be changed + ## https://github.com/kubernetes/kubernetes/issues/45398 + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + # Ingress exposes thanos sidecar outside the cluster + thanosIngress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + servicePort: 10901 + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30901 + + ## Hosts must be provided if Ingress is enabled. + ## + hosts: [] + # - thanos-gateway.domain.com + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Thanos Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: thanos-gateway-tls + # hosts: + # - thanos-gateway.domain.com + # + + ## ExtraSecret can be used to store various data in an extra secret + ## (use it for example to store hashed basic auth credentials) + extraSecret: + ## if not set, name will be auto generated + # name: "" + annotations: {} + data: {} + # auth: | + # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 + # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Hostnames. + ## Must be provided if Ingress is enabled. 
+ ## + # hosts: + # - prometheus.domain.com + hosts: [] + + ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Prometheus Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-general-tls + # hosts: + # - prometheus.example.com + + ## Configuration for creating an Ingress that will map to each Prometheus replica service + ## prometheus.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for Prometheus per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separate secret for each per-replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "prometheus" + + ## Configure additional options for default pod security policy for Prometheus + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + podSecurityPolicy: + allowedCapabilities: [] + allowedHostPaths: [] + volumes: [] + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## Metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__]
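+ + ## For instance, the scheme/tlsConfig fields above could point at istio's sidecar certificates when scraping through strict mTLS. A sketch only; the paths below are assumed istio defaults, not verified for this cluster: + # scheme: https + # tlsConfig: + #   caFile: /etc/istio-certs/root-cert.pem + #   certFile: /etc/istio-certs/cert-chain.pem + #   keyFile: /etc/istio-certs/key.pem + #   insecureSkipVerify: true + + ## Relabel configs to apply to samples before ingestion.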
+ ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Settings affecting prometheusSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec + ## + prometheusSpec: + ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos + ## + disableCompaction: false + ## APIServerConfig + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig + ## + apiserverConfig: {} + + ## Interval between consecutive scrapes. + ## Defaults to 30s. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183 + ## + scrapeInterval: "" + + ## Number of seconds to wait for target to respond before erroring + ## + scrapeTimeout: "" + + ## Interval between consecutive evaluations. + ## + evaluationInterval: "" + + ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. + ## + listenLocal: false + + ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series. + ## This is disabled by default. + ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + ## + enableAdminAPI: false + + ## WebTLSConfig defines the TLS parameters for HTTPS + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig + web: {} + + # EnableFeatures enables access to Prometheus disabled features. + # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ + enableFeatures: [] + # - exemplar-storage + + ## Image of Prometheus. + ## + image: + repository: quay.io/prometheus/prometheus + tag: v2.36.1 + sha: "" + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule"
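+ + ## As an illustration of the scrapeInterval/scrapeTimeout/evaluationInterval knobs above, a tighter cadence might look like this (values are examples, not recommendations): + # scrapeInterval: "15s" + # scrapeTimeout: "10s" + # evaluationInterval: "15s" + + ## If specified, the pod's topology spread constraints.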
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: prometheus + + ## Alertmanagers to which alerts will be sent + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints + ## + ## Default configuration will connect to the alertmanager deployed as part of this release + ## + alertingEndpoints: [] + # - name: "" + # namespace: "" + # port: http + # scheme: http + # pathPrefix: "" + # tlsConfig: {} + # bearerTokenFile: "" + # apiVersion: v2 + + ## External labels to add to any time series or alerts when communicating with external systems + ## + externalLabels: {} + + ## enable --web.enable-remote-write-receiver flag on prometheus-server + ## + enableRemoteWriteReceiver: false + + ## Name of the external label used to denote replica name + ## + replicaExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote replica name + ## + replicaExternalLabelNameClear: false + + ## Name of the external label used to denote Prometheus instance name + ## + prometheusExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote Prometheus instance name + ## + prometheusExternalLabelNameClear: false + + ## External URL at which Prometheus will be reachable. + ## + externalUrl: "" + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not + ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated + ## with the new list of secrets. + ## + secrets: [] + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. + ## + configMaps: [] + + ## QuerySpec defines the query command line flags when starting Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec + ## + query: {} + + ## Namespaces to be selected for PrometheusRules discovery. + ## If nil, select own namespace. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage + ## + ruleNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the PrometheusRule resources created + ## + ruleSelectorNilUsesHelmValues: true
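+ + ## A hedged sketch for the namespace selector above: restrict PrometheusRule discovery to namespaces carrying a hypothetical team=monitoring label: + # ruleNamespaceSelector: + #   matchLabels: + #     team: monitoring + + ## PrometheusRules to be selected for target discovery.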
+ ## If {}, select all PrometheusRules + ## + ruleSelector: {} + ## Example which selects all PrometheusRules resources + ## with label "prometheus" with values any of "example-rules" or "example-rules-2" + # ruleSelector: + # matchExpressions: + # - key: prometheus + # operator: In + # values: + # - example-rules + # - example-rules-2 + # + ## Example which selects all PrometheusRules resources with label "role" set to "example-rules" + # ruleSelector: + # matchLabels: + # role: example-rules + + ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the servicemonitors created + ## + serviceMonitorSelectorNilUsesHelmValues: true + + ## ServiceMonitors to be selected for target discovery. + ## If {}, select all ServiceMonitors + ## + serviceMonitorSelector: {} + ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" + # serviceMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for ServiceMonitor discovery. + ## + serviceMonitorNamespaceSelector: {} + ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel" + # serviceMonitorNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the podmonitors created + ## + podMonitorSelectorNilUsesHelmValues: true + + ## PodMonitors to be selected for target discovery. + ## If {}, select all PodMonitors + ## + podMonitorSelector: {} + ## Example which selects PodMonitors with label "prometheus" set to "somelabel" + # podMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for PodMonitor discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage + ## + podMonitorNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the probes created + ## + probeSelectorNilUsesHelmValues: true + + ## Probes to be selected for target discovery. + ## If {}, select all Probes + ## + probeSelector: {} + ## Example which selects Probes with label "prometheus" set to "somelabel" + # probeSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for Probe discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage + ## + probeNamespaceSelector: {} + + ## How long to retain metrics + ## + retention: 10d + + ## Maximum size of metrics storage, e.g. "50GB" + ## + retentionSize: "" + + ## Enable compression of the write-ahead log using Snappy. + ## + walCompression: false + + ## If true, the Operator won't process any Prometheus configuration changes + ## + paused: false + + ## Number of replicas of each shard to deploy for a Prometheus deployment. + ## Number of replicas multiplied by shards is the total number of Pods created. + ## + replicas: 1
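+ + ## For instance (values are illustrative only), an HA pair with size-capped retention could be expressed as: + # replicas: 2 + # retention: 15d + # retentionSize: "45GB" + + ## EXPERIMENTAL: Number of shards to distribute targets onto. + ## Number of replicas multiplied by shards is the total number of Pods created.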
+ ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved. + ## Increasing shards will not reshard data either but it will continue to be available from the same instances. + ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location. + ## Sharding is done on the content of the `__address__` target meta-label. + ## + shards: 1 + + ## Log level for Prometheus + ## + logLevel: info + + ## Log format for Prometheus + ## + logFormat: logfmt + + ## Prefix used to register routes, overriding externalUrl route. + ## Useful for proxies that rewrite URLs. + ## + routePrefix: / + + ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Metadata Labels and Annotations get propagated to the prometheus pods. + ## + podMetadata: {} + # labels: + # app: prometheus + # k8s-app: prometheus + + ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. + ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. + podAntiAffinity: "" + + ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the prometheus instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## The remote_read spec configuration for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec + remoteRead: [] + # - url: http://remote1/read + ## additionalRemoteRead is appended to remoteRead + additionalRemoteRead: [] + + ## The remote_write spec configuration for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec + remoteWrite: [] + # - url: http://remote1/push + ## additionalRemoteWrite is appended to remoteWrite + additionalRemoteWrite: [] + + ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature + remoteWriteDashboards: false + + ## Resource limits & requests + ## + resources: {} + # requests: + # memory: 400Mi + + ## Prometheus StorageSpec for persistent data + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md + ## + storageSpec: + ## Using PersistentVolumeClaim + ## + volumeClaimTemplate: + spec: + resources: + requests: + storage: 50Gi + selector: + matchLabels: + directory: prometheus + + ## Using tmpfs volume + ## + # emptyDir: + # medium: Memory + + # Additional volumes on the output StatefulSet definition. + volumes: []
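+ + ## Sketch only (names are hypothetical): an extra volume, e.g. to surface additional CA certificates, paired with a matching entry under volumeMounts below: + # volumes: + #   - name: extra-ca-certs + #     configMap: + #       name: extra-ca-certs + + # Additional VolumeMounts on the output StatefulSet definition.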
+ volumeMounts: [] + + ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations + ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form + ## as specified in the official Prometheus documentation: + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are + ## appended, the user is responsible for making sure they are valid. Note that using this feature may expose the possibility + ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible + ## scrape configs are going to break Prometheus after the upgrade. + ## AdditionalScrapeConfigs can be defined as a list or as a templated string. + ## + ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the + ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes + ## + additionalScrapeConfigs: [] + # - job_name: kube-etcd + # kubernetes_sd_configs: + # - role: node + # scheme: https + # tls_config: + # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client + # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + # relabel_configs: + # - action: labelmap + # regex: __meta_kubernetes_node_label_(.+) + # - source_labels: [__address__] + # action: replace + # target_label: __address__ + # regex: ([^:;]+):(\d+) + # replacement: ${1}:2379 + # - source_labels: [__meta_kubernetes_node_name] + # action: keep + # regex: .*mst.* + # - source_labels: [__meta_kubernetes_node_name] + # action: replace + # target_label: node + # regex: (.*) + # replacement: ${1} + # metric_relabel_configs: + # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + # action: labeldrop + # + ## If scrape config contains a repetitive section, you may want to use a template. + ## In the following example, you can see how to define `gce_sd_configs` for multiple zones + # additionalScrapeConfigs: | + # - job_name: "node-exporter" + # gce_sd_configs: + # {{range $zone := .Values.gcp_zones}} + # - project: "project1" + # zone: "{{$zone}}" + # port: 9100 + # {{end}} + # relabel_configs: + # ... + + + ## If additional scrape configurations are already deployed in a single secret file you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalScrapeConfigs + additionalScrapeConfigsSecret: {} + # enabled: false + # name: + # key: + + ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful + ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false' + additionalPrometheusSecretsAnnotations: {} + + ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified + ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. + ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. + ## As AlertManager configs are appended, the user is responsible for making sure they are valid.
Note that using this + ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release + ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. + ## + additionalAlertManagerConfigs: [] + # - consul_sd_configs: + # - server: consul.dev.test:8500 + # scheme: http + # datacenter: dev + # tag_separator: ',' + # services: + # - metrics-prometheus-alertmanager + + ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage + ## them separately from the helm deployment, you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalAlertManagerConfigs + additionalAlertManagerConfigsSecret: {} + # name: + # key: + + ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended + ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the + ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature may expose the + ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel + ## configs are going to break Prometheus after the upgrade. + ## + additionalAlertRelabelConfigs: [] + # - separator: ; + # regex: prometheus_replica + # replacement: $1 + # action: labeldrop + + ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage + ## them separately from the helm deployment, you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalAlertRelabelConfigs + additionalAlertRelabelConfigsSecret: {} + # name: + # key: + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to a non-root user with uid 1000 and gid 2000. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. + ## This section is experimental; it may change significantly without deprecation notice in any release. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec + ## + thanos: {} + # secretProviderClass: + # provider: gcp + # parameters: + # secrets: | + # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest" + # fileName: "objstore.yaml" + # objectStorageConfigFile: /var/secrets/object-store.yaml + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. + ## If using a proxy extraContainer, update targetPort with the proxy container port. + containers: []
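+ + ## For illustration only: a hypothetical oauth2-proxy sidecar injected via `containers`; the image tag and port are placeholders, and the service targetPort above would need to match the proxy port: + # containers: + #   - name: oauth-proxy + #     image: quay.io/oauth2-proxy/oauth2-proxy:v7.3.0 + #     ports: + #       - containerPort: 4180 + #         name: proxy + + ## InitContainers allows injecting additional initContainers.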
This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## PortName to use for Prometheus. + ## + portName: "http-web" + + ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files + ## on the file system of the Prometheus container e.g. bearer token files. + arbitraryFSAccessThroughSMs: false + + ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor + ## or PodMonitor to true, this overrides honor_labels to false. + overrideHonorLabels: false + + ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. + overrideHonorTimestamps: false + + ## IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from the podmonitor and servicemonitor + ## configs, and they will only discover endpoints within their current namespace. Defaults to false. + ignoreNamespaceSelectors: false + + ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created. + ## The label value will always be the namespace of the object that is being created. + ## Disabled by default + enforcedNamespaceLabel: "" + + ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels. + ## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair + ## Deprecated, use `excludedFromEnforcement` instead + prometheusRulesExcludedFromEnforce: [] + + ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects + ## to be excluded from enforcing a namespace label of origin. + ## Works only if enforcedNamespaceLabel set to true. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference + excludedFromEnforcement: [] + + ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable, + ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such + ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions + ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/) + queryLogFile: false + + ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This overrides any SampleLimit + ## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall + ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead. + enforcedSampleLimit: false + + ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set + ## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall + ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except + ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced. + enforcedTargetLimit: false
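+ + ## A sketch of the enforced limits above with purely illustrative numbers (leaving a value at false keeps that limit unenforced): + # enforcedSampleLimit: 100000 + # enforcedTargetLimit: 500 + + ## Per-scrape limit on number of labels that will be accepted for a sample.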
If more than this number of labels are present + ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions + ## 2.27.0 and newer. + enforcedLabelLimit: false + + ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number + ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions + ## 2.27.0 and newer. + enforcedLabelNameLengthLimit: false + + ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this + ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus + ## versions 2.27.0 and newer. + enforcedLabelValueLengthLimit: false + + ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental + ## in Prometheus, so it may change in any upcoming release. + allowOverlappingBlocks: false + + additionalRulesForClusterRole: [] + # - apiGroups: [ "" ] + # resources: + # - nodes/proxy + # verbs: [ "get", "list", "watch" ] + + additionalServiceMonitors: [] + ## Name of the ServiceMonitor to create + ## + # - name: "" + + ## Additional labels to set, used for the ServiceMonitorSelector. Together with standard labels from + ## the chart + ## + # additionalLabels: {} + + ## Service label for use in assembling a job name of the form